From 94e87610c4ce9bbb1c614a61bab29c1422fed11b Mon Sep 17 00:00:00 2001 From: kim <89579420+NyaaaWhatsUpDoc@users.noreply.github.com> Date: Fri, 2 Aug 2024 11:46:41 +0000 Subject: [PATCH] [chore] add back exif-terminator and use only for jpeg,png,webp (#3161) * add back exif-terminator and use only for jpeg,png,webp * fix arguments passed to terminateExif() * pull in latest exif-terminator * fix test * update processed img --------- Co-authored-by: tobi --- go.mod | 11 + go.sum | 34 + internal/media/ffmpeg.go | 62 +- internal/media/manager_test.go | 2 +- internal/media/metadata.go | 96 + internal/media/processingmedia.go | 4 +- .../test/test-png-alphachannel-processed.png | Bin 18904 -> 18832 bytes internal/media/util.go | 19 - .../exif-terminator/LICENSE | 661 ++++++ .../exif-terminator/README.md | 122 ++ .../exif-terminator/exif.go | 53 + .../exif-terminator/jpeg.go | 132 ++ .../exif-terminator/png.go | 85 + .../exif-terminator/terminator.go | 146 ++ .../exif-terminator/webp.go | 101 + .../dsoprea/go-exif/v3/.MODULE_ROOT | 0 vendor/github.com/dsoprea/go-exif/v3/LICENSE | 9 + .../dsoprea/go-exif/v3/common/ifd.go | 651 ++++++ .../dsoprea/go-exif/v3/common/parser.go | 280 +++ .../go-exif/v3/common/testing_common.go | 88 + .../dsoprea/go-exif/v3/common/type.go | 482 +++++ .../dsoprea/go-exif/v3/common/utility.go | 148 ++ .../go-exif/v3/common/value_context.go | 464 +++++ .../go-exif/v3/common/value_encoder.go | 273 +++ .../dsoprea/go-exif/v3/data_layer.go | 50 + vendor/github.com/dsoprea/go-exif/v3/error.go | 14 + vendor/github.com/dsoprea/go-exif/v3/exif.go | 333 +++ vendor/github.com/dsoprea/go-exif/v3/gps.go | 117 ++ .../dsoprea/go-exif/v3/ifd_builder.go | 1199 +++++++++++ .../dsoprea/go-exif/v3/ifd_builder_encode.go | 532 +++++ .../dsoprea/go-exif/v3/ifd_enumerate.go | 1672 +++++++++++++++ .../dsoprea/go-exif/v3/ifd_tag_entry.go | 298 +++ .../github.com/dsoprea/go-exif/v3/package.go | 8 + vendor/github.com/dsoprea/go-exif/v3/tags.go | 475 +++++ .../dsoprea/go-exif/v3/tags_data.go | 968 +++++++++ .../dsoprea/go-exif/v3/testing_common.go | 188 ++ .../dsoprea/go-exif/v3/undefined/README.md | 4 + .../dsoprea/go-exif/v3/undefined/accessor.go | 62 + .../go-exif/v3/undefined/exif_8828_oecf.go | 148 ++ .../v3/undefined/exif_9000_exif_version.go | 69 + .../exif_9101_components_configuration.go | 124 ++ .../v3/undefined/exif_927C_maker_note.go | 114 ++ .../v3/undefined/exif_9286_user_comment.go | 142 ++ .../undefined/exif_A000_flashpix_version.go | 69 + .../exif_A20C_spatial_frequency_response.go | 160 ++ .../v3/undefined/exif_A300_file_source.go | 79 + .../v3/undefined/exif_A301_scene_type.go | 76 + .../v3/undefined/exif_A302_cfa_pattern.go | 97 + .../exif_iop_0002_interop_version.go | 69 + .../gps_001B_gps_processing_method.go | 65 + .../gps_001C_gps_area_information.go | 65 + .../go-exif/v3/undefined/registration.go | 42 + .../dsoprea/go-exif/v3/undefined/type.go | 44 + .../github.com/dsoprea/go-exif/v3/utility.go | 237 +++ .../github.com/dsoprea/go-iptc/.MODULE_ROOT | 0 vendor/github.com/dsoprea/go-iptc/LICENSE | 21 + vendor/github.com/dsoprea/go-iptc/README.md | 3 + vendor/github.com/dsoprea/go-iptc/standard.go | 99 + vendor/github.com/dsoprea/go-iptc/tag.go | 277 +++ .../dsoprea/go-iptc/testing_common.go | 70 + vendor/github.com/dsoprea/go-iptc/utility.go | 25 + .../github.com/dsoprea/go-logging/.travis.yml | 12 + vendor/github.com/dsoprea/go-logging/LICENSE | 9 + .../github.com/dsoprea/go-logging/README.md | 223 ++ .../github.com/dsoprea/go-logging/config.go | 246 +++ 
.../dsoprea/go-logging/console_adapter.go | 36 + vendor/github.com/dsoprea/go-logging/log.go | 537 +++++ .../go-photoshop-info-format/.MODULE_ROOT | 0 .../dsoprea/go-photoshop-info-format/LICENSE | 21 + .../go-photoshop-info-format/README.md | 3 + .../dsoprea/go-photoshop-info-format/info.go | 119 ++ .../testing_common.go | 70 + .../github.com/dsoprea/go-utility/v2/LICENSE | 7 + .../go-utility/v2/filesystem/README.md | 64 + .../go-utility/v2/filesystem/bounceback.go | 273 +++ .../filesystem/boundedreadwriteseekcloser.go | 95 + .../v2/filesystem/boundedreadwriteseeker.go | 156 ++ .../v2/filesystem/calculate_seek.go | 52 + .../go-utility/v2/filesystem/common.go | 15 + .../copy_bytes_between_positions.go | 40 + .../go-utility/v2/filesystem/does_exist.go | 19 + .../go-utility/v2/filesystem/graceful_copy.go | 54 + .../go-utility/v2/filesystem/list_files.go | 143 ++ .../v2/filesystem/progress_wrapper.go | 93 + .../go-utility/v2/filesystem/read_counter.go | 36 + .../v2/filesystem/readseeker_to_readerat.go | 63 + .../v2/filesystem/readwriteseekcloser.go | 29 + .../v2/filesystem/seekable_buffer.go | 146 ++ .../v2/filesystem/simplefileinfo.go | 69 + .../go-utility/v2/filesystem/utility.go | 17 + .../go-utility/v2/filesystem/write_counter.go | 36 + .../dsoprea/go-utility/v2/image/README.md | 9 + .../go-utility/v2/image/media_parser_type.go | 34 + .../github.com/go-errors/errors/.travis.yml | 7 + .../github.com/go-errors/errors/LICENSE.MIT | 7 + vendor/github.com/go-errors/errors/README.md | 69 + vendor/github.com/go-errors/errors/cover.out | 89 + vendor/github.com/go-errors/errors/error.go | 205 ++ .../github.com/go-errors/errors/error_1_13.go | 26 + .../go-errors/errors/error_backward.go | 22 + .../go-errors/errors/parse_panic.go | 127 ++ .../github.com/go-errors/errors/stackframe.go | 114 ++ vendor/github.com/go-xmlfmt/xmlfmt/LICENSE | 21 + vendor/github.com/go-xmlfmt/xmlfmt/README.md | 178 ++ vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go | 56 + vendor/github.com/golang/geo/LICENSE | 202 ++ vendor/github.com/golang/geo/r1/doc.go | 20 + vendor/github.com/golang/geo/r1/interval.go | 177 ++ vendor/github.com/golang/geo/r2/doc.go | 20 + vendor/github.com/golang/geo/r2/rect.go | 255 +++ vendor/github.com/golang/geo/r3/doc.go | 20 + .../github.com/golang/geo/r3/precisevector.go | 198 ++ vendor/github.com/golang/geo/r3/vector.go | 183 ++ vendor/github.com/golang/geo/s1/angle.go | 120 ++ vendor/github.com/golang/geo/s1/chordangle.go | 250 +++ vendor/github.com/golang/geo/s1/doc.go | 20 + vendor/github.com/golang/geo/s1/interval.go | 462 +++++ vendor/github.com/golang/geo/s2/bits_go18.go | 53 + vendor/github.com/golang/geo/s2/bits_go19.go | 39 + vendor/github.com/golang/geo/s2/cap.go | 519 +++++ vendor/github.com/golang/geo/s2/cell.go | 698 +++++++ vendor/github.com/golang/geo/s2/cellid.go | 942 +++++++++ vendor/github.com/golang/geo/s2/cellunion.go | 590 ++++++ vendor/github.com/golang/geo/s2/centroids.go | 133 ++ .../golang/geo/s2/contains_point_query.go | 190 ++ .../golang/geo/s2/contains_vertex_query.go | 63 + .../golang/geo/s2/convex_hull_query.go | 239 +++ .../golang/geo/s2/crossing_edge_query.go | 409 ++++ .../golang/geo/s2/distance_target.go | 149 ++ vendor/github.com/golang/geo/s2/doc.go | 29 + .../github.com/golang/geo/s2/edge_clipping.go | 672 ++++++ .../github.com/golang/geo/s2/edge_crosser.go | 227 +++ .../golang/geo/s2/edge_crossings.go | 396 ++++ .../golang/geo/s2/edge_distances.go | 408 ++++ vendor/github.com/golang/geo/s2/edge_query.go | 512 +++++ .../golang/geo/s2/edge_tessellator.go | 167 
++ vendor/github.com/golang/geo/s2/encode.go | 237 +++ vendor/github.com/golang/geo/s2/interleave.go | 143 ++ vendor/github.com/golang/geo/s2/latlng.go | 101 + vendor/github.com/golang/geo/s2/lexicon.go | 175 ++ vendor/github.com/golang/geo/s2/loop.go | 1816 +++++++++++++++++ vendor/github.com/golang/geo/s2/matrix3x3.go | 127 ++ .../golang/geo/s2/max_distance_targets.go | 306 +++ vendor/github.com/golang/geo/s2/metric.go | 164 ++ .../golang/geo/s2/min_distance_targets.go | 362 ++++ .../github.com/golang/geo/s2/nthderivative.go | 88 + vendor/github.com/golang/geo/s2/paddedcell.go | 252 +++ vendor/github.com/golang/geo/s2/point.go | 258 +++ .../golang/geo/s2/point_measures.go | 149 ++ .../github.com/golang/geo/s2/point_vector.go | 42 + .../golang/geo/s2/pointcompression.go | 319 +++ vendor/github.com/golang/geo/s2/polygon.go | 1212 +++++++++++ vendor/github.com/golang/geo/s2/polyline.go | 589 ++++++ .../golang/geo/s2/polyline_measures.go | 53 + vendor/github.com/golang/geo/s2/predicates.go | 701 +++++++ .../github.com/golang/geo/s2/projections.go | 203 ++ .../github.com/golang/geo/s2/query_options.go | 196 ++ vendor/github.com/golang/geo/s2/rect.go | 710 +++++++ .../github.com/golang/geo/s2/rect_bounder.go | 352 ++++ vendor/github.com/golang/geo/s2/region.go | 71 + .../github.com/golang/geo/s2/regioncoverer.go | 477 +++++ vendor/github.com/golang/geo/s2/shape.go | 263 +++ vendor/github.com/golang/geo/s2/shapeindex.go | 1507 ++++++++++++++ vendor/github.com/golang/geo/s2/shapeutil.go | 228 +++ .../golang/geo/s2/shapeutil_edge_iterator.go | 72 + vendor/github.com/golang/geo/s2/stuv.go | 427 ++++ vendor/github.com/golang/geo/s2/util.go | 125 ++ .../golang/geo/s2/wedge_relations.go | 97 + .../go-jpeg-image-structure/v2/.MODULE_ROOT | 0 .../go-jpeg-image-structure/v2/LICENSE | 9 + .../go-jpeg-image-structure/v2/README.md | 10 + .../go-jpeg-image-structure/v2/markers.go | 212 ++ .../v2/media_parser.go | 139 ++ .../go-jpeg-image-structure/v2/segment.go | 352 ++++ .../v2/segment_list.go | 416 ++++ .../go-jpeg-image-structure/v2/splitter.go | 437 ++++ .../v2/testing_common.go | 73 + .../go-jpeg-image-structure/v2/utility.go | 110 + .../go-png-image-structure/v2/.MODULE_ROOT | 0 .../go-png-image-structure/v2/LICENSE | 9 + .../v2/chunk_decoder.go | 81 + .../go-png-image-structure/v2/media_parser.go | 85 + .../go-png-image-structure/v2/png.go | 386 ++++ .../v2/testing_common.go | 77 + .../go-png-image-structure/v2/utility.go | 67 + vendor/golang.org/x/net/context/context.go | 56 + vendor/golang.org/x/net/context/go17.go | 72 + vendor/golang.org/x/net/context/go19.go | 20 + vendor/golang.org/x/net/context/pre_go17.go | 300 +++ vendor/golang.org/x/net/context/pre_go19.go | 109 + vendor/modules.txt | 41 + 191 files changed, 38772 insertions(+), 58 deletions(-) create mode 100644 internal/media/metadata.go create mode 100644 vendor/codeberg.org/superseriousbusiness/exif-terminator/LICENSE create mode 100644 vendor/codeberg.org/superseriousbusiness/exif-terminator/README.md create mode 100644 vendor/codeberg.org/superseriousbusiness/exif-terminator/exif.go create mode 100644 vendor/codeberg.org/superseriousbusiness/exif-terminator/jpeg.go create mode 100644 vendor/codeberg.org/superseriousbusiness/exif-terminator/png.go create mode 100644 vendor/codeberg.org/superseriousbusiness/exif-terminator/terminator.go create mode 100644 vendor/codeberg.org/superseriousbusiness/exif-terminator/webp.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/.MODULE_ROOT create mode 100644 
vendor/github.com/dsoprea/go-exif/v3/LICENSE create mode 100644 vendor/github.com/dsoprea/go-exif/v3/common/ifd.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/common/parser.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/common/type.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/common/utility.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/common/value_context.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/data_layer.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/error.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/exif.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/gps.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/ifd_builder.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/ifd_builder_encode.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/ifd_enumerate.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/ifd_tag_entry.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/package.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/tags.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/tags_data.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/testing_common.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/README.md create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/accessor.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_8828_oecf.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9000_exif_version.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9101_components_configuration.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_927C_maker_note.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9286_user_comment.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A000_flashpix_version.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A20C_spatial_frequency_response.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A300_file_source.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A301_scene_type.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A302_cfa_pattern.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/exif_iop_0002_interop_version.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001B_gps_processing_method.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001C_gps_area_information.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/registration.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/undefined/type.go create mode 100644 vendor/github.com/dsoprea/go-exif/v3/utility.go create mode 100644 vendor/github.com/dsoprea/go-iptc/.MODULE_ROOT create mode 100644 vendor/github.com/dsoprea/go-iptc/LICENSE create mode 100644 vendor/github.com/dsoprea/go-iptc/README.md create mode 100644 vendor/github.com/dsoprea/go-iptc/standard.go create mode 100644 vendor/github.com/dsoprea/go-iptc/tag.go create mode 100644 vendor/github.com/dsoprea/go-iptc/testing_common.go create mode 100644 vendor/github.com/dsoprea/go-iptc/utility.go create mode 100644 vendor/github.com/dsoprea/go-logging/.travis.yml create mode 100644 
vendor/github.com/dsoprea/go-logging/LICENSE create mode 100644 vendor/github.com/dsoprea/go-logging/README.md create mode 100644 vendor/github.com/dsoprea/go-logging/config.go create mode 100644 vendor/github.com/dsoprea/go-logging/console_adapter.go create mode 100644 vendor/github.com/dsoprea/go-logging/log.go create mode 100644 vendor/github.com/dsoprea/go-photoshop-info-format/.MODULE_ROOT create mode 100644 vendor/github.com/dsoprea/go-photoshop-info-format/LICENSE create mode 100644 vendor/github.com/dsoprea/go-photoshop-info-format/README.md create mode 100644 vendor/github.com/dsoprea/go-photoshop-info-format/info.go create mode 100644 vendor/github.com/dsoprea/go-photoshop-info-format/testing_common.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/LICENSE create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/README.md create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/bounceback.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseekcloser.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseeker.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/calculate_seek.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/common.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/copy_bytes_between_positions.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/does_exist.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/graceful_copy.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/list_files.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/progress_wrapper.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/read_counter.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/readseeker_to_readerat.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/readwriteseekcloser.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/seekable_buffer.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/simplefileinfo.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/utility.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/filesystem/write_counter.go create mode 100644 vendor/github.com/dsoprea/go-utility/v2/image/README.md create mode 100644 vendor/github.com/dsoprea/go-utility/v2/image/media_parser_type.go create mode 100644 vendor/github.com/go-errors/errors/.travis.yml create mode 100644 vendor/github.com/go-errors/errors/LICENSE.MIT create mode 100644 vendor/github.com/go-errors/errors/README.md create mode 100644 vendor/github.com/go-errors/errors/cover.out create mode 100644 vendor/github.com/go-errors/errors/error.go create mode 100644 vendor/github.com/go-errors/errors/error_1_13.go create mode 100644 vendor/github.com/go-errors/errors/error_backward.go create mode 100644 vendor/github.com/go-errors/errors/parse_panic.go create mode 100644 vendor/github.com/go-errors/errors/stackframe.go create mode 100644 vendor/github.com/go-xmlfmt/xmlfmt/LICENSE create mode 100644 vendor/github.com/go-xmlfmt/xmlfmt/README.md create mode 100644 vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go create mode 100644 vendor/github.com/golang/geo/LICENSE create mode 100644 vendor/github.com/golang/geo/r1/doc.go create mode 100644 vendor/github.com/golang/geo/r1/interval.go create mode 100644 
vendor/github.com/golang/geo/r2/doc.go create mode 100644 vendor/github.com/golang/geo/r2/rect.go create mode 100644 vendor/github.com/golang/geo/r3/doc.go create mode 100644 vendor/github.com/golang/geo/r3/precisevector.go create mode 100644 vendor/github.com/golang/geo/r3/vector.go create mode 100644 vendor/github.com/golang/geo/s1/angle.go create mode 100644 vendor/github.com/golang/geo/s1/chordangle.go create mode 100644 vendor/github.com/golang/geo/s1/doc.go create mode 100644 vendor/github.com/golang/geo/s1/interval.go create mode 100644 vendor/github.com/golang/geo/s2/bits_go18.go create mode 100644 vendor/github.com/golang/geo/s2/bits_go19.go create mode 100644 vendor/github.com/golang/geo/s2/cap.go create mode 100644 vendor/github.com/golang/geo/s2/cell.go create mode 100644 vendor/github.com/golang/geo/s2/cellid.go create mode 100644 vendor/github.com/golang/geo/s2/cellunion.go create mode 100644 vendor/github.com/golang/geo/s2/centroids.go create mode 100644 vendor/github.com/golang/geo/s2/contains_point_query.go create mode 100644 vendor/github.com/golang/geo/s2/contains_vertex_query.go create mode 100644 vendor/github.com/golang/geo/s2/convex_hull_query.go create mode 100644 vendor/github.com/golang/geo/s2/crossing_edge_query.go create mode 100644 vendor/github.com/golang/geo/s2/distance_target.go create mode 100644 vendor/github.com/golang/geo/s2/doc.go create mode 100644 vendor/github.com/golang/geo/s2/edge_clipping.go create mode 100644 vendor/github.com/golang/geo/s2/edge_crosser.go create mode 100644 vendor/github.com/golang/geo/s2/edge_crossings.go create mode 100644 vendor/github.com/golang/geo/s2/edge_distances.go create mode 100644 vendor/github.com/golang/geo/s2/edge_query.go create mode 100644 vendor/github.com/golang/geo/s2/edge_tessellator.go create mode 100644 vendor/github.com/golang/geo/s2/encode.go create mode 100644 vendor/github.com/golang/geo/s2/interleave.go create mode 100644 vendor/github.com/golang/geo/s2/latlng.go create mode 100644 vendor/github.com/golang/geo/s2/lexicon.go create mode 100644 vendor/github.com/golang/geo/s2/loop.go create mode 100644 vendor/github.com/golang/geo/s2/matrix3x3.go create mode 100644 vendor/github.com/golang/geo/s2/max_distance_targets.go create mode 100644 vendor/github.com/golang/geo/s2/metric.go create mode 100644 vendor/github.com/golang/geo/s2/min_distance_targets.go create mode 100644 vendor/github.com/golang/geo/s2/nthderivative.go create mode 100644 vendor/github.com/golang/geo/s2/paddedcell.go create mode 100644 vendor/github.com/golang/geo/s2/point.go create mode 100644 vendor/github.com/golang/geo/s2/point_measures.go create mode 100644 vendor/github.com/golang/geo/s2/point_vector.go create mode 100644 vendor/github.com/golang/geo/s2/pointcompression.go create mode 100644 vendor/github.com/golang/geo/s2/polygon.go create mode 100644 vendor/github.com/golang/geo/s2/polyline.go create mode 100644 vendor/github.com/golang/geo/s2/polyline_measures.go create mode 100644 vendor/github.com/golang/geo/s2/predicates.go create mode 100644 vendor/github.com/golang/geo/s2/projections.go create mode 100644 vendor/github.com/golang/geo/s2/query_options.go create mode 100644 vendor/github.com/golang/geo/s2/rect.go create mode 100644 vendor/github.com/golang/geo/s2/rect_bounder.go create mode 100644 vendor/github.com/golang/geo/s2/region.go create mode 100644 vendor/github.com/golang/geo/s2/regioncoverer.go create mode 100644 vendor/github.com/golang/geo/s2/shape.go create mode 100644 
vendor/github.com/golang/geo/s2/shapeindex.go create mode 100644 vendor/github.com/golang/geo/s2/shapeutil.go create mode 100644 vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go create mode 100644 vendor/github.com/golang/geo/s2/stuv.go create mode 100644 vendor/github.com/golang/geo/s2/util.go create mode 100644 vendor/github.com/golang/geo/s2/wedge_relations.go create mode 100644 vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/.MODULE_ROOT create mode 100644 vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/LICENSE create mode 100644 vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/README.md create mode 100644 vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/markers.go create mode 100644 vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/media_parser.go create mode 100644 vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment.go create mode 100644 vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment_list.go create mode 100644 vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/splitter.go create mode 100644 vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/testing_common.go create mode 100644 vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/utility.go create mode 100644 vendor/github.com/superseriousbusiness/go-png-image-structure/v2/.MODULE_ROOT create mode 100644 vendor/github.com/superseriousbusiness/go-png-image-structure/v2/LICENSE create mode 100644 vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go create mode 100644 vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go create mode 100644 vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go create mode 100644 vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go create mode 100644 vendor/github.com/superseriousbusiness/go-png-image-structure/v2/utility.go create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go diff --git a/go.mod b/go.mod index 28a231368..e2da4d8d5 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( codeberg.org/gruf/go-sched v1.2.3 codeberg.org/gruf/go-storage v0.1.2 codeberg.org/gruf/go-structr v0.8.7 + codeberg.org/superseriousbusiness/exif-terminator v0.9.0 github.com/DmitriyVTitov/size v1.5.0 github.com/KimMachineGun/automemlimit v0.6.1 github.com/buckket/go-blurhash v1.1.0 @@ -107,11 +108,17 @@ require ( github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b // indirect + github.com/dsoprea/go-iptc v0.0.0-20200609062250-162ae6b44feb // indirect + github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd // indirect + github.com/dsoprea/go-photoshop-info-format v0.0.0-20200609050348-3db9b63b202c // indirect + github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect 
github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-errors/errors v1.1.1 // indirect github.com/go-fed/httpsig v1.1.0 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-jose/go-jose/v4 v4.0.2 // indirect @@ -131,9 +138,11 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.20.0 // indirect + github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/golang/geo v0.0.0-20200319012246-673a6f80352d // indirect github.com/gorilla/context v1.1.2 // indirect github.com/gorilla/css v1.0.1 // indirect github.com/gorilla/handlers v1.5.2 // indirect @@ -188,6 +197,8 @@ require ( github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.6.0 // indirect + github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe // indirect + github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB // indirect github.com/tdewolff/parse/v2 v2.7.15 // indirect github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect github.com/toqueteos/webbrowser v1.2.0 // indirect diff --git a/go.sum b/go.sum index 09860d9c1..51b086600 100644 --- a/go.sum +++ b/go.sum @@ -82,6 +82,8 @@ codeberg.org/gruf/go-storage v0.1.2 h1:dIOVOKq1CJpRmuhbB8Zok3mmo8V6VV/nX5GLIm6hy codeberg.org/gruf/go-storage v0.1.2/go.mod h1:LRDpFHqRJi0f+35c3ltBH2e/pGfwY5dGlNlgCJ/R1DA= codeberg.org/gruf/go-structr v0.8.7 h1:agYCI6tSXU4JHVYPwZk3Og5rrBePNVv5iPWsDu7ZJIw= codeberg.org/gruf/go-structr v0.8.7/go.mod h1:O0FTNgzUnUKwWey4dEW99QD8rPezKPi5sxCVxYOJ1Fg= +codeberg.org/superseriousbusiness/exif-terminator v0.9.0 h1:/EfyGI6HIrbkhFwgXGSjZ9o1kr/+k8v4mKdfXTH02Go= +codeberg.org/superseriousbusiness/exif-terminator v0.9.0/go.mod h1:gCWKduudUWFzsnixoMzu0FYVdxHWG+AbXnZ50DqxsUE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -147,6 +149,22 @@ github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1 github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dsoprea/go-exif/v2 v2.0.0-20200321225314-640175a69fe4/go.mod h1:Lm2lMM2zx8p4a34ZemkaUV95AnMl4ZvLbCUbwOvLC2E= +github.com/dsoprea/go-exif/v3 v3.0.0-20200717053412-08f1b6708903/go.mod h1:0nsO1ce0mh5czxGeLo4+OCZ/C6Eo6ZlMWsz7rH/Gxv8= +github.com/dsoprea/go-exif/v3 v3.0.0-20210428042052-dca55bf8ca15/go.mod h1:cg5SNYKHMmzxsr9X6ZeLh/nfBRHHp5PngtEPcujONtk= +github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b h1:NgNuLvW/gAFKU30ULWW0gtkCt56JfB7FrZ2zyo0wT8I= +github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b/go.mod h1:cg5SNYKHMmzxsr9X6ZeLh/nfBRHHp5PngtEPcujONtk= +github.com/dsoprea/go-iptc v0.0.0-20200609062250-162ae6b44feb h1:gwjJjUr6FY7zAWVEueFPrcRHhd9+IK81TcItbqw2du4= +github.com/dsoprea/go-iptc v0.0.0-20200609062250-162ae6b44feb/go.mod 
h1:kYIdx9N9NaOyD7U6D+YtExN7QhRm+5kq7//yOsRXQtM= +github.com/dsoprea/go-logging v0.0.0-20190624164917-c4f10aab7696/go.mod h1:Nm/x2ZUNRW6Fe5C3LxdY1PyZY5wmDv/s5dkPJ/VB3iA= +github.com/dsoprea/go-logging v0.0.0-20200517223158-a10564966e9d/go.mod h1:7I+3Pe2o/YSU88W0hWlm9S22W7XI1JFNJ86U0zPKMf8= +github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd h1:l+vLbuxptsC6VQyQsfD7NnEC8BZuFpz45PgY+pH8YTg= +github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd/go.mod h1:7I+3Pe2o/YSU88W0hWlm9S22W7XI1JFNJ86U0zPKMf8= +github.com/dsoprea/go-photoshop-info-format v0.0.0-20200609050348-3db9b63b202c h1:7j5aWACOzROpr+dvMtu8GnI97g9ShLWD72XIELMgn+c= +github.com/dsoprea/go-photoshop-info-format v0.0.0-20200609050348-3db9b63b202c/go.mod h1:pqKB+ijp27cEcrHxhXVgUUMlSDRuGJJp1E+20Lj5H0E= +github.com/dsoprea/go-utility v0.0.0-20200711062821-fab8125e9bdf/go.mod h1:95+K3z2L0mqsVYd6yveIv1lmtT3tcQQ3dVakPySffW8= +github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e h1:IxIbA7VbCNrwumIYjDoMOdf4KOSkMC6NJE4s8oRbE7E= +github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e/go.mod h1:uAzdkPTub5Y9yQwXe8W4m2XuP0tK4a9Q/dantD0+uaU= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -180,6 +198,10 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.0.2/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs= +github.com/go-errors/errors v1.1.1 h1:ljK/pL5ltg3qoN+OtN6yCv9HWSfMwxSx90GJCZQxYNg= +github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs= github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI= github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -232,6 +254,8 @@ github.com/go-swagger/go-swagger v0.31.0 h1:H8eOYQnY2u7vNKWDNykv2xJP3pBhRG/R+SOC github.com/go-swagger/go-swagger v0.31.0/go.mod h1:WSigRRWEig8zV6t6Sm8Y+EmUjlzA/HoaZJ5edupq7po= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= @@ -239,6 +263,9 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt v3.2.2+incompatible 
h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/geo v0.0.0-20200319012246-673a6f80352d h1:C/hKUcHT483btRbeGkrRjJz+Zbcj8audldIi9tRJDCc= +github.com/golang/geo v0.0.0-20200319012246-673a6f80352d/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -345,6 +372,7 @@ github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= @@ -507,6 +535,10 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/superseriousbusiness/activity v1.8.0-gts h1:CMSN1eZUwNfIX1DFo4YxRCzSeT4jmGoIdakt/ZuDkQM= github.com/superseriousbusiness/activity v1.8.0-gts/go.mod h1:AZw0Xb4Oju8rmaJCZ21gc5CPg47MmNgyac+Hx5jo8VM= +github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe h1:ksl2oCx/Qo8sNDc3Grb8WGKBM9nkvhCm25uvlT86azE= +github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe/go.mod h1:gH4P6gN1V+wmIw5o97KGaa1RgXB/tVpC2UNzijhg3E4= +github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB h1:8psprYSK1KdOSH7yQ4PbJq0YYaGQY+gzdW/B0ExDb/8= +github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB/go.mod h1:ymKGfy9kg4dIdraeZRAdobMS/flzLk3VcRPLpEWOAXg= github.com/superseriousbusiness/httpsig v1.2.0-SSB h1:BinBGKbf2LSuVT5+MuH0XynHN9f0XVshx2CTDtkaWj0= github.com/superseriousbusiness/httpsig v1.2.0-SSB/go.mod h1:+rxfATjFaDoDIVaJOTSP0gj6UrbicaYPEptvCLC9F28= github.com/superseriousbusiness/oauth2/v4 v4.3.2-SSB.0.20230227143000-f4900831d6c8 h1:nTIhuP157oOFcscuoK1kCme1xTeGIzztSw70lX9NrDQ= @@ -699,6 +731,7 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200320220750-118fecf932d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -934,6 +967,7 @@ gopkg.in/mcuadros/go-syslog.v2 v2.3.0/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/internal/media/ffmpeg.go b/internal/media/ffmpeg.go index e26ec78d7..72ee1bc33 100644 --- a/internal/media/ffmpeg.go +++ b/internal/media/ffmpeg.go @@ -21,7 +21,6 @@ import ( "context" "encoding/json" "errors" - "os" "path" "strconv" "strings" @@ -36,28 +35,21 @@ import ( "github.com/tetratelabs/wazero" ) -// ffmpegClearMetadata generates a copy (in-place) of input media with all metadata cleared. -func ffmpegClearMetadata(ctx context.Context, filepath string) error { - var outpath string - +// ffmpegClearMetadata generates a copy of input media with all metadata cleared. +// NOTE: given that we are not performing an encode, this only clears global-level metadata; +// any metadata encoded into the media stream itself will not be cleared. This is the best we +// can do without absolutely tanking performance by requiring transcodes :( +func ffmpegClearMetadata(ctx context.Context, outpath, inpath string) error { // Get directory from filepath. - dirpath := path.Dir(filepath) + dirpath := path.Dir(inpath) - // Generate cleaned output path MAINTAINING extension. - if i := strings.IndexByte(filepath, '.'); i != -1 { - outpath = filepath[:i] + "_cleaned" + filepath[i:] - } else { - return gtserror.New("input file missing extension") - } - - // Clear metadata with ffmpeg. - if err := ffmpeg(ctx, dirpath, + return ffmpeg(ctx, dirpath, // Only log errors. "-loglevel", "error", // Input file path. - "-i", filepath, + "-i", inpath, // Drop all metadata. "-map_metadata", "-1", @@ -71,16 +63,7 @@ func ffmpegClearMetadata(ctx context.Context, filepath string) error { // Output. outpath, - ); err != nil { - return err - } - - // Move the new output file path to original location. - if err := os.Rename(outpath, filepath); err != nil { - return gtserror.Newf("error renaming %s -> %s: %w", outpath, filepath, err) - } - - return nil + ) } // ffmpegGenerateThumb generates a thumbnail webp from input media of any type, useful for any media. @@ -390,18 +373,33 @@ func (res *result) GetFileType() (gtsmodel.FileType, string) { // ImageMeta extracts image metadata contained within ffprobe'd media result streams. func (res *result) ImageMeta() (width int, height int, framerate float32) { for _, stream := range res.video { + // Use widest found width. if stream.width > width { width = stream.width } + + // Use tallest found height. if stream.height > height { height = stream.height } + + // Use lowest non-zero (valid) framerate. if fr := float32(stream.framerate); fr > 0 { if framerate == 0 || fr < framerate { framerate = fr } } } + + // If image is rotated by + // any odd multiples of 90, + // flip width / height to + // get the correct scale. + switch res.rotation { + case -90, 90, -270, 270: + width, height = height, width + } + return } @@ -486,14 +484,6 @@ func (res *ffprobeResult) Process() (*result, error) { stream: stream{codec: s.CodecName}, }) case "video": - // Determine proper display dimensions, - // taking account of rotation data. - width, height := displayDimensions( - s.Width, - s.Height, - r.rotation, - ) - // Parse stream framerate, bearing in // mind that some static container formats // (e.g. jpeg) still return a framerate, so @@ -521,8 +511,8 @@ func (res *ffprobeResult) Process() (*result, error) { // Append video stream data to result. r.video = append(r.video, videoStream{ stream: stream{codec: s.CodecName}, - width: width, - height: height, + width: s.Width, + height: s.Height, framerate: framerate, }) }
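[Editor's note on the ffmpeg.go hunk above: the wazero-hosted ffmpeg() helper receives the same argument vector a stock ffmpeg binary would, so the metadata strip is roughly equivalent to the following command line (file names here are hypothetical; the stream-copy flag lives in unchanged context between the hunks, so it is not visible above):

    ffmpeg -loglevel error -i input.mp4 -map_metadata -1 -codec copy input_cleaned.mp4

Because no re-encode happens, only container/global metadata is dropped, which is exactly the limitation the NOTE comment calls out.]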
diff --git a/internal/media/manager_test.go b/internal/media/manager_test.go index 68de74dd6..7a4c865f4 100644 --- a/internal/media/manager_test.go +++ b/internal/media/manager_test.go @@ -711,7 +711,7 @@ func (suite *ManagerTestSuite) TestPngAlphaChannelProcess() { }, attachment.FileMeta.Small) suite.Equal("image/png", attachment.File.ContentType) suite.Equal("image/webp", attachment.Thumbnail.ContentType) - suite.Equal(18904, attachment.File.FileSize) + suite.Equal(18832, attachment.File.FileSize) suite.Equal(2630, attachment.Thumbnail.FileSize) suite.Equal("LBOW$@%i-=aj%go#RSRP_1av~Tt2", attachment.Blurhash) diff --git a/internal/media/metadata.go b/internal/media/metadata.go new file mode 100644 index 000000000..3816b2826 --- /dev/null +++ b/internal/media/metadata.go @@ -0,0 +1,96 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +package media + +import ( + "context" + "os" + "strings" + + terminator "codeberg.org/superseriousbusiness/exif-terminator" + "github.com/superseriousbusiness/gotosocial/internal/gtserror" + "github.com/superseriousbusiness/gotosocial/internal/log" +) + +// clearMetadata performs our best attempt at cleaning metadata from +// input file. Depending on file type this may perform a full EXIF clean, +// or just a clean of global container level metadata records. +func clearMetadata(ctx context.Context, filepath string) error { + var ext, outpath string + + // Generate cleaned output path MAINTAINING extension. + if i := strings.IndexByte(filepath, '.'); i != -1 { + outpath = filepath[:i] + "_cleaned" + filepath[i:] + ext = filepath[i+1:] + } else { + return gtserror.New("input file missing extension") + } + + switch ext { + case "jpeg", "png", "webp": + // For these few file types, we actually support + // cleaning exif data using a native Go library. + log.Debug(ctx, "cleaning with exif-terminator") + err := terminateExif(outpath, filepath, ext) + if err != nil { + return err + } + default: + // For all other types, best-effort clean with ffmpeg. + log.Debug(ctx, "cleaning with ffmpeg -map_metadata -1") + err := ffmpegClearMetadata(ctx, outpath, filepath) + if err != nil { + return err + } + } + + // Move the new output file path to original location. + if err := os.Rename(outpath, filepath); err != nil { + return gtserror.Newf("error renaming %s -> %s: %w", outpath, filepath, err) + } + + return nil +} + +// terminateExif cleans exif data from file at input path, into file +// at output path, using given file extension to determine cleaning. +func terminateExif(outpath, inpath string, ext string) error { + // Open input file at given path. + inFile, err := os.Open(inpath) + if err != nil { + return gtserror.Newf("error opening input file %s: %w", inpath, err) + } + + // Open output file at given path. + outFile, err := os.Create(outpath) + if err != nil { + return gtserror.Newf("error opening output file %s: %w", outpath, err) + } + + // Terminate EXIF data from 'inFile' -> 'outFile'. + err = terminator.TerminateInto(outFile, inFile, ext) + if err != nil { + return gtserror.Newf("error terminating exif data: %w", err) + } + + // Done with files. + _ = inFile.Close() + _ = outFile.Close() + + return nil +}
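[Editor's note: to show the new code path in isolation, here is a minimal, hypothetical standalone program exercising the same TerminateInto call that terminateExif wraps; the file names are invented for the example, and the signature is taken from its usage in the hunk above:

    package main

    import (
        "log"
        "os"

        terminator "codeberg.org/superseriousbusiness/exif-terminator"
    )

    func main() {
        // Hypothetical input path; the extension string passed below
        // ("jpeg", "png" or "webp") selects the cleaning strategy.
        in, err := os.Open("photo.jpeg")
        if err != nil {
            log.Fatal(err)
        }
        defer in.Close()

        // Hypothetical output path for the cleaned copy.
        out, err := os.Create("photo_cleaned.jpeg")
        if err != nil {
            log.Fatal(err)
        }
        defer out.Close()

        // Stream the input into the output with EXIF records removed.
        if err := terminator.TerminateInto(out, in, "jpeg"); err != nil {
            log.Fatal(err)
        }
    }

Note that the dispatch in clearMetadata keys on the literal file extension, so only files named with a "jpeg", "png" or "webp" extension take the exif-terminator path; anything else (including "jpg") falls through to ffmpeg, presumably because the processing pipeline normalizes extensions upstream.]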
diff --git a/internal/media/processingmedia.go b/internal/media/processingmedia.go index 5d1d47b97..504cda11e 100644 --- a/internal/media/processingmedia.go +++ b/internal/media/processingmedia.go @@ -203,8 +203,8 @@ func (p *ProcessingMedia) store(ctx context.Context) error { switch p.media.Type { case gtsmodel.FileTypeImage, gtsmodel.FileTypeVideo: - // Pass file through ffmpeg clearing metadata (e.g. EXIF). - if err := ffmpegClearMetadata(ctx, temppath); err != nil { + // Attempt to clean as much metadata from file as possible. + if err := clearMetadata(ctx, temppath); err != nil { return gtserror.Newf("error cleaning metadata: %w", err) } diff --git a/internal/media/test/test-png-alphachannel-processed.png b/internal/media/test/test-png-alphachannel-processed.png index cb3857e9c9b7d79ff6500f8facd0c35b9c411858..164507cedd6a2e9774fc5e9ffe71041075700e22 100644 GIT binary patch delta 38 qcmcaHnQ_8o#tp38;(VzQo@u_m3|b5f3>-iV0+)g(Z%*MZu>%0XE(tmS delta 110 zcmbO*neoPC#tp38@nNYEo@u_m3|b5f3>*xM?U@WLKo%nqO93$h1M{Rd26iAV1jH^2 pnBcOz7BIuvyFm&agPlD+fpl;NBT6qo=2 diff --git a/internal/media/util.go b/internal/media/util.go index 7e84b4cdc..fa5c2bfd6 100644 --- a/internal/media/util.go +++ b/internal/media/util.go @@ -35,25 +35,6 @@ import ( "github.com/disintegration/imaging" ) -// displayDimensions takes account of the -// given rotation data to return width and -// height values as the image will be displayed. -func displayDimensions( - width, height int, - rotation int, -) (int, int) { - // If image is rotated by - // any odd multiples of 90, - // flip width / height to - // get the correct scale. - switch rotation { - case -90, 90, -270, 270: - width, height = height, width - } - - return width, height -} - // thumbSize returns the dimensions to use for an input // image of given width / height, for its outgoing thumbnail. // This attempts to maintain the original image aspect ratio. diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/LICENSE b/vendor/codeberg.org/superseriousbusiness/exif-terminator/LICENSE new file mode 100644 index 000000000..dba13ed2d --- /dev/null +++ b/vendor/codeberg.org/superseriousbusiness/exif-terminator/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/README.md b/vendor/codeberg.org/superseriousbusiness/exif-terminator/README.md
new file mode 100644
index 000000000..ddb97fbb6
--- /dev/null
+++ b/vendor/codeberg.org/superseriousbusiness/exif-terminator/README.md
@@ -0,0 +1,122 @@
+# exif-terminator
+
+`exif-terminator` removes exif data from images (jpeg, png, and webp currently supported) in a streaming manner.
All you need to do is provide a reader of the image in, and exif-terminator will provide a reader of the image out. + +Hasta la vista, baby! + +```text + .,lddxococ. + ..',lxO0Oo;'. + . .. .,coodO0klc:. + .,. ..','. .. .,..'. .':llxKXk' + .;c:cc;;,... .''.,l:cc. .....:l:,,:oo:.. + .,:ll'. .,;cox0OxOKKXX0kOOxlcld0X0d;,,,'. + .:xkl. .':cdKNWWWWMMMMMMMMMMWWNXK0KWNd. + .coxo,..:ollk0KKXNWMMMMMMMMMMWWXXXOoOM0; + ,oc,. .;cloxOKXXWWMMMMMMMMMMMWNXk;;OWO' + . ..;cdOKXNNWWMMMMMMMMMMMMWO,,ONO' + ...... ....;okOO000XWWMMMMMMMMMWXx;,ONNx. +.;c;. .:l'ckl. ..';looooolldolloooodolcc:;'.;oo:. +.oxl. ;:..OO. .. .. .,' .;. +.oko. .cc.'Ok. .:; .:,..';. +.cdc. .;;lc.,Ox. . .',,'..','. .dN0; .. .c:,,':. +.:oc. ,dxkl.,0x. . .. . .oNMMKc.. ...:l. +.:o:. cKXKl.,Ox. .. .lKWMMMXo,. ...''. +.:l; c0KKo.,0x. ...........';:lk0OKNNXKkl,..,;cxd' +.::' ;k00l.;0d. .. .,cloooddddxxddol;:ddloxdc,:odOWNc +.;,. ,ONKc.;0d. 'l,.. .:clllllllokKOl::cllclkKx'.lolxx' +.,. '0W0:.;0d. .:l,. .,:ccc:::oOXNXOkxdook0NWNx,,;c;. +... .kX0c.;0d. .loc' .,::;;;;lk0kddoooooddooO0o',ld; +.. .oOkk:cKd. .... .;:,',;cxK0o::ldkOkkOkxod:';oKx. +.. :dlOolKO, '::'.';:oOK0xdddoollooxOx::ccOx. +.. ';:o,.xKo. .,;'...';lddolooodkkkdol:,::lc. +.. ...:..oOl. ........';:codxxOXKKKk;':;:kl +.. .,..lOc. .. ....,codxkxxxxxo:,,;lKO. .,;'.. +... .. ck: ';,'. .;:cllloc,;;;colOK; .;odxxoc;. +...,.... . :x; .;:cc;'. .,;::c:'..,kXk:xNc .':oook00x:. + . cKx. .'.. ':clllc,...'';:::cc:;.,kOo:xNx. .'codddoox + .. ,xxl;',col:;. .:cccccc;;;:lxkkOOkdc,,lolcxWO' ;kNKc.' + .,. .c' ':dkO0O; .. .;ccccccc:::cldxkxoll:;oolcdN0:.. .xWNk; + .:' .c',xXNKkOXo .,. .,:cccccllc::lloooolc:;lo:;oXKc,::. .kWWX + ,' .cONMWMWkco, ', .';::ccclolc:llolollcccodo;:KXl..cl,. ;KWN + '. .xWWWWMKc;; ....;' ',;::::coolclloooollc:,:o;;0Xx, .,:;... ,0Ko + . ,kKNWWXd,cdd0NXKk:,;;;'';::::coollllllllllc;;ccl0Nkc. ..';loOx' + 'lxXWMXOOXNMMMMWWNNNWXkc;;;;;:cllccccccccc::lllkNWXd,. .cxO0Ol' + ,xKNWWXkkXWM0dxKNWWWMWNX0OOkl;;:c::cccc:,...:oONMMXOo;. :kOkOkl; + .;,;:;...,::. .;lokXKKNMMMWNOc,;;;,::;'...lOKNWNKkol:,..cKdcO0do + .:;... .. .,:okO0KNN0:.',,''''. ':xNMWKkxxOKXd,.cNk,:l:o +``` + +## Why? + +Exif removal is a pain in the arse. Most other libraries seem to parse the whole image into memory, then remove the exif data, then encode the image again. + +`exif-terminator` differs in that it removes exif data *while scanning through the image bytes*, and it doesn't do any reencoding of the image. Bytes of exif data are simply all set to 0, and the image data is piped back out again into the returned reader. + +The only exception is orientation data: if an image contains orientation data, this and only this data will be preserved since it's *actually useful*. + +## Example + +You can run the following example with `go run ./example/main.go`: + +```go +package main + +import ( + "io" + "os" + + terminator "codeberg.org/superseriousbusiness/exif-terminator" +) + +func main() { + // open a file + sloth, err := os.Open("./images/sloth.jpg") + if err != nil { + panic(err) + } + defer sloth.Close() + + // get the length of the file + stat, err := sloth.Stat() + if err != nil { + panic(err) + } + + // terminate! 
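+	// (Terminate here takes just the reader and the media type;
+	// see the signature in terminator.go)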
+	_ = stat // the size from stat above is no longer needed by Terminate
+	out, err := terminator.Terminate(sloth, "jpeg")
+	if err != nil {
+		panic(err)
+	}
+
+	// read the bytes from the reader
+	b, err := io.ReadAll(out)
+	if err != nil {
+		panic(err)
+	}
+
+	// save the file somewhere
+	if err := os.WriteFile("./images/sloth-clean.jpg", b, 0666); err != nil {
+		panic(err)
+	}
+}
+```
+
+## Credits
+
+### Libraries
+
+`exif-terminator` borrows heavily from the [`dsoprea`](https://github.com/dsoprea) libraries credited below. In fact, it's basically a hack on top of those libraries. Thanks `dsoprea`!
+
+- [dsoprea/go-exif](https://github.com/dsoprea/go-exif): exif header reconstruction. [MIT License](https://spdx.org/licenses/MIT.html).
+- [dsoprea/go-jpeg-image-structure](https://github.com/dsoprea/go-jpeg-image-structure): jpeg structure parsing. [MIT License](https://spdx.org/licenses/MIT.html).
+- [dsoprea/go-png-image-structure](https://github.com/dsoprea/go-png-image-structure): png structure parsing. [MIT License](https://spdx.org/licenses/MIT.html).
+- [stretchr/testify](https://github.com/stretchr/testify): test framework. [MIT License](https://spdx.org/licenses/MIT.html).
+
+## License
+
+![the gnu AGPL logo](https://www.gnu.org/graphics/agplv3-155x51.png)
+
+`exif-terminator` is free software, licensed under the [GNU AGPL v3 LICENSE](LICENSE).
+
+Copyright (C) 2022-2024 SuperSeriousBusiness.
diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/exif.go b/vendor/codeberg.org/superseriousbusiness/exif-terminator/exif.go
new file mode 100644
index 000000000..02dacc654
--- /dev/null
+++ b/vendor/codeberg.org/superseriousbusiness/exif-terminator/exif.go
@@ -0,0 +1,53 @@
+/*
+   exif-terminator
+   Copyright (C) 2022 SuperSeriousBusiness <admin@gotosocial.org>
+
+   This program is free software: you can redistribute it and/or modify
+   it under the terms of the GNU Affero General Public License as published by
+   the Free Software Foundation, either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU Affero General Public License for more details.
+
+   You should have received a copy of the GNU Affero General Public License
+   along with this program. If not, see <https://www.gnu.org/licenses/>.
+*/ + +package terminator + +import ( + "strings" + + exif "github.com/dsoprea/go-exif/v3" + exifcommon "github.com/dsoprea/go-exif/v3/common" +) + +type withEXIF interface { + Exif() (rootIfd *exif.Ifd, data []byte, err error) + SetExif(ib *exif.IfdBuilder) (err error) +} + +func terminateEXIF(data withEXIF) error { + ifd, _, err := data.Exif() + if err != nil { + if strings.Contains(err.Error(), "no exif data") { + err = nil + } + return err + } + + ifdb := exif.NewIfdBuilderFromExistingChain(ifd) + orientation, _ := ifdb.FindTagWithName("Orientation") + + im, ti := exifcommon.NewIfdMapping(), exif.NewTagIndex() + ifdb = exif.NewIfdBuilder(im, ti, ifd.IfdIdentity(), ifd.ByteOrder()) + + if orientation != nil { + ifdb.Add(orientation) + } + + return data.SetExif(ifdb) +} diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/jpeg.go b/vendor/codeberg.org/superseriousbusiness/exif-terminator/jpeg.go new file mode 100644 index 000000000..3c8b7035f --- /dev/null +++ b/vendor/codeberg.org/superseriousbusiness/exif-terminator/jpeg.go @@ -0,0 +1,132 @@ +/* + exif-terminator + Copyright (C) 2022 SuperSeriousBusiness admin@gotosocial.org + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . +*/ + +package terminator + +import ( + "encoding/binary" + "fmt" + "io" + + jpegstructure "github.com/superseriousbusiness/go-jpeg-image-structure/v2" +) + +var markerLen = map[byte]int{ + 0x00: 0, + 0x01: 0, + 0xd0: 0, + 0xd1: 0, + 0xd2: 0, + 0xd3: 0, + 0xd4: 0, + 0xd5: 0, + 0xd6: 0, + 0xd7: 0, + 0xd8: 0, + 0xd9: 0, + 0xda: 0, + + // J2C + 0x30: 0, + 0x31: 0, + 0x32: 0, + 0x33: 0, + 0x34: 0, + 0x35: 0, + 0x36: 0, + 0x37: 0, + 0x38: 0, + 0x39: 0, + 0x3a: 0, + 0x3b: 0, + 0x3c: 0, + 0x3d: 0, + 0x3e: 0, + 0x3f: 0, + 0x4f: 0, + 0x92: 0, + 0x93: 0, + + // J2C extensions + 0x74: 4, + 0x75: 4, + 0x77: 4, +} + +type jpegVisitor struct { + js *jpegstructure.JpegSplitter + writer io.Writer +} + +// HandleSegment satisfies the visitor interface{} of the jpegstructure library. +// +// We don't really care about many of the parameters, since all we're interested +// in here is the very last segment that was scanned. +func (v *jpegVisitor) HandleSegment(segmentMarker byte, _ string, _ int, _ bool) error { + segmentList := v.js.Segments() + segments := segmentList.Segments() + mostRecentSegment := segments[len(segments)-1] + return v.writeSegment(mostRecentSegment) +} + +func (v *jpegVisitor) writeSegment(s *jpegstructure.Segment) error { + w := v.writer + + defer func() { + // whatever happens, when we finished then evict data from the segment; + // once we've written it we don't want it in memory anymore + s.Data = nil + }() + + if s.IsExif() { + // Segment contains exif data, terminate! + if err := terminateEXIF(s); err != nil { + return err + } + } + + // The scan-data will have a marker-ID of (0) because it doesn't have a marker-ID or length. 
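+	// Every other segment is written back out as marker, then length
+	// (where the marker type calls for one), then payload. Standard
+	// segment lengths are two big-endian bytes and include the length
+	// field itself; the J2C extension markers in markerLen above use
+	// four bytes, and markers listed with length 0 carry no length
+	// field at all.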
+ if s.MarkerId != 0 { + _, err := w.Write([]byte{0xff, s.MarkerId}) + if err != nil { + return err + } + + sizeLen, found := markerLen[s.MarkerId] + if !found || sizeLen == 2 { + sizeLen = 2 + l := uint16(len(s.Data) + sizeLen) + + if err := binary.Write(w, binary.BigEndian, &l); err != nil { + return err + } + + } else if sizeLen == 4 { + l := uint32(len(s.Data) + sizeLen) + + if err := binary.Write(w, binary.BigEndian, &l); err != nil { + return err + } + } else if sizeLen != 0 { + return fmt.Errorf("not a supported marker-size: MARKER-ID=(0x%02x) MARKER-SIZE-LEN=(%d)", s.MarkerId, sizeLen) + } + } + + _, err := w.Write(s.Data) + return err +} diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/png.go b/vendor/codeberg.org/superseriousbusiness/exif-terminator/png.go new file mode 100644 index 000000000..b8b94efb7 --- /dev/null +++ b/vendor/codeberg.org/superseriousbusiness/exif-terminator/png.go @@ -0,0 +1,85 @@ +/* + exif-terminator + Copyright (C) 2022 SuperSeriousBusiness admin@gotosocial.org + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . +*/ + +package terminator + +import ( + "io" + + pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2" +) + +type pngVisitor struct { + ps *pngstructure.PngSplitter + writer io.Writer + lastWrittenChunk int +} + +func (v *pngVisitor) split(data []byte, atEOF bool) (int, []byte, error) { + // execute the ps split function to read in data + advance, token, err := v.ps.Split(data, atEOF) + if err != nil { + return advance, token, err + } + + // if we haven't written anything at all yet, then + // write the png header back into the writer first + if v.lastWrittenChunk == -1 { + if _, err := v.writer.Write(pngstructure.PngSignature[:]); err != nil { + return advance, token, err + } + } + + // Check if the splitter now has + // any new chunks in it for us. + chunkSlice, err := v.ps.Chunks() + if err != nil { + return advance, token, err + } + + // Write each chunk by passing it + // through our custom write func, + // which strips out exif and fixes + // the CRC of each chunk. + chunks := chunkSlice.Chunks() + for i := v.lastWrittenChunk + 1; i < len(chunks); i++ { + chunk := chunks[i] + + if chunk.Type == pngstructure.EXifChunkType { + // Finally, some exif data! Terminate it!! + if err := terminateEXIF(chunkSlice); err != nil { + return advance, token, err + } + + // Update chunk crc. + chunk.UpdateCrc32() + } + + // Write this new chunk. + if _, err := chunk.WriteTo(v.writer); err != nil { + return advance, token, err + } + v.lastWrittenChunk = i + + // Zero data; here you + // go garbage collector. 
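+		// (safe: this chunk has already been written to the output above)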
+ chunk.Data = nil + } + + return advance, token, err +} diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/terminator.go b/vendor/codeberg.org/superseriousbusiness/exif-terminator/terminator.go new file mode 100644 index 000000000..5793b8599 --- /dev/null +++ b/vendor/codeberg.org/superseriousbusiness/exif-terminator/terminator.go @@ -0,0 +1,146 @@ +/* + exif-terminator + Copyright (C) 2022 SuperSeriousBusiness admin@gotosocial.org + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . +*/ + +package terminator + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + + jpegstructure "github.com/superseriousbusiness/go-jpeg-image-structure/v2" + pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2" +) + +func Terminate(in io.Reader, mediaType string) (io.Reader, error) { + // To avoid keeping too much stuff + // in memory we want to pipe data + // directly to the reader. + pr, pw := io.Pipe() + + // Setup scanner to terminate exif into pipe writer. + scanner, err := terminatingScanner(pw, in, mediaType) + if err != nil { + _ = pw.Close() + return nil, err + } + + go func() { + var err error + + defer func() { + // Always close writer, using returned + // scanner error (if any). If err is nil + // then the standard io.EOF will be used. + // (this will not overwrite existing). + pw.CloseWithError(err) + }() + + // Scan through input. + for scanner.Scan() { + } + + // Set error on return. + err = scanner.Err() + }() + + return pr, nil +} + +func TerminateInto(out io.Writer, in io.Reader, mediaType string) error { + // Setup scanner to terminate exif from 'in' to 'out'. + scanner, err := terminatingScanner(out, in, mediaType) + if err != nil { + return err + } + + // Scan through input. + for scanner.Scan() { + } + + // Return scan errors. + return scanner.Err() +} + +func terminatingScanner(out io.Writer, in io.Reader, mediaType string) (*bufio.Scanner, error) { + scanner := bufio.NewScanner(in) + + // 40mb buffer size should be enough + // to scan through most file chunks + // without running into issues, they're + // usually chunked smaller than this... + scanner.Buffer(nil, 40*1024*1024) + + switch mediaType { + case "image/jpeg", "jpeg", "jpg": + v := &jpegVisitor{ + writer: out, + } + + // Provide the visitor to the splitter so + // that it triggers on every section scan. + js := jpegstructure.NewJpegSplitter(v) + + // The visitor also needs to read back the + // list of segments: for this it needs to + // know what jpeg splitter it's attached to, + // so give it a pointer to the splitter. + v.js = js + + // Jpeg visitor's 'split' function + // satisfies bufio.SplitFunc{}. + scanner.Split(js.Split) + + case "image/webp", "webp": + // Webp visitor's 'split' function + // satisfies bufio.SplitFunc{}. 
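+		// It walks the RIFF container chunk by chunk, blanking
+		// the payloads of any EXIF/XMP chunks on the way through
+		// (see webp.go).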
+		scanner.Split((&webpVisitor{
+			writer: out,
+		}).split)
+
+	case "image/png", "png":
+		// For pngs we need to skip the header bytes, so read
+		// them in full and check we're really dealing with a png.
+		header := make([]byte, len(pngstructure.PngSignature))
+		if _, headerError := io.ReadFull(in, header); headerError != nil {
+			return nil, headerError
+		} else if !bytes.Equal(header, pngstructure.PngSignature[:]) {
+			return nil, errors.New("could not decode png: invalid header")
+		}
+
+		// Don't bother checking CRC;
+		// we're overwriting it anyway.
+		ps := pngstructure.NewPngSplitter()
+		ps.DoCheckCrc(false)
+
+		// Png visitor's 'split' function
+		// satisfies bufio.SplitFunc{}.
+		scanner.Split((&pngVisitor{
+			ps:               ps,
+			writer:           out,
+			lastWrittenChunk: -1,
+		}).split)
+
+	default:
+		return nil, fmt.Errorf("mediaType %s cannot be processed", mediaType)
+	}
+
+	return scanner, nil
+}
diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/webp.go b/vendor/codeberg.org/superseriousbusiness/exif-terminator/webp.go
new file mode 100644
index 000000000..392c4871d
--- /dev/null
+++ b/vendor/codeberg.org/superseriousbusiness/exif-terminator/webp.go
@@ -0,0 +1,101 @@
+/*
+   exif-terminator
+   Copyright (C) 2022 SuperSeriousBusiness <admin@gotosocial.org>
+
+   This program is free software: you can redistribute it and/or modify
+   it under the terms of the GNU Affero General Public License as published by
+   the Free Software Foundation, either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU Affero General Public License for more details.
+
+   You should have received a copy of the GNU Affero General Public License
+   along with this program. If not, see <https://www.gnu.org/licenses/>.
+*/ + +package terminator + +import ( + "encoding/binary" + "errors" + "io" +) + +const ( + riffHeaderSize = 4 * 3 +) + +var ( + riffHeader = [4]byte{'R', 'I', 'F', 'F'} + webpHeader = [4]byte{'W', 'E', 'B', 'P'} + exifFourcc = [4]byte{'E', 'X', 'I', 'F'} + xmpFourcc = [4]byte{'X', 'M', 'P', ' '} + + errNoRiffHeader = errors.New("no RIFF header") + errNoWebpHeader = errors.New("not a WEBP file") +) + +type webpVisitor struct { + writer io.Writer + doneHeader bool +} + +func fourCC(b []byte) [4]byte { + return [4]byte{b[0], b[1], b[2], b[3]} +} + +func (v *webpVisitor) split(data []byte, atEOF bool) (advance int, token []byte, err error) { + // parse/write the header first + if !v.doneHeader { + if len(data) < riffHeaderSize { + // need the full header + return + } + if fourCC(data) != riffHeader { + err = errNoRiffHeader + return + } + if fourCC(data[8:]) != webpHeader { + err = errNoWebpHeader + return + } + if _, err = v.writer.Write(data[:riffHeaderSize]); err != nil { + return + } + advance += riffHeaderSize + data = data[riffHeaderSize:] + v.doneHeader = true + } + + // need enough for fourcc and size + if len(data) < 8 { + return + } + size := int64(binary.LittleEndian.Uint32(data[4:])) + if (size & 1) != 0 { + // odd chunk size - extra padding byte + size++ + } + // wait until there is enough + if int64(len(data)-8) < size { + return + } + + fourcc := fourCC(data) + rawChunkData := data[8 : 8+size] + if fourcc == exifFourcc || fourcc == xmpFourcc { + // replace exif/xmp with blank + rawChunkData = make([]byte, size) + } + + if _, err = v.writer.Write(data[:8]); err == nil { + if _, err = v.writer.Write(rawChunkData); err == nil { + advance += 8 + int(size) + } + } + + return +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/.MODULE_ROOT b/vendor/github.com/dsoprea/go-exif/v3/.MODULE_ROOT new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/dsoprea/go-exif/v3/LICENSE b/vendor/github.com/dsoprea/go-exif/v3/LICENSE new file mode 100644 index 000000000..0b9358a3a --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/LICENSE @@ -0,0 +1,9 @@ +MIT LICENSE + +Copyright 2019 Dustin Oprea + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/ifd.go b/vendor/github.com/dsoprea/go-exif/v3/common/ifd.go
new file mode 100644
index 000000000..01886e966
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-exif/v3/common/ifd.go
@@ -0,0 +1,651 @@
+package exifcommon
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/dsoprea/go-logging"
+)
+
+var (
+	ifdLogger = log.NewLogger("exifcommon.ifd")
+)
+
+var (
+	ErrChildIfdNotMapped = errors.New("no child-IFD for that tag-ID under parent")
+)
+
+// MappedIfd is one node in the IFD-mapping.
+type MappedIfd struct {
+	ParentTagId uint16
+	Placement   []uint16
+	Path        []string
+
+	Name     string
+	TagId    uint16
+	Children map[uint16]*MappedIfd
+}
+
+// String returns a descriptive string.
+func (mi *MappedIfd) String() string {
+	pathPhrase := mi.PathPhrase()
+	return fmt.Sprintf("MappedIfd<(0x%04X) [%s] PATH=[%s]>", mi.TagId, mi.Name, pathPhrase)
+}
+
+// PathPhrase returns a non-fully-qualified IFD path.
+func (mi *MappedIfd) PathPhrase() string {
+	return strings.Join(mi.Path, "/")
+}
+
+// TODO(dustin): Refactor this to use IfdIdentity structs.
+
+// IfdMapping describes all of the IFDs that we currently recognize.
+type IfdMapping struct {
+	rootNode *MappedIfd
+}
+
+// NewIfdMapping returns a new IfdMapping struct.
+func NewIfdMapping() (ifdMapping *IfdMapping) {
+	rootNode := &MappedIfd{
+		Path:     make([]string, 0),
+		Children: make(map[uint16]*MappedIfd),
+	}
+
+	return &IfdMapping{
+		rootNode: rootNode,
+	}
+}
+
+// NewIfdMappingWithStandard returns a new IfdMapping struct preloaded with the
+// standard IFDs.
+func NewIfdMappingWithStandard() (ifdMapping *IfdMapping, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	im := NewIfdMapping()
+
+	err = LoadStandardIfds(im)
+	log.PanicIf(err)
+
+	return im, nil
+}
+
+// Get returns the node given the path slice.
+func (im *IfdMapping) Get(parentPlacement []uint16) (childIfd *MappedIfd, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	ptr := im.rootNode
+	for _, tagId := range parentPlacement {
+		if descendantPtr, found := ptr.Children[tagId]; found == false {
+			log.Panicf("ifd child with tag-ID (%04x) not registered: [%s]", tagId, ptr.PathPhrase())
+		} else {
+			ptr = descendantPtr
+		}
+	}
+
+	return ptr, nil
+}
+
+// GetWithPath returns the node given the path string.
+func (im *IfdMapping) GetWithPath(pathPhrase string) (mi *MappedIfd, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	if pathPhrase == "" {
+		log.Panicf("path-phrase is empty")
+	}
+
+	path := strings.Split(pathPhrase, "/")
+	ptr := im.rootNode
+
+	for _, name := range path {
+		var hit *MappedIfd
+		for _, mi := range ptr.Children {
+			if mi.Name == name {
+				hit = mi
+				break
+			}
+		}
+
+		if hit == nil {
+			log.Panicf("ifd child with name [%s] not registered: [%s]", name, ptr.PathPhrase())
+		}
+
+		ptr = hit
+	}
+
+	return ptr, nil
+}
+
+// GetChild is a convenience function to get the child path for a given parent
+// placement and child tag-ID.
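+//
+// For example, with the standard IFDs loaded, GetChild("IFD", 0x8769)
+// returns the mapping for the "Exif" child IFD.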
+func (im *IfdMapping) GetChild(parentPathPhrase string, tagId uint16) (mi *MappedIfd, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	mi, err = im.GetWithPath(parentPathPhrase)
+	log.PanicIf(err)
+
+	for _, childMi := range mi.Children {
+		if childMi.TagId == tagId {
+			return childMi, nil
+		}
+	}
+
+	// The IFD may well be defined in the data, but if it is not registered
+	// in the mapping it is unknown to us.
+	log.Panic(ErrChildIfdNotMapped)
+	return nil, nil
+}
+
+// IfdTagIdAndIndex represents a specific part of the IFD path.
+//
+// This is a legacy type.
+type IfdTagIdAndIndex struct {
+	Name  string
+	TagId uint16
+	Index int
+}
+
+// String returns a descriptive string.
+func (itii IfdTagIdAndIndex) String() string {
+	return fmt.Sprintf("IfdTagIdAndIndex<NAME=[%s] ID=(0x%04x) INDEX=(%d)>", itii.Name, itii.TagId, itii.Index)
+}
+
+// ResolvePath takes a list of names, which can also be suffixed with indices
+// (to identify the second, third, etc. sibling IFD) and returns a list of
+// tag-IDs and those indices.
+//
+// Example:
+//
+// - IFD/Exif/Iop
+// - IFD0/Exif/Iop
+//
+// This is the only call that supports adding the numeric indices.
+func (im *IfdMapping) ResolvePath(pathPhrase string) (lineage []IfdTagIdAndIndex, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	pathPhrase = strings.TrimSpace(pathPhrase)
+
+	if pathPhrase == "" {
+		log.Panicf("can not resolve empty path-phrase")
+	}
+
+	path := strings.Split(pathPhrase, "/")
+	lineage = make([]IfdTagIdAndIndex, len(path))
+
+	ptr := im.rootNode
+	empty := IfdTagIdAndIndex{}
+	for i, name := range path {
+		indexByte := name[len(name)-1]
+		index := 0
+		if indexByte >= '0' && indexByte <= '9' {
+			index = int(indexByte - '0')
+			name = name[:len(name)-1]
+		}
+
+		itii := IfdTagIdAndIndex{}
+		for _, mi := range ptr.Children {
+			if mi.Name != name {
+				continue
+			}
+
+			itii.Name = name
+			itii.TagId = mi.TagId
+			itii.Index = index
+
+			ptr = mi
+
+			break
+		}
+
+		if itii == empty {
+			log.Panicf("ifd child with name [%s] not registered: [%s]", name, pathPhrase)
+		}
+
+		lineage[i] = itii
+	}
+
+	return lineage, nil
+}
+
+// FqPathPhraseFromLineage returns the fully-qualified IFD path from the slice.
+func (im *IfdMapping) FqPathPhraseFromLineage(lineage []IfdTagIdAndIndex) (fqPathPhrase string) {
+	fqPathParts := make([]string, len(lineage))
+	for i, itii := range lineage {
+		if itii.Index > 0 {
+			fqPathParts[i] = fmt.Sprintf("%s%d", itii.Name, itii.Index)
+		} else {
+			fqPathParts[i] = itii.Name
+		}
+	}
+
+	return strings.Join(fqPathParts, "/")
+}
+
+// PathPhraseFromLineage returns the non-fully-qualified IFD path from the
+// slice.
+func (im *IfdMapping) PathPhraseFromLineage(lineage []IfdTagIdAndIndex) (pathPhrase string) {
+	pathParts := make([]string, len(lineage))
+	for i, itii := range lineage {
+		pathParts[i] = itii.Name
+	}
+
+	return strings.Join(pathParts, "/")
+}
+
+// StripPathPhraseIndices returns a non-fully-qualified path-phrase (no
+// indices).
+func (im *IfdMapping) StripPathPhraseIndices(pathPhrase string) (strippedPathPhrase string, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	lineage, err := im.ResolvePath(pathPhrase)
+	log.PanicIf(err)
+
+	strippedPathPhrase = im.PathPhraseFromLineage(lineage)
+	return strippedPathPhrase, nil
+}
+
+// Add puts the given IFD at the given position of the tree.
The position of the +// tree is referred to as the placement and is represented by a set of tag-IDs, +// where the leftmost is the root tag and the tags going to the right are +// progressive descendants. +func (im *IfdMapping) Add(parentPlacement []uint16, tagId uint16, name string) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): !! It would be nicer to provide a list of names in the placement rather than tag-IDs. + + ptr, err := im.Get(parentPlacement) + log.PanicIf(err) + + path := make([]string, len(parentPlacement)+1) + if len(parentPlacement) > 0 { + copy(path, ptr.Path) + } + + path[len(path)-1] = name + + placement := make([]uint16, len(parentPlacement)+1) + if len(placement) > 0 { + copy(placement, ptr.Placement) + } + + placement[len(placement)-1] = tagId + + childIfd := &MappedIfd{ + ParentTagId: ptr.TagId, + Path: path, + Placement: placement, + Name: name, + TagId: tagId, + Children: make(map[uint16]*MappedIfd), + } + + if _, found := ptr.Children[tagId]; found == true { + log.Panicf("child IFD with tag-ID (%04x) already registered under IFD [%s] with tag-ID (%04x)", tagId, ptr.Name, ptr.TagId) + } + + ptr.Children[tagId] = childIfd + + return nil +} + +func (im *IfdMapping) dumpLineages(stack []*MappedIfd, input []string) (output []string, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + currentIfd := stack[len(stack)-1] + + output = input + for _, childIfd := range currentIfd.Children { + stackCopy := make([]*MappedIfd, len(stack)+1) + + copy(stackCopy, stack) + stackCopy[len(stack)] = childIfd + + // Add to output, but don't include the obligatory root node. + parts := make([]string, len(stackCopy)-1) + for i, mi := range stackCopy[1:] { + parts[i] = mi.Name + } + + output = append(output, strings.Join(parts, "/")) + + output, err = im.dumpLineages(stackCopy, output) + log.PanicIf(err) + } + + return output, nil +} + +// DumpLineages returns a slice of strings representing all mappings. +func (im *IfdMapping) DumpLineages() (output []string, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + stack := []*MappedIfd{im.rootNode} + output = make([]string, 0) + + output, err = im.dumpLineages(stack, output) + log.PanicIf(err) + + return output, nil +} + +// LoadStandardIfds loads the standard IFDs into the mapping. +func LoadStandardIfds(im *IfdMapping) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + err = im.Add( + []uint16{}, + IfdStandardIfdIdentity.TagId(), IfdStandardIfdIdentity.Name()) + + log.PanicIf(err) + + err = im.Add( + []uint16{IfdStandardIfdIdentity.TagId()}, + IfdExifStandardIfdIdentity.TagId(), IfdExifStandardIfdIdentity.Name()) + + log.PanicIf(err) + + err = im.Add( + []uint16{IfdStandardIfdIdentity.TagId(), IfdExifStandardIfdIdentity.TagId()}, + IfdExifIopStandardIfdIdentity.TagId(), IfdExifIopStandardIfdIdentity.Name()) + + log.PanicIf(err) + + err = im.Add( + []uint16{IfdStandardIfdIdentity.TagId()}, + IfdGpsInfoStandardIfdIdentity.TagId(), IfdGpsInfoStandardIfdIdentity.Name()) + + log.PanicIf(err) + + return nil +} + +// IfdTag describes a single IFD tag and its parent (if any). 
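+// In the standard tree the root "IFD" tag has no parent, while e.g. the
+// "Exif" tag (0x8769) points back at the root (see the vars further down).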
+type IfdTag struct {
+	parentIfdTag *IfdTag
+	tagId        uint16
+	name         string
+}
+
+func NewIfdTag(parentIfdTag *IfdTag, tagId uint16, name string) IfdTag {
+	return IfdTag{
+		parentIfdTag: parentIfdTag,
+		tagId:        tagId,
+		name:         name,
+	}
+}
+
+// ParentIfd returns the IfdTag of this IFD's parent.
+func (it IfdTag) ParentIfd() *IfdTag {
+	return it.parentIfdTag
+}
+
+// TagId returns the tag-ID of this IFD.
+func (it IfdTag) TagId() uint16 {
+	return it.tagId
+}
+
+// Name returns the simple name of this IFD.
+func (it IfdTag) Name() string {
+	return it.name
+}
+
+// String returns a descriptive string.
+func (it IfdTag) String() string {
+	parentIfdPhrase := ""
+	if it.parentIfdTag != nil {
+		parentIfdPhrase = fmt.Sprintf(" PARENT=(0x%04x)[%s]", it.parentIfdTag.tagId, it.parentIfdTag.name)
+	}
+
+	return fmt.Sprintf("IfdTag<TAG-ID=(0x%04x) NAME=[%s]%s>", it.tagId, it.name, parentIfdPhrase)
+}
+
+var (
+	// rootStandardIfd is the standard root IFD.
+	rootStandardIfd = NewIfdTag(nil, 0x0000, "IFD") // IFD
+
+	// exifStandardIfd is the standard "Exif" IFD.
+	exifStandardIfd = NewIfdTag(&rootStandardIfd, 0x8769, "Exif") // IFD/Exif
+
+	// iopStandardIfd is the standard "Iop" IFD.
+	iopStandardIfd = NewIfdTag(&exifStandardIfd, 0xA005, "Iop") // IFD/Exif/Iop
+
+	// gpsInfoStandardIfd is the standard "GPS" IFD.
+	gpsInfoStandardIfd = NewIfdTag(&rootStandardIfd, 0x8825, "GPSInfo") // IFD/GPSInfo
+)
+
+// IfdIdentityPart represents one component in an IFD path.
+type IfdIdentityPart struct {
+	Name  string
+	Index int
+}
+
+// String returns a fully-qualified IFD path.
+func (iip IfdIdentityPart) String() string {
+	if iip.Index > 0 {
+		return fmt.Sprintf("%s%d", iip.Name, iip.Index)
+	} else {
+		return iip.Name
+	}
+}
+
+// UnindexedString returns a non-fully-qualified IFD path.
+func (iip IfdIdentityPart) UnindexedString() string {
+	return iip.Name
+}
+
+// IfdIdentity represents a single IFD path and provides access to various
+// information and representations.
+//
+// Only global instances can be used for equality checks.
+type IfdIdentity struct {
+	ifdTag    IfdTag
+	parts     []IfdIdentityPart
+	ifdPath   string
+	fqIfdPath string
+}
+
+// NewIfdIdentity returns a new IfdIdentity struct.
+func NewIfdIdentity(ifdTag IfdTag, parts ...IfdIdentityPart) (ii *IfdIdentity) {
+	ii = &IfdIdentity{
+		ifdTag: ifdTag,
+		parts:  parts,
+	}
+
+	ii.ifdPath = ii.getIfdPath()
+	ii.fqIfdPath = ii.getFqIfdPath()
+
+	return ii
+}
+
+// NewIfdIdentityFromString parses a string like "IFD/Exif" or "IFD1" or
+// something more exotic with custom IFDs ("SomeIFD4/SomeChildIFD6"). Note that
+// this will validate the unindexed IFD structure (because the standard tags
+// from the specification are unindexed), but not, obviously, any indices (e.g.
+// the numbers in "IFD0", "IFD1", "SomeIFD4/SomeChildIFD6"). The caller must
+// check whether these specific instances were actually parsed out of the
+// stream.
+func NewIfdIdentityFromString(im *IfdMapping, fqIfdPath string) (ii *IfdIdentity, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	lineage, err := im.ResolvePath(fqIfdPath)
+	log.PanicIf(err)
+
+	var lastIt *IfdTag
+	identityParts := make([]IfdIdentityPart, len(lineage))
+	for i, itii := range lineage {
+		// Build out the tag that will eventually point to the IFD represented
+		// by the right-most part in the IFD path.
+
+		it := &IfdTag{
+			parentIfdTag: lastIt,
+			tagId:        itii.TagId,
+			name:         itii.Name,
+		}
+
+		lastIt = it
+
+		// Create the next IfdIdentity part.
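+		// (a name/index pair; e.g. name "IFD" with index 1 renders
+		// as "IFD1")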
+
+		iip := IfdIdentityPart{
+			Name:  itii.Name,
+			Index: itii.Index,
+		}
+
+		identityParts[i] = iip
+	}
+
+	ii = NewIfdIdentity(*lastIt, identityParts...)
+	return ii, nil
+}
+
+func (ii *IfdIdentity) getFqIfdPath() string {
+	partPhrases := make([]string, len(ii.parts))
+	for i, iip := range ii.parts {
+		partPhrases[i] = iip.String()
+	}
+
+	return strings.Join(partPhrases, "/")
+}
+
+func (ii *IfdIdentity) getIfdPath() string {
+	partPhrases := make([]string, len(ii.parts))
+	for i, iip := range ii.parts {
+		partPhrases[i] = iip.UnindexedString()
+	}
+
+	return strings.Join(partPhrases, "/")
+}
+
+// String returns a fully-qualified IFD path.
+func (ii *IfdIdentity) String() string {
+	return ii.fqIfdPath
+}
+
+// UnindexedString returns a non-fully-qualified IFD path.
+func (ii *IfdIdentity) UnindexedString() string {
+	return ii.ifdPath
+}
+
+// IfdTag returns the tag struct behind this IFD.
+func (ii *IfdIdentity) IfdTag() IfdTag {
+	return ii.ifdTag
+}
+
+// TagId returns the tag-ID of the IFD.
+func (ii *IfdIdentity) TagId() uint16 {
+	return ii.ifdTag.TagId()
+}
+
+// LeafPathPart returns the last right-most path-part, which represents the
+// current IFD.
+func (ii *IfdIdentity) LeafPathPart() IfdIdentityPart {
+	return ii.parts[len(ii.parts)-1]
+}
+
+// Name returns the simple name of this IFD.
+func (ii *IfdIdentity) Name() string {
+	return ii.LeafPathPart().Name
+}
+
+// Index returns the index of this IFD (more than one IFD under a parent IFD
+// will be numbered [0..n]).
+func (ii *IfdIdentity) Index() int {
+	return ii.LeafPathPart().Index
+}
+
+// Equals returns true if the two IfdIdentity instances are effectively
+// identical.
+//
+// Since there's no way to get a specific fully-qualified IFD path without a
+// certain slice of parts and all other fields are also derived from this,
+// checking that the fully-qualified IFD path is equal is sufficient.
+func (ii *IfdIdentity) Equals(ii2 *IfdIdentity) bool {
+	return ii.String() == ii2.String()
+}
+
+// NewChild creates an IfdIdentity for an IFD that is a child of the current
+// IFD.
+func (ii *IfdIdentity) NewChild(childIfdTag IfdTag, index int) (iiChild *IfdIdentity) {
+	if *childIfdTag.parentIfdTag != ii.ifdTag {
+		log.Panicf("can not add child; we are not the parent:\nUS=%v\nCHILD=%v", ii.ifdTag, childIfdTag)
+	}
+
+	childPart := IfdIdentityPart{childIfdTag.name, index}
+	childParts := append(ii.parts, childPart)
+
+	iiChild = NewIfdIdentity(childIfdTag, childParts...)
+	return iiChild
+}
+
+// NewSibling creates an IfdIdentity for an IFD that is a sibling to the
+// current one.
+func (ii *IfdIdentity) NewSibling(index int) (iiSibling *IfdIdentity) {
+	parts := make([]IfdIdentityPart, len(ii.parts))
+
+	copy(parts, ii.parts)
+	parts[len(parts)-1].Index = index
+
+	iiSibling = NewIfdIdentity(ii.ifdTag, parts...)
+	return iiSibling
+}
+
+var (
+	// IfdStandardIfdIdentity represents the IFD path for IFD0.
+	IfdStandardIfdIdentity = NewIfdIdentity(rootStandardIfd, IfdIdentityPart{"IFD", 0})
+
+	// IfdExifStandardIfdIdentity represents the IFD path for IFD0/Exif0.
+	IfdExifStandardIfdIdentity = IfdStandardIfdIdentity.NewChild(exifStandardIfd, 0)
+
+	// IfdExifIopStandardIfdIdentity represents the IFD path for IFD0/Exif0/Iop0.
+	IfdExifIopStandardIfdIdentity = IfdExifStandardIfdIdentity.NewChild(iopStandardIfd, 0)
+
+	// IfdGpsInfoStandardIfdIdentity represents the IFD path for IFD0/GPSInfo0.
+ IfdGpsInfoStandardIfdIdentity = IfdStandardIfdIdentity.NewChild(gpsInfoStandardIfd, 0)
+
+ // Ifd1StandardIfdIdentity represents the IFD path for IFD1.
+ Ifd1StandardIfdIdentity = NewIfdIdentity(rootStandardIfd, IfdIdentityPart{"IFD", 1})
+)
diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/parser.go b/vendor/github.com/dsoprea/go-exif/v3/common/parser.go
new file mode 100644
index 000000000..76e8ef425
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-exif/v3/common/parser.go
@@ -0,0 +1,280 @@
+package exifcommon
+
+import (
+ "bytes"
+ "errors"
+ "math"
+
+ "encoding/binary"
+
+ "github.com/dsoprea/go-logging"
+)
+
+var (
+ parserLogger = log.NewLogger("exifcommon.parser")
+)
+
+var (
+ ErrParseFail = errors.New("parse failure")
+)
+
+// Parser knows how to parse all well-defined, encoded EXIF types.
+type Parser struct {
+}
+
+// ParseBytes knows how to parse a byte-type value.
+func (p *Parser) ParseBytes(data []byte, unitCount uint32) (value []uint8, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // TODO(dustin): Add test
+
+ count := int(unitCount)
+
+ if len(data) < (TypeByte.Size() * count) {
+ log.Panic(ErrNotEnoughData)
+ }
+
+ value = []uint8(data[:count])
+
+ return value, nil
+}
+
+// ParseAscii returns a string and auto-strips the trailing NUL character that
+// should be at the end of the encoding.
+func (p *Parser) ParseAscii(data []byte, unitCount uint32) (value string, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // TODO(dustin): Add test
+
+ count := int(unitCount)
+
+ if len(data) < (TypeAscii.Size() * count) {
+ log.Panic(ErrNotEnoughData)
+ }
+
+ if len(data) == 0 || data[count-1] != 0 {
+ s := string(data[:count])
+ parserLogger.Warningf(nil, "ASCII not terminated with NUL as expected: [%v]", s)
+
+ for i, c := range s {
+ if c > 127 {
+ // Binary
+
+ t := s[:i]
+ parserLogger.Warningf(nil, "ASCII also had binary characters. Truncating: [%v]->[%s]", s, t)
+
+ return t, nil
+ }
+ }
+
+ return s, nil
+ }
+
+ // Auto-strip the NUL from the end. It serves no purpose outside of
+ // encoding semantics.
+
+ return string(data[:count-1]), nil
+}
+
+// ParseAsciiNoNul returns a string without any consideration for a trailing NUL
+// character.
+func (p *Parser) ParseAsciiNoNul(data []byte, unitCount uint32) (value string, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // TODO(dustin): Add test
+
+ count := int(unitCount)
+
+ if len(data) < (TypeAscii.Size() * count) {
+ log.Panic(ErrNotEnoughData)
+ }
+
+ return string(data[:count]), nil
+}
+
+// ParseShorts knows how to parse an encoded list of shorts.
+func (p *Parser) ParseShorts(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint16, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // TODO(dustin): Add test
+
+ count := int(unitCount)
+
+ if len(data) < (TypeShort.Size() * count) {
+ log.Panic(ErrNotEnoughData)
+ }
+
+ value = make([]uint16, count)
+ for i := 0; i < count; i++ {
+ value[i] = byteOrder.Uint16(data[i*2:])
+ }
+
+ return value, nil
+}
+
+// ParseLongs knows how to parse an encoded list of unsigned longs.
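+//
+// For example (an illustrative sketch, assuming big-endian input), two
+// encoded longs decode as:
+//
+//   p := new(Parser)
+//   data := []byte{0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02}
+//   values, err := p.ParseLongs(data, 2, binary.BigEndian)
+//   // err == nil; values == []uint32{1, 2}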
+func (p *Parser) ParseLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint32, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // TODO(dustin): Add test
+
+ count := int(unitCount)
+
+ if len(data) < (TypeLong.Size() * count) {
+ log.Panic(ErrNotEnoughData)
+ }
+
+ value = make([]uint32, count)
+ for i := 0; i < count; i++ {
+ value[i] = byteOrder.Uint32(data[i*4:])
+ }
+
+ return value, nil
+}
+
+// ParseFloats knows how to parse an encoded list of floats.
+func (p *Parser) ParseFloats(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []float32, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ count := int(unitCount)
+
+ if len(data) != (TypeFloat.Size() * count) {
+ log.Panic(ErrNotEnoughData)
+ }
+
+ value = make([]float32, count)
+ for i := 0; i < count; i++ {
+ value[i] = math.Float32frombits(byteOrder.Uint32(data[i*4 : (i+1)*4]))
+ }
+
+ return value, nil
+}
+
+// ParseDoubles knows how to parse an encoded list of doubles.
+func (p *Parser) ParseDoubles(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []float64, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ count := int(unitCount)
+
+ if len(data) != (TypeDouble.Size() * count) {
+ log.Panic(ErrNotEnoughData)
+ }
+
+ value = make([]float64, count)
+ for i := 0; i < count; i++ {
+ value[i] = math.Float64frombits(byteOrder.Uint64(data[i*8 : (i+1)*8]))
+ }
+
+ return value, nil
+}
+
+// ParseRationals knows how to parse an encoded list of unsigned rationals.
+func (p *Parser) ParseRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []Rational, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // TODO(dustin): Add test
+
+ count := int(unitCount)
+
+ if len(data) < (TypeRational.Size() * count) {
+ log.Panic(ErrNotEnoughData)
+ }
+
+ value = make([]Rational, count)
+ for i := 0; i < count; i++ {
+ value[i].Numerator = byteOrder.Uint32(data[i*8:])
+ value[i].Denominator = byteOrder.Uint32(data[i*8+4:])
+ }
+
+ return value, nil
+}
+
+// ParseSignedLongs knows how to parse an encoded list of signed longs.
+func (p *Parser) ParseSignedLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []int32, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // TODO(dustin): Add test
+
+ count := int(unitCount)
+
+ if len(data) < (TypeSignedLong.Size() * count) {
+ log.Panic(ErrNotEnoughData)
+ }
+
+ b := bytes.NewBuffer(data)
+
+ value = make([]int32, count)
+ for i := 0; i < count; i++ {
+ err := binary.Read(b, byteOrder, &value[i])
+ log.PanicIf(err)
+ }
+
+ return value, nil
+}
+
+// ParseSignedRationals knows how to parse an encoded list of signed
+// rationals.
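+//
+// For example (an illustrative sketch, assuming big-endian input), these
+// eight bytes decode to a single signed rational of -1/3:
+//
+//   p := new(Parser)
+//   data := []byte{0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x03}
+//   values, err := p.ParseSignedRationals(data, 1, binary.BigEndian)
+//   // err == nil; values == []SignedRational{{Numerator: -1, Denominator: 3}}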
+func (p *Parser) ParseSignedRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []SignedRational, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test + + count := int(unitCount) + + if len(data) < (TypeSignedRational.Size() * count) { + log.Panic(ErrNotEnoughData) + } + + b := bytes.NewBuffer(data) + + value = make([]SignedRational, count) + for i := 0; i < count; i++ { + err = binary.Read(b, byteOrder, &value[i].Numerator) + log.PanicIf(err) + + err = binary.Read(b, byteOrder, &value[i].Denominator) + log.PanicIf(err) + } + + return value, nil +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go b/vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go new file mode 100644 index 000000000..f04fa22b6 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go @@ -0,0 +1,88 @@ +package exifcommon + +import ( + "os" + "path" + + "encoding/binary" + "io/ioutil" + + "github.com/dsoprea/go-logging" +) + +var ( + moduleRootPath = "" + + testExifData []byte = nil + + // EncodeDefaultByteOrder is the default byte-order for encoding operations. + EncodeDefaultByteOrder = binary.BigEndian + + // Default byte order for tests. + TestDefaultByteOrder = binary.BigEndian +) + +func GetModuleRootPath() string { + if moduleRootPath == "" { + moduleRootPath = os.Getenv("EXIF_MODULE_ROOT_PATH") + if moduleRootPath != "" { + return moduleRootPath + } + + currentWd, err := os.Getwd() + log.PanicIf(err) + + currentPath := currentWd + + visited := make([]string, 0) + + for { + tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") + + _, err := os.Stat(tryStampFilepath) + if err != nil && os.IsNotExist(err) != true { + log.Panic(err) + } else if err == nil { + break + } + + visited = append(visited, tryStampFilepath) + + currentPath = path.Dir(currentPath) + if currentPath == "/" { + log.Panicf("could not find module-root: %v", visited) + } + } + + moduleRootPath = currentPath + } + + return moduleRootPath +} + +func GetTestAssetsPath() string { + moduleRootPath := GetModuleRootPath() + assetsPath := path.Join(moduleRootPath, "assets") + + return assetsPath +} + +func getTestImageFilepath() string { + assetsPath := GetTestAssetsPath() + testImageFilepath := path.Join(assetsPath, "NDM_8901.jpg") + return testImageFilepath +} + +func getTestExifData() []byte { + if testExifData == nil { + assetsPath := GetTestAssetsPath() + filepath := path.Join(assetsPath, "NDM_8901.jpg.exif") + + var err error + + testExifData, err = ioutil.ReadFile(filepath) + log.PanicIf(err) + } + + return testExifData +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/type.go b/vendor/github.com/dsoprea/go-exif/v3/common/type.go new file mode 100644 index 000000000..e79bcb9a1 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/type.go @@ -0,0 +1,482 @@ +package exifcommon + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "encoding/binary" + + "github.com/dsoprea/go-logging" +) + +var ( + typeLogger = log.NewLogger("exif.type") +) + +var ( + // ErrNotEnoughData is used when there isn't enough data to accommodate what + // we're trying to parse (sizeof(type) * unit_count). + ErrNotEnoughData = errors.New("not enough data for type") + + // ErrWrongType is used when we try to parse anything other than the + // current type. 
+ ErrWrongType = errors.New("wrong type, can not parse")
+
+ // ErrUnhandledUndefinedTypedTag is used when we try to parse a tag that's
+ // recorded as an "unknown" type but not a documented tag (therefore
+ // leaving us not knowing how to read it).
+ ErrUnhandledUndefinedTypedTag = errors.New("not a standard unknown-typed tag")
+)
+
+// TagTypePrimitive is a type-alias that lets us easily look up type properties.
+type TagTypePrimitive uint16
+
+const (
+ // TypeByte describes an encoded list of bytes.
+ TypeByte TagTypePrimitive = 1
+
+ // TypeAscii describes an encoded list of characters that is terminated
+ // with a NUL in its encoded form.
+ TypeAscii TagTypePrimitive = 2
+
+ // TypeShort describes an encoded list of shorts.
+ TypeShort TagTypePrimitive = 3
+
+ // TypeLong describes an encoded list of longs.
+ TypeLong TagTypePrimitive = 4
+
+ // TypeRational describes an encoded list of rationals.
+ TypeRational TagTypePrimitive = 5
+
+ // TypeUndefined describes an encoded value that has a complex/non-clearcut
+ // interpretation.
+ TypeUndefined TagTypePrimitive = 7
+
+ // We've seen type-8, but have no documentation on it.
+
+ // TypeSignedLong describes an encoded list of signed longs.
+ TypeSignedLong TagTypePrimitive = 9
+
+ // TypeSignedRational describes an encoded list of signed rationals.
+ TypeSignedRational TagTypePrimitive = 10
+
+ // TypeFloat describes an encoded list of floats.
+ TypeFloat TagTypePrimitive = 11
+
+ // TypeDouble describes an encoded list of doubles.
+ TypeDouble TagTypePrimitive = 12
+
+ // TypeAsciiNoNul is just a pseudo-type, for our own purposes.
+ TypeAsciiNoNul TagTypePrimitive = 0xf0
+)
+
+// String returns the name of the type.
+func (typeType TagTypePrimitive) String() string {
+ return TypeNames[typeType]
+}
+
+// Size returns the size of one atomic unit of the type.
+func (tagType TagTypePrimitive) Size() int {
+ switch tagType {
+ case TypeByte, TypeAscii, TypeAsciiNoNul:
+ return 1
+ case TypeShort:
+ return 2
+ case TypeLong, TypeSignedLong, TypeFloat:
+ return 4
+ case TypeRational, TypeSignedRational, TypeDouble:
+ return 8
+ default:
+ log.Panicf("can not determine tag-value size for type (%d): [%s]",
+ tagType,
+ TypeNames[tagType])
+ // Never called.
+ return 0
+ }
+}
+
+// IsValid returns true if tagType is a valid type.
+func (tagType TagTypePrimitive) IsValid() bool {
+
+ // TODO(dustin): Add test
+
+ return tagType == TypeByte ||
+ tagType == TypeAscii ||
+ tagType == TypeAsciiNoNul ||
+ tagType == TypeShort ||
+ tagType == TypeLong ||
+ tagType == TypeRational ||
+ tagType == TypeSignedLong ||
+ tagType == TypeSignedRational ||
+ tagType == TypeFloat ||
+ tagType == TypeDouble ||
+ tagType == TypeUndefined
+}
+
+var (
+ // TODO(dustin): Rename TypeNames() to typeNames() and add getter.
+ TypeNames = map[TagTypePrimitive]string{
+ TypeByte: "BYTE",
+ TypeAscii: "ASCII",
+ TypeShort: "SHORT",
+ TypeLong: "LONG",
+ TypeRational: "RATIONAL",
+ TypeUndefined: "UNDEFINED",
+ TypeSignedLong: "SLONG",
+ TypeSignedRational: "SRATIONAL",
+ TypeFloat: "FLOAT",
+ TypeDouble: "DOUBLE",
+
+ TypeAsciiNoNul: "_ASCII_NO_NUL",
+ }
+
+ typeNamesR = map[string]TagTypePrimitive{}
+)
+
+// Rational describes an unsigned rational value.
+type Rational struct {
+ // Numerator is the numerator of the rational value.
+ Numerator uint32
+
+ // Denominator is the denominator of the rational value.
+ Denominator uint32
+}
+
+// SignedRational describes a signed rational value.
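+//
+// Signed rationals appear in tags such as ExposureBiasValue, where a value
+// like -1/3 represents a fraction of a stop.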
+type SignedRational struct {
+ // Numerator is the numerator of the rational value.
+ Numerator int32
+
+ // Denominator is the denominator of the rational value.
+ Denominator int32
+}
+
+func isPrintableText(s string) bool {
+ for _, c := range s {
+ // unicode.IsPrint() returns false for newline characters.
+ if c == 0x0d || c == 0x0a {
+ continue
+ } else if unicode.IsPrint(rune(c)) == false {
+ return false
+ }
+ }
+
+ return true
+}
+
+// FormatFromType returns a stringified value for the given encoding. Automatically
+// parses. Automatically calculates count based on type size. This function
+// also supports undefined-type values (the ones that we support, anyway) by
+// way of the String() method that they all require. We can't be more specific
+// because we're a base package and we can't refer to it.
+func FormatFromType(value interface{}, justFirst bool) (phrase string, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // TODO(dustin): !! Add test
+
+ switch t := value.(type) {
+ case []byte:
+ return DumpBytesToString(t), nil
+ case string:
+ for i, c := range t {
+ if c == 0 {
+ t = t[:i]
+ break
+ }
+ }
+
+ if isPrintableText(t) == false {
+ phrase = fmt.Sprintf("string with binary data (%d bytes)", len(t))
+ return phrase, nil
+ }
+
+ return t, nil
+ case []uint16, []uint32, []int32, []float64, []float32:
+ val := reflect.ValueOf(t)
+
+ if val.Len() == 0 {
+ return "", nil
+ }
+
+ if justFirst == true {
+ var valueSuffix string
+ if val.Len() > 1 {
+ valueSuffix = "..."
+ }
+
+ return fmt.Sprintf("%v%s", val.Index(0), valueSuffix), nil
+ }
+
+ return fmt.Sprintf("%v", val), nil
+ case []Rational:
+ if len(t) == 0 {
+ return "", nil
+ }
+
+ parts := make([]string, len(t))
+ for i, r := range t {
+ parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator)
+
+ if justFirst == true {
+ break
+ }
+ }
+
+ if justFirst == true {
+ var valueSuffix string
+ if len(t) > 1 {
+ valueSuffix = "..."
+ }
+
+ return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil
+ }
+
+ return fmt.Sprintf("%v", parts), nil
+ case []SignedRational:
+ if len(t) == 0 {
+ return "", nil
+ }
+
+ parts := make([]string, len(t))
+ for i, r := range t {
+ parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator)
+
+ if justFirst == true {
+ break
+ }
+ }
+
+ if justFirst == true {
+ var valueSuffix string
+ if len(t) > 1 {
+ valueSuffix = "..."
+ }
+
+ return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil
+ }
+
+ return fmt.Sprintf("%v", parts), nil
+ case fmt.Stringer:
+ s := t.String()
+ if isPrintableText(s) == false {
+ phrase = fmt.Sprintf("stringable with binary data (%d bytes)", len(s))
+ return phrase, nil
+ }
+
+ // An undefined value that is documented (or that we otherwise support).
+ return s, nil
+ default:
+ // Affects only "unknown" values, in general.
+ log.Panicf("type can not be formatted into string: %v", reflect.TypeOf(value).Name())
+
+ // Never called.
+ return "", nil
+ }
+}
+
+// FormatFromBytes returns a stringified value for the given encoding. Automatically
+// parses. Automatically calculates count based on type size.
+func FormatFromBytes(rawBytes []byte, tagType TagTypePrimitive, justFirst bool, byteOrder binary.ByteOrder) (phrase string, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // TODO(dustin): !!
Add test + + typeSize := tagType.Size() + + if len(rawBytes)%typeSize != 0 { + log.Panicf("byte-count (%d) does not align for [%s] type with a size of (%d) bytes", len(rawBytes), TypeNames[tagType], typeSize) + } + + // unitCount is the calculated unit-count. This should equal the original + // value from the tag (pre-resolution). + unitCount := uint32(len(rawBytes) / typeSize) + + // Truncate the items if it's not bytes or a string and we just want the first. + + var value interface{} + + switch tagType { + case TypeByte: + var err error + + value, err = parser.ParseBytes(rawBytes, unitCount) + log.PanicIf(err) + case TypeAscii: + var err error + + value, err = parser.ParseAscii(rawBytes, unitCount) + log.PanicIf(err) + case TypeAsciiNoNul: + var err error + + value, err = parser.ParseAsciiNoNul(rawBytes, unitCount) + log.PanicIf(err) + case TypeShort: + var err error + + value, err = parser.ParseShorts(rawBytes, unitCount, byteOrder) + log.PanicIf(err) + case TypeLong: + var err error + + value, err = parser.ParseLongs(rawBytes, unitCount, byteOrder) + log.PanicIf(err) + case TypeFloat: + var err error + + value, err = parser.ParseFloats(rawBytes, unitCount, byteOrder) + log.PanicIf(err) + case TypeDouble: + var err error + + value, err = parser.ParseDoubles(rawBytes, unitCount, byteOrder) + log.PanicIf(err) + case TypeRational: + var err error + + value, err = parser.ParseRationals(rawBytes, unitCount, byteOrder) + log.PanicIf(err) + case TypeSignedLong: + var err error + + value, err = parser.ParseSignedLongs(rawBytes, unitCount, byteOrder) + log.PanicIf(err) + case TypeSignedRational: + var err error + + value, err = parser.ParseSignedRationals(rawBytes, unitCount, byteOrder) + log.PanicIf(err) + default: + // Affects only "unknown" values, in general. + log.Panicf("value of type [%s] can not be formatted into string", tagType.String()) + + // Never called. + return "", nil + } + + phrase, err = FormatFromType(value, justFirst) + log.PanicIf(err) + + return phrase, nil +} + +// TranslateStringToType converts user-provided strings to properly-typed +// values. If a string, returns a string. Else, assumes that it's a single +// number. If a list needs to be processed, it is the caller's responsibility to +// split it (according to whichever convention has been established). +func TranslateStringToType(tagType TagTypePrimitive, valueString string) (value interface{}, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if tagType == TypeUndefined { + // The caller should just call String() on the decoded type. + log.Panicf("undefined-type values are not supported") + } + + if tagType == TypeByte { + wide, err := strconv.ParseInt(valueString, 16, 8) + log.PanicIf(err) + + return byte(wide), nil + } else if tagType == TypeAscii || tagType == TypeAsciiNoNul { + // Whether or not we're putting an NUL on the end is only relevant for + // byte-level encoding. This function really just supports a user + // interface. 
+ + return valueString, nil + } else if tagType == TypeShort { + n, err := strconv.ParseUint(valueString, 10, 16) + log.PanicIf(err) + + return uint16(n), nil + } else if tagType == TypeLong { + n, err := strconv.ParseUint(valueString, 10, 32) + log.PanicIf(err) + + return uint32(n), nil + } else if tagType == TypeRational { + parts := strings.SplitN(valueString, "/", 2) + + numerator, err := strconv.ParseUint(parts[0], 10, 32) + log.PanicIf(err) + + denominator, err := strconv.ParseUint(parts[1], 10, 32) + log.PanicIf(err) + + return Rational{ + Numerator: uint32(numerator), + Denominator: uint32(denominator), + }, nil + } else if tagType == TypeSignedLong { + n, err := strconv.ParseInt(valueString, 10, 32) + log.PanicIf(err) + + return int32(n), nil + } else if tagType == TypeFloat { + n, err := strconv.ParseFloat(valueString, 32) + log.PanicIf(err) + + return float32(n), nil + } else if tagType == TypeDouble { + n, err := strconv.ParseFloat(valueString, 64) + log.PanicIf(err) + + return float64(n), nil + } else if tagType == TypeSignedRational { + parts := strings.SplitN(valueString, "/", 2) + + numerator, err := strconv.ParseInt(parts[0], 10, 32) + log.PanicIf(err) + + denominator, err := strconv.ParseInt(parts[1], 10, 32) + log.PanicIf(err) + + return SignedRational{ + Numerator: int32(numerator), + Denominator: int32(denominator), + }, nil + } + + log.Panicf("from-string encoding for type not supported; this shouldn't happen: [%s]", tagType.String()) + return nil, nil +} + +// GetTypeByName returns the `TagTypePrimitive` for the given type name. +// Returns (0) if not valid. +func GetTypeByName(typeName string) (tagType TagTypePrimitive, found bool) { + tagType, found = typeNamesR[typeName] + return tagType, found +} + +// BasicTag describes a single tag for any purpose. +type BasicTag struct { + // FqIfdPath is the fully-qualified IFD-path. + FqIfdPath string + + // IfdPath is the unindexed IFD-path. + IfdPath string + + // TagId is the tag-ID. + TagId uint16 +} + +func init() { + for typeId, typeName := range TypeNames { + typeNamesR[typeName] = typeId + } +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/utility.go b/vendor/github.com/dsoprea/go-exif/v3/common/utility.go new file mode 100644 index 000000000..575049706 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/utility.go @@ -0,0 +1,148 @@ +package exifcommon + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/dsoprea/go-logging" +) + +var ( + timeType = reflect.TypeOf(time.Time{}) +) + +// DumpBytes prints a list of hex-encoded bytes. +func DumpBytes(data []byte) { + fmt.Printf("DUMP: ") + for _, x := range data { + fmt.Printf("%02x ", x) + } + + fmt.Printf("\n") +} + +// DumpBytesClause prints a list like DumpBytes(), but encapsulated in +// "[]byte { ... }". +func DumpBytesClause(data []byte) { + fmt.Printf("DUMP: ") + + fmt.Printf("[]byte { ") + + for i, x := range data { + fmt.Printf("0x%02x", x) + + if i < len(data)-1 { + fmt.Printf(", ") + } + } + + fmt.Printf(" }\n") +} + +// DumpBytesToString returns a stringified list of hex-encoded bytes. +func DumpBytesToString(data []byte) string { + b := new(bytes.Buffer) + + for i, x := range data { + _, err := b.WriteString(fmt.Sprintf("%02x", x)) + log.PanicIf(err) + + if i < len(data)-1 { + _, err := b.WriteRune(' ') + log.PanicIf(err) + } + } + + return b.String() +} + +// DumpBytesClauseToString returns a comma-separated list of hex-encoded bytes. 
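+//
+// For example:
+//
+//   DumpBytesClauseToString([]byte{0x12, 0xab})
+//   // Returns "0x12, 0xab".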
+func DumpBytesClauseToString(data []byte) string { + b := new(bytes.Buffer) + + for i, x := range data { + _, err := b.WriteString(fmt.Sprintf("0x%02x", x)) + log.PanicIf(err) + + if i < len(data)-1 { + _, err := b.WriteString(", ") + log.PanicIf(err) + } + } + + return b.String() +} + +// ExifFullTimestampString produces a string like "2018:11:30 13:01:49" from a +// `time.Time` struct. It will attempt to convert to UTC first. +func ExifFullTimestampString(t time.Time) (fullTimestampPhrase string) { + t = t.UTC() + + return fmt.Sprintf("%04d:%02d:%02d %02d:%02d:%02d", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) +} + +// ParseExifFullTimestamp parses dates like "2018:11:30 13:01:49" into a UTC +// `time.Time` struct. +func ParseExifFullTimestamp(fullTimestampPhrase string) (timestamp time.Time, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + parts := strings.Split(fullTimestampPhrase, " ") + datestampValue, timestampValue := parts[0], parts[1] + + // Normalize the separators. + datestampValue = strings.ReplaceAll(datestampValue, "-", ":") + timestampValue = strings.ReplaceAll(timestampValue, "-", ":") + + dateParts := strings.Split(datestampValue, ":") + + year, err := strconv.ParseUint(dateParts[0], 10, 16) + if err != nil { + log.Panicf("could not parse year") + } + + month, err := strconv.ParseUint(dateParts[1], 10, 8) + if err != nil { + log.Panicf("could not parse month") + } + + day, err := strconv.ParseUint(dateParts[2], 10, 8) + if err != nil { + log.Panicf("could not parse day") + } + + timeParts := strings.Split(timestampValue, ":") + + hour, err := strconv.ParseUint(timeParts[0], 10, 8) + if err != nil { + log.Panicf("could not parse hour") + } + + minute, err := strconv.ParseUint(timeParts[1], 10, 8) + if err != nil { + log.Panicf("could not parse minute") + } + + second, err := strconv.ParseUint(timeParts[2], 10, 8) + if err != nil { + log.Panicf("could not parse second") + } + + timestamp = time.Date(int(year), time.Month(month), int(day), int(hour), int(minute), int(second), 0, time.UTC) + return timestamp, nil +} + +// IsTime returns true if the value is a `time.Time`. +func IsTime(v interface{}) bool { + + // TODO(dustin): Add test + + return reflect.TypeOf(v) == timeType +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/value_context.go b/vendor/github.com/dsoprea/go-exif/v3/common/value_context.go new file mode 100644 index 000000000..b9e634106 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/value_context.go @@ -0,0 +1,464 @@ +package exifcommon + +import ( + "errors" + "io" + + "encoding/binary" + + "github.com/dsoprea/go-logging" +) + +var ( + parser *Parser +) + +var ( + // ErrNotFarValue indicates that an offset-based lookup was attempted for a + // non-offset-based (embedded) value. + ErrNotFarValue = errors.New("not a far value") +) + +// ValueContext embeds all of the parameters required to find and extract the +// actual tag value. +type ValueContext struct { + unitCount uint32 + valueOffset uint32 + rawValueOffset []byte + rs io.ReadSeeker + + tagType TagTypePrimitive + byteOrder binary.ByteOrder + + // undefinedValueTagType is the effective type to use if this is an + // "undefined" value. + undefinedValueTagType TagTypePrimitive + + ifdPath string + tagId uint16 +} + +// TODO(dustin): We can update newValueContext() to derive `valueOffset` itself (from `rawValueOffset`). + +// NewValueContext returns a new ValueContext struct. 
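+//
+// An illustrative call (rawValueOffset and rs are stand-ins for the entry's
+// embedded offset bytes and the EXIF read-seeker, and the tag shown is the
+// SHORT-typed ISOSpeedRatings tag):
+//
+//   vc := NewValueContext(
+//       "IFD/Exif", 0x8827, 1, 0, rawValueOffset, rs,
+//       TypeShort, binary.BigEndian)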
+func NewValueContext(ifdPath string, tagId uint16, unitCount, valueOffset uint32, rawValueOffset []byte, rs io.ReadSeeker, tagType TagTypePrimitive, byteOrder binary.ByteOrder) *ValueContext {
+ return &ValueContext{
+ unitCount: unitCount,
+ valueOffset: valueOffset,
+ rawValueOffset: rawValueOffset,
+ rs: rs,
+
+ tagType: tagType,
+ byteOrder: byteOrder,
+
+ ifdPath: ifdPath,
+ tagId: tagId,
+ }
+}
+
+// SetUndefinedValueType sets the effective type if this is an unknown-type tag.
+func (vc *ValueContext) SetUndefinedValueType(tagType TagTypePrimitive) {
+ if vc.tagType != TypeUndefined {
+ log.Panicf("can not set effective type for unknown-type tag because this is *not* an unknown-type tag")
+ }
+
+ vc.undefinedValueTagType = tagType
+}
+
+// UnitCount returns the embedded unit-count.
+func (vc *ValueContext) UnitCount() uint32 {
+ return vc.unitCount
+}
+
+// ValueOffset returns the value-offset decoded as a `uint32`.
+func (vc *ValueContext) ValueOffset() uint32 {
+ return vc.valueOffset
+}
+
+// RawValueOffset returns the uninterpreted value-offset. This is used for
+// embedded values (values small enough to fit within the offset bytes rather
+// than needing to be stored elsewhere and referred to by an actual offset).
+func (vc *ValueContext) RawValueOffset() []byte {
+ return vc.rawValueOffset
+}
+
+// AddressableData returns the block of data that we can dereference into.
+func (vc *ValueContext) AddressableData() io.ReadSeeker {
+
+ // RELEASE(dustin): Rename from AddressableData() to ReadSeeker()
+
+ return vc.rs
+}
+
+// ByteOrder returns the byte-order of numbers.
+func (vc *ValueContext) ByteOrder() binary.ByteOrder {
+ return vc.byteOrder
+}
+
+// IfdPath returns the path of the IFD containing this tag.
+func (vc *ValueContext) IfdPath() string {
+ return vc.ifdPath
+}
+
+// TagId returns the ID of the tag that we represent.
+func (vc *ValueContext) TagId() uint16 {
+ return vc.tagId
+}
+
+// isEmbedded returns whether the value is embedded or a reference. This can't
+// be precalculated since the size is not defined for all types (namely the
+// "undefined" types).
+func (vc *ValueContext) isEmbedded() bool {
+ tagType := vc.effectiveValueType()
+
+ return (tagType.Size() * int(vc.unitCount)) <= 4
+}
+
+// SizeInBytes returns the number of bytes that this value requires. The
+// underlying call will panic if the type is UNDEFINED. It is the
+// responsibility of the caller to preemptively check that.
+func (vc *ValueContext) SizeInBytes() int {
+ tagType := vc.effectiveValueType()
+
+ return tagType.Size() * int(vc.unitCount)
+}
+
+// effectiveValueType returns the effective type of the unknown-type tag or, if
+// not unknown, the actual type.
+func (vc *ValueContext) effectiveValueType() (tagType TagTypePrimitive) {
+ if vc.tagType == TypeUndefined {
+ tagType = vc.undefinedValueTagType
+
+ if tagType == 0 {
+ log.Panicf("undefined-value type not set")
+ }
+ } else {
+ tagType = vc.tagType
+ }
+
+ return tagType
+}
+
+// readRawEncoded returns the encoded bytes for the value that we represent.
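+//
+// Values whose encoded size (unit size times unit-count) is four bytes or
+// less are embedded directly in the tag's offset field and are served from
+// rawValueOffset; anything larger is read from the ReadSeeker at valueOffset.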
+func (vc *ValueContext) readRawEncoded() (rawBytes []byte, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + tagType := vc.effectiveValueType() + + unitSizeRaw := uint32(tagType.Size()) + + if vc.isEmbedded() == true { + byteLength := unitSizeRaw * vc.unitCount + return vc.rawValueOffset[:byteLength], nil + } + + _, err = vc.rs.Seek(int64(vc.valueOffset), io.SeekStart) + log.PanicIf(err) + + rawBytes = make([]byte, vc.unitCount*unitSizeRaw) + + _, err = io.ReadFull(vc.rs, rawBytes) + log.PanicIf(err) + + return rawBytes, nil +} + +// GetFarOffset returns the offset if the value is not embedded [within the +// pointer itself] or an error if an embedded value. +func (vc *ValueContext) GetFarOffset() (offset uint32, err error) { + if vc.isEmbedded() == true { + return 0, ErrNotFarValue + } + + return vc.valueOffset, nil +} + +// ReadRawEncoded returns the encoded bytes for the value that we represent. +func (vc *ValueContext) ReadRawEncoded() (rawBytes []byte, err error) { + + // TODO(dustin): Remove this method and rename readRawEncoded in its place. + + return vc.readRawEncoded() +} + +// Format returns a string representation for the value. +// +// Where the type is not ASCII, `justFirst` indicates whether to just stringify +// the first item in the slice (or return an empty string if the slice is +// empty). +// +// Since this method lacks the information to process undefined-type tags (e.g. +// byte-order, tag-ID, IFD type), it will return an error if attempted. See +// `Undefined()`. +func (vc *ValueContext) Format() (value string, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + rawBytes, err := vc.readRawEncoded() + log.PanicIf(err) + + phrase, err := FormatFromBytes(rawBytes, vc.effectiveValueType(), false, vc.byteOrder) + log.PanicIf(err) + + return phrase, nil +} + +// FormatFirst is similar to `Format` but only gets and stringifies the first +// item. +func (vc *ValueContext) FormatFirst() (value string, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + rawBytes, err := vc.readRawEncoded() + log.PanicIf(err) + + phrase, err := FormatFromBytes(rawBytes, vc.tagType, true, vc.byteOrder) + log.PanicIf(err) + + return phrase, nil +} + +// ReadBytes parses the encoded byte-array from the value-context. +func (vc *ValueContext) ReadBytes() (value []byte, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + rawValue, err := vc.readRawEncoded() + log.PanicIf(err) + + value, err = parser.ParseBytes(rawValue, vc.unitCount) + log.PanicIf(err) + + return value, nil +} + +// ReadAscii parses the encoded NUL-terminated ASCII string from the value- +// context. +func (vc *ValueContext) ReadAscii() (value string, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + rawValue, err := vc.readRawEncoded() + log.PanicIf(err) + + value, err = parser.ParseAscii(rawValue, vc.unitCount) + log.PanicIf(err) + + return value, nil +} + +// ReadAsciiNoNul parses the non-NUL-terminated encoded ASCII string from the +// value-context. 
+func (vc *ValueContext) ReadAsciiNoNul() (value string, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ rawValue, err := vc.readRawEncoded()
+ log.PanicIf(err)
+
+ value, err = parser.ParseAsciiNoNul(rawValue, vc.unitCount)
+ log.PanicIf(err)
+
+ return value, nil
+}
+
+// ReadShorts parses the list of encoded shorts from the value-context.
+func (vc *ValueContext) ReadShorts() (value []uint16, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ rawValue, err := vc.readRawEncoded()
+ log.PanicIf(err)
+
+ value, err = parser.ParseShorts(rawValue, vc.unitCount, vc.byteOrder)
+ log.PanicIf(err)
+
+ return value, nil
+}
+
+// ReadLongs parses the list of encoded, unsigned longs from the value-context.
+func (vc *ValueContext) ReadLongs() (value []uint32, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ rawValue, err := vc.readRawEncoded()
+ log.PanicIf(err)
+
+ value, err = parser.ParseLongs(rawValue, vc.unitCount, vc.byteOrder)
+ log.PanicIf(err)
+
+ return value, nil
+}
+
+// ReadFloats parses the list of encoded floats from the value-context.
+func (vc *ValueContext) ReadFloats() (value []float32, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ rawValue, err := vc.readRawEncoded()
+ log.PanicIf(err)
+
+ value, err = parser.ParseFloats(rawValue, vc.unitCount, vc.byteOrder)
+ log.PanicIf(err)
+
+ return value, nil
+}
+
+// ReadDoubles parses the list of encoded doubles from the value-context.
+func (vc *ValueContext) ReadDoubles() (value []float64, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ rawValue, err := vc.readRawEncoded()
+ log.PanicIf(err)
+
+ value, err = parser.ParseDoubles(rawValue, vc.unitCount, vc.byteOrder)
+ log.PanicIf(err)
+
+ return value, nil
+}
+
+// ReadRationals parses the list of encoded, unsigned rationals from the value-
+// context.
+func (vc *ValueContext) ReadRationals() (value []Rational, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ rawValue, err := vc.readRawEncoded()
+ log.PanicIf(err)
+
+ value, err = parser.ParseRationals(rawValue, vc.unitCount, vc.byteOrder)
+ log.PanicIf(err)
+
+ return value, nil
+}
+
+// ReadSignedLongs parses the list of encoded, signed longs from the value-context.
+func (vc *ValueContext) ReadSignedLongs() (value []int32, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ rawValue, err := vc.readRawEncoded()
+ log.PanicIf(err)
+
+ value, err = parser.ParseSignedLongs(rawValue, vc.unitCount, vc.byteOrder)
+ log.PanicIf(err)
+
+ return value, nil
+}
+
+// ReadSignedRationals parses the list of encoded, signed rationals from the
+// value-context.
+func (vc *ValueContext) ReadSignedRationals() (value []SignedRational, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ rawValue, err := vc.readRawEncoded()
+ log.PanicIf(err)
+
+ value, err = parser.ParseSignedRationals(rawValue, vc.unitCount, vc.byteOrder)
+ log.PanicIf(err)
+
+ return value, nil
+}
+
+// Values knows how to resolve the given value. This value is always a list
+// (undefined-values aside), so we're named accordingly.
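+//
+// For example, a tag of type SHORT with a unit-count of three resolves to a
+// []uint16 with three elements.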
+// +// Since this method lacks the information to process unknown-type tags (e.g. +// byte-order, tag-ID, IFD type), it will return an error if attempted. See +// `Undefined()`. +func (vc *ValueContext) Values() (values interface{}, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if vc.tagType == TypeByte { + values, err = vc.ReadBytes() + log.PanicIf(err) + } else if vc.tagType == TypeAscii { + values, err = vc.ReadAscii() + log.PanicIf(err) + } else if vc.tagType == TypeAsciiNoNul { + values, err = vc.ReadAsciiNoNul() + log.PanicIf(err) + } else if vc.tagType == TypeShort { + values, err = vc.ReadShorts() + log.PanicIf(err) + } else if vc.tagType == TypeLong { + values, err = vc.ReadLongs() + log.PanicIf(err) + } else if vc.tagType == TypeRational { + values, err = vc.ReadRationals() + log.PanicIf(err) + } else if vc.tagType == TypeSignedLong { + values, err = vc.ReadSignedLongs() + log.PanicIf(err) + } else if vc.tagType == TypeSignedRational { + values, err = vc.ReadSignedRationals() + log.PanicIf(err) + } else if vc.tagType == TypeFloat { + values, err = vc.ReadFloats() + log.PanicIf(err) + } else if vc.tagType == TypeDouble { + values, err = vc.ReadDoubles() + log.PanicIf(err) + } else if vc.tagType == TypeUndefined { + log.Panicf("will not parse undefined-type value") + + // Never called. + return nil, nil + } else { + log.Panicf("value of type [%s] is unparseable", vc.tagType) + // Never called. + return nil, nil + } + + return values, nil +} + +func init() { + parser = new(Parser) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go b/vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go new file mode 100644 index 000000000..2cd26cc7b --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go @@ -0,0 +1,273 @@ +package exifcommon + +import ( + "bytes" + "math" + "reflect" + "time" + + "encoding/binary" + + "github.com/dsoprea/go-logging" +) + +var ( + typeEncodeLogger = log.NewLogger("exif.type_encode") +) + +// EncodedData encapsulates the compound output of an encoding operation. +type EncodedData struct { + Type TagTypePrimitive + Encoded []byte + + // TODO(dustin): Is this really necessary? We might have this just to correlate to the incoming stream format (raw bytes and a unit-count both for incoming and outgoing). + UnitCount uint32 +} + +// ValueEncoder knows how to encode values of every type to bytes. +type ValueEncoder struct { + byteOrder binary.ByteOrder +} + +// NewValueEncoder returns a new ValueEncoder. +func NewValueEncoder(byteOrder binary.ByteOrder) *ValueEncoder { + return &ValueEncoder{ + byteOrder: byteOrder, + } +} + +func (ve *ValueEncoder) encodeBytes(value []uint8) (ed EncodedData, err error) { + ed.Type = TypeByte + ed.Encoded = []byte(value) + ed.UnitCount = uint32(len(value)) + + return ed, nil +} + +func (ve *ValueEncoder) encodeAscii(value string) (ed EncodedData, err error) { + ed.Type = TypeAscii + + ed.Encoded = []byte(value) + ed.Encoded = append(ed.Encoded, 0) + + ed.UnitCount = uint32(len(ed.Encoded)) + + return ed, nil +} + +// encodeAsciiNoNul returns a string encoded as a byte-string without a trailing +// NUL byte. +// +// Note that: +// +// 1. This type can not be automatically encoded using `Encode()`. The default +// mode is to encode *with* a trailing NUL byte using `encodeAscii`. Only +// certain undefined-type tags using an unterminated ASCII string and these +// are exceptional in nature. +// +// 2. 
The presence of this method allows us to completely test the complementary
+// no-nul parser.
+//
+func (ve *ValueEncoder) encodeAsciiNoNul(value string) (ed EncodedData, err error) {
+ ed.Type = TypeAsciiNoNul
+ ed.Encoded = []byte(value)
+ ed.UnitCount = uint32(len(ed.Encoded))
+
+ return ed, nil
+}
+
+func (ve *ValueEncoder) encodeShorts(value []uint16) (ed EncodedData, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ ed.UnitCount = uint32(len(value))
+ ed.Encoded = make([]byte, ed.UnitCount*2)
+
+ for i := uint32(0); i < ed.UnitCount; i++ {
+ ve.byteOrder.PutUint16(ed.Encoded[i*2:(i+1)*2], value[i])
+ }
+
+ ed.Type = TypeShort
+
+ return ed, nil
+}
+
+func (ve *ValueEncoder) encodeLongs(value []uint32) (ed EncodedData, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ ed.UnitCount = uint32(len(value))
+ ed.Encoded = make([]byte, ed.UnitCount*4)
+
+ for i := uint32(0); i < ed.UnitCount; i++ {
+ ve.byteOrder.PutUint32(ed.Encoded[i*4:(i+1)*4], value[i])
+ }
+
+ ed.Type = TypeLong
+
+ return ed, nil
+}
+
+func (ve *ValueEncoder) encodeFloats(value []float32) (ed EncodedData, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ ed.UnitCount = uint32(len(value))
+ ed.Encoded = make([]byte, ed.UnitCount*4)
+
+ for i := uint32(0); i < ed.UnitCount; i++ {
+ ve.byteOrder.PutUint32(ed.Encoded[i*4:(i+1)*4], math.Float32bits(value[i]))
+ }
+
+ ed.Type = TypeFloat
+
+ return ed, nil
+}
+
+func (ve *ValueEncoder) encodeDoubles(value []float64) (ed EncodedData, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ ed.UnitCount = uint32(len(value))
+ ed.Encoded = make([]byte, ed.UnitCount*8)
+
+ for i := uint32(0); i < ed.UnitCount; i++ {
+ ve.byteOrder.PutUint64(ed.Encoded[i*8:(i+1)*8], math.Float64bits(value[i]))
+ }
+
+ ed.Type = TypeDouble
+
+ return ed, nil
+}
+
+func (ve *ValueEncoder) encodeRationals(value []Rational) (ed EncodedData, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ ed.UnitCount = uint32(len(value))
+ ed.Encoded = make([]byte, ed.UnitCount*8)
+
+ for i := uint32(0); i < ed.UnitCount; i++ {
+ ve.byteOrder.PutUint32(ed.Encoded[i*8+0:i*8+4], value[i].Numerator)
+ ve.byteOrder.PutUint32(ed.Encoded[i*8+4:i*8+8], value[i].Denominator)
+ }
+
+ ed.Type = TypeRational
+
+ return ed, nil
+}
+
+func (ve *ValueEncoder) encodeSignedLongs(value []int32) (ed EncodedData, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ ed.UnitCount = uint32(len(value))
+
+ b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount))
+
+ for i := uint32(0); i < ed.UnitCount; i++ {
+ err := binary.Write(b, ve.byteOrder, value[i])
+ log.PanicIf(err)
+ }
+
+ ed.Type = TypeSignedLong
+ ed.Encoded = b.Bytes()
+
+ return ed, nil
+}
+
+func (ve *ValueEncoder) encodeSignedRationals(value []SignedRational) (ed EncodedData, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ ed.UnitCount = uint32(len(value))
+
+ b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount))
+
+ for i := uint32(0); i < ed.UnitCount; i++ {
+ err := binary.Write(b, ve.byteOrder, value[i].Numerator)
+ log.PanicIf(err)
+
+ err = binary.Write(b, ve.byteOrder, value[i].Denominator)
+ log.PanicIf(err)
+ }
+
+ 
ed.Type = TypeSignedRational
+ ed.Encoded = b.Bytes()
+
+ return ed, nil
+}
+
+// Encode returns bytes for the given value, inferring type from the actual
+// value. This does not support `TypeAsciiNoNul` (all strings are encoded as
+// `TypeAscii`).
+func (ve *ValueEncoder) Encode(value interface{}) (ed EncodedData, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ switch t := value.(type) {
+ case []byte:
+ ed, err = ve.encodeBytes(t)
+ log.PanicIf(err)
+ case string:
+ ed, err = ve.encodeAscii(t)
+ log.PanicIf(err)
+ case []uint16:
+ ed, err = ve.encodeShorts(t)
+ log.PanicIf(err)
+ case []uint32:
+ ed, err = ve.encodeLongs(t)
+ log.PanicIf(err)
+ case []float32:
+ ed, err = ve.encodeFloats(t)
+ log.PanicIf(err)
+ case []float64:
+ ed, err = ve.encodeDoubles(t)
+ log.PanicIf(err)
+ case []Rational:
+ ed, err = ve.encodeRationals(t)
+ log.PanicIf(err)
+ case []int32:
+ ed, err = ve.encodeSignedLongs(t)
+ log.PanicIf(err)
+ case []SignedRational:
+ ed, err = ve.encodeSignedRationals(t)
+ log.PanicIf(err)
+ case time.Time:
+ // For convenience, so the user doesn't have to deal with translation
+ // semantics for timestamps.
+
+ s := ExifFullTimestampString(t)
+
+ ed, err = ve.encodeAscii(s)
+ log.PanicIf(err)
+ default:
+ log.Panicf("value not encodable: [%s] [%v]", reflect.TypeOf(value), value)
+ }
+
+ return ed, nil
+}
diff --git a/vendor/github.com/dsoprea/go-exif/v3/data_layer.go b/vendor/github.com/dsoprea/go-exif/v3/data_layer.go
new file mode 100644
index 000000000..7883752cc
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-exif/v3/data_layer.go
@@ -0,0 +1,50 @@
+package exif
+
+import (
+ "io"
+
+ "github.com/dsoprea/go-logging"
+ "github.com/dsoprea/go-utility/v2/filesystem"
+)
+
+type ExifBlobSeeker interface {
+ GetReadSeeker(initialOffset int64) (rs io.ReadSeeker, err error)
+}
+
+// ExifReadSeeker knows how to retrieve data from the EXIF blob relative to the
+// beginning of the blob (so, absolute position (0) is the first byte of the
+// EXIF data).
+type ExifReadSeeker struct {
+ rs io.ReadSeeker
+}
+
+func NewExifReadSeeker(rs io.ReadSeeker) *ExifReadSeeker {
+ return &ExifReadSeeker{
+ rs: rs,
+ }
+}
+
+func NewExifReadSeekerWithBytes(exifData []byte) *ExifReadSeeker {
+ sb := rifs.NewSeekableBufferWithBytes(exifData)
+ edbs := NewExifReadSeeker(sb)
+
+ return edbs
+}
+
+// GetReadSeeker creates a new ReadSeeker instance that wraps a BouncebackReader
+// to maintain its own position in the stream.
+func (edbs *ExifReadSeeker) GetReadSeeker(initialOffset int64) (rs io.ReadSeeker, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ br, err := rifs.NewBouncebackReader(edbs.rs)
+ log.PanicIf(err)
+
+ _, err = br.Seek(initialOffset, io.SeekStart)
+ log.PanicIf(err)
+
+ return br, nil
+}
diff --git a/vendor/github.com/dsoprea/go-exif/v3/error.go b/vendor/github.com/dsoprea/go-exif/v3/error.go
new file mode 100644
index 000000000..2f00b08a4
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-exif/v3/error.go
@@ -0,0 +1,14 @@
+package exif
+
+import (
+ "errors"
+)
+
+var (
+ // ErrTagNotFound indicates that the tag was not found.
+ ErrTagNotFound = errors.New("tag not found")
+
+ // ErrTagNotKnown indicates that the tag is not registered with us as a
+ // known tag.
+ ErrTagNotKnown = errors.New("tag is not known")
+)
diff --git a/vendor/github.com/dsoprea/go-exif/v3/exif.go b/vendor/github.com/dsoprea/go-exif/v3/exif.go
new file mode 100644
index 000000000..f66e839d9
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-exif/v3/exif.go
@@ -0,0 +1,333 @@
+package exif
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ "encoding/binary"
+ "io/ioutil"
+
+ "github.com/dsoprea/go-logging"
+
+ "github.com/dsoprea/go-exif/v3/common"
+)
+
+const (
+ // ExifAddressableAreaStart is the absolute offset in the file that all
+ // offsets are relative to.
+ ExifAddressableAreaStart = uint32(0x0)
+
+ // ExifDefaultFirstIfdOffset is essentially the number of bytes in addition
+ // to `ExifAddressableAreaStart` that you have to move in order to escape
+ // the rest of the header and get to the earliest point where we can put
+ // stuff (which has to be the first IFD). This is the size of the header
+ // sequence containing the two-character byte-order, two-character fixed-
+ // bytes, and the four bytes describing the first-IFD offset.
+ ExifDefaultFirstIfdOffset = uint32(2 + 2 + 4)
+)
+
+const (
+ // ExifSignatureLength is the number of bytes in the EXIF signature (which
+ // customarily includes the first IFD offset).
+ ExifSignatureLength = 8
+)
+
+var (
+ exifLogger = log.NewLogger("exif.exif")
+
+ ExifBigEndianSignature = [4]byte{'M', 'M', 0x00, 0x2a}
+ ExifLittleEndianSignature = [4]byte{'I', 'I', 0x2a, 0x00}
+)
+
+var (
+ ErrNoExif = errors.New("no exif data")
+ ErrExifHeaderError = errors.New("exif header error")
+)
+
+// SearchAndExtractExif searches for an EXIF blob in the byte-slice.
+func SearchAndExtractExif(data []byte) (rawExif []byte, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ b := bytes.NewBuffer(data)
+
+ rawExif, err = SearchAndExtractExifWithReader(b)
+ if err != nil {
+ if err == ErrNoExif {
+ return nil, err
+ }
+
+ log.Panic(err)
+ }
+
+ return rawExif, nil
+}
+
+// SearchAndExtractExifN searches for an EXIF blob in the byte-slice, but skips
+// the given number of EXIF blocks first. This is a forensics tool that helps
+// identify multiple EXIF blocks in a file.
+func SearchAndExtractExifN(data []byte, n int) (rawExif []byte, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ skips := 0
+ totalDiscarded := 0
+ for {
+ b := bytes.NewBuffer(data)
+
+ var discarded int
+
+ rawExif, discarded, err = searchAndExtractExifWithReaderWithDiscarded(b)
+ if err != nil {
+ if err == ErrNoExif {
+ return nil, err
+ }
+
+ log.Panic(err)
+ }
+
+ exifLogger.Debugf(nil, "Read EXIF block (%d).", skips)
+
+ totalDiscarded += discarded
+
+ if skips >= n {
+ exifLogger.Debugf(nil, "Reached requested EXIF block (%d).", n)
+ break
+ }
+
+ nextOffset := discarded + 1
+ exifLogger.Debugf(nil, "Skipping EXIF block (%d) by seeking to position (%d).", skips, nextOffset)
+
+ data = data[nextOffset:]
+ skips++
+ }
+
+ exifLogger.Debugf(nil, "Found EXIF blob (%d) bytes from initial position.", totalDiscarded)
+ return rawExif, nil
+}
+
+// searchAndExtractExifWithReaderWithDiscarded searches for an EXIF blob using
+// an `io.Reader`. We can't know how long the EXIF data is without parsing
+// it, so this will likely grab a lot of the image-data, too.
+//
+// This function returns the count of preceding bytes.
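+//
+// The scan advances through the stream one byte at a time until an
+// eight-byte window parses as a valid EXIF header.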
+func searchAndExtractExifWithReaderWithDiscarded(r io.Reader) (rawExif []byte, discarded int, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // Search for the beginning of the EXIF information. The EXIF is near the
+ // beginning of most JPEGs, so this likely doesn't have a high cost (at
+ // least, again, with JPEGs).
+
+ br := bufio.NewReader(r)
+
+ for {
+ window, err := br.Peek(ExifSignatureLength)
+ if err != nil {
+ if err == io.EOF {
+ return nil, 0, ErrNoExif
+ }
+
+ log.Panic(err)
+ }
+
+ _, err = ParseExifHeader(window)
+ if err != nil {
+ if log.Is(err, ErrNoExif) == true {
+ // No EXIF. Move forward by one byte.
+
+ _, err := br.Discard(1)
+ log.PanicIf(err)
+
+ discarded++
+
+ continue
+ }
+
+ // Some other error.
+ log.Panic(err)
+ }
+
+ break
+ }
+
+ exifLogger.Debugf(nil, "Found EXIF blob (%d) bytes from initial position.", discarded)
+
+ rawExif, err = ioutil.ReadAll(br)
+ log.PanicIf(err)
+
+ return rawExif, discarded, nil
+}
+
+// RELEASE(dustin): We should replace the implementation of SearchAndExtractExifWithReader with searchAndExtractExifWithReaderWithDiscarded and drop the latter.
+
+// SearchAndExtractExifWithReader searches for an EXIF blob using an
+// `io.Reader`. We can't know how long the EXIF data is without parsing it,
+// so this will likely grab a lot of the image-data, too.
+func SearchAndExtractExifWithReader(r io.Reader) (rawExif []byte, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ rawExif, _, err = searchAndExtractExifWithReaderWithDiscarded(r)
+ if err != nil {
+ if err == ErrNoExif {
+ return nil, err
+ }
+
+ log.Panic(err)
+ }
+
+ return rawExif, nil
+}
+
+// SearchFileAndExtractExif returns a slice from the beginning of the EXIF data
+// to the end of the file (it's not practical to try and calculate where the
+// data actually ends).
+func SearchFileAndExtractExif(filepath string) (rawExif []byte, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ // Open the file.
+
+ f, err := os.Open(filepath)
+ log.PanicIf(err)
+
+ defer f.Close()
+
+ rawExif, err = SearchAndExtractExifWithReader(f)
+ log.PanicIf(err)
+
+ return rawExif, nil
+}
+
+type ExifHeader struct {
+ ByteOrder binary.ByteOrder
+ FirstIfdOffset uint32
+}
+
+func (eh ExifHeader) String() string {
+ return fmt.Sprintf("ExifHeader<BYTE-ORDER=[%v] FIRST-IFD-OFFSET=(0x%02x)>", eh.ByteOrder, eh.FirstIfdOffset)
+}
+
+// ParseExifHeader parses the bytes at the very top of the header.
+//
+// This will panic with ErrNoExif on any data errors so that we can double as
+// an EXIF-detection routine.
+func ParseExifHeader(data []byte) (eh ExifHeader, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // Good reference: + // + // CIPA DC-008-2016; JEITA CP-3451D + // -> http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf + + if len(data) < ExifSignatureLength { + exifLogger.Warningf(nil, "Not enough data for EXIF header: (%d)", len(data)) + return eh, ErrNoExif + } + + if bytes.Equal(data[:4], ExifBigEndianSignature[:]) == true { + exifLogger.Debugf(nil, "Byte-order is big-endian.") + eh.ByteOrder = binary.BigEndian + } else if bytes.Equal(data[:4], ExifLittleEndianSignature[:]) == true { + eh.ByteOrder = binary.LittleEndian + exifLogger.Debugf(nil, "Byte-order is little-endian.") + } else { + return eh, ErrNoExif + } + + eh.FirstIfdOffset = eh.ByteOrder.Uint32(data[4:8]) + + return eh, nil +} + +// Visit recursively invokes a callback for every tag. +func Visit(rootIfdIdentity *exifcommon.IfdIdentity, ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, exifData []byte, visitor TagVisitorFn, so *ScanOptions) (eh ExifHeader, furthestOffset uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + eh, err = ParseExifHeader(exifData) + log.PanicIf(err) + + ebs := NewExifReadSeekerWithBytes(exifData) + ie := NewIfdEnumerate(ifdMapping, tagIndex, ebs, eh.ByteOrder) + + _, err = ie.Scan(rootIfdIdentity, eh.FirstIfdOffset, visitor, so) + log.PanicIf(err) + + furthestOffset = ie.FurthestOffset() + + return eh, furthestOffset, nil +} + +// Collect recursively builds a static structure of all IFDs and tags. +func Collect(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, exifData []byte) (eh ExifHeader, index IfdIndex, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + eh, err = ParseExifHeader(exifData) + log.PanicIf(err) + + ebs := NewExifReadSeekerWithBytes(exifData) + ie := NewIfdEnumerate(ifdMapping, tagIndex, ebs, eh.ByteOrder) + + index, err = ie.Collect(eh.FirstIfdOffset) + log.PanicIf(err) + + return eh, index, nil +} + +// BuildExifHeader constructs the bytes that go at the front of the stream. +func BuildExifHeader(byteOrder binary.ByteOrder, firstIfdOffset uint32) (headerBytes []byte, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + b := new(bytes.Buffer) + + var signatureBytes []byte + if byteOrder == binary.BigEndian { + signatureBytes = ExifBigEndianSignature[:] + } else { + signatureBytes = ExifLittleEndianSignature[:] + } + + _, err = b.Write(signatureBytes) + log.PanicIf(err) + + err = binary.Write(b, byteOrder, firstIfdOffset) + log.PanicIf(err) + + return b.Bytes(), nil +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/gps.go b/vendor/github.com/dsoprea/go-exif/v3/gps.go new file mode 100644 index 000000000..7a61cd94d --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/gps.go @@ -0,0 +1,117 @@ +package exif + +import ( + "errors" + "fmt" + "time" + + "github.com/dsoprea/go-logging" + "github.com/golang/geo/s2" + + "github.com/dsoprea/go-exif/v3/common" +) + +var ( + // ErrGpsCoordinatesNotValid means that some part of the geographic data was + // unparseable. + ErrGpsCoordinatesNotValid = errors.New("GPS coordinates not valid") +) + +// GpsDegrees is a high-level struct representing geographic data. 
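+//
+// For example, a position of 40 degrees, 26 minutes, 46 seconds north is
+// represented with Degrees=40, Minutes=26, Seconds=46, and Orientation='N',
+// and converts to a decimal value of approximately 40.4461 (see Decimal).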
+type GpsDegrees struct {
+ // Orientation describes the N/E/S/W direction that this position is
+ // relative to.
+ Orientation byte
+
+ // Degrees is a simple float representing the underlying rational degrees
+ // amount.
+ Degrees float64
+
+ // Minutes is a simple float representing the underlying rational minutes
+ // amount.
+ Minutes float64
+
+ // Seconds is a simple float representing the underlying rational seconds
+ // amount.
+ Seconds float64
+}
+
+// NewGpsDegreesFromRationals returns a GpsDegrees struct given the EXIF-encoded
+// information. The refValue is the N/E/S/W direction that this position is
+// relative to.
+func NewGpsDegreesFromRationals(refValue string, rawCoordinate []exifcommon.Rational) (gd GpsDegrees, err error) {
+ defer func() {
+ if state := recover(); state != nil {
+ err = log.Wrap(state.(error))
+ }
+ }()
+
+ if len(rawCoordinate) != 3 {
+ log.Panicf("new GpsDegrees struct requires a raw-coordinate with exactly three rationals")
+ }
+
+ gd = GpsDegrees{
+ Orientation: refValue[0],
+ Degrees: float64(rawCoordinate[0].Numerator) / float64(rawCoordinate[0].Denominator),
+ Minutes: float64(rawCoordinate[1].Numerator) / float64(rawCoordinate[1].Denominator),
+ Seconds: float64(rawCoordinate[2].Numerator) / float64(rawCoordinate[2].Denominator),
+ }
+
+ return gd, nil
+}
+
+// String returns a descriptive string.
+func (d GpsDegrees) String() string {
+ return fmt.Sprintf("Degrees<O=[%s] D=(%g) M=(%g) S=(%g)>", string([]byte{d.Orientation}), d.Degrees, d.Minutes, d.Seconds)
+}
+
+// Decimal calculates and returns the simplified float representation of the
+// component degrees.
+func (d GpsDegrees) Decimal() float64 {
+ decimal := float64(d.Degrees) + float64(d.Minutes)/60.0 + float64(d.Seconds)/3600.0
+
+ if d.Orientation == 'S' || d.Orientation == 'W' {
+ return -decimal
+ }
+
+ return decimal
+}
+
+// Raw returns a Rational struct that can be used to *write* coordinates. In
+// practice, the denominators are typically (1) in the original EXIF data, and,
+// that being the case, this will best preserve precision.
+func (d GpsDegrees) Raw() []exifcommon.Rational {
+ return []exifcommon.Rational{
+ {Numerator: uint32(d.Degrees), Denominator: 1},
+ {Numerator: uint32(d.Minutes), Denominator: 1},
+ {Numerator: uint32(d.Seconds), Denominator: 1},
+ }
+}
+
+// GpsInfo encapsulates all of the geographic information in one place.
+type GpsInfo struct {
+ Latitude, Longitude GpsDegrees
+ Altitude int
+ Timestamp time.Time
+}
+
+// String returns a descriptive string.
+func (gi *GpsInfo) String() string {
+ return fmt.Sprintf("GpsInfo<LAT=(%.05f) LON=(%.05f) ALT=(%d) TIME=[%s]>",
+ gi.Latitude.Decimal(), gi.Longitude.Decimal(), gi.Altitude, gi.Timestamp)
+}
+
+// S2CellId returns the cell-ID of the geographic location on the earth.
+func (gi *GpsInfo) S2CellId() s2.CellID {
+ latitude := gi.Latitude.Decimal()
+ longitude := gi.Longitude.Decimal()
+
+ ll := s2.LatLngFromDegrees(latitude, longitude)
+ cellId := s2.CellIDFromLatLng(ll)
+
+ if cellId.IsValid() == false {
+ panic(ErrGpsCoordinatesNotValid)
+ }
+
+ return cellId
+}
diff --git a/vendor/github.com/dsoprea/go-exif/v3/ifd_builder.go b/vendor/github.com/dsoprea/go-exif/v3/ifd_builder.go
new file mode 100644
index 000000000..a404b362a
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-exif/v3/ifd_builder.go
@@ -0,0 +1,1199 @@
+package exif
+
+// NOTES:
+//
+// The thumbnail offset and length tags shouldn't be set directly. Use the
+// (*IfdBuilder).SetThumbnail() method instead.
+
+import (
+    "errors"
+    "fmt"
+    "strings"
+
+    "encoding/binary"
+
+    "github.com/dsoprea/go-logging"
+
+    "github.com/dsoprea/go-exif/v3/common"
+    "github.com/dsoprea/go-exif/v3/undefined"
+)
+
+var (
+    ifdBuilderLogger = log.NewLogger("exif.ifd_builder")
+)
+
+var (
+    ErrTagEntryNotFound = errors.New("tag entry not found")
+    ErrChildIbNotFound  = errors.New("child IB not found")
+)
+
+type IfdBuilderTagValue struct {
+    valueBytes []byte
+    ib         *IfdBuilder
+}
+
+func (ibtv IfdBuilderTagValue) String() string {
+    if ibtv.IsBytes() == true {
+        var valuePhrase string
+        if len(ibtv.valueBytes) <= 8 {
+            valuePhrase = fmt.Sprintf("%v", ibtv.valueBytes)
+        } else {
+            valuePhrase = fmt.Sprintf("%v...", ibtv.valueBytes[:8])
+        }
+
+        return fmt.Sprintf("IfdBuilderTagValue<BYTES=%v LEN=(%d)>", valuePhrase, len(ibtv.valueBytes))
+    } else if ibtv.IsIb() == true {
+        return fmt.Sprintf("IfdBuilderTagValue<IB=%s>", ibtv.ib)
+    } else {
+        log.Panicf("IBTV state undefined")
+        return ""
+    }
+}
+
+func NewIfdBuilderTagValueFromBytes(valueBytes []byte) *IfdBuilderTagValue {
+    return &IfdBuilderTagValue{
+        valueBytes: valueBytes,
+    }
+}
+
+func NewIfdBuilderTagValueFromIfdBuilder(ib *IfdBuilder) *IfdBuilderTagValue {
+    return &IfdBuilderTagValue{
+        ib: ib,
+    }
+}
+
+// IsBytes returns true if the bytes are populated. This is always the case
+// when we're loaded from a tag in an existing IFD.
+func (ibtv IfdBuilderTagValue) IsBytes() bool {
+    return ibtv.valueBytes != nil
+}
+
+func (ibtv IfdBuilderTagValue) Bytes() []byte {
+    if ibtv.IsBytes() == false {
+        log.Panicf("this tag is not a byte-slice value")
+    } else if ibtv.IsIb() == true {
+        log.Panicf("this tag is an IFD-builder value not a byte-slice")
+    }
+
+    return ibtv.valueBytes
+}
+
+func (ibtv IfdBuilderTagValue) IsIb() bool {
+    return ibtv.ib != nil
+}
+
+func (ibtv IfdBuilderTagValue) Ib() *IfdBuilder {
+    if ibtv.IsIb() == false {
+        log.Panicf("this tag is not an IFD-builder value")
+    } else if ibtv.IsBytes() == true {
+        log.Panicf("this tag is a byte-slice, not an IFD-builder")
+    }
+
+    return ibtv.ib
+}
+
+type BuilderTag struct {
+    // ifdPath is the path of the IFD that hosts this tag.
+    ifdPath string
+
+    tagId  uint16
+    typeId exifcommon.TagTypePrimitive
+
+    // value is either a value that can be encoded, an IfdBuilder instance (for
+    // child IFDs), or an IfdTagEntry instance representing an existing,
+    // previously-stored tag.
+    value *IfdBuilderTagValue
+
+    // byteOrder is the byte order. It's chiefly/originally here to support
+    // printing the value.
+    byteOrder binary.ByteOrder
+}
+
+func NewBuilderTag(ifdPath string, tagId uint16, typeId exifcommon.TagTypePrimitive, value *IfdBuilderTagValue, byteOrder binary.ByteOrder) *BuilderTag {
+    return &BuilderTag{
+        ifdPath:   ifdPath,
+        tagId:     tagId,
+        typeId:    typeId,
+        value:     value,
+        byteOrder: byteOrder,
+    }
+}
+
+func NewChildIfdBuilderTag(ifdPath string, tagId uint16, value *IfdBuilderTagValue) *BuilderTag {
+    return &BuilderTag{
+        ifdPath: ifdPath,
+        tagId:   tagId,
+        typeId:  exifcommon.TypeLong,
+        value:   value,
+    }
+}
+
+func (bt *BuilderTag) Value() (value *IfdBuilderTagValue) {
+    return bt.value
+}
+
+func (bt *BuilderTag) String() string {
+    var valueString string
+
+    if bt.value.IsBytes() == true {
+        var err error
+
+        valueString, err = exifcommon.FormatFromBytes(bt.value.Bytes(), bt.typeId, false, bt.byteOrder)
+        log.PanicIf(err)
+    } else {
+        valueString = fmt.Sprintf("%v", bt.value)
+    }
+
+    return fmt.Sprintf("BuilderTag<IFD-PATH=[%s] TAG-ID=(0x%04x) TAG-TYPE=[%s] VALUE=[%s]>", bt.ifdPath, bt.tagId, bt.typeId.String(), valueString)
+}
+
+func (bt *BuilderTag) SetValue(byteOrder binary.ByteOrder, value interface{}) (err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    // TODO(dustin): !! Add test.
+
+    var ed exifcommon.EncodedData
+    if bt.typeId == exifcommon.TypeUndefined {
+        encodeable := value.(exifundefined.EncodeableValue)
+
+        encoded, unitCount, err := exifundefined.Encode(encodeable, byteOrder)
+        log.PanicIf(err)
+
+        ed = exifcommon.EncodedData{
+            Type:      exifcommon.TypeUndefined,
+            Encoded:   encoded,
+            UnitCount: unitCount,
+        }
+    } else {
+        ve := exifcommon.NewValueEncoder(byteOrder)
+
+        var err error
+
+        ed, err = ve.Encode(value)
+        log.PanicIf(err)
+    }
+
+    bt.value = NewIfdBuilderTagValueFromBytes(ed.Encoded)
+
+    return nil
+}
+
+// NewStandardBuilderTag constructs a `BuilderTag` instance. The type is looked
+// up. `ifdPath` is the IFD that owns this tag.
+func NewStandardBuilderTag(ifdPath string, it *IndexedTag, byteOrder binary.ByteOrder, value interface{}) *BuilderTag {
+    // If there is more than one supported type, we'll go with the larger to
+    // encode with. It'll use the same amount of fixed-space, and we'll
+    // eliminate unnecessary overflows/issues.
+    tagType := it.GetEncodingType(value)
+
+    var rawBytes []byte
+    if it.DoesSupportType(exifcommon.TypeUndefined) == true {
+        encodeable := value.(exifundefined.EncodeableValue)
+
+        var err error
+
+        rawBytes, _, err = exifundefined.Encode(encodeable, byteOrder)
+        log.PanicIf(err)
+    } else {
+        ve := exifcommon.NewValueEncoder(byteOrder)
+
+        ed, err := ve.Encode(value)
+        log.PanicIf(err)
+
+        rawBytes = ed.Encoded
+    }
+
+    tagValue := NewIfdBuilderTagValueFromBytes(rawBytes)
+
+    return NewBuilderTag(
+        ifdPath,
+        it.Id,
+        tagType,
+        tagValue,
+        byteOrder)
+}
+
+type IfdBuilder struct {
+    ifdIdentity *exifcommon.IfdIdentity
+
+    byteOrder binary.ByteOrder
+
+    // Includes both normal tags and IFD tags (which point to child IFDs).
+    // TODO(dustin): Keep a separate list of children like with `Ifd`.
+    // TODO(dustin): Either rename this or `Entries` in `Ifd` to be the same thing.
+    tags []*BuilderTag
+
+    // existingOffset will be the offset that this IFD is currently found at if
+    // it represents an IFD that has previously been stored (or 0 if not).
+    existingOffset uint32
+
+    // nextIb represents the next link if we're chaining to another.
+    nextIb *IfdBuilder
+
+    // thumbnailData is populated with thumbnail data if there was thumbnail
+    // data. Otherwise, it's nil.
+ thumbnailData []byte + + ifdMapping *exifcommon.IfdMapping + tagIndex *TagIndex +} + +func NewIfdBuilder(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, ii *exifcommon.IfdIdentity, byteOrder binary.ByteOrder) (ib *IfdBuilder) { + ib = &IfdBuilder{ + ifdIdentity: ii, + + byteOrder: byteOrder, + tags: make([]*BuilderTag, 0), + + ifdMapping: ifdMapping, + tagIndex: tagIndex, + } + + return ib +} + +// NewIfdBuilderWithExistingIfd creates a new IB using the same header type +// information as the given IFD. +func NewIfdBuilderWithExistingIfd(ifd *Ifd) (ib *IfdBuilder) { + ib = &IfdBuilder{ + ifdIdentity: ifd.IfdIdentity(), + + byteOrder: ifd.ByteOrder(), + existingOffset: ifd.Offset(), + ifdMapping: ifd.ifdMapping, + tagIndex: ifd.tagIndex, + } + + return ib +} + +// NewIfdBuilderFromExistingChain creates a chain of IB instances from an +// IFD chain generated from real data. +func NewIfdBuilderFromExistingChain(rootIfd *Ifd) (firstIb *IfdBuilder) { + var lastIb *IfdBuilder + i := 0 + for thisExistingIfd := rootIfd; thisExistingIfd != nil; thisExistingIfd = thisExistingIfd.nextIfd { + newIb := NewIfdBuilder( + rootIfd.ifdMapping, + rootIfd.tagIndex, + rootIfd.ifdIdentity, + thisExistingIfd.ByteOrder()) + + if firstIb == nil { + firstIb = newIb + } else { + lastIb.SetNextIb(newIb) + } + + err := newIb.AddTagsFromExisting(thisExistingIfd, nil, nil) + log.PanicIf(err) + + lastIb = newIb + i++ + } + + return firstIb +} + +func (ib *IfdBuilder) IfdIdentity() *exifcommon.IfdIdentity { + return ib.ifdIdentity +} + +func (ib *IfdBuilder) NextIb() (nextIb *IfdBuilder, err error) { + return ib.nextIb, nil +} + +func (ib *IfdBuilder) ChildWithTagId(childIfdTagId uint16) (childIb *IfdBuilder, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + for _, bt := range ib.tags { + if bt.value.IsIb() == false { + continue + } + + childIbThis := bt.value.Ib() + + if childIbThis.IfdIdentity().TagId() == childIfdTagId { + return childIbThis, nil + } + } + + log.Panic(ErrChildIbNotFound) + + // Never reached. + return nil, nil +} + +func getOrCreateIbFromRootIbInner(rootIb *IfdBuilder, parentIb *IfdBuilder, currentLineage []exifcommon.IfdTagIdAndIndex) (ib *IfdBuilder, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): !! Add test. + + thisIb := rootIb + + // Since we're calling ourselves recursively with incrementally different + // paths, the FQ IFD-path of the parent that called us needs to be passed + // in, in order for us to know it. + var parentLineage []exifcommon.IfdTagIdAndIndex + if parentIb != nil { + var err error + + parentLineage, err = thisIb.ifdMapping.ResolvePath(parentIb.IfdIdentity().String()) + log.PanicIf(err) + } + + // Process the current path part. + currentItIi := currentLineage[0] + + // Make sure the leftmost part of the FQ IFD-path agrees with the IB we + // were given. 
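+    // (If there is no parent, the first lineage element alone must describe
+    // this builder; otherwise the parent's lineage plus the current element
+    // must resolve to the same fully-qualified path the builder reports.)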
+
+    expectedFqRootIfdPath := ""
+    if parentLineage != nil {
+        expectedLineage := append(parentLineage, currentItIi)
+        expectedFqRootIfdPath = thisIb.ifdMapping.PathPhraseFromLineage(expectedLineage)
+    } else {
+        expectedFqRootIfdPath = thisIb.ifdMapping.PathPhraseFromLineage(currentLineage[:1])
+    }
+
+    if expectedFqRootIfdPath != thisIb.IfdIdentity().String() {
+        log.Panicf("the FQ IFD-path [%s] we were given does not match the builder's FQ IFD-path [%s]", expectedFqRootIfdPath, thisIb.IfdIdentity().String())
+    }
+
+    // If we actually wanted a sibling (currentItIi.Index > 0) then seek to it,
+    // appending new siblings, as required, until we get there.
+    for i := 0; i < currentItIi.Index; i++ {
+        if thisIb.nextIb == nil {
+            // Generate an FQ IFD-path for the sibling. It'll use the same
+            // non-FQ IFD-path as the current IB.
+
+            iiSibling := thisIb.IfdIdentity().NewSibling(i + 1)
+            thisIb.nextIb = NewIfdBuilder(thisIb.ifdMapping, thisIb.tagIndex, iiSibling, thisIb.byteOrder)
+        }
+
+        thisIb = thisIb.nextIb
+    }
+
+    // There is no child IFD to process. We're done.
+    if len(currentLineage) == 1 {
+        return thisIb, nil
+    }
+
+    // Establish the next child to be processed.
+
+    childItii := currentLineage[1]
+
+    var foundChild *IfdBuilder
+    for _, bt := range thisIb.tags {
+        if bt.value.IsIb() == false {
+            continue
+        }
+
+        childIb := bt.value.Ib()
+
+        if childIb.IfdIdentity().TagId() == childItii.TagId {
+            foundChild = childIb
+            break
+        }
+    }
+
+    // If we didn't find the child, add it.
+
+    if foundChild == nil {
+        currentIfdTag := thisIb.IfdIdentity().IfdTag()
+
+        childIfdTag :=
+            exifcommon.NewIfdTag(
+                &currentIfdTag,
+                childItii.TagId,
+                childItii.Name)
+
+        iiChild := thisIb.IfdIdentity().NewChild(childIfdTag, 0)
+
+        foundChild =
+            NewIfdBuilder(
+                thisIb.ifdMapping,
+                thisIb.tagIndex,
+                iiChild,
+                thisIb.byteOrder)
+
+        err = thisIb.AddChildIb(foundChild)
+        log.PanicIf(err)
+    }
+
+    finalIb, err := getOrCreateIbFromRootIbInner(foundChild, thisIb, currentLineage[1:])
+    log.PanicIf(err)
+
+    return finalIb, nil
+}
+
+// GetOrCreateIbFromRootIb returns an IB representing the requested IFD, even if
+// an IB doesn't already exist for it. This function may call itself
+// recursively.
+func GetOrCreateIbFromRootIb(rootIb *IfdBuilder, fqIfdPath string) (ib *IfdBuilder, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    // lineage is a necessity of our recursion process. It doesn't include any
+    // parent IFDs on its left-side; it starts with the current IB only.
+    lineage, err := rootIb.ifdMapping.ResolvePath(fqIfdPath)
+    log.PanicIf(err)
+
+    ib, err = getOrCreateIbFromRootIbInner(rootIb, nil, lineage)
+    log.PanicIf(err)
+
+    return ib, nil
+}
+
+func (ib *IfdBuilder) String() string {
+    nextIfdPhrase := ""
+    if ib.nextIb != nil {
+        // TODO(dustin): We were setting this to ii.String(), but we were getting hex-data when printing this after building from an existing chain.
+        nextIfdPhrase = ib.nextIb.IfdIdentity().UnindexedString()
+    }
+
+    return fmt.Sprintf("IfdBuilder<PATH=[%s] TAG-ID=(0x%04x) COUNT=(%d) OFF=(0x%04x) NEXT-IFD=[%s]>", ib.IfdIdentity().UnindexedString(), ib.IfdIdentity().TagId(), len(ib.tags), ib.existingOffset, nextIfdPhrase)
+}
+
+func (ib *IfdBuilder) Tags() (tags []*BuilderTag) {
+    return ib.tags
+}
+
+// SetThumbnail sets thumbnail data.
+//
+// NOTES:
+//
+// - We don't manage any facet of the thumbnail data. This is the
+// responsibility of the user/developer.
+// - This method will fail unless the thumbnail is set on the root IFD.
+// However, in order to be valid, it must be set on the second one, linked to +// by the first, as per the EXIF/TIFF specification. +// - We set the offset to (0) now but will allocate the data and properly assign +// the offset when the IB is encoded (later). +func (ib *IfdBuilder) SetThumbnail(data []byte) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if ib.IfdIdentity().UnindexedString() != exifcommon.IfdStandardIfdIdentity.UnindexedString() { + log.Panicf("thumbnails can only go into a root Ifd (and only the second one)") + } + + // TODO(dustin): !! Add a test for this function. + + if data == nil || len(data) == 0 { + log.Panic("thumbnail is empty") + } + + ib.thumbnailData = data + + ibtvfb := NewIfdBuilderTagValueFromBytes(ib.thumbnailData) + offsetBt := + NewBuilderTag( + ib.IfdIdentity().UnindexedString(), + ThumbnailOffsetTagId, + exifcommon.TypeLong, + ibtvfb, + ib.byteOrder) + + err = ib.Set(offsetBt) + log.PanicIf(err) + + thumbnailSizeIt, err := ib.tagIndex.Get(ib.IfdIdentity(), ThumbnailSizeTagId) + log.PanicIf(err) + + sizeBt := NewStandardBuilderTag(ib.IfdIdentity().UnindexedString(), thumbnailSizeIt, ib.byteOrder, []uint32{uint32(len(ib.thumbnailData))}) + + err = ib.Set(sizeBt) + log.PanicIf(err) + + return nil +} + +func (ib *IfdBuilder) Thumbnail() []byte { + return ib.thumbnailData +} + +func (ib *IfdBuilder) printTagTree(levels int) { + indent := strings.Repeat(" ", levels*2) + + i := 0 + for currentIb := ib; currentIb != nil; currentIb = currentIb.nextIb { + prefix := " " + if i > 0 { + prefix = ">" + } + + if levels == 0 { + fmt.Printf("%s%sIFD: %s INDEX=(%d)\n", indent, prefix, currentIb, i) + } else { + fmt.Printf("%s%sChild IFD: %s\n", indent, prefix, currentIb) + } + + if len(currentIb.tags) > 0 { + fmt.Printf("\n") + + for i, tag := range currentIb.tags { + isChildIb := false + _, err := ib.ifdMapping.GetChild(currentIb.IfdIdentity().UnindexedString(), tag.tagId) + if err == nil { + isChildIb = true + } else if log.Is(err, exifcommon.ErrChildIfdNotMapped) == false { + log.Panic(err) + } + + tagName := "" + + // If a normal tag (not a child IFD) get the name. 
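+                // (Child IFDs are looked up through the IFD mapping rather
+                // than the tag index, so they get a placeholder name below
+                // instead of an index lookup.)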
+                if isChildIb == true {
+                    tagName = "<Child IFD>"
+                } else {
+                    it, err := ib.tagIndex.Get(ib.ifdIdentity, tag.tagId)
+                    if log.Is(err, ErrTagNotFound) == true {
+                        tagName = "<UNKNOWN>"
+                    } else if err != nil {
+                        log.Panic(err)
+                    } else {
+                        tagName = it.Name
+                    }
+                }
+
+                value := tag.Value()
+
+                if value.IsIb() == true {
+                    fmt.Printf("%s (%d): [%s] %s\n", indent, i, tagName, value.Ib())
+                } else {
+                    fmt.Printf("%s (%d): [%s] %s\n", indent, i, tagName, tag)
+                }
+
+                if isChildIb == true {
+                    if tag.value.IsIb() == false {
+                        log.Panicf("tag-ID (0x%04x) is an IFD but the tag value is not an IB instance: %v", tag.tagId, tag)
+                    }
+
+                    fmt.Printf("\n")
+
+                    childIb := tag.value.Ib()
+                    childIb.printTagTree(levels + 1)
+                }
+            }
+
+            fmt.Printf("\n")
+        }
+
+        i++
+    }
+}
+
+func (ib *IfdBuilder) PrintTagTree() {
+    ib.printTagTree(0)
+}
+
+func (ib *IfdBuilder) printIfdTree(levels int) {
+    indent := strings.Repeat(" ", levels*2)
+
+    i := 0
+    for currentIb := ib; currentIb != nil; currentIb = currentIb.nextIb {
+        prefix := " "
+        if i > 0 {
+            prefix = ">"
+        }
+
+        fmt.Printf("%s%s%s\n", indent, prefix, currentIb)
+
+        if len(currentIb.tags) > 0 {
+            for _, tag := range currentIb.tags {
+                isChildIb := false
+                _, err := ib.ifdMapping.GetChild(currentIb.IfdIdentity().UnindexedString(), tag.tagId)
+                if err == nil {
+                    isChildIb = true
+                } else if log.Is(err, exifcommon.ErrChildIfdNotMapped) == false {
+                    log.Panic(err)
+                }
+
+                if isChildIb == true {
+                    if tag.value.IsIb() == false {
+                        log.Panicf("tag-ID (0x%04x) is an IFD but the tag value is not an IB instance: %v", tag.tagId, tag)
+                    }
+
+                    childIb := tag.value.Ib()
+                    childIb.printIfdTree(levels + 1)
+                }
+            }
+        }
+
+        i++
+    }
+}
+
+func (ib *IfdBuilder) PrintIfdTree() {
+    ib.printIfdTree(0)
+}
+
+func (ib *IfdBuilder) dumpToStrings(thisIb *IfdBuilder, prefix string, tagId uint16, lines []string) (linesOutput []string) {
+    if lines == nil {
+        linesOutput = make([]string, 0)
+    } else {
+        linesOutput = lines
+    }
+
+    siblingIfdIndex := 0
+    for ; thisIb != nil; thisIb = thisIb.nextIb {
+        line := fmt.Sprintf("IFD<PARENTS=[%s] FQ-IFD-PATH=[%s] IFD-INDEX=(%d) IFD-TAG-ID=(0x%04x) TAG=[0x%04x]>", prefix, thisIb.IfdIdentity().String(), siblingIfdIndex, thisIb.IfdIdentity().TagId(), tagId)
+        linesOutput = append(linesOutput, line)
+
+        for i, tag := range thisIb.tags {
+            var childIb *IfdBuilder
+            childIfdName := ""
+            if tag.value.IsIb() == true {
+                childIb = tag.value.Ib()
+                childIfdName = childIb.IfdIdentity().UnindexedString()
+            }
+
+            line := fmt.Sprintf("TAG<PARENTS=[%s] FQ-IFD-PATH=[%s] IFD-TAG-ID=(0x%04x) CHILD-IFD=[%s] TAG-INDEX=(%d) TAG-ID=(0x%04x)>", prefix, thisIb.IfdIdentity().String(), thisIb.IfdIdentity().TagId(), childIfdName, i, tag.tagId)
+            linesOutput = append(linesOutput, line)
+
+            if childIb == nil {
+                continue
+            }
+
+            childPrefix := ""
+            if prefix == "" {
+                childPrefix = fmt.Sprintf("%s", thisIb.IfdIdentity().UnindexedString())
+            } else {
+                childPrefix = fmt.Sprintf("%s->%s", prefix, thisIb.IfdIdentity().UnindexedString())
+            }
+
+            linesOutput = thisIb.dumpToStrings(childIb, childPrefix, tag.tagId, linesOutput)
+        }
+
+        siblingIfdIndex++
+    }
+
+    return linesOutput
+}
+
+func (ib *IfdBuilder) DumpToStrings() (lines []string) {
+    return ib.dumpToStrings(ib, "", 0, lines)
+}
+
+func (ib *IfdBuilder) SetNextIb(nextIb *IfdBuilder) (err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    ib.nextIb = nextIb
+
+    return nil
+}
+
+func (ib *IfdBuilder) DeleteN(tagId uint16, n int) (err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    if n < 1 {
+        log.Panicf("N must be at least 1: (%d)", n)
+    }
+
+    for n > 0 {
+        j := -1
+        for i, bt := range ib.tags {
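+            // Record the index of the first remaining occurrence of this
+            // tag-ID; -1 means it was not found.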
+ if bt.tagId == tagId { + j = i + break + } + } + + if j == -1 { + log.Panic(ErrTagEntryNotFound) + } + + ib.tags = append(ib.tags[:j], ib.tags[j+1:]...) + n-- + } + + return nil +} + +func (ib *IfdBuilder) DeleteFirst(tagId uint16) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + err = ib.DeleteN(tagId, 1) + log.PanicIf(err) + + return nil +} + +func (ib *IfdBuilder) DeleteAll(tagId uint16) (n int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + for { + err = ib.DeleteN(tagId, 1) + if log.Is(err, ErrTagEntryNotFound) == true { + break + } else if err != nil { + log.Panic(err) + } + + n++ + } + + return n, nil +} + +func (ib *IfdBuilder) ReplaceAt(position int, bt *BuilderTag) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if position < 0 { + log.Panicf("replacement position must be 0 or greater") + } else if position >= len(ib.tags) { + log.Panicf("replacement position does not exist") + } + + ib.tags[position] = bt + + return nil +} + +func (ib *IfdBuilder) Replace(tagId uint16, bt *BuilderTag) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + position, err := ib.Find(tagId) + log.PanicIf(err) + + ib.tags[position] = bt + + return nil +} + +// Set will add a new entry or update an existing entry. +func (ib *IfdBuilder) Set(bt *BuilderTag) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + position, err := ib.Find(bt.tagId) + if err == nil { + ib.tags[position] = bt + } else if log.Is(err, ErrTagEntryNotFound) == true { + err = ib.add(bt) + log.PanicIf(err) + } else { + log.Panic(err) + } + + return nil +} + +func (ib *IfdBuilder) FindN(tagId uint16, maxFound int) (found []int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + found = make([]int, 0) + + for i, bt := range ib.tags { + if bt.tagId == tagId { + found = append(found, i) + if maxFound == 0 || len(found) >= maxFound { + break + } + } + } + + return found, nil +} + +func (ib *IfdBuilder) Find(tagId uint16) (position int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + found, err := ib.FindN(tagId, 1) + log.PanicIf(err) + + if len(found) == 0 { + log.Panic(ErrTagEntryNotFound) + } + + return found[0], nil +} + +func (ib *IfdBuilder) FindTag(tagId uint16) (bt *BuilderTag, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + found, err := ib.FindN(tagId, 1) + log.PanicIf(err) + + if len(found) == 0 { + log.Panic(ErrTagEntryNotFound) + } + + position := found[0] + + return ib.tags[position], nil +} + +func (ib *IfdBuilder) FindTagWithName(tagName string) (bt *BuilderTag, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + it, err := ib.tagIndex.GetWithName(ib.IfdIdentity(), tagName) + log.PanicIf(err) + + found, err := ib.FindN(it.Id, 1) + log.PanicIf(err) + + if len(found) == 0 { + log.Panic(ErrTagEntryNotFound) + } + + position := found[0] + + return ib.tags[position], nil +} + +func (ib *IfdBuilder) add(bt *BuilderTag) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if 
bt.ifdPath == "" {
+        log.Panicf("BuilderTag ifdPath is not set: %s", bt)
+    } else if bt.typeId == 0x0 {
+        log.Panicf("BuilderTag type-ID is not set: %s", bt)
+    } else if bt.value == nil {
+        log.Panicf("BuilderTag value is not set: %s", bt)
+    }
+
+    ib.tags = append(ib.tags, bt)
+    return nil
+}
+
+func (ib *IfdBuilder) Add(bt *BuilderTag) (err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    if bt.value.IsIb() == true {
+        log.Panicf("child IfdBuilders must be added via AddChildIb() or AddTagsFromExisting(), not Add()")
+    }
+
+    err = ib.add(bt)
+    log.PanicIf(err)
+
+    return nil
+}
+
+// AddChildIb adds a tag that branches to a new IFD.
+func (ib *IfdBuilder) AddChildIb(childIb *IfdBuilder) (err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    if childIb.IfdIdentity().TagId() == 0 {
+        log.Panicf("IFD can not be used as a child IFD (not associated with a tag-ID): %v", childIb)
+    } else if childIb.byteOrder != ib.byteOrder {
+        log.Panicf("Child IFD does not have the same byte-order: [%s] != [%s]", childIb.byteOrder, ib.byteOrder)
+    }
+
+    // Since no standard IFD occurs more than once, check that a tag of this
+    // type has not been previously added. Note that we just search the
+    // current IFD and *not every* IFD.
+    for _, bt := range childIb.tags {
+        if bt.tagId == childIb.IfdIdentity().TagId() {
+            log.Panicf("child-IFD already added: %v", childIb.IfdIdentity().UnindexedString())
+        }
+    }
+
+    bt := ib.NewBuilderTagFromBuilder(childIb)
+    ib.tags = append(ib.tags, bt)
+
+    return nil
+}
+
+func (ib *IfdBuilder) NewBuilderTagFromBuilder(childIb *IfdBuilder) (bt *BuilderTag) {
+    defer func() {
+        if state := recover(); state != nil {
+            err := log.Wrap(state.(error))
+            log.Panic(err)
+        }
+    }()
+
+    value := NewIfdBuilderTagValueFromIfdBuilder(childIb)
+
+    bt = NewChildIfdBuilderTag(
+        ib.IfdIdentity().UnindexedString(),
+        childIb.IfdIdentity().TagId(),
+        value)
+
+    return bt
+}
+
+// AddTagsFromExisting does a verbatim copy of the entries in `ifd` to this
+// builder. It excludes child IFDs. These must be added explicitly via
+// `AddChildIb()`.
+func (ib *IfdBuilder) AddTagsFromExisting(ifd *Ifd, includeTagIds []uint16, excludeTagIds []uint16) (err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    thumbnailData, err := ifd.Thumbnail()
+    if err == nil {
+        err = ib.SetThumbnail(thumbnailData)
+        log.PanicIf(err)
+    } else if log.Is(err, ErrNoThumbnail) == false {
+        log.Panic(err)
+    }
+
+    for i, ite := range ifd.Entries() {
+        if ite.IsThumbnailOffset() == true || ite.IsThumbnailSize() == true {
+            // These will be added on-the-fly when we encode.
+            continue
+        }
+
+        if excludeTagIds != nil && len(excludeTagIds) > 0 {
+            found := false
+            for _, excludedTagId := range excludeTagIds {
+                if excludedTagId == ite.TagId() {
+                    found = true
+                }
+            }
+
+            if found == true {
+                continue
+            }
+        }
+
+        if includeTagIds != nil && len(includeTagIds) > 0 {
+            // Whether or not there was a list of excludes, if there is a list
+            // of includes then the current tag has to be in it.
+
+            found := false
+            for _, includedTagId := range includeTagIds {
+                if includedTagId == ite.TagId() {
+                    found = true
+                    break
+                }
+            }
+
+            if found == false {
+                continue
+            }
+        }
+
+        var bt *BuilderTag
+
+        if ite.ChildIfdPath() != "" {
+            // If we want to add an IFD tag, we'll have to build it first and
+            // *then* add it via a different method.
+ + // Figure out which of the child-IFDs that are associated with + // this IFD represents this specific child IFD. + + var childIfd *Ifd + for _, thisChildIfd := range ifd.Children() { + if thisChildIfd.ParentTagIndex() != i { + continue + } else if thisChildIfd.ifdIdentity.TagId() != 0xffff && thisChildIfd.ifdIdentity.TagId() != ite.TagId() { + log.Panicf("child-IFD tag is not correct: TAG-POSITION=(%d) ITE=%s CHILD-IFD=%s", thisChildIfd.ParentTagIndex(), ite, thisChildIfd) + } + + childIfd = thisChildIfd + break + } + + if childIfd == nil { + childTagIds := make([]string, len(ifd.Children())) + for j, childIfd := range ifd.Children() { + childTagIds[j] = fmt.Sprintf("0x%04x (parent tag-position %d)", childIfd.ifdIdentity.TagId(), childIfd.ParentTagIndex()) + } + + log.Panicf("could not find child IFD for child ITE: IFD-PATH=[%s] TAG-ID=(0x%04x) CURRENT-TAG-POSITION=(%d) CHILDREN=%v", ite.IfdPath(), ite.TagId(), i, childTagIds) + } + + childIb := NewIfdBuilderFromExistingChain(childIfd) + bt = ib.NewBuilderTagFromBuilder(childIb) + } else { + // Non-IFD tag. + + rawBytes, err := ite.GetRawBytes() + log.PanicIf(err) + + value := NewIfdBuilderTagValueFromBytes(rawBytes) + + bt = NewBuilderTag( + ifd.ifdIdentity.UnindexedString(), + ite.TagId(), + ite.TagType(), + value, + ib.byteOrder) + } + + err := ib.add(bt) + log.PanicIf(err) + } + + return nil +} + +// AddStandard quickly and easily composes and adds the tag using the +// information already known about a tag. Only works with standard tags. +func (ib *IfdBuilder) AddStandard(tagId uint16, value interface{}) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + it, err := ib.tagIndex.Get(ib.IfdIdentity(), tagId) + log.PanicIf(err) + + bt := NewStandardBuilderTag(ib.IfdIdentity().UnindexedString(), it, ib.byteOrder, value) + + err = ib.add(bt) + log.PanicIf(err) + + return nil +} + +// AddStandardWithName quickly and easily composes and adds the tag using the +// information already known about a tag (using the name). Only works with +// standard tags. +func (ib *IfdBuilder) AddStandardWithName(tagName string, value interface{}) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + it, err := ib.tagIndex.GetWithName(ib.IfdIdentity(), tagName) + log.PanicIf(err) + + bt := NewStandardBuilderTag(ib.IfdIdentity().UnindexedString(), it, ib.byteOrder, value) + + err = ib.add(bt) + log.PanicIf(err) + + return nil +} + +// SetStandard quickly and easily composes and adds or replaces the tag using +// the information already known about a tag. Only works with standard tags. +func (ib *IfdBuilder) SetStandard(tagId uint16, value interface{}) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): !! Add test for this function. + + it, err := ib.tagIndex.Get(ib.IfdIdentity(), tagId) + log.PanicIf(err) + + bt := NewStandardBuilderTag(ib.IfdIdentity().UnindexedString(), it, ib.byteOrder, value) + + i, err := ib.Find(tagId) + if err != nil { + if log.Is(err, ErrTagEntryNotFound) == false { + log.Panic(err) + } + + ib.tags = append(ib.tags, bt) + } else { + ib.tags[i] = bt + } + + return nil +} + +// SetStandardWithName quickly and easily composes and adds or replaces the +// tag using the information already known about a tag (using the name). Only +// works with standard tags. 
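+//
+// For example (a sketch; "Artist" is a standard IFD0 tag, error handling
+// elided):
+//
+//	_ = ib.SetStandardWithName("Artist", "example artist")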
+func (ib *IfdBuilder) SetStandardWithName(tagName string, value interface{}) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): !! Add test for this function. + + it, err := ib.tagIndex.GetWithName(ib.IfdIdentity(), tagName) + log.PanicIf(err) + + bt := NewStandardBuilderTag(ib.IfdIdentity().UnindexedString(), it, ib.byteOrder, value) + + i, err := ib.Find(bt.tagId) + if err != nil { + if log.Is(err, ErrTagEntryNotFound) == false { + log.Panic(err) + } + + ib.tags = append(ib.tags, bt) + } else { + ib.tags[i] = bt + } + + return nil +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/ifd_builder_encode.go b/vendor/github.com/dsoprea/go-exif/v3/ifd_builder_encode.go new file mode 100644 index 000000000..a0f4ff79c --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/ifd_builder_encode.go @@ -0,0 +1,532 @@ +package exif + +import ( + "bytes" + "fmt" + "strings" + + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +const ( + // Tag-ID + Tag-Type + Unit-Count + Value/Offset. + IfdTagEntrySize = uint32(2 + 2 + 4 + 4) +) + +type ByteWriter struct { + b *bytes.Buffer + byteOrder binary.ByteOrder +} + +func NewByteWriter(b *bytes.Buffer, byteOrder binary.ByteOrder) (bw *ByteWriter) { + return &ByteWriter{ + b: b, + byteOrder: byteOrder, + } +} + +func (bw ByteWriter) writeAsBytes(value interface{}) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + err = binary.Write(bw.b, bw.byteOrder, value) + log.PanicIf(err) + + return nil +} + +func (bw ByteWriter) WriteUint32(value uint32) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + err = bw.writeAsBytes(value) + log.PanicIf(err) + + return nil +} + +func (bw ByteWriter) WriteUint16(value uint16) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + err = bw.writeAsBytes(value) + log.PanicIf(err) + + return nil +} + +func (bw ByteWriter) WriteFourBytes(value []byte) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + len_ := len(value) + if len_ != 4 { + log.Panicf("value is not four-bytes: (%d)", len_) + } + + _, err = bw.b.Write(value) + log.PanicIf(err) + + return nil +} + +// ifdOffsetIterator keeps track of where the next IFD should be written by +// keeping track of where the offsets start, the data that has been added, and +// bumping the offset *when* the data is added. +type ifdDataAllocator struct { + offset uint32 + b bytes.Buffer +} + +func newIfdDataAllocator(ifdDataAddressableOffset uint32) *ifdDataAllocator { + return &ifdDataAllocator{ + offset: ifdDataAddressableOffset, + } +} + +func (ida *ifdDataAllocator) Allocate(value []byte) (offset uint32, err error) { + _, err = ida.b.Write(value) + log.PanicIf(err) + + offset = ida.offset + ida.offset += uint32(len(value)) + + return offset, nil +} + +func (ida *ifdDataAllocator) NextOffset() uint32 { + return ida.offset +} + +func (ida *ifdDataAllocator) Bytes() []byte { + return ida.b.Bytes() +} + +// IfdByteEncoder converts an IB to raw bytes (for writing) while also figuring +// out all of the allocations and indirection that is required for extended +// data. +type IfdByteEncoder struct { + // journal holds a list of actions taken while encoding. 
+    journal [][3]string
+}
+
+func NewIfdByteEncoder() (ibe *IfdByteEncoder) {
+    return &IfdByteEncoder{
+        journal: make([][3]string, 0),
+    }
+}
+
+func (ibe *IfdByteEncoder) Journal() [][3]string {
+    return ibe.journal
+}
+
+func (ibe *IfdByteEncoder) TableSize(entryCount int) uint32 {
+    // Tag-Count + (Entry-Size * Entry-Count) + Next-IFD-Offset.
+    return uint32(2) + (IfdTagEntrySize * uint32(entryCount)) + uint32(4)
+}
+
+func (ibe *IfdByteEncoder) pushToJournal(where, direction, format string, args ...interface{}) {
+    event := [3]string{
+        direction,
+        where,
+        fmt.Sprintf(format, args...),
+    }
+
+    ibe.journal = append(ibe.journal, event)
+}
+
+// PrintJournal prints a hierarchical representation of the steps taken during
+// encoding.
+func (ibe *IfdByteEncoder) PrintJournal() {
+    maxWhereLength := 0
+    for _, event := range ibe.journal {
+        where := event[1]
+
+        len_ := len(where)
+        if len_ > maxWhereLength {
+            maxWhereLength = len_
+        }
+    }
+
+    level := 0
+    for i, event := range ibe.journal {
+        direction := event[0]
+        where := event[1]
+        message := event[2]
+
+        if direction != ">" && direction != "<" && direction != "-" {
+            log.Panicf("journal operation not valid: [%s]", direction)
+        }
+
+        if direction == "<" {
+            if level <= 0 {
+                log.Panicf("journal operations unbalanced (too many closes)")
+            }
+
+            level--
+        }
+
+        indent := strings.Repeat(" ", level)
+
+        fmt.Printf("%3d %s%s %s: %s\n", i, indent, direction, where, message)
+
+        if direction == ">" {
+            level++
+        }
+    }
+
+    if level != 0 {
+        log.Panicf("journal operations unbalanced (too many opens)")
+    }
+}
+
+// encodeTagToBytes encodes the given tag to a byte stream. If
+// `nextIfdOffsetToWrite` is more than (0), recurse into child IFDs
+// (`nextIfdOffsetToWrite` is required in order for them to know where its
+// IFD data will be written, in order for them to know the offset of where
+// their allocated-data block will start, which follows right behind).
+func (ibe *IfdByteEncoder) encodeTagToBytes(ib *IfdBuilder, bt *BuilderTag, bw *ByteWriter, ida *ifdDataAllocator, nextIfdOffsetToWrite uint32) (childIfdBlock []byte, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    // Write tag-ID.
+    err = bw.WriteUint16(bt.tagId)
+    log.PanicIf(err)
+
+    // Works for both values and child IFDs (which have an official size of
+    // LONG).
+    err = bw.WriteUint16(uint16(bt.typeId))
+    log.PanicIf(err)
+
+    // Write unit-count.
+
+    if bt.value.IsBytes() == true {
+        effectiveType := bt.typeId
+        if bt.typeId == exifcommon.TypeUndefined {
+            effectiveType = exifcommon.TypeByte
+        }
+
+        // It's a non-unknown value. Calculate the count of values of
+        // the type that we're writing and the raw bytes for the whole list.
+
+        typeSize := uint32(effectiveType.Size())
+
+        valueBytes := bt.value.Bytes()
+
+        len_ := len(valueBytes)
+        unitCount := uint32(len_) / typeSize
+
+        if _, found := tagsWithoutAlignment[bt.tagId]; found == false {
+            remainder := uint32(len_) % typeSize
+
+            if remainder > 0 {
+                log.Panicf("tag (0x%04x) value of (%d) bytes not evenly divisible by type-size (%d)", bt.tagId, len_, typeSize)
+            }
+        }
+
+        err = bw.WriteUint32(unitCount)
+        log.PanicIf(err)
+
+        // Write four-byte value/offset.
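+        // (Per the TIFF layout, a value that fits in four bytes is embedded
+        // directly in the entry; anything larger is written to the data area
+        // via the allocator below and the entry stores its offset instead.)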
+
+        if len_ > 4 {
+            offset, err := ida.Allocate(valueBytes)
+            log.PanicIf(err)
+
+            err = bw.WriteUint32(offset)
+            log.PanicIf(err)
+        } else {
+            fourBytes := make([]byte, 4)
+            copy(fourBytes, valueBytes)
+
+            err = bw.WriteFourBytes(fourBytes)
+            log.PanicIf(err)
+        }
+    } else {
+        if bt.value.IsIb() == false {
+            log.Panicf("tag value is not a byte-slice but also not a child IB: %v", bt)
+        }
+
+        // Write unit-count (one LONG representing one offset).
+        err = bw.WriteUint32(1)
+        log.PanicIf(err)
+
+        if nextIfdOffsetToWrite > 0 {
+            var err error
+
+            ibe.pushToJournal("encodeTagToBytes", ">", "[%s]->[%s]", ib.IfdIdentity().UnindexedString(), bt.value.Ib().IfdIdentity().UnindexedString())
+
+            // Create the block of IFD data and everything it requires.
+            childIfdBlock, err = ibe.encodeAndAttachIfd(bt.value.Ib(), nextIfdOffsetToWrite)
+            log.PanicIf(err)
+
+            ibe.pushToJournal("encodeTagToBytes", "<", "[%s]->[%s]", bt.value.Ib().IfdIdentity().UnindexedString(), ib.IfdIdentity().UnindexedString())
+
+            // Use the next-IFD offset for it. The IFD will actually get
+            // attached after we return.
+            err = bw.WriteUint32(nextIfdOffsetToWrite)
+            log.PanicIf(err)
+
+        } else {
+            // No child-IFDs are to be allocated. Finish the entry with a NULL
+            // pointer.
+
+            ibe.pushToJournal("encodeTagToBytes", "-", "*Not* descending to child: [%s]", bt.value.Ib().IfdIdentity().UnindexedString())
+
+            err = bw.WriteUint32(0)
+            log.PanicIf(err)
+        }
+    }
+
+    return childIfdBlock, nil
+}
+
+// encodeIfdToBytes encodes the given IB to a byte-slice. We are given the
+// offset at which this IFD will be written. This method is called both to
+// pre-determine how big the table is going to be (so that we can calculate the
+// address to allocate data at) as well as to write the final table.
+//
+// It is necessary to fully realize the table in order to predetermine its size
+// because it is not enough to know the size of the table: If there are child
+// IFDs, we will not be able to allocate them without first knowing how much
+// data we need to allocate for the current IFD.
+func (ibe *IfdByteEncoder) encodeIfdToBytes(ib *IfdBuilder, ifdAddressableOffset uint32, nextIfdOffsetToWrite uint32, setNextIb bool) (data []byte, tableSize uint32, dataSize uint32, childIfdSizes []uint32, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    ibe.pushToJournal("encodeIfdToBytes", ">", "%s", ib)
+
+    tableSize = ibe.TableSize(len(ib.tags))
+
+    b := new(bytes.Buffer)
+    bw := NewByteWriter(b, ib.byteOrder)
+
+    // Write tag count.
+    err = bw.WriteUint16(uint16(len(ib.tags)))
+    log.PanicIf(err)
+
+    ida := newIfdDataAllocator(ifdAddressableOffset)
+
+    childIfdBlocks := make([][]byte, 0)
+
+    // Write raw bytes for each tag entry. Allocate larger data to be referred
+    // to in the follow-up data-block as required. Any "unknown"-byte tags that
+    // we can't parse will not be present here (using AddTagsFromExisting(), at
+    // least).
+    for _, bt := range ib.tags {
+        childIfdBlock, err := ibe.encodeTagToBytes(ib, bt, bw, ida, nextIfdOffsetToWrite)
+        log.PanicIf(err)
+
+        if childIfdBlock != nil {
+            // We aren't allowed to have non-nil child IFDs if we're just
+            // sizing things up.
+            if nextIfdOffsetToWrite == 0 {
+                log.Panicf("no IFD offset provided for child-IFDs; no new child-IFDs permitted")
+            }
+
+            nextIfdOffsetToWrite += uint32(len(childIfdBlock))
+            childIfdBlocks = append(childIfdBlocks, childIfdBlock)
+        }
+    }
+
+    dataBytes := ida.Bytes()
+    dataSize = uint32(len(dataBytes))
+
+    childIfdSizes = make([]uint32, len(childIfdBlocks))
+    childIfdsTotalSize := uint32(0)
+    for i, childIfdBlock := range childIfdBlocks {
+        len_ := uint32(len(childIfdBlock))
+        childIfdSizes[i] = len_
+        childIfdsTotalSize += len_
+    }
+
+    // Set the link from this IFD to the next IFD that will be written in the
+    // next cycle.
+    if setNextIb == true {
+        // Write address of next IFD in chain. This will be the original
+        // allocation offset plus the size of everything we have allocated for
+        // this IFD and its child-IFDs.
+        //
+        // It is critical that this number is stepped properly. We experienced
+        // an issue whereby it first looked like we were duplicating the IFD and
+        // then that we were duplicating the tags in the wrong IFD, and then
+        // finally we determined that the next-IFD offset for the first IFD was
+        // accidentally pointing back to the EXIF IFD, so we were visiting it
+        // twice when visiting through the tags after decoding. It was an
+        // expensive bug to find.
+
+        ibe.pushToJournal("encodeIfdToBytes", "-", "Setting 'next' IFD to (0x%08x).", nextIfdOffsetToWrite)
+
+        err := bw.WriteUint32(nextIfdOffsetToWrite)
+        log.PanicIf(err)
+    } else {
+        err := bw.WriteUint32(0)
+        log.PanicIf(err)
+    }
+
+    _, err = b.Write(dataBytes)
+    log.PanicIf(err)
+
+    // Append any child IFD blocks after our table and data blocks. These IFDs
+    // were equipped with the appropriate offset information so it's expected
+    // that all offsets referred to by these will be correct.
+    //
+    // Note that child-IFDs are appended after the current IFD and before the
+    // next IFD, as opposed to the root IFDs, which are chained together but
+    // will be interrupted by these child-IFDs (which is expected, per the
+    // standard).
+
+    for _, childIfdBlock := range childIfdBlocks {
+        _, err = b.Write(childIfdBlock)
+        log.PanicIf(err)
+    }
+
+    ibe.pushToJournal("encodeIfdToBytes", "<", "%s", ib)
+
+    return b.Bytes(), tableSize, dataSize, childIfdSizes, nil
+}
+
+// encodeAndAttachIfd is a reentrant function that processes the IFD chain.
+func (ibe *IfdByteEncoder) encodeAndAttachIfd(ib *IfdBuilder, ifdAddressableOffset uint32) (data []byte, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    ibe.pushToJournal("encodeAndAttachIfd", ">", "%s", ib)
+
+    b := new(bytes.Buffer)
+
+    i := 0
+
+    for thisIb := ib; thisIb != nil; thisIb = thisIb.nextIb {
+
+        // Do a dry-run in order to pre-determine its size requirement.
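+        // (Each IFD is therefore encoded twice: once with a zero next-IFD
+        // offset purely to measure the table and allocated-data sizes, and
+        // once for real further below with the proper offsets.)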
+ + ibe.pushToJournal("encodeAndAttachIfd", ">", "Beginning encoding process: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString()) + + ibe.pushToJournal("encodeAndAttachIfd", ">", "Calculating size: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString()) + + _, tableSize, allocatedDataSize, _, err := ibe.encodeIfdToBytes(thisIb, ifdAddressableOffset, 0, false) + log.PanicIf(err) + + ibe.pushToJournal("encodeAndAttachIfd", "<", "Finished calculating size: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString()) + + ifdAddressableOffset += tableSize + nextIfdOffsetToWrite := ifdAddressableOffset + allocatedDataSize + + ibe.pushToJournal("encodeAndAttachIfd", ">", "Next IFD will be written at offset (0x%08x)", nextIfdOffsetToWrite) + + // Write our IFD as well as any child-IFDs (now that we know the offset + // where new IFDs and their data will be allocated). + + setNextIb := thisIb.nextIb != nil + + ibe.pushToJournal("encodeAndAttachIfd", ">", "Encoding starting: (%d) [%s] NEXT-IFD-OFFSET-TO-WRITE=(0x%08x)", i, thisIb.IfdIdentity().UnindexedString(), nextIfdOffsetToWrite) + + tableAndAllocated, effectiveTableSize, effectiveAllocatedDataSize, childIfdSizes, err := + ibe.encodeIfdToBytes(thisIb, ifdAddressableOffset, nextIfdOffsetToWrite, setNextIb) + + log.PanicIf(err) + + if effectiveTableSize != tableSize { + log.Panicf("written table size does not match the pre-calculated table size: (%d) != (%d) %s", effectiveTableSize, tableSize, ib) + } else if effectiveAllocatedDataSize != allocatedDataSize { + log.Panicf("written allocated-data size does not match the pre-calculated allocated-data size: (%d) != (%d) %s", effectiveAllocatedDataSize, allocatedDataSize, ib) + } + + ibe.pushToJournal("encodeAndAttachIfd", "<", "Encoding done: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString()) + + totalChildIfdSize := uint32(0) + for _, childIfdSize := range childIfdSizes { + totalChildIfdSize += childIfdSize + } + + if len(tableAndAllocated) != int(tableSize+allocatedDataSize+totalChildIfdSize) { + log.Panicf("IFD table and data is not a consistent size: (%d) != (%d)", len(tableAndAllocated), tableSize+allocatedDataSize+totalChildIfdSize) + } + + // TODO(dustin): We might want to verify the original tableAndAllocated length, too. + + _, err = b.Write(tableAndAllocated) + log.PanicIf(err) + + // Advance past what we've allocated, thus far. + + ifdAddressableOffset += allocatedDataSize + totalChildIfdSize + + ibe.pushToJournal("encodeAndAttachIfd", "<", "Finishing encoding process: (%d) [%s] [FINAL:] NEXT-IFD-OFFSET-TO-WRITE=(0x%08x)", i, ib.IfdIdentity().UnindexedString(), nextIfdOffsetToWrite) + + i++ + } + + ibe.pushToJournal("encodeAndAttachIfd", "<", "%s", ib) + + return b.Bytes(), nil +} + +// EncodeToExifPayload is the base encoding step that transcribes the entire IB +// structure to its on-disk layout. +func (ibe *IfdByteEncoder) EncodeToExifPayload(ib *IfdBuilder) (data []byte, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + data, err = ibe.encodeAndAttachIfd(ib, ExifDefaultFirstIfdOffset) + log.PanicIf(err) + + return data, nil +} + +// EncodeToExif calls EncodeToExifPayload and then packages the result into a +// complete EXIF block. 
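+//
+// A minimal sketch of the final assembly, assuming `rootIb` was produced by
+// NewIfdBuilderFromExistingChain or populated by hand (error handling
+// elided):
+//
+//	ibe := NewIfdByteEncoder()
+//	exifBlock, _ := ibe.EncodeToExif(rootIb)
+//	// exifBlock begins with the byte-order signature and first-IFD offset.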
+func (ibe *IfdByteEncoder) EncodeToExif(ib *IfdBuilder) (data []byte, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    encodedIfds, err := ibe.EncodeToExifPayload(ib)
+    log.PanicIf(err)
+
+    // Wrap the IFD in a formal EXIF block.
+
+    b := new(bytes.Buffer)
+
+    headerBytes, err := BuildExifHeader(ib.byteOrder, ExifDefaultFirstIfdOffset)
+    log.PanicIf(err)
+
+    _, err = b.Write(headerBytes)
+    log.PanicIf(err)
+
+    _, err = b.Write(encodedIfds)
+    log.PanicIf(err)
+
+    return b.Bytes(), nil
+}
diff --git a/vendor/github.com/dsoprea/go-exif/v3/ifd_enumerate.go b/vendor/github.com/dsoprea/go-exif/v3/ifd_enumerate.go
new file mode 100644
index 000000000..3167596ef
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-exif/v3/ifd_enumerate.go
@@ -0,0 +1,1672 @@
+package exif
+
+import (
+    "bytes"
+    "errors"
+    "fmt"
+    "io"
+    "strconv"
+    "strings"
+    "time"
+
+    "encoding/binary"
+
+    "github.com/dsoprea/go-logging"
+
+    "github.com/dsoprea/go-exif/v3/common"
+    "github.com/dsoprea/go-exif/v3/undefined"
+)
+
+var (
+    ifdEnumerateLogger = log.NewLogger("exif.ifd_enumerate")
+)
+
+var (
+    // ErrNoThumbnail means that no thumbnail was found.
+    ErrNoThumbnail = errors.New("no thumbnail")
+
+    // ErrNoGpsTags means that no GPS info was found.
+    ErrNoGpsTags = errors.New("no gps tags")
+
+    // ErrTagTypeNotValid means that the tag-type is not valid.
+    ErrTagTypeNotValid = errors.New("tag type invalid")
+
+    // ErrOffsetInvalid means that the file offset is not valid.
+    ErrOffsetInvalid = errors.New("file offset invalid")
+)
+
+var (
+    // ValidGpsVersions is the list of recognized EXIF GPS versions/signatures.
+    ValidGpsVersions = [][4]byte{
+        // 2.0.0.0 appears to have a very similar format to 2.2.0.0, so enabling
+        // it under that assumption.
+        //
+        //     IFD-PATH=[IFD] ID=(0x8825) NAME=[GPSTag] COUNT=(1) TYPE=[LONG] VALUE=[114]
+        //     IFD-PATH=[IFD/GPSInfo] ID=(0x0000) NAME=[GPSVersionID] COUNT=(4) TYPE=[BYTE] VALUE=[02 00 00 00]
+        //     IFD-PATH=[IFD/GPSInfo] ID=(0x0001) NAME=[GPSLatitudeRef] COUNT=(2) TYPE=[ASCII] VALUE=[S]
+        //     IFD-PATH=[IFD/GPSInfo] ID=(0x0002) NAME=[GPSLatitude] COUNT=(3) TYPE=[RATIONAL] VALUE=[38/1...]
+        //     IFD-PATH=[IFD/GPSInfo] ID=(0x0003) NAME=[GPSLongitudeRef] COUNT=(2) TYPE=[ASCII] VALUE=[E]
+        //     IFD-PATH=[IFD/GPSInfo] ID=(0x0004) NAME=[GPSLongitude] COUNT=(3) TYPE=[RATIONAL] VALUE=[144/1...]
+        //     IFD-PATH=[IFD/GPSInfo] ID=(0x0012) NAME=[GPSMapDatum] COUNT=(7) TYPE=[ASCII] VALUE=[WGS-84]
+        //
+        {2, 0, 0, 0},
+
+        {2, 2, 0, 0},
+
+        // Suddenly appeared as the default in 2.31: https://home.jeita.or.jp/tsc/std-pdf/CP-3451D.pdf
+        //
+        // Note that the presence of 2.3.0.0 doesn't seem to guarantee
+        // coordinates. In some cases, we've seen just the following:
+        //
+        //     GPS Tag Version     |2.3.0.0
+        //     GPS Receiver Status |V
+        //     Geodetic Survey Data|WGS-84
+        //     GPS Differential Cor|0
+        //
+        {2, 3, 0, 0},
+    }
+)
+
+// byteParser knows how to decode an IFD and all of the tags it
+// describes.
+//
+// The IFDs and the actual values can float throughout the EXIF block, but the
+// IFD itself is just a minor header followed by a set of repeating,
+// statically-sized records. So, the tags (though not necessarily their values)
+// are fairly simple to enumerate.
+type byteParser struct {
+    byteOrder     binary.ByteOrder
+    rs            io.ReadSeeker
+    ifdOffset     uint32
+    currentOffset uint32
+}
+
+// newByteParser returns a new byteParser struct.
+//
+// initialOffset is for arithmetic-based tracking of where we should be at in
+// the stream.
+func newByteParser(rs io.ReadSeeker, byteOrder binary.ByteOrder, initialOffset uint32) (bp *byteParser, err error) { + // TODO(dustin): Add test + + bp = &byteParser{ + rs: rs, + byteOrder: byteOrder, + currentOffset: initialOffset, + } + + return bp, nil +} + +// getUint16 reads a uint16 and advances both our current and our current +// accumulator (which allows us to know how far to seek to the beginning of the +// next IFD when it's time to jump). +func (bp *byteParser) getUint16() (value uint16, raw []byte, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test + + needBytes := 2 + + raw = make([]byte, needBytes) + + _, err = io.ReadFull(bp.rs, raw) + log.PanicIf(err) + + value = bp.byteOrder.Uint16(raw) + + bp.currentOffset += uint32(needBytes) + + return value, raw, nil +} + +// getUint32 reads a uint32 and advances both our current and our current +// accumulator (which allows us to know how far to seek to the beginning of the +// next IFD when it's time to jump). +func (bp *byteParser) getUint32() (value uint32, raw []byte, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test + + needBytes := 4 + + raw = make([]byte, needBytes) + + _, err = io.ReadFull(bp.rs, raw) + log.PanicIf(err) + + value = bp.byteOrder.Uint32(raw) + + bp.currentOffset += uint32(needBytes) + + return value, raw, nil +} + +// CurrentOffset returns the starting offset but the number of bytes that we +// have parsed. This is arithmetic-based tracking, not a seek(0) operation. +func (bp *byteParser) CurrentOffset() uint32 { + return bp.currentOffset +} + +// IfdEnumerate is the main enumeration type. It knows how to parse the IFD +// containers in the EXIF blob. +type IfdEnumerate struct { + ebs ExifBlobSeeker + byteOrder binary.ByteOrder + tagIndex *TagIndex + ifdMapping *exifcommon.IfdMapping + furthestOffset uint32 + + visitedIfdOffsets map[uint32]struct{} +} + +// NewIfdEnumerate returns a new instance of IfdEnumerate. +func NewIfdEnumerate(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, ebs ExifBlobSeeker, byteOrder binary.ByteOrder) *IfdEnumerate { + return &IfdEnumerate{ + ebs: ebs, + byteOrder: byteOrder, + ifdMapping: ifdMapping, + tagIndex: tagIndex, + + visitedIfdOffsets: make(map[uint32]struct{}), + } +} + +func (ie *IfdEnumerate) getByteParser(ifdOffset uint32) (bp *byteParser, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + initialOffset := ExifAddressableAreaStart + ifdOffset + + rs, err := ie.ebs.GetReadSeeker(int64(initialOffset)) + log.PanicIf(err) + + bp, err = + newByteParser( + rs, + ie.byteOrder, + initialOffset) + + if err != nil { + if err == ErrOffsetInvalid { + return nil, err + } + + log.Panic(err) + } + + return bp, nil +} + +func (ie *IfdEnumerate) parseTag(ii *exifcommon.IfdIdentity, tagPosition int, bp *byteParser) (ite *IfdTagEntry, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + tagId, _, err := bp.getUint16() + log.PanicIf(err) + + tagTypeRaw, _, err := bp.getUint16() + log.PanicIf(err) + + tagType := exifcommon.TagTypePrimitive(tagTypeRaw) + + unitCount, _, err := bp.getUint32() + log.PanicIf(err) + + valueOffset, rawValueOffset, err := bp.getUint32() + log.PanicIf(err) + + // Check whether the embedded type indicator is valid. 
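+    // (The twelve bytes just consumed are one complete IFD entry: tag-ID,
+    // type, unit-count, and the four-byte value/offset.)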
+
+    if tagType.IsValid() == false {
+        // Technically, we have the type on-file in the tags-index, but
+        // if the type stored alongside the data disagrees with it,
+        // which it apparently does, all bets are off.
+        ifdEnumerateLogger.Warningf(nil,
+            "Tag (0x%04x) in IFD [%s] at position (%d) has invalid type (0x%04x) and will be skipped.",
+            tagId, ii, tagPosition, int(tagType))
+
+        ite = &IfdTagEntry{
+            tagId:   tagId,
+            tagType: tagType,
+        }
+
+        return ite, ErrTagTypeNotValid
+    }
+
+    // Check whether the embedded type is listed among the supported types for
+    // the registered tag. If not, skip processing the tag.
+
+    it, err := ie.tagIndex.Get(ii, tagId)
+    if err != nil {
+        if log.Is(err, ErrTagNotFound) == true {
+            ifdEnumerateLogger.Warningf(nil, "Tag (0x%04x) is not known and will be skipped.", tagId)
+
+            ite = &IfdTagEntry{
+                tagId: tagId,
+            }
+
+            return ite, ErrTagNotFound
+        }
+
+        log.Panic(err)
+    }
+
+    // If we're trying to be as forgiving as possible then use whatever type was
+    // reported in the format. Otherwise, only accept a type that's expected for
+    // this tag.
+    if ie.tagIndex.UniversalSearch() == false && it.DoesSupportType(tagType) == false {
+        // The type in the stream disagrees with the type that this tag is
+        // expected to have. This can present issues with how we handle the
+        // special-case tags (e.g. thumbnails, GPS, etc..) when those tags
+        // suddenly have data that we no longer manipulate correctly/
+        // accurately.
+        ifdEnumerateLogger.Warningf(nil,
+            "Tag (0x%04x) in IFD [%s] at position (%d) has unsupported type (0x%02x) and will be skipped.",
+            tagId, ii, tagPosition, int(tagType))
+
+        return nil, ErrTagTypeNotValid
+    }
+
+    // Construct tag struct.
+
+    rs, err := ie.ebs.GetReadSeeker(0)
+    log.PanicIf(err)
+
+    ite = newIfdTagEntry(
+        ii,
+        tagId,
+        tagPosition,
+        tagType,
+        unitCount,
+        valueOffset,
+        rawValueOffset,
+        rs,
+        ie.byteOrder)
+
+    ifdPath := ii.UnindexedString()
+
+    // If it's an IFD but not a standard one, it'll just be seen as a LONG
+    // (the standard IFD tag type), later, unless we skip it because it's
+    // [likely] not even in the standard list of known tags.
+    mi, err := ie.ifdMapping.GetChild(ifdPath, tagId)
+    if err == nil {
+        currentIfdTag := ii.IfdTag()
+
+        childIt := exifcommon.NewIfdTag(&currentIfdTag, tagId, mi.Name)
+        iiChild := ii.NewChild(childIt, 0)
+        ite.SetChildIfd(iiChild)
+
+        // We also need to set `tag.ChildFqIfdPath` but can't do it here
+        // because we don't have the IFD index.
+    } else if log.Is(err, exifcommon.ErrChildIfdNotMapped) == false {
+        log.Panic(err)
+    }
+
+    return ite, nil
+}
+
+// TagVisitorFn is called for each tag when enumerating through the EXIF.
+type TagVisitorFn func(ite *IfdTagEntry) (err error)
+
+// tagPostParse does some tag-level processing following the parse of each tag.
+func (ie *IfdEnumerate) tagPostParse(ite *IfdTagEntry, med *MiscellaneousExifData) (err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    // TODO(dustin): Add test
+
+    ii := ite.IfdIdentity()
+
+    tagId := ite.TagId()
+    tagType := ite.TagType()
+
+    it, err := ie.tagIndex.Get(ii, tagId)
+    if err == nil {
+        ite.setTagName(it.Name)
+    } else {
+        if err != ErrTagNotFound {
+            log.Panic(err)
+        }
+
+        // This is an unknown tag.
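+        // (Record it as unknown, then fall back to a best-effort search
+        // across all IFDs for a tag with the same ID and type; see the
+        // FindFirst() call below.)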
+
+        originalBt := exifcommon.BasicTag{
+            FqIfdPath: ii.String(),
+            IfdPath:   ii.UnindexedString(),
+            TagId:     tagId,
+        }
+
+        if med != nil {
+            med.unknownTags[originalBt] = exifcommon.BasicTag{}
+        }
+
+        utilityLogger.Debugf(nil,
+            "Tag (0x%04x) is not valid for IFD [%s]. Attempting secondary "+
+                "lookup.", tagId, ii.String())
+
+        // This will overwrite the existing `it` and `err`. Since `FindFirst()`
+        // might generate different Errors than `Get()`, the log message above
+        // is important to try to mitigate confusion in that case.
+        it, err = ie.tagIndex.FindFirst(tagId, tagType, nil)
+        if err != nil {
+            if err != ErrTagNotFound {
+                log.Panic(err)
+            }
+
+            // This is supposed to be a convenience function and if we were
+            // to keep the name empty or set it to some placeholder, it
+            // might be mismanaged by the package that is calling us. If
+            // they want to specifically manage these types of tags, they
+            // can use more advanced functionality to specifically handle
+            // unknown tags.
+            utilityLogger.Warningf(nil,
+                "Tag with ID (0x%04x) in IFD [%s] is not recognized and "+
+                    "will be ignored.", tagId, ii.String())
+
+            return ErrTagNotFound
+        }
+
+        ite.setTagName(it.Name)
+
+        utilityLogger.Warningf(nil,
+            "Tag with ID (0x%04x) is not valid for IFD [%s], but it *is* "+
+                "valid as tag [%s] under IFD [%s] and has the same type "+
+                "[%s], so we will use that. This EXIF blob was probably "+
+                "written by a buggy implementation.",
+            tagId, ii.UnindexedString(), it.Name, it.IfdPath,
+            tagType)
+
+        if med != nil {
+            med.unknownTags[originalBt] = exifcommon.BasicTag{
+                IfdPath: it.IfdPath,
+                TagId:   tagId,
+            }
+        }
+    }
+
+    // This is a known tag (from the standard, unless the user did
+    // something different).
+
+    // Skip any tags that have a type that doesn't match the type in the
+    // index (which is loaded with the standard and accepted tag
+    // information unless configured otherwise).
+    //
+    // We've run into multiple instances of the same tag, where a) no
+    // tag should ever be repeated, and b) all but one had an incorrect
+    // type and caused parsing/conversion woes. So, this is a quick fix
+    // for those scenarios.
+    if ie.tagIndex.UniversalSearch() == false && it.DoesSupportType(tagType) == false {
+        ifdEnumerateLogger.Warningf(nil,
+            "Skipping tag [%s] (0x%04x) [%s] with an unexpected type: %v ∉ %v",
+            ii.UnindexedString(), tagId, it.Name,
+            tagType, it.SupportedTypes)
+
+        return ErrTagNotFound
+    }
+
+    return nil
+}
+
+// parseIfd decodes the IFD block that we're currently sitting on the first
+// byte of.
+func (ie *IfdEnumerate) parseIfd(ii *exifcommon.IfdIdentity, bp *byteParser, visitor TagVisitorFn, doDescend bool, med *MiscellaneousExifData) (nextIfdOffset uint32, entries []*IfdTagEntry, thumbnailData []byte, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    tagCount, _, err := bp.getUint16()
+    log.PanicIf(err)
+
+    ifdEnumerateLogger.Debugf(nil, "IFD [%s] tag-count: (%d)", ii.String(), tagCount)
+
+    entries = make([]*IfdTagEntry, 0)
+
+    var enumeratorThumbnailOffset *IfdTagEntry
+    var enumeratorThumbnailSize *IfdTagEntry
+
+    for i := 0; i < int(tagCount); i++ {
+        ite, err := ie.parseTag(ii, i, bp)
+        if err != nil {
+            if log.Is(err, ErrTagNotFound) == true || log.Is(err, ErrTagTypeNotValid) == true {
+                // These tags should've been fully logged in parseTag(). The
+                // ITE returned is nil so we can't print anything about them, now.
+
+// parseIfd decodes the IFD block that we're currently sitting on the first
+// byte of.
+func (ie *IfdEnumerate) parseIfd(ii *exifcommon.IfdIdentity, bp *byteParser, visitor TagVisitorFn, doDescend bool, med *MiscellaneousExifData) (nextIfdOffset uint32, entries []*IfdTagEntry, thumbnailData []byte, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	tagCount, _, err := bp.getUint16()
+	log.PanicIf(err)
+
+	ifdEnumerateLogger.Debugf(nil, "IFD [%s] tag-count: (%d)", ii.String(), tagCount)
+
+	entries = make([]*IfdTagEntry, 0)
+
+	var enumeratorThumbnailOffset *IfdTagEntry
+	var enumeratorThumbnailSize *IfdTagEntry
+
+	for i := 0; i < int(tagCount); i++ {
+		ite, err := ie.parseTag(ii, i, bp)
+		if err != nil {
+			if log.Is(err, ErrTagNotFound) == true || log.Is(err, ErrTagTypeNotValid) == true {
+				// These tags should've been fully logged in parseTag(). The
+				// ITE returned is nil so we can't print anything about them, now.
+				continue
+			}
+
+			log.Panic(err)
+		}
+
+		err = ie.tagPostParse(ite, med)
+		if err != nil {
+			if err == ErrTagNotFound {
+				continue
+			}
+
+			log.PanicIf(err)
+		}
+
+		tagId := ite.TagId()
+
+		if visitor != nil {
+			err := visitor(ite)
+			log.PanicIf(err)
+		}
+
+		if ite.IsThumbnailOffset() == true {
+			ifdEnumerateLogger.Debugf(nil, "Skipping the thumbnail offset tag (0x%04x). Use accessors to get it or set it.", tagId)
+
+			enumeratorThumbnailOffset = ite
+			entries = append(entries, ite)
+
+			continue
+		} else if ite.IsThumbnailSize() == true {
+			ifdEnumerateLogger.Debugf(nil, "Skipping the thumbnail size tag (0x%04x). Use accessors to get it or set it.", tagId)
+
+			enumeratorThumbnailSize = ite
+			entries = append(entries, ite)
+
+			continue
+		}
+
+		if ite.TagType() != exifcommon.TypeUndefined {
+			// If this tag's value is an offset, bump our max-offset value to
+			// what that offset is plus however large that value is.
+
+			vc := ite.getValueContext()
+
+			farOffset, err := vc.GetFarOffset()
+			if err == nil {
+				candidateOffset := farOffset + uint32(vc.SizeInBytes())
+				if candidateOffset > ie.furthestOffset {
+					ie.furthestOffset = candidateOffset
+				}
+			} else if err != exifcommon.ErrNotFarValue {
+				log.PanicIf(err)
+			}
+		}
+
+		// If it's an IFD but not a standard one, it'll just be seen as a LONG
+		// (the standard IFD tag type), later, unless we skip it because it's
+		// [likely] not even in the standard list of known tags.
+		if ite.ChildIfdPath() != "" {
+			if doDescend == true {
+				ifdEnumerateLogger.Debugf(nil, "Descending from IFD [%s] to IFD [%s].", ii, ite.ChildIfdPath())
+
+				currentIfdTag := ii.IfdTag()
+
+				childIfdTag :=
+					exifcommon.NewIfdTag(
+						&currentIfdTag,
+						ite.TagId(),
+						ite.ChildIfdName())
+
+				iiChild := ii.NewChild(childIfdTag, 0)
+
+				err := ie.scan(iiChild, ite.getValueOffset(), visitor, med)
+				log.PanicIf(err)
+
+				ifdEnumerateLogger.Debugf(nil, "Ascending from IFD [%s] to IFD [%s].", ite.ChildIfdPath(), ii)
+			}
+		}
+
+		entries = append(entries, ite)
+	}
+
+	if enumeratorThumbnailOffset != nil && enumeratorThumbnailSize != nil {
+		thumbnailData, err = ie.parseThumbnail(enumeratorThumbnailOffset, enumeratorThumbnailSize)
+		if err != nil {
+			ifdEnumerateLogger.Errorf(
+				nil, err,
+				"We tried to bump our furthest-offset counter but there was an issue first seeking past the thumbnail.")
+		} else {
+			// In this case, the value is always an offset.
+			offset := enumeratorThumbnailOffset.getValueOffset()
+
+			// In this case, the value is always a length.
+			length := enumeratorThumbnailSize.getValueOffset()
+
+			ifdEnumerateLogger.Debugf(nil, "Found thumbnail in IFD [%s]. Its offset is (%d) and is (%d) bytes.", ii, offset, length)
+
+			furthestOffset := offset + length
+
+			if furthestOffset > ie.furthestOffset {
+				ie.furthestOffset = furthestOffset
+			}
+		}
+	}
+
+	nextIfdOffset, _, err = bp.getUint32()
+	log.PanicIf(err)
+
+	_, alreadyVisited := ie.visitedIfdOffsets[nextIfdOffset]
+
+	if alreadyVisited == true {
+		ifdEnumerateLogger.Warningf(nil, "IFD at offset (0x%08x) has been linked-to more than once. There might be a cycle in the IFD chain. Not reparsing.", nextIfdOffset)
+		nextIfdOffset = 0
+	}
+
+	if nextIfdOffset != 0 {
+		ie.visitedIfdOffsets[nextIfdOffset] = struct{}{}
+		ifdEnumerateLogger.Debugf(nil, "[%s] Next IFD at offset: (0x%08x)", ii.String(), nextIfdOffset)
+	} else {
+		ifdEnumerateLogger.Debugf(nil, "[%s] IFD chain has terminated.", ii.String())
+	}
+
+	return nextIfdOffset, entries, thumbnailData, nil
+}
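
parseIfd only records the thumbnail offset/size tag pair; the assembled bytes surface through Ifd.Thumbnail() further down in this file. A sketch of recovering the conventional IFD1 thumbnail, under the same assumption about the package-level exif.Collect helper, with a hypothetical file path:

```go
package main

import (
	"os"

	exif "github.com/dsoprea/go-exif/v3"
	exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
	rawExif, err := exif.SearchFileAndExtractExif("photo.jpg") // hypothetical path
	if err != nil {
		panic(err)
	}

	im, err := exifcommon.NewIfdMappingWithStandard()
	if err != nil {
		panic(err)
	}

	_, index, err := exif.Collect(im, exif.NewTagIndex(), rawExif)
	if err != nil {
		panic(err)
	}

	// The thumbnail conventionally lives in IFD1, the chain sibling of the
	// root IFD.
	ifd1 := index.RootIfd.NextIfd()
	if ifd1 == nil {
		return // No IFD1, so no thumbnail.
	}

	data, err := ifd1.Thumbnail()
	if err != nil {
		return // Typically ErrNoThumbnail.
	}

	_ = os.WriteFile("thumbnail.jpg", data, 0o644)
}
```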
+
+func (ie *IfdEnumerate) parseThumbnail(offsetIte, lengthIte *IfdTagEntry) (thumbnailData []byte, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	vRaw, err := lengthIte.Value()
+	log.PanicIf(err)
+
+	vList := vRaw.([]uint32)
+	if len(vList) != 1 {
+		log.Panicf("not exactly one long: (%d)", len(vList))
+	}
+
+	length := vList[0]
+
+	// The tag is officially a LONG type, but it's actually an offset to a blob of bytes.
+	offsetIte.updateTagType(exifcommon.TypeByte)
+	offsetIte.updateUnitCount(length)
+
+	thumbnailData, err = offsetIte.GetRawBytes()
+	log.PanicIf(err)
+
+	return thumbnailData, nil
+}
+
+// scan parses and enumerates the different IFD blocks and invokes a visitor
+// callback for each tag. No information is kept or returned.
+func (ie *IfdEnumerate) scan(iiGeneral *exifcommon.IfdIdentity, ifdOffset uint32, visitor TagVisitorFn, med *MiscellaneousExifData) (err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	// TODO(dustin): Add test
+
+	for ifdIndex := 0; ; ifdIndex++ {
+		iiSibling := iiGeneral.NewSibling(ifdIndex)
+
+		ifdEnumerateLogger.Debugf(nil, "Parsing IFD [%s] at offset (0x%04x) (scan).", iiSibling.String(), ifdOffset)
+
+		bp, err := ie.getByteParser(ifdOffset)
+		if err != nil {
+			if err == ErrOffsetInvalid {
+				ifdEnumerateLogger.Errorf(nil, nil, "IFD [%s] at offset (0x%04x) is unreachable. Terminating scan.", iiSibling.String(), ifdOffset)
+				break
+			}
+
+			log.Panic(err)
+		}
+
+		nextIfdOffset, _, _, err := ie.parseIfd(iiSibling, bp, visitor, true, med)
+		log.PanicIf(err)
+
+		currentOffset := bp.CurrentOffset()
+		if currentOffset > ie.furthestOffset {
+			ie.furthestOffset = currentOffset
+		}
+
+		if nextIfdOffset == 0 {
+			break
+		}
+
+		ifdOffset = nextIfdOffset
+	}
+
+	return nil
+}
+
+// MiscellaneousExifData reports additional data collected during the parse.
+type MiscellaneousExifData struct {
+	// UnknownTags contains all tags that were invalid for their containing
+	// IFDs. The values represent alternative IFDs that were correctly matched
+	// to those tags and used instead.
+	unknownTags map[exifcommon.BasicTag]exifcommon.BasicTag
+}
+
+// UnknownTags returns the unknown tags encountered during the scan.
+func (med *MiscellaneousExifData) UnknownTags() map[exifcommon.BasicTag]exifcommon.BasicTag {
+	return med.unknownTags
+}
+
+// ScanOptions tweaks parser behavior/choices.
+type ScanOptions struct {
+	// NOTE(dustin): Reserved for future usage.
+}
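
The relocation bookkeeping above can be read back by callers after a Scan. A small sketch of consuming it, using only the accessors in this hunk (the med value is whatever Scan returned):

```go
package main

import (
	"fmt"

	exif "github.com/dsoprea/go-exif/v3"
)

// reportRelocatedTags prints every tag that was recorded under the wrong IFD,
// alongside the IFD it was ultimately matched to. An empty matched value means
// that no alternative IFD matched at all.
func reportRelocatedTags(med *exif.MiscellaneousExifData) {
	for original, matched := range med.UnknownTags() {
		if matched.IfdPath == "" {
			fmt.Printf("tag (0x%04x) in [%s]: no alternative IFD matched\n",
				original.TagId, original.FqIfdPath)
			continue
		}

		fmt.Printf("tag (0x%04x) in [%s]: matched under [%s]\n",
			original.TagId, original.FqIfdPath, matched.IfdPath)
	}
}
```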
+
+// Scan enumerates the different EXIF blocks (called IFDs). The root IFD will
+// be "IFD" in the TIFF standard.
+func (ie *IfdEnumerate) Scan(iiRoot *exifcommon.IfdIdentity, ifdOffset uint32, visitor TagVisitorFn, so *ScanOptions) (med *MiscellaneousExifData, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	// TODO(dustin): Add test
+
+	med = &MiscellaneousExifData{
+		unknownTags: make(map[exifcommon.BasicTag]exifcommon.BasicTag),
+	}
+
+	err = ie.scan(iiRoot, ifdOffset, visitor, med)
+	log.PanicIf(err)
+
+	ifdEnumerateLogger.Debugf(nil, "Scan: It looks like the furthest offset that contained EXIF data in the EXIF blob was (%d) (Scan).", ie.FurthestOffset())
+
+	return med, nil
+}
+
+// Ifd represents a single, parsed IFD.
+type Ifd struct {
+	ifdIdentity *exifcommon.IfdIdentity
+
+	ifdMapping *exifcommon.IfdMapping
+	tagIndex   *TagIndex
+
+	offset    uint32
+	byteOrder binary.ByteOrder
+	id        int
+
+	parentIfd *Ifd
+
+	// parentTagIndex is our tag position in the parent IFD, if we had a parent
+	// (if `parentIfd` is not nil and we weren't an IFD referenced as a sibling
+	// instead of as a child).
+	parentTagIndex int
+
+	entries        []*IfdTagEntry
+	entriesByTagId map[uint16][]*IfdTagEntry
+
+	children      []*Ifd
+	childIfdIndex map[string]*Ifd
+
+	thumbnailData []byte
+
+	nextIfdOffset uint32
+	nextIfd       *Ifd
+}
+
+// IfdIdentity returns the IFD identity that this struct represents.
+func (ifd *Ifd) IfdIdentity() *exifcommon.IfdIdentity {
+	return ifd.ifdIdentity
+}
+
+// Entries returns a flat list of all tags for this IFD.
+func (ifd *Ifd) Entries() []*IfdTagEntry {
+
+	// TODO(dustin): Add test
+
+	return ifd.entries
+}
+
+// EntriesByTagId returns a map of all tags for this IFD.
+func (ifd *Ifd) EntriesByTagId() map[uint16][]*IfdTagEntry {
+
+	// TODO(dustin): Add test
+
+	return ifd.entriesByTagId
+}
+
+// Children returns a flat list of all child IFDs of this IFD.
+func (ifd *Ifd) Children() []*Ifd {
+
+	// TODO(dustin): Add test
+
+	return ifd.children
+}
+
+// ChildIfdIndex returns a map of all child IFDs of this IFD, keyed by
+// unindexed IFD path.
+func (ifd *Ifd) ChildIfdIndex() map[string]*Ifd {
+
+	// TODO(dustin): Add test
+
+	return ifd.childIfdIndex
+}
+
+// ParentTagIndex returns the position of this IFD's tag in its parent IFD (*if*
+// there is a parent).
+func (ifd *Ifd) ParentTagIndex() int {
+
+	// TODO(dustin): Add test
+
+	return ifd.parentTagIndex
+}
+
+// Offset returns the offset of the IFD in the stream.
+func (ifd *Ifd) Offset() uint32 {
+
+	// TODO(dustin): Add test
+
+	return ifd.offset
+}
+
+// ByteOrder returns the byte order of the EXIF stream.
+func (ifd *Ifd) ByteOrder() binary.ByteOrder {
+
+	// TODO(dustin): Add test
+
+	return ifd.byteOrder
+}
+
+// NextIfd returns the Ifd struct for the next IFD in the chain.
+func (ifd *Ifd) NextIfd() *Ifd {
+
+	// TODO(dustin): Add test
+
+	return ifd.nextIfd
+}
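
These accessors are sufficient to walk the whole parsed structure by hand. A recursive sketch over an Ifd obtained from a Collect call, using only the exported accessors above:

```go
package main

import (
	"fmt"
	"strings"

	exif "github.com/dsoprea/go-exif/v3"
)

// printIfd walks a parsed Ifd, its child IFDs, and its chain siblings,
// printing one line per IFD.
func printIfd(ifd *exif.Ifd, depth int) {
	indent := strings.Repeat("  ", depth)

	fmt.Printf("%s[%s]: (%d) entries, (%d) children\n",
		indent, ifd.IfdIdentity().String(), len(ifd.Entries()), len(ifd.Children()))

	// Child IFDs (e.g. Exif, GPS) are nested under their parent.
	for _, child := range ifd.Children() {
		printIfd(child, depth+1)
	}

	// IFD1 and later siblings are linked, not nested.
	if next := ifd.NextIfd(); next != nil {
		printIfd(next, depth)
	}
}
```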
+
+// ChildWithIfdPath returns an `Ifd` struct for the given child of the current
+// IFD.
+func (ifd *Ifd) ChildWithIfdPath(iiChild *exifcommon.IfdIdentity) (childIfd *Ifd, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	// TODO(dustin): This is a bridge while we're introducing the IFD type-system. We should be able to use the (IfdIdentity).Equals() method for this.
+	ifdPath := iiChild.UnindexedString()
+
+	for _, childIfd := range ifd.children {
+		if childIfd.ifdIdentity.UnindexedString() == ifdPath {
+			return childIfd, nil
+		}
+	}
+
+	log.Panic(ErrTagNotFound)
+	return nil, nil
+}
+
+// FindTagWithId returns a list of tags (usually just zero or one) that match
+// the given tag ID. This is efficient.
+func (ifd *Ifd) FindTagWithId(tagId uint16) (results []*IfdTagEntry, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	results, found := ifd.entriesByTagId[tagId]
+	if found != true {
+		log.Panic(ErrTagNotFound)
+	}
+
+	return results, nil
+}
+
+// FindTagWithName returns a list of tags (usually just zero or one) that match
+// the given tag name. This is not efficient (though the labor is trivial).
+func (ifd *Ifd) FindTagWithName(tagName string) (results []*IfdTagEntry, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	it, err := ifd.tagIndex.GetWithName(ifd.ifdIdentity, tagName)
+	if log.Is(err, ErrTagNotFound) == true {
+		log.Panic(ErrTagNotKnown)
+	} else if err != nil {
+		log.Panic(err)
+	}
+
+	results = make([]*IfdTagEntry, 0)
+	for _, ite := range ifd.entries {
+		if ite.TagId() == it.Id {
+			results = append(results, ite)
+		}
+	}
+
+	if len(results) == 0 {
+		log.Panic(ErrTagNotFound)
+	}
+
+	return results, nil
+}
+
+// String returns a description string.
+func (ifd *Ifd) String() string {
+	parentOffset := uint32(0)
+	if ifd.parentIfd != nil {
+		parentOffset = ifd.parentIfd.offset
+	}
+
+	return fmt.Sprintf("Ifd<ID=(%d) IFD-PATH=[%s] INDEX=(%d) COUNT=(%d) OFF=(0x%04x) CHILDREN=(%d) PARENT-OFF=(0x%04x) NEXT-IFD-OFF=(0x%04x)>", ifd.id, ifd.ifdIdentity.UnindexedString(), ifd.ifdIdentity.Index(), len(ifd.entries), ifd.offset, len(ifd.children), parentOffset, ifd.nextIfdOffset)
+}
+
+// Thumbnail returns the raw thumbnail bytes. This is typically directly
+// readable by any standard image viewer.
+func (ifd *Ifd) Thumbnail() (data []byte, err error) {
+
+	if ifd.thumbnailData == nil {
+		return nil, ErrNoThumbnail
+	}
+
+	return ifd.thumbnailData, nil
+}
+
+// dumpTags recursively builds a list of tags from an IFD.
+func (ifd *Ifd) dumpTags(tags []*IfdTagEntry) []*IfdTagEntry {
+	if tags == nil {
+		tags = make([]*IfdTagEntry, 0)
+	}
+
+	// Now, print the tags while also descending to child-IFDS as we encounter them.
+
+	ifdsFoundCount := 0
+
+	for _, ite := range ifd.entries {
+		tags = append(tags, ite)
+
+		childIfdPath := ite.ChildIfdPath()
+		if childIfdPath != "" {
+			ifdsFoundCount++
+
+			childIfd, found := ifd.childIfdIndex[childIfdPath]
+			if found != true {
+				log.Panicf("alien child IFD referenced by a tag: [%s]", childIfdPath)
+			}
+
+			tags = childIfd.dumpTags(tags)
+		}
+	}
+
+	if len(ifd.children) != ifdsFoundCount {
+		log.Panicf("have one or more dangling child IFDs: (%d) != (%d)", len(ifd.children), ifdsFoundCount)
+	}
+
+	if ifd.nextIfd != nil {
+		tags = ifd.nextIfd.dumpTags(tags)
+	}
+
+	return tags
+}
+
+// DumpTags returns a flat list of all tags in the IFD hierarchy.
+func (ifd *Ifd) DumpTags() []*IfdTagEntry {
+	return ifd.dumpTags(nil)
+}
+
+func (ifd *Ifd) printTagTree(populateValues bool, index, level int, nextLink bool) {
+	indent := strings.Repeat(" ", level*2)
+
+	prefix := " "
+	if nextLink {
+		prefix = ">"
+	}
+
+	fmt.Printf("%s%sIFD: %s\n", indent, prefix, ifd)
+
+	// Now, print the tags while also descending to child-IFDS as we encounter them.
+ + ifdsFoundCount := 0 + + for _, ite := range ifd.entries { + if ite.ChildIfdPath() != "" { + fmt.Printf("%s - TAG: %s\n", indent, ite) + } else { + // This will just add noise to the output (byte-tags are fully + // dumped). + if ite.IsThumbnailOffset() == true || ite.IsThumbnailSize() == true { + continue + } + + it, err := ifd.tagIndex.Get(ifd.ifdIdentity, ite.TagId()) + + tagName := "" + if err == nil { + tagName = it.Name + } + + var valuePhrase string + if populateValues == true { + var err error + + valuePhrase, err = ite.Format() + if err != nil { + if log.Is(err, exifcommon.ErrUnhandledUndefinedTypedTag) == true { + ifdEnumerateLogger.Warningf(nil, "Skipping non-standard undefined tag: [%s] (%04x)", ifd.ifdIdentity.UnindexedString(), ite.TagId()) + continue + } else if err == exifundefined.ErrUnparseableValue { + ifdEnumerateLogger.Warningf(nil, "Skipping unparseable undefined tag: [%s] (%04x) [%s]", ifd.ifdIdentity.UnindexedString(), ite.TagId(), it.Name) + continue + } + + log.Panic(err) + } + } else { + valuePhrase = "!UNRESOLVED" + } + + fmt.Printf("%s - TAG: %s NAME=[%s] VALUE=[%v]\n", indent, ite, tagName, valuePhrase) + } + + childIfdPath := ite.ChildIfdPath() + if childIfdPath != "" { + ifdsFoundCount++ + + childIfd, found := ifd.childIfdIndex[childIfdPath] + if found != true { + log.Panicf("alien child IFD referenced by a tag: [%s]", childIfdPath) + } + + childIfd.printTagTree(populateValues, 0, level+1, false) + } + } + + if len(ifd.children) != ifdsFoundCount { + log.Panicf("have one or more dangling child IFDs: (%d) != (%d)", len(ifd.children), ifdsFoundCount) + } + + if ifd.nextIfd != nil { + ifd.nextIfd.printTagTree(populateValues, index+1, level, true) + } +} + +// PrintTagTree prints the IFD hierarchy. +func (ifd *Ifd) PrintTagTree(populateValues bool) { + ifd.printTagTree(populateValues, 0, 0, false) +} + +func (ifd *Ifd) printIfdTree(level int, nextLink bool) { + indent := strings.Repeat(" ", level*2) + + prefix := " " + if nextLink { + prefix = ">" + } + + fmt.Printf("%s%s%s\n", indent, prefix, ifd) + + // Now, print the tags while also descending to child-IFDS as we encounter them. + + ifdsFoundCount := 0 + + for _, ite := range ifd.entries { + childIfdPath := ite.ChildIfdPath() + if childIfdPath != "" { + ifdsFoundCount++ + + childIfd, found := ifd.childIfdIndex[childIfdPath] + if found != true { + log.Panicf("alien child IFD referenced by a tag: [%s]", childIfdPath) + } + + childIfd.printIfdTree(level+1, false) + } + } + + if len(ifd.children) != ifdsFoundCount { + log.Panicf("have one or more dangling child IFDs: (%d) != (%d)", len(ifd.children), ifdsFoundCount) + } + + if ifd.nextIfd != nil { + ifd.nextIfd.printIfdTree(level, true) + } +} + +// PrintIfdTree prints the IFD hierarchy. 
+func (ifd *Ifd) PrintIfdTree() { + ifd.printIfdTree(0, false) +} + +func (ifd *Ifd) dumpTree(tagsDump []string, level int) []string { + if tagsDump == nil { + tagsDump = make([]string, 0) + } + + indent := strings.Repeat(" ", level*2) + + var ifdPhrase string + if ifd.parentIfd != nil { + ifdPhrase = fmt.Sprintf("[%s]->[%s]:(%d)", ifd.parentIfd.ifdIdentity.UnindexedString(), ifd.ifdIdentity.UnindexedString(), ifd.ifdIdentity.Index()) + } else { + ifdPhrase = fmt.Sprintf("[ROOT]->[%s]:(%d)", ifd.ifdIdentity.UnindexedString(), ifd.ifdIdentity.Index()) + } + + startBlurb := fmt.Sprintf("%s> IFD %s TOP", indent, ifdPhrase) + tagsDump = append(tagsDump, startBlurb) + + ifdsFoundCount := 0 + for _, ite := range ifd.entries { + tagsDump = append(tagsDump, fmt.Sprintf("%s - (0x%04x)", indent, ite.TagId())) + + childIfdPath := ite.ChildIfdPath() + if childIfdPath != "" { + ifdsFoundCount++ + + childIfd, found := ifd.childIfdIndex[childIfdPath] + if found != true { + log.Panicf("alien child IFD referenced by a tag: [%s]", childIfdPath) + } + + tagsDump = childIfd.dumpTree(tagsDump, level+1) + } + } + + if len(ifd.children) != ifdsFoundCount { + log.Panicf("have one or more dangling child IFDs: (%d) != (%d)", len(ifd.children), ifdsFoundCount) + } + + finishBlurb := fmt.Sprintf("%s< IFD %s BOTTOM", indent, ifdPhrase) + tagsDump = append(tagsDump, finishBlurb) + + if ifd.nextIfd != nil { + siblingBlurb := fmt.Sprintf("%s* LINKING TO SIBLING IFD [%s]:(%d)", indent, ifd.nextIfd.ifdIdentity.UnindexedString(), ifd.nextIfd.ifdIdentity.Index()) + tagsDump = append(tagsDump, siblingBlurb) + + tagsDump = ifd.nextIfd.dumpTree(tagsDump, level) + } + + return tagsDump +} + +// DumpTree returns a list of strings describing the IFD hierarchy. +func (ifd *Ifd) DumpTree() []string { + return ifd.dumpTree(nil, 0) +} + +// GpsInfo parses and consolidates the GPS info. This can only be called on the +// GPS IFD. +func (ifd *Ifd) GpsInfo() (gi *GpsInfo, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + gi = new(GpsInfo) + + if ifd.ifdIdentity.Equals(exifcommon.IfdGpsInfoStandardIfdIdentity) == false { + log.Panicf("GPS can only be read on GPS IFD: [%s]", ifd.ifdIdentity.UnindexedString()) + } + + if tags, found := ifd.entriesByTagId[TagGpsVersionId]; found == false { + // We've seen this. We'll just have to default to assuming we're in a + // 2.2.0.0 format. + ifdEnumerateLogger.Warningf(nil, "No GPS version tag (0x%04x) found.", TagGpsVersionId) + } else { + versionBytes, err := tags[0].GetRawBytes() + log.PanicIf(err) + + hit := false + for _, acceptedGpsVersion := range ValidGpsVersions { + if bytes.Compare(versionBytes, acceptedGpsVersion[:]) == 0 { + hit = true + break + } + } + + if hit != true { + ifdEnumerateLogger.Warningf(nil, "GPS version not supported: %v", versionBytes) + log.Panic(ErrNoGpsTags) + } + } + + tags, found := ifd.entriesByTagId[TagLatitudeId] + if found == false { + ifdEnumerateLogger.Warningf(nil, "latitude not found") + log.Panic(ErrNoGpsTags) + } + + latitudeValue, err := tags[0].Value() + log.PanicIf(err) + + // Look for whether North or South. 
+ tags, found = ifd.entriesByTagId[TagLatitudeRefId] + if found == false { + ifdEnumerateLogger.Warningf(nil, "latitude-ref not found") + log.Panic(ErrNoGpsTags) + } + + latitudeRefValue, err := tags[0].Value() + log.PanicIf(err) + + tags, found = ifd.entriesByTagId[TagLongitudeId] + if found == false { + ifdEnumerateLogger.Warningf(nil, "longitude not found") + log.Panic(ErrNoGpsTags) + } + + longitudeValue, err := tags[0].Value() + log.PanicIf(err) + + // Look for whether West or East. + tags, found = ifd.entriesByTagId[TagLongitudeRefId] + if found == false { + ifdEnumerateLogger.Warningf(nil, "longitude-ref not found") + log.Panic(ErrNoGpsTags) + } + + longitudeRefValue, err := tags[0].Value() + log.PanicIf(err) + + // Parse location. + + latitudeRaw := latitudeValue.([]exifcommon.Rational) + + gi.Latitude, err = NewGpsDegreesFromRationals(latitudeRefValue.(string), latitudeRaw) + log.PanicIf(err) + + longitudeRaw := longitudeValue.([]exifcommon.Rational) + + gi.Longitude, err = NewGpsDegreesFromRationals(longitudeRefValue.(string), longitudeRaw) + log.PanicIf(err) + + // Parse altitude. + + altitudeTags, foundAltitude := ifd.entriesByTagId[TagAltitudeId] + altitudeRefTags, foundAltitudeRef := ifd.entriesByTagId[TagAltitudeRefId] + + if foundAltitude == true && foundAltitudeRef == true { + altitudePhrase, err := altitudeTags[0].Format() + log.PanicIf(err) + + ifdEnumerateLogger.Debugf(nil, "Altitude is [%s].", altitudePhrase) + + altitudeValue, err := altitudeTags[0].Value() + log.PanicIf(err) + + altitudeRefPhrase, err := altitudeRefTags[0].Format() + log.PanicIf(err) + + ifdEnumerateLogger.Debugf(nil, "Altitude-reference is [%s].", altitudeRefPhrase) + + altitudeRefValue, err := altitudeRefTags[0].Value() + log.PanicIf(err) + + altitudeRaw := altitudeValue.([]exifcommon.Rational) + if altitudeRaw[0].Denominator > 0 { + altitude := int(altitudeRaw[0].Numerator / altitudeRaw[0].Denominator) + + if altitudeRefValue.([]byte)[0] == 1 { + altitude *= -1 + } + + gi.Altitude = altitude + } + } + + // Parse timestamp from separate date and time tags. + + timestampTags, foundTimestamp := ifd.entriesByTagId[TagTimestampId] + datestampTags, foundDatestamp := ifd.entriesByTagId[TagDatestampId] + + if foundTimestamp == true && foundDatestamp == true { + datestampValue, err := datestampTags[0].Value() + log.PanicIf(err) + + datePhrase := datestampValue.(string) + ifdEnumerateLogger.Debugf(nil, "Date tag value is [%s].", datePhrase) + + // Normalize the separators. 
+	datePhrase = strings.ReplaceAll(datePhrase, "-", ":")
+
+	dateParts := strings.Split(datePhrase, ":")
+
+	year, err1 := strconv.ParseUint(dateParts[0], 10, 16)
+	month, err2 := strconv.ParseUint(dateParts[1], 10, 8)
+	day, err3 := strconv.ParseUint(dateParts[2], 10, 8)
+
+	if err1 == nil && err2 == nil && err3 == nil {
+		timestampValue, err := timestampTags[0].Value()
+		log.PanicIf(err)
+
+		timePhrase, err := timestampTags[0].Format()
+		log.PanicIf(err)
+
+		ifdEnumerateLogger.Debugf(nil, "Time tag value is [%s].", timePhrase)
+
+		timestampRaw := timestampValue.([]exifcommon.Rational)
+
+		hour := int(timestampRaw[0].Numerator / timestampRaw[0].Denominator)
+		minute := int(timestampRaw[1].Numerator / timestampRaw[1].Denominator)
+		second := int(timestampRaw[2].Numerator / timestampRaw[2].Denominator)
+
+		gi.Timestamp = time.Date(int(year), time.Month(month), int(day), hour, minute, second, 0, time.UTC)
+	}
+	}
+
+	return gi, nil
+}
+
+// ParsedTagVisitor is a callback used to visit all tags and child IFDs,
+// starting from the current IFD and descending.
+type ParsedTagVisitor func(*Ifd, *IfdTagEntry) error
+
+// EnumerateTagsRecursively calls the given visitor function for every tag and
+// IFD in the current IFD, recursively.
+func (ifd *Ifd) EnumerateTagsRecursively(visitor ParsedTagVisitor) (err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	for ptr := ifd; ptr != nil; ptr = ptr.nextIfd {
+		for _, ite := range ptr.entries {
+			childIfdPath := ite.ChildIfdPath()
+			if childIfdPath != "" {
+				childIfd := ptr.childIfdIndex[childIfdPath]
+
+				err := childIfd.EnumerateTagsRecursively(visitor)
+				log.PanicIf(err)
+			} else {
+				err := visitor(ptr, ite)
+				log.PanicIf(err)
+			}
+		}
+	}
+
+	return nil
+}
+
+// QueuedIfd is one IFD that has been identified but not yet processed.
+type QueuedIfd struct {
+	IfdIdentity *exifcommon.IfdIdentity
+
+	Offset uint32
+	Parent *Ifd
+
+	// ParentTagIndex is our tag position in the parent IFD, if we had a parent
+	// (if `ParentIfd` is not nil and we weren't an IFD referenced as a sibling
+	// instead of as a child).
+	ParentTagIndex int
+}
+
+// IfdIndex collects a bunch of IFD and tag information stored in several
+// different ways in order to provide convenient lookups.
+type IfdIndex struct {
+	RootIfd *Ifd
+	Ifds    []*Ifd
+	Tree    map[int]*Ifd
+	Lookup  map[string]*Ifd
+}
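
Collect, defined next, is the usual way this index gets built. A sketch that ties it to the GPS consolidation above, assuming the package-level exif.Collect wrapper from exif.go and GpsDegrees.Decimal() from gps.go (neither is in this hunk); "IFD/GPSInfo" is the standard path registered later in this patch:

```go
package main

import (
	"fmt"

	exif "github.com/dsoprea/go-exif/v3"
	exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
	rawExif, err := exif.SearchFileAndExtractExif("photo.jpg") // hypothetical path
	if err != nil {
		panic(err)
	}

	im, err := exifcommon.NewIfdMappingWithStandard()
	if err != nil {
		panic(err)
	}

	_, index, err := exif.Collect(im, exif.NewTagIndex(), rawExif)
	if err != nil {
		panic(err)
	}

	// Resolve the GPS IFD by path and consolidate its tags.
	gpsIfd, err := exif.FindIfdFromRootIfd(index.RootIfd, "IFD/GPSInfo")
	if err != nil {
		panic(err)
	}

	gi, err := gpsIfd.GpsInfo()
	if err != nil {
		panic(err) // e.g. ErrNoGpsTags when coordinates are absent.
	}

	fmt.Printf("lat=(%.6f) lon=(%.6f)\n", gi.Latitude.Decimal(), gi.Longitude.Decimal())
}
```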
+
+// Collect enumerates the different EXIF blocks (called IFDs) and builds out an
+// index struct for referencing all of the parsed data.
+func (ie *IfdEnumerate) Collect(rootIfdOffset uint32) (index IfdIndex, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	// TODO(dustin): Add MiscellaneousExifData to IfdIndex
+
+	tree := make(map[int]*Ifd)
+	ifds := make([]*Ifd, 0)
+	lookup := make(map[string]*Ifd)
+
+	queue := []QueuedIfd{
+		{
+			IfdIdentity: exifcommon.IfdStandardIfdIdentity,
+			Offset:      rootIfdOffset,
+		},
+	}
+
+	edges := make(map[uint32]*Ifd)
+
+	for {
+		if len(queue) == 0 {
+			break
+		}
+
+		qi := queue[0]
+		ii := qi.IfdIdentity
+
+		offset := qi.Offset
+		parentIfd := qi.Parent
+
+		queue = queue[1:]
+
+		ifdEnumerateLogger.Debugf(nil, "Parsing IFD [%s] (%d) at offset (0x%04x) (Collect).", ii.String(), ii.Index(), offset)
+
+		bp, err := ie.getByteParser(offset)
+		if err != nil {
+			if err == ErrOffsetInvalid {
+				return index, err
+			}
+
+			log.Panic(err)
+		}
+
+		// TODO(dustin): We don't need to pass the index in as a separate argument. Get from the II.
+
+		nextIfdOffset, entries, thumbnailData, err := ie.parseIfd(ii, bp, nil, false, nil)
+		log.PanicIf(err)
+
+		currentOffset := bp.CurrentOffset()
+		if currentOffset > ie.furthestOffset {
+			ie.furthestOffset = currentOffset
+		}
+
+		id := len(ifds)
+
+		entriesByTagId := make(map[uint16][]*IfdTagEntry)
+		for _, ite := range entries {
+			tagId := ite.TagId()
+
+			tags, found := entriesByTagId[tagId]
+			if found == false {
+				tags = make([]*IfdTagEntry, 0)
+			}
+
+			entriesByTagId[tagId] = append(tags, ite)
+		}
+
+		ifd := &Ifd{
+			ifdIdentity: ii,
+
+			byteOrder: ie.byteOrder,
+
+			id: id,
+
+			parentIfd:      parentIfd,
+			parentTagIndex: qi.ParentTagIndex,
+
+			offset:         offset,
+			entries:        entries,
+			entriesByTagId: entriesByTagId,
+
+			// This is populated as each child is processed.
+			children: make([]*Ifd, 0),
+
+			nextIfdOffset: nextIfdOffset,
+			thumbnailData: thumbnailData,
+
+			ifdMapping: ie.ifdMapping,
+			tagIndex:   ie.tagIndex,
+		}
+
+		// Add ourselves to a big list of IFDs.
+		ifds = append(ifds, ifd)
+
+		// Install ourselves into a by-id lookup table (keys are unique).
+		tree[id] = ifd
+
+		// Install into by-name buckets.
+		lookup[ii.String()] = ifd
+
+		// Add a link from the previous IFD in the chain to us.
+		if previousIfd, found := edges[offset]; found == true {
+			previousIfd.nextIfd = ifd
+		}
+
+		// Attach as a child to our parent (where we appeared as a tag in
+		// that IFD).
+		if parentIfd != nil {
+			parentIfd.children = append(parentIfd.children, ifd)
+		}
+
+		// Determine if any of our entries is a child IFD and queue it.
+		for i, ite := range entries {
+			if ite.ChildIfdPath() == "" {
+				continue
+			}
+
+			tagId := ite.TagId()
+			childIfdName := ite.ChildIfdName()
+
+			currentIfdTag := ii.IfdTag()
+
+			childIfdTag :=
+				exifcommon.NewIfdTag(
+					&currentIfdTag,
+					tagId,
+					childIfdName)
+
+			iiChild := ii.NewChild(childIfdTag, 0)
+
+			qi := QueuedIfd{
+				IfdIdentity: iiChild,
+
+				Offset:         ite.getValueOffset(),
+				Parent:         ifd,
+				ParentTagIndex: i,
+			}
+
+			queue = append(queue, qi)
+		}
+
+		// If there's another IFD in the chain.
+		if nextIfdOffset != 0 {
+			iiSibling := ii.NewSibling(ii.Index() + 1)
+
+			// Allow the next link to know what the previous link was.
+			edges[nextIfdOffset] = ifd
+
+			qi := QueuedIfd{
+				IfdIdentity: iiSibling,
+				Offset:      nextIfdOffset,
+			}
+
+			queue = append(queue, qi)
+		}
+	}
+
+	index.RootIfd = tree[0]
+	index.Ifds = ifds
+	index.Tree = tree
+	index.Lookup = lookup
+
+	err = ie.setChildrenIndex(index.RootIfd)
+	log.PanicIf(err)
+
+	ifdEnumerateLogger.Debugf(nil, "Collect: It looks like the furthest offset that contained EXIF data in the EXIF blob was (%d).", ie.FurthestOffset())
+
+	return index, nil
+}
+
+func (ie *IfdEnumerate) setChildrenIndex(ifd *Ifd) (err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	childIfdIndex := make(map[string]*Ifd)
+	for _, childIfd := range ifd.children {
+		childIfdIndex[childIfd.ifdIdentity.UnindexedString()] = childIfd
+	}
+
+	ifd.childIfdIndex = childIfdIndex
+
+	for _, childIfd := range ifd.children {
+		err := ie.setChildrenIndex(childIfd)
+		log.PanicIf(err)
+	}
+
+	return nil
+}
+
+// FurthestOffset returns the furthest offset visited in the EXIF blob. This
+// *does not* account for the locations of any undefined tags since we always
+// evaluate the furthest offset, whether or not the user wants to know it.
+// +// We are not willing to incur the cost of actually parsing those tags just to +// know their length when there are still undefined tags that are out there +// that we still won't have any idea how to parse, thus making this an +// approximation regardless of how clever we get. +func (ie *IfdEnumerate) FurthestOffset() uint32 { + + // TODO(dustin): Add test + + return ie.furthestOffset +} + +// parseOneIfd is a hack to use an IE to parse a raw IFD block. Can be used for +// testing. The fqIfdPath ("fully-qualified IFD path") will be less qualified +// in that the numeric index will always be zero (the zeroth child) rather than +// the proper number (if its actually a sibling to the first child, for +// instance). +func parseOneIfd(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, ii *exifcommon.IfdIdentity, byteOrder binary.ByteOrder, ifdBlock []byte, visitor TagVisitorFn) (nextIfdOffset uint32, entries []*IfdTagEntry, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test + + ebs := NewExifReadSeekerWithBytes(ifdBlock) + + rs, err := ebs.GetReadSeeker(0) + log.PanicIf(err) + + bp, err := newByteParser(rs, byteOrder, 0) + if err != nil { + if err == ErrOffsetInvalid { + return 0, nil, err + } + + log.Panic(err) + } + + dummyEbs := NewExifReadSeekerWithBytes([]byte{}) + ie := NewIfdEnumerate(ifdMapping, tagIndex, dummyEbs, byteOrder) + + nextIfdOffset, entries, _, err = ie.parseIfd(ii, bp, visitor, true, nil) + log.PanicIf(err) + + return nextIfdOffset, entries, nil +} + +// parseOneTag is a hack to use an IE to parse a raw tag block. +func parseOneTag(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, ii *exifcommon.IfdIdentity, byteOrder binary.ByteOrder, tagBlock []byte) (ite *IfdTagEntry, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test + + ebs := NewExifReadSeekerWithBytes(tagBlock) + + rs, err := ebs.GetReadSeeker(0) + log.PanicIf(err) + + bp, err := newByteParser(rs, byteOrder, 0) + if err != nil { + if err == ErrOffsetInvalid { + return nil, err + } + + log.Panic(err) + } + + dummyEbs := NewExifReadSeekerWithBytes([]byte{}) + ie := NewIfdEnumerate(ifdMapping, tagIndex, dummyEbs, byteOrder) + + ite, err = ie.parseTag(ii, 0, bp) + log.PanicIf(err) + + err = ie.tagPostParse(ite, nil) + if err != nil { + if err == ErrTagNotFound { + return nil, err + } + + log.Panic(err) + } + + return ite, nil +} + +// FindIfdFromRootIfd returns the given `Ifd` given the root-IFD and path of the +// desired IFD. +func FindIfdFromRootIfd(rootIfd *Ifd, ifdPath string) (ifd *Ifd, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): !! Add test. + + lineage, err := rootIfd.ifdMapping.ResolvePath(ifdPath) + log.PanicIf(err) + + // Confirm the first IFD is our root IFD type, and then prune it because + // from then on we'll be searching down through our children. + + if len(lineage) == 0 { + log.Panicf("IFD path must be non-empty.") + } else if lineage[0].Name != exifcommon.IfdStandardIfdIdentity.Name() { + log.Panicf("First IFD path item must be [%s].", exifcommon.IfdStandardIfdIdentity.Name()) + } + + desiredRootIndex := lineage[0].Index + lineage = lineage[1:] + + // TODO(dustin): !! 
This is a poorly conceived fix that just doubles the work we already have to do below, which then interacts badly with the indices not being properly represented in the IFD-phrase.
+	// TODO(dustin): !! <-- However, we're not sure whether we shouldn't store a secondary IFD-path with the indices. Some IFDs may not necessarily restrict which IFD indices they can be a child of (only the IFD itself matters). Validation should be delegated to the caller.
+	thisIfd := rootIfd
+	for currentRootIndex := 0; currentRootIndex < desiredRootIndex; currentRootIndex++ {
+		if thisIfd.nextIfd == nil {
+			log.Panicf("Root-IFD index (%d) does not exist in the data.", currentRootIndex)
+		}
+
+		thisIfd = thisIfd.nextIfd
+	}
+
+	for _, itii := range lineage {
+		var hit *Ifd
+		for _, childIfd := range thisIfd.children {
+			if childIfd.ifdIdentity.TagId() == itii.TagId {
+				hit = childIfd
+				break
+			}
+		}
+
+		// If we didn't find the child, fail.
+		if hit == nil {
+			log.Panicf("IFD [%s] in [%s] not found: %s", itii.Name, ifdPath, thisIfd.children)
+		}
+
+		thisIfd = hit
+
+		// Advance to the requested sibling occurrence.
+		for i := 0; i < itii.Index; i++ {
+			if thisIfd.nextIfd == nil {
+				log.Panicf("IFD [%s] does not have (%d) occurrences/siblings", thisIfd.ifdIdentity.UnindexedString(), itii.Index)
+			}
+
+			thisIfd = thisIfd.nextIfd
+		}
+	}
+
+	return thisIfd, nil
+}
diff --git a/vendor/github.com/dsoprea/go-exif/v3/ifd_tag_entry.go b/vendor/github.com/dsoprea/go-exif/v3/ifd_tag_entry.go
new file mode 100644
index 000000000..ed6ba2291
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-exif/v3/ifd_tag_entry.go
@@ -0,0 +1,298 @@
+package exif
+
+import (
+	"fmt"
+	"io"
+
+	"encoding/binary"
+
+	"github.com/dsoprea/go-logging"
+
+	"github.com/dsoprea/go-exif/v3/common"
+	"github.com/dsoprea/go-exif/v3/undefined"
+)
+
+var (
+	iteLogger = log.NewLogger("exif.ifd_tag_entry")
+)
+
+// IfdTagEntry refers to a tag in the loaded EXIF block.
+type IfdTagEntry struct {
+	tagId          uint16
+	tagIndex       int
+	tagType        exifcommon.TagTypePrimitive
+	unitCount      uint32
+	valueOffset    uint32
+	rawValueOffset []byte
+
+	// childIfdName is the right-most atom in the IFD-path. We need this to
+	// construct the fully-qualified IFD-path.
+	childIfdName string
+
+	// childIfdPath is the IFD-path of the child if this tag represents a child
+	// IFD.
+	childIfdPath string
+
+	// childFqIfdPath is the IFD-path of the child if this tag represents a
+	// child IFD. Includes indices.
+	childFqIfdPath string
+
+	// TODO(dustin): !! IB's host the child-IBs directly in the tag, but that's not the case here. Refactor to accommodate it for a consistent experience.
+
+	ifdIdentity *exifcommon.IfdIdentity
+
+	isUnhandledUnknown bool
+
+	rs        io.ReadSeeker
+	byteOrder binary.ByteOrder
+
+	tagName string
+}
+
+func newIfdTagEntry(ii *exifcommon.IfdIdentity, tagId uint16, tagIndex int, tagType exifcommon.TagTypePrimitive, unitCount uint32, valueOffset uint32, rawValueOffset []byte, rs io.ReadSeeker, byteOrder binary.ByteOrder) *IfdTagEntry {
+	return &IfdTagEntry{
+		ifdIdentity:    ii,
+		tagId:          tagId,
+		tagIndex:       tagIndex,
+		tagType:        tagType,
+		unitCount:      unitCount,
+		valueOffset:    valueOffset,
+		rawValueOffset: rawValueOffset,
+		rs:             rs,
+		byteOrder:      byteOrder,
+	}
+}
+
+// String returns a stringified representation of the struct.
+func (ite *IfdTagEntry) String() string {
+	return fmt.Sprintf("IfdTagEntry<TAG-IFD-PATH=[%s] TAG-ID=(0x%04x) TAG-TYPE=[%s] UNIT-COUNT=(%d)>", ite.ifdIdentity.String(), ite.tagId, ite.tagType.String(), ite.unitCount)
+}
+
+// TagName returns the name of the tag.
This is determined elsewhere and set after
+// the parse (since it's not actually stored in the stream). If it's empty, it
+// is because it is an unknown tag (nonstandard or otherwise unavailable in the
+// tag-index).
+func (ite *IfdTagEntry) TagName() string {
+	return ite.tagName
+}
+
+// setTagName sets the tag-name. This provides the name for convenience and
+// efficiency by determining it when most efficient while we're parsing rather
+// than delegating it to the caller (or, worse, the user).
+func (ite *IfdTagEntry) setTagName(tagName string) {
+	ite.tagName = tagName
+}
+
+// IfdPath returns the fully-qualified path of the IFD that owns this tag.
+func (ite *IfdTagEntry) IfdPath() string {
+	return ite.ifdIdentity.String()
+}
+
+// TagId returns the ID of the tag that we represent. The combination of
+// (IfdPath(), TagId()) is unique.
+func (ite *IfdTagEntry) TagId() uint16 {
+	return ite.tagId
+}
+
+// IsThumbnailOffset returns true if the tag has the IFD and tag-ID of a
+// thumbnail offset.
+func (ite *IfdTagEntry) IsThumbnailOffset() bool {
+	return ite.tagId == ThumbnailOffsetTagId && ite.ifdIdentity.String() == ThumbnailFqIfdPath
+}
+
+// IsThumbnailSize returns true if the tag has the IFD and tag-ID of a thumbnail
+// size.
+func (ite *IfdTagEntry) IsThumbnailSize() bool {
+	return ite.tagId == ThumbnailSizeTagId && ite.ifdIdentity.String() == ThumbnailFqIfdPath
+}
+
+// TagType is the type of value for this tag.
+func (ite *IfdTagEntry) TagType() exifcommon.TagTypePrimitive {
+	return ite.tagType
+}
+
+// updateTagType sets an alternatively interpreted tag-type.
+func (ite *IfdTagEntry) updateTagType(tagType exifcommon.TagTypePrimitive) {
+	ite.tagType = tagType
+}
+
+// UnitCount returns the unit-count of the tag's value.
+func (ite *IfdTagEntry) UnitCount() uint32 {
+	return ite.unitCount
+}
+
+// updateUnitCount sets an alternatively interpreted unit-count.
+func (ite *IfdTagEntry) updateUnitCount(unitCount uint32) {
+	ite.unitCount = unitCount
+}
+
+// getValueOffset is the four-byte offset converted to an integer to point to
+// the location of its value in the EXIF block. The "get" prefix is used in
+// order to differentiate the naming of the method from the field.
+func (ite *IfdTagEntry) getValueOffset() uint32 {
+	return ite.valueOffset
+}
+
+// GetRawBytes renders a specific list of bytes from the value in this tag.
+func (ite *IfdTagEntry) GetRawBytes() (rawBytes []byte, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	valueContext := ite.getValueContext()
+
+	if ite.tagType == exifcommon.TypeUndefined {
+		value, err := exifundefined.Decode(valueContext)
+		if err != nil {
+			if err == exifcommon.ErrUnhandledUndefinedTypedTag {
+				ite.setIsUnhandledUnknown(true)
+				return nil, exifundefined.ErrUnparseableValue
+			} else if err == exifundefined.ErrUnparseableValue {
+				return nil, err
+			} else {
+				log.Panic(err)
+			}
+		}
+
+		// Encode it back, in order to get the raw bytes. This is the best,
+		// general way to do it with an undefined tag.
+
+		rawBytes, _, err := exifundefined.Encode(value, ite.byteOrder)
+		log.PanicIf(err)
+
+		return rawBytes, nil
+	}
+
+	rawBytes, err = valueContext.ReadRawEncoded()
+	log.PanicIf(err)
+
+	return rawBytes, nil
+}
+
+// Value returns the specific, parsed, typed value from the tag.
+func (ite *IfdTagEntry) Value() (value interface{}, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + valueContext := ite.getValueContext() + + if ite.tagType == exifcommon.TypeUndefined { + var err error + + value, err = exifundefined.Decode(valueContext) + if err != nil { + if err == exifcommon.ErrUnhandledUndefinedTypedTag || err == exifundefined.ErrUnparseableValue { + return nil, err + } + + log.Panic(err) + } + } else { + var err error + + value, err = valueContext.Values() + log.PanicIf(err) + } + + return value, nil +} + +// Format returns the tag's value as a string. +func (ite *IfdTagEntry) Format() (phrase string, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + value, err := ite.Value() + if err != nil { + if err == exifcommon.ErrUnhandledUndefinedTypedTag { + return exifundefined.UnparseableUnknownTagValuePlaceholder, nil + } else if err == exifundefined.ErrUnparseableValue { + return exifundefined.UnparseableHandledTagValuePlaceholder, nil + } + + log.Panic(err) + } + + phrase, err = exifcommon.FormatFromType(value, false) + log.PanicIf(err) + + return phrase, nil +} + +// FormatFirst returns the same as Format() but only the first item. +func (ite *IfdTagEntry) FormatFirst() (phrase string, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): We should add a convenience type "timestamp", to simplify translating to and from the physical ASCII and provide validation. + + value, err := ite.Value() + if err != nil { + if err == exifcommon.ErrUnhandledUndefinedTypedTag { + return exifundefined.UnparseableUnknownTagValuePlaceholder, nil + } + + log.Panic(err) + } + + phrase, err = exifcommon.FormatFromType(value, true) + log.PanicIf(err) + + return phrase, nil +} + +func (ite *IfdTagEntry) setIsUnhandledUnknown(isUnhandledUnknown bool) { + ite.isUnhandledUnknown = isUnhandledUnknown +} + +// SetChildIfd sets child-IFD information (if we represent a child IFD). +func (ite *IfdTagEntry) SetChildIfd(ii *exifcommon.IfdIdentity) { + ite.childFqIfdPath = ii.String() + ite.childIfdPath = ii.UnindexedString() + ite.childIfdName = ii.Name() +} + +// ChildIfdName returns the name of the child IFD +func (ite *IfdTagEntry) ChildIfdName() string { + return ite.childIfdName +} + +// ChildIfdPath returns the path of the child IFD. +func (ite *IfdTagEntry) ChildIfdPath() string { + return ite.childIfdPath +} + +// ChildFqIfdPath returns the complete path of the child IFD along with the +// numeric suffixes differentiating sibling occurrences of the same type. "0" +// indices are omitted. +func (ite *IfdTagEntry) ChildFqIfdPath() string { + return ite.childFqIfdPath +} + +// IfdIdentity returns the IfdIdentity associated with this tag. 
+func (ite *IfdTagEntry) IfdIdentity() *exifcommon.IfdIdentity { + return ite.ifdIdentity +} + +func (ite *IfdTagEntry) getValueContext() *exifcommon.ValueContext { + return exifcommon.NewValueContext( + ite.ifdIdentity.String(), + ite.tagId, + ite.unitCount, + ite.valueOffset, + ite.rawValueOffset, + ite.rs, + ite.tagType, + ite.byteOrder) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/package.go b/vendor/github.com/dsoprea/go-exif/v3/package.go new file mode 100644 index 000000000..428f74e3a --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/package.go @@ -0,0 +1,8 @@ +// Package exif parses raw EXIF information given a block of raw EXIF data. It +// can also construct new EXIF information, and provides tools for doing so. +// This package is not involved with the parsing of particular file-formats. +// +// The EXIF data must first be extracted and then provided to us. Conversely, +// when constructing new EXIF data, the caller is responsible for packaging +// this in whichever format they require. +package exif diff --git a/vendor/github.com/dsoprea/go-exif/v3/tags.go b/vendor/github.com/dsoprea/go-exif/v3/tags.go new file mode 100644 index 000000000..aca902c5d --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/tags.go @@ -0,0 +1,475 @@ +package exif + +import ( + "fmt" + "sync" + + "github.com/dsoprea/go-logging" + "gopkg.in/yaml.v2" + + "github.com/dsoprea/go-exif/v3/common" +) + +const ( + // IFD1 + + // ThumbnailFqIfdPath is the fully-qualified IFD path that the thumbnail + // must be found in. + ThumbnailFqIfdPath = "IFD1" + + // ThumbnailOffsetTagId returns the tag-ID of the thumbnail offset. + ThumbnailOffsetTagId = 0x0201 + + // ThumbnailSizeTagId returns the tag-ID of the thumbnail size. + ThumbnailSizeTagId = 0x0202 +) + +const ( + // GPS + + // TagGpsVersionId is the ID of the GPS version tag. + TagGpsVersionId = 0x0000 + + // TagLatitudeId is the ID of the GPS latitude tag. + TagLatitudeId = 0x0002 + + // TagLatitudeRefId is the ID of the GPS latitude orientation tag. + TagLatitudeRefId = 0x0001 + + // TagLongitudeId is the ID of the GPS longitude tag. + TagLongitudeId = 0x0004 + + // TagLongitudeRefId is the ID of the GPS longitude-orientation tag. + TagLongitudeRefId = 0x0003 + + // TagTimestampId is the ID of the GPS time tag. + TagTimestampId = 0x0007 + + // TagDatestampId is the ID of the GPS date tag. + TagDatestampId = 0x001d + + // TagAltitudeId is the ID of the GPS altitude tag. + TagAltitudeId = 0x0006 + + // TagAltitudeRefId is the ID of the GPS altitude-orientation tag. + TagAltitudeRefId = 0x0005 +) + +var ( + // tagsWithoutAlignment is a tag-lookup for tags whose value size won't + // necessarily be a multiple of its tag-type. + tagsWithoutAlignment = map[uint16]struct{}{ + // The thumbnail offset is stored as a long, but its data is a binary + // blob (not a slice of longs). + ThumbnailOffsetTagId: {}, + } +) + +var ( + tagsLogger = log.NewLogger("exif.tags") +) + +// File structures. + +type encodedTag struct { + // id is signed, here, because YAML doesn't have enough information to + // support unsigned. + Id int `yaml:"id"` + Name string `yaml:"name"` + TypeName string `yaml:"type_name"` + TypeNames []string `yaml:"type_names"` +} + +// Indexing structures. + +// IndexedTag describes one index lookup result. +type IndexedTag struct { + // Id is the tag-ID. + Id uint16 + + // Name is the tag name. + Name string + + // IfdPath is the proper IFD path of this tag. This is not fully-qualified. 
+	IfdPath string
+
+	// SupportedTypes is an unsorted list of allowed tag-types.
+	SupportedTypes []exifcommon.TagTypePrimitive
+}
+
+// String returns a descriptive string.
+func (it *IndexedTag) String() string {
+	return fmt.Sprintf("TAG<ID=(0x%04x) NAME=[%s] IFD=[%s]>", it.Id, it.Name, it.IfdPath)
+}
+
+// IsName returns true if this tag matches the given tag name.
+func (it *IndexedTag) IsName(ifdPath, name string) bool {
+	return it.Name == name && it.IfdPath == ifdPath
+}
+
+// Is returns true if this tag matches the given tag ID.
+func (it *IndexedTag) Is(ifdPath string, id uint16) bool {
+	return it.Id == id && it.IfdPath == ifdPath
+}
+
+// GetEncodingType returns the largest type that this tag's value can occupy.
+func (it *IndexedTag) GetEncodingType(value interface{}) exifcommon.TagTypePrimitive {
+	// For convenience, we handle encoding a `time.Time` directly.
+	if exifcommon.IsTime(value) == true {
+		// Timestamps are encoded as ASCII.
+		value = ""
+	}
+
+	if len(it.SupportedTypes) == 0 {
+		log.Panicf("IndexedTag [%s] (%d) has no supported types.", it.IfdPath, it.Id)
+	} else if len(it.SupportedTypes) == 1 {
+		return it.SupportedTypes[0]
+	}
+
+	supportsLong := false
+	supportsShort := false
+	supportsRational := false
+	supportsSignedRational := false
+	for _, supportedType := range it.SupportedTypes {
+		if supportedType == exifcommon.TypeLong {
+			supportsLong = true
+		} else if supportedType == exifcommon.TypeShort {
+			supportsShort = true
+		} else if supportedType == exifcommon.TypeRational {
+			supportsRational = true
+		} else if supportedType == exifcommon.TypeSignedRational {
+			supportsSignedRational = true
+		}
+	}
+
+	// We specifically check for the cases that we know to expect.
+
+	if supportsLong == true && supportsShort == true {
+		return exifcommon.TypeLong
+	} else if supportsRational == true && supportsSignedRational == true {
+		if value == nil {
+			log.Panicf("GetEncodingType: require value to be given")
+		}
+
+		if _, ok := value.(exifcommon.SignedRational); ok == true {
+			return exifcommon.TypeSignedRational
+		}
+
+		return exifcommon.TypeRational
+	}
+
+	log.Panicf("WidestSupportedType() case is not handled for tag [%s] (0x%04x): %v", it.IfdPath, it.Id, it.SupportedTypes)
+	return 0
+}
+
+// DoesSupportType returns true if this tag can be found/decoded with this type.
+func (it *IndexedTag) DoesSupportType(tagType exifcommon.TagTypePrimitive) bool {
+	// This is always a very small collection. So, we keep it unsorted.
+	for _, thisTagType := range it.SupportedTypes {
+		if thisTagType == tagType {
+			return true
+		}
+	}
+
+	return false
+}
+
+// TagIndex is a tag-lookup facility.
+type TagIndex struct {
+	tagsByIfd  map[string]map[uint16]*IndexedTag
+	tagsByIfdR map[string]map[string]*IndexedTag
+
+	mutex sync.Mutex
+
+	doUniversalSearch bool
+}
+
+// NewTagIndex returns a new TagIndex struct.
+func NewTagIndex() *TagIndex {
+	ti := new(TagIndex)
+
+	ti.tagsByIfd = make(map[string]map[uint16]*IndexedTag)
+	ti.tagsByIfdR = make(map[string]map[string]*IndexedTag)
+
+	return ti
+}
+
+// SetUniversalSearch enables a fallback to matching tags under *any* IFD.
+func (ti *TagIndex) SetUniversalSearch(flag bool) {
+	ti.doUniversalSearch = flag
+}
+
+// UniversalSearch returns whether the fallback to matching tags under *any*
+// IFD is enabled.
+func (ti *TagIndex) UniversalSearch() bool {
+	return ti.doUniversalSearch
+}
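
TagIndex.Add, just below, is also usable from the outside to register non-standard tags ahead of a parse. A sketch with a hypothetical vendor tag; note that the standard registry is loaded explicitly first, since getOne() only auto-loads it while the index is still completely empty:

```go
package main

import (
	exif "github.com/dsoprea/go-exif/v3"
	exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
	ti := exif.NewTagIndex()

	// Load the standard registry up front; the lazy load in getOne() would
	// otherwise be skipped once our custom tag is present.
	if err := exif.LoadStandardTags(ti); err != nil {
		panic(err)
	}

	// Hypothetical vendor tag; the ID, name, and IFD are illustrative only.
	err := ti.Add(&exif.IndexedTag{
		Id:             0x9999,
		Name:           "AcmeCameraMode",
		IfdPath:        "IFD/Exif",
		SupportedTypes: []exifcommon.TagTypePrimitive{exifcommon.TypeShort},
	})
	if err != nil {
		panic(err)
	}
}
```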
+
+// Add registers a new tag to be recognized during the parse.
+func (ti *TagIndex) Add(it *IndexedTag) (err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	ti.mutex.Lock()
+	defer ti.mutex.Unlock()
+
+	// Store by ID.
+
+	family, found := ti.tagsByIfd[it.IfdPath]
+	if found == false {
+		family = make(map[uint16]*IndexedTag)
+		ti.tagsByIfd[it.IfdPath] = family
+	}
+
+	if _, found := family[it.Id]; found == true {
+		log.Panicf("tag-ID defined more than once for IFD [%s]: (%02x)", it.IfdPath, it.Id)
+	}
+
+	family[it.Id] = it
+
+	// Store by name.
+
+	familyR, found := ti.tagsByIfdR[it.IfdPath]
+	if found == false {
+		familyR = make(map[string]*IndexedTag)
+		ti.tagsByIfdR[it.IfdPath] = familyR
+	}
+
+	if _, found := familyR[it.Name]; found == true {
+		log.Panicf("tag-name defined more than once for IFD [%s]: (%s)", it.IfdPath, it.Name)
+	}
+
+	familyR[it.Name] = it
+
+	return nil
+}
+
+func (ti *TagIndex) getOne(ifdPath string, id uint16) (it *IndexedTag, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	if len(ti.tagsByIfd) == 0 {
+		err := LoadStandardTags(ti)
+		log.PanicIf(err)
+	}
+
+	ti.mutex.Lock()
+	defer ti.mutex.Unlock()
+
+	family, found := ti.tagsByIfd[ifdPath]
+	if found == false {
+		return nil, ErrTagNotFound
+	}
+
+	it, found = family[id]
+	if found == false {
+		return nil, ErrTagNotFound
+	}
+
+	return it, nil
+}
+
+// Get returns information about the non-IFD tag given a tag ID. `ifdPath` must
+// not be fully-qualified.
+func (ti *TagIndex) Get(ii *exifcommon.IfdIdentity, id uint16) (it *IndexedTag, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	ifdPath := ii.UnindexedString()
+
+	it, err = ti.getOne(ifdPath, id)
+	if err == nil {
+		return it, nil
+	} else if err != ErrTagNotFound {
+		log.Panic(err)
+	}
+
+	if ti.doUniversalSearch == false {
+		return nil, ErrTagNotFound
+	}
+
+	// We've been told to fallback to look for the tag in other IFDs.
+
+	skipIfdPath := ii.UnindexedString()
+
+	for currentIfdPath, _ := range ti.tagsByIfd {
+		if currentIfdPath == skipIfdPath {
+			// Skip the primary IFD, which has already been checked.
+			continue
+		}
+
+		it, err = ti.getOne(currentIfdPath, id)
+		if err == nil {
+			tagsLogger.Warningf(nil,
+				"Found tag (0x%02x) in the wrong IFD: [%s] != [%s]",
+				id, currentIfdPath, ifdPath)
+
+			return it, nil
+		} else if err != ErrTagNotFound {
+			log.Panic(err)
+		}
+	}
+
+	return nil, ErrTagNotFound
+}
+
+var (
+	// tagGuessDefaultIfdIdentities describes which IFDs we'll look for a given
+	// tag-ID in, if it's not found where it's supposed to be. We suppose that
+	// Exif-IFD tags might be found in IFD0 or IFD1, or IFD0/IFD1 tags might be
+	// found in the Exif IFD. This is the only thing we've seen so far. So, this
+	// is the limit of our guessing.
+	tagGuessDefaultIfdIdentities = []*exifcommon.IfdIdentity{
+		exifcommon.IfdExifStandardIfdIdentity,
+		exifcommon.IfdStandardIfdIdentity,
+	}
+)
+
+// FindFirst looks for the given tag-ID in each of the given IFDs in the given
+// order. If `fqIfdPaths` is `nil` then use a default search order. This defies
+// the standard, which requires each tag to exist in certain IFDs. This is a
+// contingency to make recommendations for malformed data.
+//
+// Things *can* end badly here, in that the same tag-ID in different IFDs might
+// describe different data and different data-types, and our decode might then
+// produce binary and non-printable data.
+func (ti *TagIndex) FindFirst(id uint16, typeId exifcommon.TagTypePrimitive, ifdIdentities []*exifcommon.IfdIdentity) (it *IndexedTag, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if ifdIdentities == nil { + ifdIdentities = tagGuessDefaultIfdIdentities + } + + for _, ii := range ifdIdentities { + it, err := ti.Get(ii, id) + if err != nil { + if err == ErrTagNotFound { + continue + } + + log.Panic(err) + } + + // Even though the tag might be mislocated, the type should still be the + // same. Check this so we don't accidentally end-up on a complete + // irrelevant tag with a totally different data type. This attempts to + // mitigate producing garbage. + for _, supportedType := range it.SupportedTypes { + if supportedType == typeId { + return it, nil + } + } + } + + return nil, ErrTagNotFound +} + +// GetWithName returns information about the non-IFD tag given a tag name. +func (ti *TagIndex) GetWithName(ii *exifcommon.IfdIdentity, name string) (it *IndexedTag, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if len(ti.tagsByIfdR) == 0 { + err := LoadStandardTags(ti) + log.PanicIf(err) + } + + ifdPath := ii.UnindexedString() + + it, found := ti.tagsByIfdR[ifdPath][name] + if found != true { + log.Panic(ErrTagNotFound) + } + + return it, nil +} + +// LoadStandardTags registers the tags that all devices/applications should +// support. +func LoadStandardTags(ti *TagIndex) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // Read static data. + + encodedIfds := make(map[string][]encodedTag) + + err = yaml.Unmarshal([]byte(tagsYaml), encodedIfds) + log.PanicIf(err) + + // Load structure. + + count := 0 + for ifdPath, tags := range encodedIfds { + for _, tagInfo := range tags { + tagId := uint16(tagInfo.Id) + tagName := tagInfo.Name + tagTypeName := tagInfo.TypeName + tagTypeNames := tagInfo.TypeNames + + if tagTypeNames == nil { + if tagTypeName == "" { + log.Panicf("no tag-types were given when registering standard tag [%s] (0x%04x) [%s]", ifdPath, tagId, tagName) + } + + tagTypeNames = []string{ + tagTypeName, + } + } else if tagTypeName != "" { + log.Panicf("both 'type_names' and 'type_name' were given when registering standard tag [%s] (0x%04x) [%s]", ifdPath, tagId, tagName) + } + + tagTypes := make([]exifcommon.TagTypePrimitive, 0) + for _, tagTypeName := range tagTypeNames { + + // TODO(dustin): Discard unsupported types. This helps us with non-standard types that have actually been found in real data, that we ignore for right now. e.g. 
SSHORT, FLOAT, DOUBLE + tagTypeId, found := exifcommon.GetTypeByName(tagTypeName) + if found == false { + tagsLogger.Warningf(nil, "Type [%s] for tag [%s] being loaded is not valid and is being ignored.", tagTypeName, tagName) + continue + } + + tagTypes = append(tagTypes, tagTypeId) + } + + if len(tagTypes) == 0 { + tagsLogger.Warningf(nil, "Tag [%s] (0x%04x) [%s] being loaded does not have any supported types and will not be registered.", ifdPath, tagId, tagName) + continue + } + + it := &IndexedTag{ + IfdPath: ifdPath, + Id: tagId, + Name: tagName, + SupportedTypes: tagTypes, + } + + err = ti.Add(it) + log.PanicIf(err) + + count++ + } + } + + tagsLogger.Debugf(nil, "(%d) tags loaded.", count) + + return nil +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/tags_data.go b/vendor/github.com/dsoprea/go-exif/v3/tags_data.go new file mode 100644 index 000000000..dcf0cc4f4 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/tags_data.go @@ -0,0 +1,968 @@ +package exif + +var ( + // From assets/tags.yaml . Needs to be here so it's embedded in the binary. + tagsYaml = ` +# Notes: +# +# This file was produced from http://www.exiv2.org/tags.html, using the included +# tool, though that document appears to have some duplicates when all IDs are +# supposed to be unique (EXIF information only has IDs, not IFDs; IFDs are +# determined by our pre-existing knowledge of those tags). +# +# The webpage that we've produced this file from appears to indicate that +# ImageWidth is represented by both 0x0100 and 0x0001 depending on whether the +# encoding is RGB or YCbCr. +IFD/Exif: +- id: 0x829a + name: ExposureTime + type_name: RATIONAL +- id: 0x829d + name: FNumber + type_name: RATIONAL +- id: 0x8822 + name: ExposureProgram + type_name: SHORT +- id: 0x8824 + name: SpectralSensitivity + type_name: ASCII +- id: 0x8827 + name: ISOSpeedRatings + type_name: SHORT +- id: 0x8828 + name: OECF + type_name: UNDEFINED +- id: 0x8830 + name: SensitivityType + type_name: SHORT +- id: 0x8831 + name: StandardOutputSensitivity + type_name: LONG +- id: 0x8832 + name: RecommendedExposureIndex + type_name: LONG +- id: 0x8833 + name: ISOSpeed + type_name: LONG +- id: 0x8834 + name: ISOSpeedLatitudeyyy + type_name: LONG +- id: 0x8835 + name: ISOSpeedLatitudezzz + type_name: LONG +- id: 0x9000 + name: ExifVersion + type_name: UNDEFINED +- id: 0x9003 + name: DateTimeOriginal + type_name: ASCII +- id: 0x9004 + name: DateTimeDigitized + type_name: ASCII +- id: 0x9010 + name: OffsetTime + type_name: ASCII +- id: 0x9011 + name: OffsetTimeOriginal + type_name: ASCII +- id: 0x9012 + name: OffsetTimeDigitized + type_name: ASCII +- id: 0x9101 + name: ComponentsConfiguration + type_name: UNDEFINED +- id: 0x9102 + name: CompressedBitsPerPixel + type_name: RATIONAL +- id: 0x9201 + name: ShutterSpeedValue + type_name: SRATIONAL +- id: 0x9202 + name: ApertureValue + type_name: RATIONAL +- id: 0x9203 + name: BrightnessValue + type_name: SRATIONAL +- id: 0x9204 + name: ExposureBiasValue + type_name: SRATIONAL +- id: 0x9205 + name: MaxApertureValue + type_name: RATIONAL +- id: 0x9206 + name: SubjectDistance + type_name: RATIONAL +- id: 0x9207 + name: MeteringMode + type_name: SHORT +- id: 0x9208 + name: LightSource + type_name: SHORT +- id: 0x9209 + name: Flash + type_name: SHORT +- id: 0x920a + name: FocalLength + type_name: RATIONAL +- id: 0x9214 + name: SubjectArea + type_name: SHORT +- id: 0x927c + name: MakerNote + type_name: UNDEFINED +- id: 0x9286 + name: UserComment + type_name: UNDEFINED +- id: 0x9290 + name: SubSecTime + 
type_name: ASCII +- id: 0x9291 + name: SubSecTimeOriginal + type_name: ASCII +- id: 0x9292 + name: SubSecTimeDigitized + type_name: ASCII +- id: 0xa000 + name: FlashpixVersion + type_name: UNDEFINED +- id: 0xa001 + name: ColorSpace + type_name: SHORT +- id: 0xa002 + name: PixelXDimension + type_names: [LONG, SHORT] +- id: 0xa003 + name: PixelYDimension + type_names: [LONG, SHORT] +- id: 0xa004 + name: RelatedSoundFile + type_name: ASCII +- id: 0xa005 + name: InteroperabilityTag + type_name: LONG +- id: 0xa20b + name: FlashEnergy + type_name: RATIONAL +- id: 0xa20c + name: SpatialFrequencyResponse + type_name: UNDEFINED +- id: 0xa20e + name: FocalPlaneXResolution + type_name: RATIONAL +- id: 0xa20f + name: FocalPlaneYResolution + type_name: RATIONAL +- id: 0xa210 + name: FocalPlaneResolutionUnit + type_name: SHORT +- id: 0xa214 + name: SubjectLocation + type_name: SHORT +- id: 0xa215 + name: ExposureIndex + type_name: RATIONAL +- id: 0xa217 + name: SensingMethod + type_name: SHORT +- id: 0xa300 + name: FileSource + type_name: UNDEFINED +- id: 0xa301 + name: SceneType + type_name: UNDEFINED +- id: 0xa302 + name: CFAPattern + type_name: UNDEFINED +- id: 0xa401 + name: CustomRendered + type_name: SHORT +- id: 0xa402 + name: ExposureMode + type_name: SHORT +- id: 0xa403 + name: WhiteBalance + type_name: SHORT +- id: 0xa404 + name: DigitalZoomRatio + type_name: RATIONAL +- id: 0xa405 + name: FocalLengthIn35mmFilm + type_name: SHORT +- id: 0xa406 + name: SceneCaptureType + type_name: SHORT +- id: 0xa407 + name: GainControl + type_name: SHORT +- id: 0xa408 + name: Contrast + type_name: SHORT +- id: 0xa409 + name: Saturation + type_name: SHORT +- id: 0xa40a + name: Sharpness + type_name: SHORT +- id: 0xa40b + name: DeviceSettingDescription + type_name: UNDEFINED +- id: 0xa40c + name: SubjectDistanceRange + type_name: SHORT +- id: 0xa420 + name: ImageUniqueID + type_name: ASCII +- id: 0xa430 + name: CameraOwnerName + type_name: ASCII +- id: 0xa431 + name: BodySerialNumber + type_name: ASCII +- id: 0xa432 + name: LensSpecification + type_name: RATIONAL +- id: 0xa433 + name: LensMake + type_name: ASCII +- id: 0xa434 + name: LensModel + type_name: ASCII +- id: 0xa435 + name: LensSerialNumber + type_name: ASCII +IFD/GPSInfo: +- id: 0x0000 + name: GPSVersionID + type_name: BYTE +- id: 0x0001 + name: GPSLatitudeRef + type_name: ASCII +- id: 0x0002 + name: GPSLatitude + type_name: RATIONAL +- id: 0x0003 + name: GPSLongitudeRef + type_name: ASCII +- id: 0x0004 + name: GPSLongitude + type_name: RATIONAL +- id: 0x0005 + name: GPSAltitudeRef + type_name: BYTE +- id: 0x0006 + name: GPSAltitude + type_name: RATIONAL +- id: 0x0007 + name: GPSTimeStamp + type_name: RATIONAL +- id: 0x0008 + name: GPSSatellites + type_name: ASCII +- id: 0x0009 + name: GPSStatus + type_name: ASCII +- id: 0x000a + name: GPSMeasureMode + type_name: ASCII +- id: 0x000b + name: GPSDOP + type_name: RATIONAL +- id: 0x000c + name: GPSSpeedRef + type_name: ASCII +- id: 0x000d + name: GPSSpeed + type_name: RATIONAL +- id: 0x000e + name: GPSTrackRef + type_name: ASCII +- id: 0x000f + name: GPSTrack + type_name: RATIONAL +- id: 0x0010 + name: GPSImgDirectionRef + type_name: ASCII +- id: 0x0011 + name: GPSImgDirection + type_name: RATIONAL +- id: 0x0012 + name: GPSMapDatum + type_name: ASCII +- id: 0x0013 + name: GPSDestLatitudeRef + type_name: ASCII +- id: 0x0014 + name: GPSDestLatitude + type_name: RATIONAL +- id: 0x0015 + name: GPSDestLongitudeRef + type_name: ASCII +- id: 0x0016 + name: GPSDestLongitude + type_name: RATIONAL +- id: 0x0017 + 
name: GPSDestBearingRef + type_name: ASCII +- id: 0x0018 + name: GPSDestBearing + type_name: RATIONAL +- id: 0x0019 + name: GPSDestDistanceRef + type_name: ASCII +- id: 0x001a + name: GPSDestDistance + type_name: RATIONAL +- id: 0x001b + name: GPSProcessingMethod + type_name: UNDEFINED +- id: 0x001c + name: GPSAreaInformation + type_name: UNDEFINED +- id: 0x001d + name: GPSDateStamp + type_name: ASCII +- id: 0x001e + name: GPSDifferential + type_name: SHORT +IFD: +- id: 0x000b + name: ProcessingSoftware + type_name: ASCII +- id: 0x00fe + name: NewSubfileType + type_name: LONG +- id: 0x00ff + name: SubfileType + type_name: SHORT +- id: 0x0100 + name: ImageWidth + type_names: [LONG, SHORT] +- id: 0x0101 + name: ImageLength + type_names: [LONG, SHORT] +- id: 0x0102 + name: BitsPerSample + type_name: SHORT +- id: 0x0103 + name: Compression + type_name: SHORT +- id: 0x0106 + name: PhotometricInterpretation + type_name: SHORT +- id: 0x0107 + name: Thresholding + type_name: SHORT +- id: 0x0108 + name: CellWidth + type_name: SHORT +- id: 0x0109 + name: CellLength + type_name: SHORT +- id: 0x010a + name: FillOrder + type_name: SHORT +- id: 0x010d + name: DocumentName + type_name: ASCII +- id: 0x010e + name: ImageDescription + type_name: ASCII +- id: 0x010f + name: Make + type_name: ASCII +- id: 0x0110 + name: Model + type_name: ASCII +- id: 0x0111 + name: StripOffsets + type_names: [LONG, SHORT] +- id: 0x0112 + name: Orientation + type_name: SHORT +- id: 0x0115 + name: SamplesPerPixel + type_name: SHORT +- id: 0x0116 + name: RowsPerStrip + type_names: [LONG, SHORT] +- id: 0x0117 + name: StripByteCounts + type_names: [LONG, SHORT] +- id: 0x011a + name: XResolution + type_name: RATIONAL +- id: 0x011b + name: YResolution + type_name: RATIONAL +- id: 0x011c + name: PlanarConfiguration + type_name: SHORT +- id: 0x0122 + name: GrayResponseUnit + type_name: SHORT +- id: 0x0123 + name: GrayResponseCurve + type_name: SHORT +- id: 0x0124 + name: T4Options + type_name: LONG +- id: 0x0125 + name: T6Options + type_name: LONG +- id: 0x0128 + name: ResolutionUnit + type_name: SHORT +- id: 0x0129 + name: PageNumber + type_name: SHORT +- id: 0x012d + name: TransferFunction + type_name: SHORT +- id: 0x0131 + name: Software + type_name: ASCII +- id: 0x0132 + name: DateTime + type_name: ASCII +- id: 0x013b + name: Artist + type_name: ASCII +- id: 0x013c + name: HostComputer + type_name: ASCII +- id: 0x013d + name: Predictor + type_name: SHORT +- id: 0x013e + name: WhitePoint + type_name: RATIONAL +- id: 0x013f + name: PrimaryChromaticities + type_name: RATIONAL +- id: 0x0140 + name: ColorMap + type_name: SHORT +- id: 0x0141 + name: HalftoneHints + type_name: SHORT +- id: 0x0142 + name: TileWidth + type_name: SHORT +- id: 0x0143 + name: TileLength + type_name: SHORT +- id: 0x0144 + name: TileOffsets + type_name: SHORT +- id: 0x0145 + name: TileByteCounts + type_name: SHORT +- id: 0x014a + name: SubIFDs + type_name: LONG +- id: 0x014c + name: InkSet + type_name: SHORT +- id: 0x014d + name: InkNames + type_name: ASCII +- id: 0x014e + name: NumberOfInks + type_name: SHORT +- id: 0x0150 + name: DotRange + type_name: BYTE +- id: 0x0151 + name: TargetPrinter + type_name: ASCII +- id: 0x0152 + name: ExtraSamples + type_name: SHORT +- id: 0x0153 + name: SampleFormat + type_name: SHORT +- id: 0x0154 + name: SMinSampleValue + type_name: SHORT +- id: 0x0155 + name: SMaxSampleValue + type_name: SHORT +- id: 0x0156 + name: TransferRange + type_name: SHORT +- id: 0x0157 + name: ClipPath + type_name: BYTE +- id: 0x015a + name: 
Indexed + type_name: SHORT +- id: 0x015b + name: JPEGTables + type_name: UNDEFINED +- id: 0x015f + name: OPIProxy + type_name: SHORT +- id: 0x0200 + name: JPEGProc + type_name: LONG +- id: 0x0201 + name: JPEGInterchangeFormat + type_name: LONG +- id: 0x0202 + name: JPEGInterchangeFormatLength + type_name: LONG +- id: 0x0203 + name: JPEGRestartInterval + type_name: SHORT +- id: 0x0205 + name: JPEGLosslessPredictors + type_name: SHORT +- id: 0x0206 + name: JPEGPointTransforms + type_name: SHORT +- id: 0x0207 + name: JPEGQTables + type_name: LONG +- id: 0x0208 + name: JPEGDCTables + type_name: LONG +- id: 0x0209 + name: JPEGACTables + type_name: LONG +- id: 0x0211 + name: YCbCrCoefficients + type_name: RATIONAL +- id: 0x0212 + name: YCbCrSubSampling + type_name: SHORT +- id: 0x0213 + name: YCbCrPositioning + type_name: SHORT +- id: 0x0214 + name: ReferenceBlackWhite + type_name: RATIONAL +- id: 0x02bc + name: XMLPacket + type_name: BYTE +- id: 0x4746 + name: Rating + type_name: SHORT +- id: 0x4749 + name: RatingPercent + type_name: SHORT +- id: 0x800d + name: ImageID + type_name: ASCII +- id: 0x828d + name: CFARepeatPatternDim + type_name: SHORT +- id: 0x828e + name: CFAPattern + type_name: BYTE +- id: 0x828f + name: BatteryLevel + type_name: RATIONAL +- id: 0x8298 + name: Copyright + type_name: ASCII +- id: 0x829a + name: ExposureTime +# NOTE(dustin): SRATIONAL isn't mentioned in the standard, but we have seen it in real data. + type_names: [RATIONAL, SRATIONAL] +- id: 0x829d + name: FNumber +# NOTE(dustin): SRATIONAL isn't mentioned in the standard, but we have seen it in real data. + type_names: [RATIONAL, SRATIONAL] +- id: 0x83bb + name: IPTCNAA + type_name: LONG +- id: 0x8649 + name: ImageResources + type_name: BYTE +- id: 0x8769 + name: ExifTag + type_name: LONG +- id: 0x8773 + name: InterColorProfile + type_name: UNDEFINED +- id: 0x8822 + name: ExposureProgram + type_name: SHORT +- id: 0x8824 + name: SpectralSensitivity + type_name: ASCII +- id: 0x8825 + name: GPSTag + type_name: LONG +- id: 0x8827 + name: ISOSpeedRatings + type_name: SHORT +- id: 0x8828 + name: OECF + type_name: UNDEFINED +- id: 0x8829 + name: Interlace + type_name: SHORT +- id: 0x882b + name: SelfTimerMode + type_name: SHORT +- id: 0x9003 + name: DateTimeOriginal + type_name: ASCII +- id: 0x9102 + name: CompressedBitsPerPixel + type_name: RATIONAL +- id: 0x9201 + name: ShutterSpeedValue + type_name: SRATIONAL +- id: 0x9202 + name: ApertureValue + type_name: RATIONAL +- id: 0x9203 + name: BrightnessValue + type_name: SRATIONAL +- id: 0x9204 + name: ExposureBiasValue + type_name: SRATIONAL +- id: 0x9205 + name: MaxApertureValue + type_name: RATIONAL +- id: 0x9206 + name: SubjectDistance + type_name: SRATIONAL +- id: 0x9207 + name: MeteringMode + type_name: SHORT +- id: 0x9208 + name: LightSource + type_name: SHORT +- id: 0x9209 + name: Flash + type_name: SHORT +- id: 0x920a + name: FocalLength + type_name: RATIONAL +- id: 0x920b + name: FlashEnergy + type_name: RATIONAL +- id: 0x920c + name: SpatialFrequencyResponse + type_name: UNDEFINED +- id: 0x920d + name: Noise + type_name: UNDEFINED +- id: 0x920e + name: FocalPlaneXResolution + type_name: RATIONAL +- id: 0x920f + name: FocalPlaneYResolution + type_name: RATIONAL +- id: 0x9210 + name: FocalPlaneResolutionUnit + type_name: SHORT +- id: 0x9211 + name: ImageNumber + type_name: LONG +- id: 0x9212 + name: SecurityClassification + type_name: ASCII +- id: 0x9213 + name: ImageHistory + type_name: ASCII +- id: 0x9214 + name: SubjectLocation + type_name: SHORT +- id: 0x9215 
+ name: ExposureIndex + type_name: RATIONAL +- id: 0x9216 + name: TIFFEPStandardID + type_name: BYTE +- id: 0x9217 + name: SensingMethod + type_name: SHORT +- id: 0x9c9b + name: XPTitle + type_name: BYTE +- id: 0x9c9c + name: XPComment + type_name: BYTE +- id: 0x9c9d + name: XPAuthor + type_name: BYTE +- id: 0x9c9e + name: XPKeywords + type_name: BYTE +- id: 0x9c9f + name: XPSubject + type_name: BYTE +- id: 0xc4a5 + name: PrintImageMatching + type_name: UNDEFINED +- id: 0xc612 + name: DNGVersion + type_name: BYTE +- id: 0xc613 + name: DNGBackwardVersion + type_name: BYTE +- id: 0xc614 + name: UniqueCameraModel + type_name: ASCII +- id: 0xc615 + name: LocalizedCameraModel + type_name: BYTE +- id: 0xc616 + name: CFAPlaneColor + type_name: BYTE +- id: 0xc617 + name: CFALayout + type_name: SHORT +- id: 0xc618 + name: LinearizationTable + type_name: SHORT +- id: 0xc619 + name: BlackLevelRepeatDim + type_name: SHORT +- id: 0xc61a + name: BlackLevel + type_name: RATIONAL +- id: 0xc61b + name: BlackLevelDeltaH + type_name: SRATIONAL +- id: 0xc61c + name: BlackLevelDeltaV + type_name: SRATIONAL +- id: 0xc61d + name: WhiteLevel + type_name: SHORT +- id: 0xc61e + name: DefaultScale + type_name: RATIONAL +- id: 0xc61f + name: DefaultCropOrigin + type_name: SHORT +- id: 0xc620 + name: DefaultCropSize + type_name: SHORT +- id: 0xc621 + name: ColorMatrix1 + type_name: SRATIONAL +- id: 0xc622 + name: ColorMatrix2 + type_name: SRATIONAL +- id: 0xc623 + name: CameraCalibration1 + type_name: SRATIONAL +- id: 0xc624 + name: CameraCalibration2 + type_name: SRATIONAL +- id: 0xc625 + name: ReductionMatrix1 + type_name: SRATIONAL +- id: 0xc626 + name: ReductionMatrix2 + type_name: SRATIONAL +- id: 0xc627 + name: AnalogBalance + type_name: RATIONAL +- id: 0xc628 + name: AsShotNeutral + type_name: SHORT +- id: 0xc629 + name: AsShotWhiteXY + type_name: RATIONAL +- id: 0xc62a + name: BaselineExposure + type_name: SRATIONAL +- id: 0xc62b + name: BaselineNoise + type_name: RATIONAL +- id: 0xc62c + name: BaselineSharpness + type_name: RATIONAL +- id: 0xc62d + name: BayerGreenSplit + type_name: LONG +- id: 0xc62e + name: LinearResponseLimit + type_name: RATIONAL +- id: 0xc62f + name: CameraSerialNumber + type_name: ASCII +- id: 0xc630 + name: LensInfo + type_name: RATIONAL +- id: 0xc631 + name: ChromaBlurRadius + type_name: RATIONAL +- id: 0xc632 + name: AntiAliasStrength + type_name: RATIONAL +- id: 0xc633 + name: ShadowScale + type_name: SRATIONAL +- id: 0xc634 + name: DNGPrivateData + type_name: BYTE +- id: 0xc635 + name: MakerNoteSafety + type_name: SHORT +- id: 0xc65a + name: CalibrationIlluminant1 + type_name: SHORT +- id: 0xc65b + name: CalibrationIlluminant2 + type_name: SHORT +- id: 0xc65c + name: BestQualityScale + type_name: RATIONAL +- id: 0xc65d + name: RawDataUniqueID + type_name: BYTE +- id: 0xc68b + name: OriginalRawFileName + type_name: BYTE +- id: 0xc68c + name: OriginalRawFileData + type_name: UNDEFINED +- id: 0xc68d + name: ActiveArea + type_name: SHORT +- id: 0xc68e + name: MaskedAreas + type_name: SHORT +- id: 0xc68f + name: AsShotICCProfile + type_name: UNDEFINED +- id: 0xc690 + name: AsShotPreProfileMatrix + type_name: SRATIONAL +- id: 0xc691 + name: CurrentICCProfile + type_name: UNDEFINED +- id: 0xc692 + name: CurrentPreProfileMatrix + type_name: SRATIONAL +- id: 0xc6bf + name: ColorimetricReference + type_name: SHORT +- id: 0xc6f3 + name: CameraCalibrationSignature + type_name: BYTE +- id: 0xc6f4 + name: ProfileCalibrationSignature + type_name: BYTE +- id: 0xc6f6 + name: AsShotProfileName + 
type_name: BYTE +- id: 0xc6f7 + name: NoiseReductionApplied + type_name: RATIONAL +- id: 0xc6f8 + name: ProfileName + type_name: BYTE +- id: 0xc6f9 + name: ProfileHueSatMapDims + type_name: LONG +- id: 0xc6fd + name: ProfileEmbedPolicy + type_name: LONG +- id: 0xc6fe + name: ProfileCopyright + type_name: BYTE +- id: 0xc714 + name: ForwardMatrix1 + type_name: SRATIONAL +- id: 0xc715 + name: ForwardMatrix2 + type_name: SRATIONAL +- id: 0xc716 + name: PreviewApplicationName + type_name: BYTE +- id: 0xc717 + name: PreviewApplicationVersion + type_name: BYTE +- id: 0xc718 + name: PreviewSettingsName + type_name: BYTE +- id: 0xc719 + name: PreviewSettingsDigest + type_name: BYTE +- id: 0xc71a + name: PreviewColorSpace + type_name: LONG +- id: 0xc71b + name: PreviewDateTime + type_name: ASCII +- id: 0xc71c + name: RawImageDigest + type_name: UNDEFINED +- id: 0xc71d + name: OriginalRawFileDigest + type_name: UNDEFINED +- id: 0xc71e + name: SubTileBlockSize + type_name: LONG +- id: 0xc71f + name: RowInterleaveFactor + type_name: LONG +- id: 0xc725 + name: ProfileLookTableDims + type_name: LONG +- id: 0xc740 + name: OpcodeList1 + type_name: UNDEFINED +- id: 0xc741 + name: OpcodeList2 + type_name: UNDEFINED +- id: 0xc74e + name: OpcodeList3 + type_name: UNDEFINED +# This tag may be used to specify the size of raster pixel spacing in the +# model space units, when the raster space can be embedded in the model space +# coordinate system without rotation, and consists of the following 3 values: +# ModelPixelScaleTag = (ScaleX, ScaleY, ScaleZ) +# where ScaleX and ScaleY give the horizontal and vertical spacing of raster +# pixels. The ScaleZ is primarily used to map the pixel value of a digital +# elevation model into the correct Z-scale, and so for most other purposes +# this value should be zero (since most model spaces are 2-D, with Z=0). +# Source: http://geotiff.maptools.org/spec/geotiff2.6.html#2.6.1 +- id: 0x830e + name: ModelPixelScaleTag + type_name: DOUBLE +# This tag stores raster->model tiepoint pairs in the order +# ModelTiepointTag = (...,I,J,K, X,Y,Z...), +# where (I,J,K) is the point at location (I,J) in raster space with +# pixel-value K, and (X,Y,Z) is a vector in model space. In most cases the +# model space is only two-dimensional, in which case both K and Z should be +# set to zero; this third dimension is provided in anticipation of future +# support for 3D digital elevation models and vertical coordinate systems. +# Source: http://geotiff.maptools.org/spec/geotiff2.6.html#2.6.1 +- id: 0x8482 + name: ModelTiepointTag + type_name: DOUBLE +# This tag may be used to specify the transformation matrix between the +# raster space (and its dependent pixel-value space) and the (possibly 3D) +# model space. 
+# Source: http://geotiff.maptools.org/spec/geotiff2.6.html#2.6.1 +- id: 0x85d8 + name: ModelTransformationTag + type_name: DOUBLE +IFD/Exif/Iop: +- id: 0x0001 + name: InteroperabilityIndex + type_name: ASCII +- id: 0x0002 + name: InteroperabilityVersion + type_name: UNDEFINED +- id: 0x1000 + name: RelatedImageFileFormat + type_name: ASCII +- id: 0x1001 + name: RelatedImageWidth + type_name: LONG +- id: 0x1002 + name: RelatedImageLength + type_name: LONG +` +) diff --git a/vendor/github.com/dsoprea/go-exif/v3/testing_common.go b/vendor/github.com/dsoprea/go-exif/v3/testing_common.go new file mode 100644 index 000000000..061276430 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/testing_common.go @@ -0,0 +1,188 @@ +package exif + +import ( + "path" + "reflect" + "testing" + + "io/ioutil" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +var ( + testExifData []byte +) + +func getExifSimpleTestIb() *IfdBuilder { + defer func() { + if state := recover(); state != nil { + err := log.Wrap(state.(error)) + log.Panic(err) + } + }() + + im := exifcommon.NewIfdMapping() + + err := exifcommon.LoadStandardIfds(im) + log.PanicIf(err) + + ti := NewTagIndex() + ib := NewIfdBuilder(im, ti, exifcommon.IfdStandardIfdIdentity, exifcommon.TestDefaultByteOrder) + + err = ib.AddStandard(0x000b, "asciivalue") + log.PanicIf(err) + + err = ib.AddStandard(0x00ff, []uint16{0x1122}) + log.PanicIf(err) + + err = ib.AddStandard(0x0100, []uint32{0x33445566}) + log.PanicIf(err) + + err = ib.AddStandard(0x013e, []exifcommon.Rational{{Numerator: 0x11112222, Denominator: 0x33334444}}) + log.PanicIf(err) + + return ib +} + +func getExifSimpleTestIbBytes() []byte { + defer func() { + if state := recover(); state != nil { + err := log.Wrap(state.(error)) + log.Panic(err) + } + }() + + im := exifcommon.NewIfdMapping() + + err := exifcommon.LoadStandardIfds(im) + log.PanicIf(err) + + ti := NewTagIndex() + ib := NewIfdBuilder(im, ti, exifcommon.IfdStandardIfdIdentity, exifcommon.TestDefaultByteOrder) + + err = ib.AddStandard(0x000b, "asciivalue") + log.PanicIf(err) + + err = ib.AddStandard(0x00ff, []uint16{0x1122}) + log.PanicIf(err) + + err = ib.AddStandard(0x0100, []uint32{0x33445566}) + log.PanicIf(err) + + err = ib.AddStandard(0x013e, []exifcommon.Rational{{Numerator: 0x11112222, Denominator: 0x33334444}}) + log.PanicIf(err) + + ibe := NewIfdByteEncoder() + + exifData, err := ibe.EncodeToExif(ib) + log.PanicIf(err) + + return exifData +} + +func validateExifSimpleTestIb(exifData []byte, t *testing.T) { + defer func() { + if state := recover(); state != nil { + err := log.Wrap(state.(error)) + log.Panic(err) + } + }() + + im := exifcommon.NewIfdMapping() + + err := exifcommon.LoadStandardIfds(im) + log.PanicIf(err) + + ti := NewTagIndex() + + eh, index, err := Collect(im, ti, exifData) + log.PanicIf(err) + + if eh.ByteOrder != exifcommon.TestDefaultByteOrder { + t.Fatalf("EXIF byte-order is not correct: %v", eh.ByteOrder) + } else if eh.FirstIfdOffset != ExifDefaultFirstIfdOffset { + t.Fatalf("EXIF first IFD-offset not correct: (0x%02x)", eh.FirstIfdOffset) + } + + if len(index.Ifds) != 1 { + t.Fatalf("There wasn't exactly one IFD decoded: (%d)", len(index.Ifds)) + } + + ifd := index.RootIfd + + if ifd.ByteOrder() != exifcommon.TestDefaultByteOrder { + t.Fatalf("IFD byte-order not correct.") + } else if ifd.ifdIdentity.UnindexedString() != exifcommon.IfdStandardIfdIdentity.UnindexedString() { + t.Fatalf("IFD name not correct.") + } else if ifd.ifdIdentity.Index() != 0 { + 
t.Fatalf("IFD index not zero: (%d)", ifd.ifdIdentity.Index()) + } else if ifd.Offset() != uint32(0x0008) { + t.Fatalf("IFD offset not correct.") + } else if len(ifd.Entries()) != 4 { + t.Fatalf("IFD number of entries not correct: (%d)", len(ifd.Entries())) + } else if ifd.nextIfdOffset != uint32(0) { + t.Fatalf("Next-IFD offset is non-zero.") + } else if ifd.nextIfd != nil { + t.Fatalf("Next-IFD pointer is non-nil.") + } + + // Verify the values by using the actual, original types (this is awesome). + + expected := []struct { + tagId uint16 + value interface{} + }{ + {tagId: 0x000b, value: "asciivalue"}, + {tagId: 0x00ff, value: []uint16{0x1122}}, + {tagId: 0x0100, value: []uint32{0x33445566}}, + {tagId: 0x013e, value: []exifcommon.Rational{{Numerator: 0x11112222, Denominator: 0x33334444}}}, + } + + for i, ite := range ifd.Entries() { + if ite.TagId() != expected[i].tagId { + t.Fatalf("Tag-ID for entry (%d) not correct: (0x%02x) != (0x%02x)", i, ite.TagId(), expected[i].tagId) + } + + value, err := ite.Value() + log.PanicIf(err) + + if reflect.DeepEqual(value, expected[i].value) != true { + t.Fatalf("Value for entry (%d) not correct: [%v] != [%v]", i, value, expected[i].value) + } + } +} + +func getTestImageFilepath() string { + assetsPath := exifcommon.GetTestAssetsPath() + testImageFilepath := path.Join(assetsPath, "NDM_8901.jpg") + return testImageFilepath +} + +func getTestExifData() []byte { + if testExifData == nil { + assetsPath := exifcommon.GetTestAssetsPath() + filepath := path.Join(assetsPath, "NDM_8901.jpg.exif") + + var err error + + testExifData, err = ioutil.ReadFile(filepath) + log.PanicIf(err) + } + + return testExifData +} + +func getTestGpsImageFilepath() string { + assetsPath := exifcommon.GetTestAssetsPath() + testGpsImageFilepath := path.Join(assetsPath, "gps.jpg") + return testGpsImageFilepath +} + +func getTestGeotiffFilepath() string { + assetsPath := exifcommon.GetTestAssetsPath() + testGeotiffFilepath := path.Join(assetsPath, "geotiff_example.tif") + return testGeotiffFilepath +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/README.md b/vendor/github.com/dsoprea/go-exif/v3/undefined/README.md new file mode 100644 index 000000000..d2caa6e51 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/README.md @@ -0,0 +1,4 @@ + +## 0xa40b + +The specification is not specific/clear enough to be handled. Without a working example ,we're deferring until some point in the future when either we or someone else has a better understanding. diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/accessor.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/accessor.go new file mode 100644 index 000000000..11a21e1f0 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/accessor.go @@ -0,0 +1,62 @@ +package exifundefined + +import ( + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +// Encode encodes the given encodeable undefined value to bytes. 
+func Encode(value EncodeableValue, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + encoderName := value.EncoderName() + + encoder, found := encoders[encoderName] + if found == false { + log.Panicf("no encoder registered for type [%s]", encoderName) + } + + encoded, unitCount, err = encoder.Encode(value, byteOrder) + log.PanicIf(err) + + return encoded, unitCount, nil +} + +// Decode constructs a value from raw encoded bytes. +func Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + uth := UndefinedTagHandle{ + IfdPath: valueContext.IfdPath(), + TagId: valueContext.TagId(), + } + + decoder, found := decoders[uth] + if found == false { + // We have no choice but to return the error. We have no way of knowing how + // much data there is without already knowing what data-type this tag is. + return nil, exifcommon.ErrUnhandledUndefinedTypedTag + } + + value, err = decoder.Decode(valueContext) + if err != nil { + if err == ErrUnparseableValue { + return nil, err + } + + log.Panic(err) + } + + return value, nil +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_8828_oecf.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_8828_oecf.go new file mode 100644 index 000000000..26f3675ab --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_8828_oecf.go @@ -0,0 +1,148 @@ +package exifundefined + +import ( + "bytes" + "fmt" + + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type Tag8828Oecf struct { + Columns uint16 + Rows uint16 + ColumnNames []string + Values []exifcommon.SignedRational +} + +func (oecf Tag8828Oecf) String() string { + return fmt.Sprintf("Tag8828Oecf<COLUMNS=(%d) ROWS=(%d)>", oecf.Columns, oecf.Rows) +} + +func (oecf Tag8828Oecf) EncoderName() string { + return "Codec8828Oecf" +} + +type Codec8828Oecf struct { +} + +func (Codec8828Oecf) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test + + oecf, ok := value.(Tag8828Oecf) + if ok == false { + log.Panicf("can only encode a Tag8828Oecf") + } + + b := new(bytes.Buffer) + + err = binary.Write(b, byteOrder, oecf.Columns) + log.PanicIf(err) + + err = binary.Write(b, byteOrder, oecf.Rows) + log.PanicIf(err) + + for _, name := range oecf.ColumnNames { + _, err := b.Write([]byte(name)) + log.PanicIf(err) + + _, err = b.Write([]byte{0}) + log.PanicIf(err) + } + + ve := exifcommon.NewValueEncoder(byteOrder) + + ed, err := ve.Encode(oecf.Values) + log.PanicIf(err) + + _, err = b.Write(ed.Encoded) + log.PanicIf(err) + + return b.Bytes(), uint32(b.Len()), nil +} + +func (Codec8828Oecf) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test using known good data.
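+ + // The payload layout assumed by the parsing below: two uint16s giving the + // column and row counts, then one NUL-terminated name per column, then + // signed rationals filling the remainder of the data.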
+ + valueContext.SetUndefinedValueType(exifcommon.TypeByte) + + valueBytes, err := valueContext.ReadBytes() + log.PanicIf(err) + + oecf := Tag8828Oecf{} + + oecf.Columns = valueContext.ByteOrder().Uint16(valueBytes[0:2]) + oecf.Rows = valueContext.ByteOrder().Uint16(valueBytes[2:4]) + + columnNames := make([]string, oecf.Columns) + + // startAt is where the current column name starts. + startAt := 4 + + // offset is our current position. + offset := startAt + + currentColumnNumber := uint16(0) + + for currentColumnNumber < oecf.Columns { + if valueBytes[offset] == 0 { + columnName := string(valueBytes[startAt:offset]) + if len(columnName) == 0 { + log.Panicf("SFR column (%d) has zero length", currentColumnNumber) + } + + columnNames[currentColumnNumber] = columnName + currentColumnNumber++ + + offset++ + startAt = offset + continue + } + + offset++ + } + + oecf.ColumnNames = columnNames + + rawRationalBytes := valueBytes[offset:] + + rationalSize := exifcommon.TypeSignedRational.Size() + if len(rawRationalBytes)%rationalSize > 0 { + log.Panicf("OECF signed-rationals not aligned: (%d) %% (%d) > 0", len(rawRationalBytes), rationalSize) + } + + rationalCount := len(rawRationalBytes) / rationalSize + + parser := new(exifcommon.Parser) + + byteOrder := valueContext.ByteOrder() + + items, err := parser.ParseSignedRationals(rawRationalBytes, uint32(rationalCount), byteOrder) + log.PanicIf(err) + + oecf.Values = items + + return oecf, nil +} + +func init() { + registerDecoder( + exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), + 0x8828, + Codec8828Oecf{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9000_exif_version.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9000_exif_version.go new file mode 100644 index 000000000..8f18c8114 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9000_exif_version.go @@ -0,0 +1,69 @@ +package exifundefined + +import ( + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type Tag9000ExifVersion struct { + ExifVersion string +} + +func (Tag9000ExifVersion) EncoderName() string { + return "Codec9000ExifVersion" +} + +func (ev Tag9000ExifVersion) String() string { + return ev.ExifVersion +} + +type Codec9000ExifVersion struct { +} + +func (Codec9000ExifVersion) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + s, ok := value.(Tag9000ExifVersion) + if ok == false { + log.Panicf("can only encode a Tag9000ExifVersion") + } + + return []byte(s.ExifVersion), uint32(len(s.ExifVersion)), nil +} + +func (Codec9000ExifVersion) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul) + + valueString, err := valueContext.ReadAsciiNoNul() + log.PanicIf(err) + + ev := Tag9000ExifVersion{ + ExifVersion: valueString, + } + + return ev, nil +} + +func init() { + registerEncoder( + Tag9000ExifVersion{}, + Codec9000ExifVersion{}) + + registerDecoder( + exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), + 0x9000, + Codec9000ExifVersion{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9101_components_configuration.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9101_components_configuration.go new 
file mode 100644 index 000000000..e357fe0a6 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9101_components_configuration.go @@ -0,0 +1,124 @@ +package exifundefined + +import ( + "bytes" + "fmt" + + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +const ( + TagUndefinedType_9101_ComponentsConfiguration_Channel_Y = 0x1 + TagUndefinedType_9101_ComponentsConfiguration_Channel_Cb = 0x2 + TagUndefinedType_9101_ComponentsConfiguration_Channel_Cr = 0x3 + TagUndefinedType_9101_ComponentsConfiguration_Channel_R = 0x4 + TagUndefinedType_9101_ComponentsConfiguration_Channel_G = 0x5 + TagUndefinedType_9101_ComponentsConfiguration_Channel_B = 0x6 +) + +const ( + TagUndefinedType_9101_ComponentsConfiguration_OTHER = iota + TagUndefinedType_9101_ComponentsConfiguration_RGB = iota + TagUndefinedType_9101_ComponentsConfiguration_YCBCR = iota +) + +var ( + TagUndefinedType_9101_ComponentsConfiguration_Names = map[int]string{ + TagUndefinedType_9101_ComponentsConfiguration_OTHER: "OTHER", + TagUndefinedType_9101_ComponentsConfiguration_RGB: "RGB", + TagUndefinedType_9101_ComponentsConfiguration_YCBCR: "YCBCR", + } + + TagUndefinedType_9101_ComponentsConfiguration_Configurations = map[int][]byte{ + TagUndefinedType_9101_ComponentsConfiguration_RGB: { + TagUndefinedType_9101_ComponentsConfiguration_Channel_R, + TagUndefinedType_9101_ComponentsConfiguration_Channel_G, + TagUndefinedType_9101_ComponentsConfiguration_Channel_B, + 0, + }, + + TagUndefinedType_9101_ComponentsConfiguration_YCBCR: { + TagUndefinedType_9101_ComponentsConfiguration_Channel_Y, + TagUndefinedType_9101_ComponentsConfiguration_Channel_Cb, + TagUndefinedType_9101_ComponentsConfiguration_Channel_Cr, + 0, + }, + } +) + +type TagExif9101ComponentsConfiguration struct { + ConfigurationId int + ConfigurationBytes []byte +} + +func (TagExif9101ComponentsConfiguration) EncoderName() string { + return "CodecExif9101ComponentsConfiguration" +} + +func (cc TagExif9101ComponentsConfiguration) String() string { + return fmt.Sprintf("Exif9101ComponentsConfiguration<ID=[%s] BYTES=%v>", TagUndefinedType_9101_ComponentsConfiguration_Names[cc.ConfigurationId], cc.ConfigurationBytes) +} + +type CodecExif9101ComponentsConfiguration struct { +} + +func (CodecExif9101ComponentsConfiguration) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + cc, ok := value.(TagExif9101ComponentsConfiguration) + if ok == false { + log.Panicf("can only encode a TagExif9101ComponentsConfiguration") + } + + return cc.ConfigurationBytes, uint32(len(cc.ConfigurationBytes)), nil +} + +func (CodecExif9101ComponentsConfiguration) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + valueContext.SetUndefinedValueType(exifcommon.TypeByte) + + valueBytes, err := valueContext.ReadBytes() + log.PanicIf(err) + + for configurationId, configurationBytes := range TagUndefinedType_9101_ComponentsConfiguration_Configurations { + if bytes.Equal(configurationBytes, valueBytes) == true { + cc := TagExif9101ComponentsConfiguration{ + ConfigurationId: configurationId, + ConfigurationBytes: valueBytes, + } + + return cc, nil + } + } + + cc := TagExif9101ComponentsConfiguration{ + ConfigurationId: TagUndefinedType_9101_ComponentsConfiguration_OTHER, +
ConfigurationBytes: valueBytes, + } + + return cc, nil +} + +func init() { + registerEncoder( + TagExif9101ComponentsConfiguration{}, + CodecExif9101ComponentsConfiguration{}) + + registerDecoder( + exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), + 0x9101, + CodecExif9101ComponentsConfiguration{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_927C_maker_note.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_927C_maker_note.go new file mode 100644 index 000000000..f9cd2788e --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_927C_maker_note.go @@ -0,0 +1,114 @@ +package exifundefined + +import ( + "fmt" + "strings" + + "crypto/sha1" + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type Tag927CMakerNote struct { + MakerNoteType []byte + MakerNoteBytes []byte +} + +func (Tag927CMakerNote) EncoderName() string { + return "Codec927CMakerNote" +} + +func (mn Tag927CMakerNote) String() string { + parts := make([]string, len(mn.MakerNoteType)) + + for i, c := range mn.MakerNoteType { + parts[i] = fmt.Sprintf("%02x", c) + } + + h := sha1.New() + + _, err := h.Write(mn.MakerNoteBytes) + log.PanicIf(err) + + digest := h.Sum(nil) + + return fmt.Sprintf("MakerNote<TYPE-ID=[%s] LEN=(%d) SHA1=[%020x]>", strings.Join(parts, " "), len(mn.MakerNoteBytes), digest) +} + +type Codec927CMakerNote struct { +} + +func (Codec927CMakerNote) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + mn, ok := value.(Tag927CMakerNote) + if ok == false { + log.Panicf("can only encode a Tag927CMakerNote") + } + + // TODO(dustin): Confirm this size against the specification. + + return mn.MakerNoteBytes, uint32(len(mn.MakerNoteBytes)), nil +} + +func (Codec927CMakerNote) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // MakerNote + // TODO(dustin): !! This is the Wild Wild West. This very well might be a child IFD, but any and all OEM's define their own formats. If we're going to be writing changes and this is complete EXIF (which may not have the first eight bytes), it might be fine. However, if these are just IFDs they'll be relative to the main EXIF, this will invalidate the MakerNote data for IFDs and any other implementations that use offsets unless we can interpret them all. It'd be best to return to this later and just exclude this from being written for now, though it means a loss of a wealth of image metadata. + // -> We can also just blindly try to interpret as an IFD and just validate that it looks good (maybe it will even have a 'next ifd' pointer that we can validate is 0x0). + + valueContext.SetUndefinedValueType(exifcommon.TypeByte) + + valueBytes, err := valueContext.ReadBytes() + log.PanicIf(err) + + // TODO(dustin): Doesn't work, but here as an example. + // ie := NewIfdEnumerate(valueBytes, byteOrder) + + // // TODO(dustin): !! Validate types (might have proprietary types, but it might be worth splitting the list between valid and not valid; maybe fail if a certain proportion are invalid, or maybe aren't less than a certain small integer)?
+ // ii, err := ie.Collect(0x0) + + // for _, entry := range ii.RootIfd.Entries { + // fmt.Printf("ENTRY: 0x%02x %d\n", entry.TagId, entry.TagType) + // } + + var makerNoteType []byte + if len(valueBytes) >= 20 { + makerNoteType = valueBytes[:20] + } else { + makerNoteType = valueBytes + } + + mn := Tag927CMakerNote{ + MakerNoteType: makerNoteType, + + // MakerNoteBytes has the whole length of bytes. There's always + // the chance that the first 20 bytes includes actual data. + MakerNoteBytes: valueBytes, + } + + return mn, nil +} + +func init() { + registerEncoder( + Tag927CMakerNote{}, + Codec927CMakerNote{}) + + registerDecoder( + exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), + 0x927c, + Codec927CMakerNote{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9286_user_comment.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9286_user_comment.go new file mode 100644 index 000000000..320edc145 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9286_user_comment.go @@ -0,0 +1,142 @@ +package exifundefined + +import ( + "bytes" + "fmt" + + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +var ( + exif9286Logger = log.NewLogger("exifundefined.exif_9286_user_comment") +) + +const ( + TagUndefinedType_9286_UserComment_Encoding_ASCII = iota + TagUndefinedType_9286_UserComment_Encoding_JIS = iota + TagUndefinedType_9286_UserComment_Encoding_UNICODE = iota + TagUndefinedType_9286_UserComment_Encoding_UNDEFINED = iota +) + +var ( + TagUndefinedType_9286_UserComment_Encoding_Names = map[int]string{ + TagUndefinedType_9286_UserComment_Encoding_ASCII: "ASCII", + TagUndefinedType_9286_UserComment_Encoding_JIS: "JIS", + TagUndefinedType_9286_UserComment_Encoding_UNICODE: "UNICODE", + TagUndefinedType_9286_UserComment_Encoding_UNDEFINED: "UNDEFINED", + } + + TagUndefinedType_9286_UserComment_Encodings = map[int][]byte{ + TagUndefinedType_9286_UserComment_Encoding_ASCII: {'A', 'S', 'C', 'I', 'I', 0, 0, 0}, + TagUndefinedType_9286_UserComment_Encoding_JIS: {'J', 'I', 'S', 0, 0, 0, 0, 0}, + TagUndefinedType_9286_UserComment_Encoding_UNICODE: {'U', 'n', 'i', 'c', 'o', 'd', 'e', 0}, + TagUndefinedType_9286_UserComment_Encoding_UNDEFINED: {0, 0, 0, 0, 0, 0, 0, 0}, + } +) + +type Tag9286UserComment struct { + EncodingType int + EncodingBytes []byte +} + +func (Tag9286UserComment) EncoderName() string { + return "Codec9286UserComment" +} + +func (uc Tag9286UserComment) String() string { + var valuePhrase string + + if uc.EncodingType == TagUndefinedType_9286_UserComment_Encoding_ASCII { + return fmt.Sprintf("[ASCII] %s", string(uc.EncodingBytes)) + } else { + if len(uc.EncodingBytes) <= 8 { + valuePhrase = fmt.Sprintf("%v", uc.EncodingBytes) + } else { + valuePhrase = fmt.Sprintf("%v...", uc.EncodingBytes[:8]) + } + } + + return fmt.Sprintf("UserComment<SIZE=(%d) ENCODING=[%s] V=%v LEN=(%d)>", len(uc.EncodingBytes), TagUndefinedType_9286_UserComment_Encoding_Names[uc.EncodingType], valuePhrase, len(uc.EncodingBytes)) +} + +type Codec9286UserComment struct { +} + +func (Codec9286UserComment) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + uc, ok := value.(Tag9286UserComment) + if ok == false { + log.Panicf("can only encode a Tag9286UserComment") + } + + encodingTypeBytes, found := TagUndefinedType_9286_UserComment_Encodings[uc.EncodingType] + if found == false { +
log.Panicf("encoding-type not valid for unknown-type tag 9286 (UserComment): (%d)", uc.EncodingType) + } + + encoded = make([]byte, len(uc.EncodingBytes)+8) + + copy(encoded[:8], encodingTypeBytes) + copy(encoded[8:], uc.EncodingBytes) + + // TODO(dustin): Confirm this size against the specification. + + return encoded, uint32(len(encoded)), nil +} + +func (Codec9286UserComment) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + valueContext.SetUndefinedValueType(exifcommon.TypeByte) + + valueBytes, err := valueContext.ReadBytes() + log.PanicIf(err) + + if len(valueBytes) < 8 { + return nil, ErrUnparseableValue + } + + unknownUc := Tag9286UserComment{ + EncodingType: TagUndefinedType_9286_UserComment_Encoding_UNDEFINED, + EncodingBytes: []byte{}, + } + + encoding := valueBytes[:8] + for encodingIndex, encodingBytes := range TagUndefinedType_9286_UserComment_Encodings { + if bytes.Compare(encoding, encodingBytes) == 0 { + uc := Tag9286UserComment{ + EncodingType: encodingIndex, + EncodingBytes: valueBytes[8:], + } + + return uc, nil + } + } + + exif9286Logger.Warningf(nil, "User-comment encoding not valid. Returning 'unknown' type (the default).") + return unknownUc, nil +} + +func init() { + registerEncoder( + Tag9286UserComment{}, + Codec9286UserComment{}) + + registerDecoder( + exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), + 0x9286, + Codec9286UserComment{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A000_flashpix_version.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A000_flashpix_version.go new file mode 100644 index 000000000..4a0fefad7 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A000_flashpix_version.go @@ -0,0 +1,69 @@ +package exifundefined + +import ( + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type TagA000FlashpixVersion struct { + FlashpixVersion string +} + +func (TagA000FlashpixVersion) EncoderName() string { + return "CodecA000FlashpixVersion" +} + +func (fv TagA000FlashpixVersion) String() string { + return fv.FlashpixVersion +} + +type CodecA000FlashpixVersion struct { +} + +func (CodecA000FlashpixVersion) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + s, ok := value.(TagA000FlashpixVersion) + if ok == false { + log.Panicf("can only encode a TagA000FlashpixVersion") + } + + return []byte(s.FlashpixVersion), uint32(len(s.FlashpixVersion)), nil +} + +func (CodecA000FlashpixVersion) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul) + + valueString, err := valueContext.ReadAsciiNoNul() + log.PanicIf(err) + + fv := TagA000FlashpixVersion{ + FlashpixVersion: valueString, + } + + return fv, nil +} + +func init() { + registerEncoder( + TagA000FlashpixVersion{}, + CodecA000FlashpixVersion{}) + + registerDecoder( + exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), + 0xa000, + CodecA000FlashpixVersion{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A20C_spatial_frequency_response.go 
b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A20C_spatial_frequency_response.go new file mode 100644 index 000000000..0311175d6 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A20C_spatial_frequency_response.go @@ -0,0 +1,160 @@ +package exifundefined + +import ( + "bytes" + "fmt" + + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type TagA20CSpatialFrequencyResponse struct { + Columns uint16 + Rows uint16 + ColumnNames []string + Values []exifcommon.Rational +} + +func (TagA20CSpatialFrequencyResponse) EncoderName() string { + return "CodecA20CSpatialFrequencyResponse" +} + +func (sfr TagA20CSpatialFrequencyResponse) String() string { + return fmt.Sprintf("CodecA20CSpatialFrequencyResponse<COLUMNS=(%d) ROWS=(%d)>", sfr.Columns, sfr.Rows) +} + +type CodecA20CSpatialFrequencyResponse struct { +} + +func (CodecA20CSpatialFrequencyResponse) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test. + + sfr, ok := value.(TagA20CSpatialFrequencyResponse) + if ok == false { + log.Panicf("can only encode a TagA20CSpatialFrequencyResponse") + } + + b := new(bytes.Buffer) + + err = binary.Write(b, byteOrder, sfr.Columns) + log.PanicIf(err) + + err = binary.Write(b, byteOrder, sfr.Rows) + log.PanicIf(err) + + // Write columns. + + for _, name := range sfr.ColumnNames { + _, err := b.WriteString(name) + log.PanicIf(err) + + err = b.WriteByte(0) + log.PanicIf(err) + } + + // Write values. + + ve := exifcommon.NewValueEncoder(byteOrder) + + ed, err := ve.Encode(sfr.Values) + log.PanicIf(err) + + _, err = b.Write(ed.Encoded) + log.PanicIf(err) + + encoded = b.Bytes() + + // TODO(dustin): Confirm this size against the specification. + + return encoded, uint32(len(encoded)), nil +} + +func (CodecA20CSpatialFrequencyResponse) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test using known good data. + + byteOrder := valueContext.ByteOrder() + + valueContext.SetUndefinedValueType(exifcommon.TypeByte) + + valueBytes, err := valueContext.ReadBytes() + log.PanicIf(err) + + sfr := TagA20CSpatialFrequencyResponse{} + + sfr.Columns = byteOrder.Uint16(valueBytes[0:2]) + sfr.Rows = byteOrder.Uint16(valueBytes[2:4]) + + columnNames := make([]string, sfr.Columns) + + // startAt is where the current column name starts. + startAt := 4 + + // offset is our current position.
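+ // (The scan below advances offset one byte at a time; each NUL closes the + // column name begun at startAt, and the bytes after the final name are + // parsed as rationals.)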
+ offset := 4 + + currentColumnNumber := uint16(0) + + for currentColumnNumber < sfr.Columns { + if valueBytes[offset] == 0 { + columnName := string(valueBytes[startAt:offset]) + if len(columnName) == 0 { + log.Panicf("SFR column (%d) has zero length", currentColumnNumber) + } + + columnNames[currentColumnNumber] = columnName + currentColumnNumber++ + + offset++ + startAt = offset + continue + } + + offset++ + } + + sfr.ColumnNames = columnNames + + rawRationalBytes := valueBytes[offset:] + + rationalSize := exifcommon.TypeRational.Size() + if len(rawRationalBytes)%rationalSize > 0 { + log.Panicf("SFR rationals not aligned: (%d) %% (%d) > 0", len(rawRationalBytes), rationalSize) + } + + rationalCount := len(rawRationalBytes) / rationalSize + + parser := new(exifcommon.Parser) + + items, err := parser.ParseRationals(rawRationalBytes, uint32(rationalCount), byteOrder) + log.PanicIf(err) + + sfr.Values = items + + return sfr, nil +} + +func init() { + registerEncoder( + TagA20CSpatialFrequencyResponse{}, + CodecA20CSpatialFrequencyResponse{}) + + registerDecoder( + exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), + 0xa20c, + CodecA20CSpatialFrequencyResponse{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A300_file_source.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A300_file_source.go new file mode 100644 index 000000000..f4f3a49f9 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A300_file_source.go @@ -0,0 +1,79 @@ +package exifundefined + +import ( + "fmt" + + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type TagExifA300FileSource uint32 + +func (TagExifA300FileSource) EncoderName() string { + return "CodecExifA300FileSource" +} + +func (af TagExifA300FileSource) String() string { + return fmt.Sprintf("0x%08x", uint32(af)) +} + +const ( + TagUndefinedType_A300_SceneType_Others TagExifA300FileSource = 0 + TagUndefinedType_A300_SceneType_ScannerOfTransparentType TagExifA300FileSource = 1 + TagUndefinedType_A300_SceneType_ScannerOfReflexType TagExifA300FileSource = 2 + TagUndefinedType_A300_SceneType_Dsc TagExifA300FileSource = 3 +) + +type CodecExifA300FileSource struct { +} + +func (CodecExifA300FileSource) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + st, ok := value.(TagExifA300FileSource) + if ok == false { + log.Panicf("can only encode a TagExifA300FileSource") + } + + ve := exifcommon.NewValueEncoder(byteOrder) + + ed, err := ve.Encode([]uint32{uint32(st)}) + log.PanicIf(err) + + // TODO(dustin): Confirm this size against the specification. It's non-specific about what type it is, but it looks to be no more than a single integer scalar. So, we're assuming it's a LONG. 
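+ // (Hence the hard-coded unit-count of 1 returned below: one LONG.)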
+ + return ed.Encoded, 1, nil +} + +func (CodecExifA300FileSource) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + valueContext.SetUndefinedValueType(exifcommon.TypeLong) + + valueLongs, err := valueContext.ReadLongs() + log.PanicIf(err) + + return TagExifA300FileSource(valueLongs[0]), nil +} + +func init() { + registerEncoder( + TagExifA300FileSource(0), + CodecExifA300FileSource{}) + + registerDecoder( + exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), + 0xa300, + CodecExifA300FileSource{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A301_scene_type.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A301_scene_type.go new file mode 100644 index 000000000..a29fd7673 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A301_scene_type.go @@ -0,0 +1,76 @@ +package exifundefined + +import ( + "fmt" + + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type TagExifA301SceneType uint32 + +func (TagExifA301SceneType) EncoderName() string { + return "CodecExifA301SceneType" +} + +func (st TagExifA301SceneType) String() string { + return fmt.Sprintf("0x%08x", uint32(st)) +} + +const ( + TagUndefinedType_A301_SceneType_DirectlyPhotographedImage TagExifA301SceneType = 1 +) + +type CodecExifA301SceneType struct { +} + +func (CodecExifA301SceneType) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + st, ok := value.(TagExifA301SceneType) + if ok == false { + log.Panicf("can only encode a TagExifA301SceneType") + } + + ve := exifcommon.NewValueEncoder(byteOrder) + + ed, err := ve.Encode([]uint32{uint32(st)}) + log.PanicIf(err) + + // TODO(dustin): Confirm this size against the specification. It's non-specific about what type it is, but it looks to be no more than a single integer scalar. So, we're assuming it's a LONG.
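+ // (Note: unlike the A300 codec above, this passes the encoder's reported + // unit-count through rather than hard-coding 1.)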
+ + return ed.Encoded, uint32(int(ed.UnitCount)), nil +} + +func (CodecExifA301SceneType) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + valueContext.SetUndefinedValueType(exifcommon.TypeLong) + + valueLongs, err := valueContext.ReadLongs() + log.PanicIf(err) + + return TagExifA301SceneType(valueLongs[0]), nil +} + +func init() { + registerEncoder( + TagExifA301SceneType(0), + CodecExifA301SceneType{}) + + registerDecoder( + exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), + 0xa301, + CodecExifA301SceneType{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A302_cfa_pattern.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A302_cfa_pattern.go new file mode 100644 index 000000000..88976296d --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A302_cfa_pattern.go @@ -0,0 +1,97 @@ +package exifundefined + +import ( + "bytes" + "fmt" + + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type TagA302CfaPattern struct { + HorizontalRepeat uint16 + VerticalRepeat uint16 + CfaValue []byte +} + +func (TagA302CfaPattern) EncoderName() string { + return "CodecA302CfaPattern" +} + +func (cp TagA302CfaPattern) String() string { + return fmt.Sprintf("TagA302CfaPattern<HORZ-REPEAT=(%d) VERT-REPEAT=(%d) CFA-VALUE=(%d) bytes>", cp.HorizontalRepeat, cp.VerticalRepeat, len(cp.CfaValue)) +} + +type CodecA302CfaPattern struct { +} + +func (CodecA302CfaPattern) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test. + + cp, ok := value.(TagA302CfaPattern) + if ok == false { + log.Panicf("can only encode a TagA302CfaPattern") + } + + b := new(bytes.Buffer) + + err = binary.Write(b, byteOrder, cp.HorizontalRepeat) + log.PanicIf(err) + + err = binary.Write(b, byteOrder, cp.VerticalRepeat) + log.PanicIf(err) + + _, err = b.Write(cp.CfaValue) + log.PanicIf(err) + + encoded = b.Bytes() + + // TODO(dustin): Confirm this size against the specification. + + return encoded, uint32(len(encoded)), nil +} + +func (CodecA302CfaPattern) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test using known good data.
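+ + // Assumed layout, mirroring the parsing below: two uint16s (horizontal and + // vertical repeat counts) followed by horizontal*vertical CFA bytes.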
+ + valueContext.SetUndefinedValueType(exifcommon.TypeByte) + + valueBytes, err := valueContext.ReadBytes() + log.PanicIf(err) + + cp := TagA302CfaPattern{} + + cp.HorizontalRepeat = valueContext.ByteOrder().Uint16(valueBytes[0:2]) + cp.VerticalRepeat = valueContext.ByteOrder().Uint16(valueBytes[2:4]) + + expectedLength := int(cp.HorizontalRepeat * cp.VerticalRepeat) + cp.CfaValue = valueBytes[4 : 4+expectedLength] + + return cp, nil +} + +func init() { + registerEncoder( + TagA302CfaPattern{}, + CodecA302CfaPattern{}) + + registerDecoder( + exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), + 0xa302, + CodecA302CfaPattern{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_iop_0002_interop_version.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_iop_0002_interop_version.go new file mode 100644 index 000000000..09ec98703 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_iop_0002_interop_version.go @@ -0,0 +1,69 @@ +package exifundefined + +import ( + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type Tag0002InteropVersion struct { + InteropVersion string +} + +func (Tag0002InteropVersion) EncoderName() string { + return "Codec0002InteropVersion" +} + +func (iv Tag0002InteropVersion) String() string { + return iv.InteropVersion +} + +type Codec0002InteropVersion struct { +} + +func (Codec0002InteropVersion) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + s, ok := value.(Tag0002InteropVersion) + if ok == false { + log.Panicf("can only encode a Tag0002InteropVersion") + } + + return []byte(s.InteropVersion), uint32(len(s.InteropVersion)), nil +} + +func (Codec0002InteropVersion) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul) + + valueString, err := valueContext.ReadAsciiNoNul() + log.PanicIf(err) + + iv := Tag0002InteropVersion{ + InteropVersion: valueString, + } + + return iv, nil +} + +func init() { + registerEncoder( + Tag0002InteropVersion{}, + Codec0002InteropVersion{}) + + registerDecoder( + exifcommon.IfdExifIopStandardIfdIdentity.UnindexedString(), + 0x0002, + Codec0002InteropVersion{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001B_gps_processing_method.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001B_gps_processing_method.go new file mode 100644 index 000000000..6f54d2fc6 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001B_gps_processing_method.go @@ -0,0 +1,65 @@ +package exifundefined + +import ( + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type Tag001BGPSProcessingMethod struct { + string +} + +func (Tag001BGPSProcessingMethod) EncoderName() string { + return "Codec001BGPSProcessingMethod" +} + +func (gpm Tag001BGPSProcessingMethod) String() string { + return gpm.string +} + +type Codec001BGPSProcessingMethod struct { +} + +func (Codec001BGPSProcessingMethod) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + s, ok := 
value.(Tag001BGPSProcessingMethod) + if ok == false { + log.Panicf("can only encode a Tag001BGPSProcessingMethod") + } + + return []byte(s.string), uint32(len(s.string)), nil +} + +func (Codec001BGPSProcessingMethod) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul) + + valueString, err := valueContext.ReadAsciiNoNul() + log.PanicIf(err) + + return Tag001BGPSProcessingMethod{valueString}, nil +} + +func init() { + registerEncoder( + Tag001BGPSProcessingMethod{}, + Codec001BGPSProcessingMethod{}) + + registerDecoder( + exifcommon.IfdGpsInfoStandardIfdIdentity.UnindexedString(), + 0x001b, + Codec001BGPSProcessingMethod{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001C_gps_area_information.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001C_gps_area_information.go new file mode 100644 index 000000000..ffdeb905b --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001C_gps_area_information.go @@ -0,0 +1,65 @@ +package exifundefined + +import ( + "encoding/binary" + + "github.com/dsoprea/go-logging" + + "github.com/dsoprea/go-exif/v3/common" +) + +type Tag001CGPSAreaInformation struct { + string +} + +func (Tag001CGPSAreaInformation) EncoderName() string { + return "Codec001CGPSAreaInformation" +} + +func (gai Tag001CGPSAreaInformation) String() string { + return gai.string +} + +type Codec001CGPSAreaInformation struct { +} + +func (Codec001CGPSAreaInformation) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + s, ok := value.(Tag001CGPSAreaInformation) + if ok == false { + log.Panicf("can only encode a Tag001CGPSAreaInformation") + } + + return []byte(s.string), uint32(len(s.string)), nil +} + +func (Codec001CGPSAreaInformation) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul) + + valueString, err := valueContext.ReadAsciiNoNul() + log.PanicIf(err) + + return Tag001CGPSAreaInformation{valueString}, nil +} + +func init() { + registerEncoder( + Tag001CGPSAreaInformation{}, + Codec001CGPSAreaInformation{}) + + registerDecoder( + exifcommon.IfdGpsInfoStandardIfdIdentity.UnindexedString(), + 0x001c, + Codec001CGPSAreaInformation{}) +} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/registration.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/registration.go new file mode 100644 index 000000000..cccc20a82 --- /dev/null +++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/registration.go @@ -0,0 +1,42 @@ +package exifundefined + +import ( + "github.com/dsoprea/go-logging" +) + +// UndefinedTagHandle defines one undefined-type tag with a corresponding +// decoder. 
+type UndefinedTagHandle struct {
+	IfdPath string
+	TagId   uint16
+}
+
+func registerEncoder(entity EncodeableValue, encoder UndefinedValueEncoder) {
+	typeName := entity.EncoderName()
+
+	_, found := encoders[typeName]
+	if found == true {
+		log.Panicf("encoder already registered: %v", typeName)
+	}
+
+	encoders[typeName] = encoder
+}
+
+func registerDecoder(ifdPath string, tagId uint16, decoder UndefinedValueDecoder) {
+	uth := UndefinedTagHandle{
+		IfdPath: ifdPath,
+		TagId:   tagId,
+	}
+
+	_, found := decoders[uth]
+	if found == true {
+		log.Panicf("decoder already registered: %v", uth)
+	}
+
+	decoders[uth] = decoder
+}
+
+var (
+	encoders = make(map[string]UndefinedValueEncoder)
+	decoders = make(map[UndefinedTagHandle]UndefinedValueDecoder)
+)
diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/type.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/type.go
new file mode 100644
index 000000000..ff6ac2b4c
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-exif/v3/undefined/type.go
@@ -0,0 +1,44 @@
+package exifundefined
+
+import (
+	"errors"
+
+	"encoding/binary"
+
+	"github.com/dsoprea/go-exif/v3/common"
+)
+
+const (
+	// UnparseableUnknownTagValuePlaceholder is the string to use for an unknown
+	// undefined tag.
+	UnparseableUnknownTagValuePlaceholder = "!UNKNOWN"
+
+	// UnparseableHandledTagValuePlaceholder is the string to use for a known
+	// value that is not parseable.
+	UnparseableHandledTagValuePlaceholder = "!MALFORMED"
+)
+
+var (
+	// ErrUnparseableValue is the error for a value that we should have been
+	// able to parse but were not able to.
+	ErrUnparseableValue = errors.New("unparseable undefined tag")
+)
+
+// UndefinedValueEncoder knows how to encode an undefined-type tag's value to
+// bytes.
+type UndefinedValueEncoder interface {
+	Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error)
+}
+
+// EncodeableValue wraps a value with the information that will be needed to re-
+// encode it later.
+type EncodeableValue interface {
+	EncoderName() string
+	String() string
+}
+
+// UndefinedValueDecoder knows how to decode an undefined-type tag's value from
+// bytes.
+type UndefinedValueDecoder interface {
+	Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error)
+}
diff --git a/vendor/github.com/dsoprea/go-exif/v3/utility.go b/vendor/github.com/dsoprea/go-exif/v3/utility.go
new file mode 100644
index 000000000..f0b5e6383
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-exif/v3/utility.go
@@ -0,0 +1,237 @@
+package exif
+
+import (
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/dsoprea/go-logging"
+	"github.com/dsoprea/go-utility/v2/filesystem"
+
+	"github.com/dsoprea/go-exif/v3/common"
+	"github.com/dsoprea/go-exif/v3/undefined"
+)
+
+var (
+	utilityLogger = log.NewLogger("exif.utility")
+)
+
+// ExifTag is one simple representation of a tag in a flat list of all of them.
+type ExifTag struct {
+	// IfdPath is the fully-qualified IFD path (even though it is not named as
+	// such).
+	IfdPath string `json:"ifd_path"`
+
+	// TagId is the tag-ID.
+	TagId uint16 `json:"id"`
+
+	// TagName is the tag-name. This is never empty.
+	TagName string `json:"name"`
+
+	// UnitCount is the recorded number of units constituting the value.
+	UnitCount uint32 `json:"unit_count"`
+
+	// TagTypeId is the type-ID.
+	TagTypeId exifcommon.TagTypePrimitive `json:"type_id"`
+
+	// TagTypeName is the type name.
+	TagTypeName string `json:"type_name"`
+
+	// Value is the decoded value.
+	Value interface{} `json:"value"`
+
+	// ValueBytes is the raw, encoded value.
+	ValueBytes []byte `json:"value_bytes"`
+
+	// FormattedFirst is the human representation of the first value (tag
+	// values are always an array).
+	FormattedFirst string `json:"formatted_first"`
+
+	// Formatted is the human representation of the complete value.
+	Formatted string `json:"formatted"`
+
+	// ChildIfdPath is the name of the child IFD this tag represents (if it
+	// represents any). Otherwise, this is empty.
+	ChildIfdPath string `json:"child_ifd_path"`
+}
+
+// String returns a string representation.
+func (et ExifTag) String() string {
+	return fmt.Sprintf(
+		"ExifTag<"+
+			"IFD-PATH=[%s] "+
+			"TAG-ID=(0x%02x) "+
+			"TAG-NAME=[%s] "+
+			"TAG-TYPE=[%s] "+
+			"VALUE=[%v] "+
+			"VALUE-BYTES=(%d) "+
+			"CHILD-IFD-PATH=[%s]",
+		et.IfdPath, et.TagId, et.TagName, et.TagTypeName, et.FormattedFirst,
+		len(et.ValueBytes), et.ChildIfdPath)
+}
+
+// GetFlatExifData returns a simple, flat representation of all tags.
+func GetFlatExifData(exifData []byte, so *ScanOptions) (exifTags []ExifTag, med *MiscellaneousExifData, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	sb := rifs.NewSeekableBufferWithBytes(exifData)
+
+	exifTags, med, err = getFlatExifDataUniversalSearchWithReadSeeker(sb, so, false)
+	log.PanicIf(err)
+
+	return exifTags, med, nil
+}
+
+// RELEASE(dustin): GetFlatExifDataUniversalSearch is a kludge to allow universal tag searching in a backwards-compatible manner. For the next release, undo this and simply add the flag to GetFlatExifData.
+
+// GetFlatExifDataUniversalSearch returns a simple, flat representation of all
+// tags.
+func GetFlatExifDataUniversalSearch(exifData []byte, so *ScanOptions, doUniversalSearch bool) (exifTags []ExifTag, med *MiscellaneousExifData, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	sb := rifs.NewSeekableBufferWithBytes(exifData)
+
+	exifTags, med, err = getFlatExifDataUniversalSearchWithReadSeeker(sb, so, doUniversalSearch)
+	log.PanicIf(err)
+
+	return exifTags, med, nil
+}
+
+// RELEASE(dustin): GetFlatExifDataUniversalSearchWithReadSeeker is a kludge to allow using a ReadSeeker in a backwards-compatible manner. For the next release, drop this and refactor GetFlatExifDataUniversalSearch to take a ReadSeeker.
+
+// GetFlatExifDataUniversalSearchWithReadSeeker returns a simple, flat
+// representation of all tags given a ReadSeeker.
+func GetFlatExifDataUniversalSearchWithReadSeeker(rs io.ReadSeeker, so *ScanOptions, doUniversalSearch bool) (exifTags []ExifTag, med *MiscellaneousExifData, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	exifTags, med, err = getFlatExifDataUniversalSearchWithReadSeeker(rs, so, doUniversalSearch)
+	log.PanicIf(err)
+
+	return exifTags, med, nil
+}
+
+// getFlatExifDataUniversalSearchWithReadSeeker returns a simple, flat
+// representation of all tags given a ReadSeeker.
+func getFlatExifDataUniversalSearchWithReadSeeker(rs io.ReadSeeker, so *ScanOptions, doUniversalSearch bool) (exifTags []ExifTag, med *MiscellaneousExifData, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	headerData := make([]byte, ExifSignatureLength)
+	if _, err = io.ReadFull(rs, headerData); err != nil {
+		if err == io.EOF {
+			return nil, nil, err
+		}
+
+		log.Panic(err)
+	}
+
+	eh, err := ParseExifHeader(headerData)
+	log.PanicIf(err)
+
+	im, err := exifcommon.NewIfdMappingWithStandard()
+	log.PanicIf(err)
+
+	ti := NewTagIndex()
+
+	if doUniversalSearch == true {
+		ti.SetUniversalSearch(true)
+	}
+
+	ebs := NewExifReadSeeker(rs)
+	ie := NewIfdEnumerate(im, ti, ebs, eh.ByteOrder)
+
+	exifTags = make([]ExifTag, 0)
+
+	visitor := func(ite *IfdTagEntry) (err error) {
+		// This encodes down to base64. Since this is an example tool and we do
+		// not expect to ever decode the output, we are not worried about
+		// specifically base64-encoding it in order to have a measure of
+		// control.
+		valueBytes, err := ite.GetRawBytes()
+		if err != nil {
+			if err == exifundefined.ErrUnparseableValue {
+				return nil
+			}
+
+			log.Panic(err)
+		}
+
+		value, err := ite.Value()
+		if err != nil {
+			if err == exifcommon.ErrUnhandledUndefinedTypedTag {
+				value = exifundefined.UnparseableUnknownTagValuePlaceholder
+			} else if log.Is(err, exifcommon.ErrParseFail) == true {
+				utilityLogger.Warningf(nil,
+					"Could not parse value for tag [%s] (%04x) [%s].",
+					ite.IfdPath(), ite.TagId(), ite.TagName())
+
+				return nil
+			} else {
+				log.Panic(err)
+			}
+		}
+
+		et := ExifTag{
+			IfdPath:      ite.IfdPath(),
+			TagId:        ite.TagId(),
+			TagName:      ite.TagName(),
+			UnitCount:    ite.UnitCount(),
+			TagTypeId:    ite.TagType(),
+			TagTypeName:  ite.TagType().String(),
+			Value:        value,
+			ValueBytes:   valueBytes,
+			ChildIfdPath: ite.ChildIfdPath(),
+		}
+
+		et.Formatted, err = ite.Format()
+		log.PanicIf(err)
+
+		et.FormattedFirst, err = ite.FormatFirst()
+		log.PanicIf(err)
+
+		exifTags = append(exifTags, et)
+
+		return nil
+	}
+
+	med, err = ie.Scan(exifcommon.IfdStandardIfdIdentity, eh.FirstIfdOffset, visitor, nil)
+	log.PanicIf(err)
+
+	return exifTags, med, nil
+}
+
+// GpsDegreesEquals returns true if the two `GpsDegrees` are identical.
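+// Identity is checked to within one ULP per component: math.Nextafter(x, x+1)
+// yields the next representable float above x, so each component of gi2 must
+// fall in the half-open interval [x, Nextafter(x, x+1)).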
+func GpsDegreesEquals(gi1, gi2 GpsDegrees) bool { + if gi2.Orientation != gi1.Orientation { + return false + } + + degreesRightBound := math.Nextafter(gi1.Degrees, gi1.Degrees+1) + minutesRightBound := math.Nextafter(gi1.Minutes, gi1.Minutes+1) + secondsRightBound := math.Nextafter(gi1.Seconds, gi1.Seconds+1) + + if gi2.Degrees < gi1.Degrees || gi2.Degrees >= degreesRightBound { + return false + } else if gi2.Minutes < gi1.Minutes || gi2.Minutes >= minutesRightBound { + return false + } else if gi2.Seconds < gi1.Seconds || gi2.Seconds >= secondsRightBound { + return false + } + + return true +} diff --git a/vendor/github.com/dsoprea/go-iptc/.MODULE_ROOT b/vendor/github.com/dsoprea/go-iptc/.MODULE_ROOT new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/dsoprea/go-iptc/LICENSE b/vendor/github.com/dsoprea/go-iptc/LICENSE new file mode 100644 index 000000000..d92c04268 --- /dev/null +++ b/vendor/github.com/dsoprea/go-iptc/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Dustin Oprea + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/dsoprea/go-iptc/README.md b/vendor/github.com/dsoprea/go-iptc/README.md new file mode 100644 index 000000000..d28e03d0a --- /dev/null +++ b/vendor/github.com/dsoprea/go-iptc/README.md @@ -0,0 +1,3 @@ +# Overview + +This project provides functionality to parse a series of IPTC records/datasets. It also provides name resolution, but other constraints/validation is not yet implemented (though there is structure present that can accommodate this when desired/required). 
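+
+A minimal usage sketch (the `iptc.data` file name is only an example; it should contain the raw IPTC stream, e.g. as extracted from a JPEG APP13 segment):
+
+```go
+package main
+
+import (
+    "fmt"
+    "os"
+
+    "github.com/dsoprea/go-iptc"
+)
+
+func main() {
+    f, err := os.Open("iptc.data")
+    if err != nil {
+        panic(err)
+    }
+    defer f.Close()
+
+    // Parse the serial sequence of 0x1c-delimited tags.
+    tags, err := iptc.ParseStream(f)
+    if err != nil {
+        panic(err)
+    }
+
+    // Only standard tags with printable values, keyed by tag name.
+    for name, value := range iptc.GetSimpleDictionaryFromParsedTags(tags) {
+        fmt.Printf("%s: %s\n", name, value)
+    }
+}
+```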
diff --git a/vendor/github.com/dsoprea/go-iptc/standard.go b/vendor/github.com/dsoprea/go-iptc/standard.go new file mode 100644 index 000000000..41a043f02 --- /dev/null +++ b/vendor/github.com/dsoprea/go-iptc/standard.go @@ -0,0 +1,99 @@ +package iptc + +import ( + "errors" +) + +type StreamTagInfo struct { + Description string +} + +var ( + standardTags = map[StreamTagKey]StreamTagInfo{ + StreamTagKey{1, 120}: StreamTagInfo{"ARM Identifier"}, + + StreamTagKey{1, 122}: StreamTagInfo{"ARM Version"}, + StreamTagKey{2, 0}: StreamTagInfo{"Record Version"}, + StreamTagKey{2, 3}: StreamTagInfo{"Object Type Reference"}, + StreamTagKey{2, 4}: StreamTagInfo{"Object Attribute Reference"}, + StreamTagKey{2, 5}: StreamTagInfo{"Object Name"}, + StreamTagKey{2, 7}: StreamTagInfo{"Edit Status"}, + StreamTagKey{2, 8}: StreamTagInfo{"Editorial Update"}, + StreamTagKey{2, 10}: StreamTagInfo{"Urgency"}, + StreamTagKey{2, 12}: StreamTagInfo{"Subject Reference"}, + StreamTagKey{2, 15}: StreamTagInfo{"Category"}, + StreamTagKey{2, 20}: StreamTagInfo{"Supplemental Category"}, + StreamTagKey{2, 22}: StreamTagInfo{"Fixture Identifier"}, + StreamTagKey{2, 25}: StreamTagInfo{"Keywords"}, + StreamTagKey{2, 26}: StreamTagInfo{"Content Location Code"}, + StreamTagKey{2, 27}: StreamTagInfo{"Content Location Name"}, + StreamTagKey{2, 30}: StreamTagInfo{"Release Date"}, + StreamTagKey{2, 35}: StreamTagInfo{"Release Time"}, + StreamTagKey{2, 37}: StreamTagInfo{"Expiration Date"}, + StreamTagKey{2, 38}: StreamTagInfo{"Expiration Time"}, + StreamTagKey{2, 40}: StreamTagInfo{"Special Instructions"}, + StreamTagKey{2, 42}: StreamTagInfo{"Action Advised"}, + StreamTagKey{2, 45}: StreamTagInfo{"Reference Service"}, + StreamTagKey{2, 47}: StreamTagInfo{"Reference Date"}, + StreamTagKey{2, 50}: StreamTagInfo{"Reference Number"}, + StreamTagKey{2, 55}: StreamTagInfo{"Date Created"}, + StreamTagKey{2, 60}: StreamTagInfo{"Time Created"}, + StreamTagKey{2, 62}: StreamTagInfo{"Digital Creation Date"}, + StreamTagKey{2, 63}: StreamTagInfo{"Digital Creation Time"}, + StreamTagKey{2, 65}: StreamTagInfo{"Originating Program"}, + StreamTagKey{2, 70}: StreamTagInfo{"Program Version"}, + StreamTagKey{2, 75}: StreamTagInfo{"Object Cycle"}, + StreamTagKey{2, 80}: StreamTagInfo{"By-line"}, + StreamTagKey{2, 85}: StreamTagInfo{"By-line Title"}, + StreamTagKey{2, 90}: StreamTagInfo{"City"}, + StreamTagKey{2, 92}: StreamTagInfo{"Sublocation"}, + StreamTagKey{2, 95}: StreamTagInfo{"Province/State"}, + StreamTagKey{2, 100}: StreamTagInfo{"Country/Primary Location Code"}, + StreamTagKey{2, 101}: StreamTagInfo{"Country/Primary Location Name"}, + StreamTagKey{2, 103}: StreamTagInfo{"Original Transmission Reference"}, + StreamTagKey{2, 105}: StreamTagInfo{"Headline"}, + StreamTagKey{2, 110}: StreamTagInfo{"Credit"}, + StreamTagKey{2, 115}: StreamTagInfo{"Source"}, + StreamTagKey{2, 116}: StreamTagInfo{"Copyright Notice"}, + StreamTagKey{2, 118}: StreamTagInfo{"Contact"}, + StreamTagKey{2, 120}: StreamTagInfo{"Caption/Abstract"}, + StreamTagKey{2, 122}: StreamTagInfo{"Writer/Editor"}, + StreamTagKey{2, 125}: StreamTagInfo{"Rasterized Caption"}, + StreamTagKey{2, 130}: StreamTagInfo{"Image Type"}, + StreamTagKey{2, 131}: StreamTagInfo{"Image Orientation"}, + StreamTagKey{2, 135}: StreamTagInfo{"Language Identifier"}, + StreamTagKey{2, 150}: StreamTagInfo{"Audio Type"}, + StreamTagKey{2, 151}: StreamTagInfo{"Audio Sampling Rate"}, + StreamTagKey{2, 152}: StreamTagInfo{"Audio Sampling Resolution"}, + StreamTagKey{2, 153}: StreamTagInfo{"Audio Duration"}, + 
StreamTagKey{2, 154}: StreamTagInfo{"Audio Outcue"},
+		StreamTagKey{2, 200}: StreamTagInfo{"ObjectData Preview File Format"},
+		StreamTagKey{2, 201}: StreamTagInfo{"ObjectData Preview File Format Version"},
+		StreamTagKey{2, 202}: StreamTagInfo{"ObjectData Preview Data"},
+		StreamTagKey{7, 10}:  StreamTagInfo{"Size Mode"},
+		StreamTagKey{7, 20}:  StreamTagInfo{"Max Subfile Size"},
+		StreamTagKey{7, 90}:  StreamTagInfo{"ObjectData Size Announced"},
+		StreamTagKey{7, 95}:  StreamTagInfo{"Maximum ObjectData Size"},
+		StreamTagKey{8, 10}:  StreamTagInfo{"Subfile"},
+		StreamTagKey{9, 10}:  StreamTagInfo{"Confirmed ObjectData Size"},
+	}
+)
+
+var (
+	// ErrTagNotStandard indicates that the given tag is not known among the
+	// documented standard set.
+	ErrTagNotStandard = errors.New("not a standard tag")
+)
+
+// GetTagInfo returns the info for the given tag. Returns ErrTagNotStandard if
+// not known.
+func GetTagInfo(recordNumber, datasetNumber int) (sti StreamTagInfo, err error) {
+	stk := StreamTagKey{uint8(recordNumber), uint8(datasetNumber)}
+
+	sti, found := standardTags[stk]
+	if found == false {
+		return sti, ErrTagNotStandard
+	}
+
+	return sti, nil
+}
diff --git a/vendor/github.com/dsoprea/go-iptc/tag.go b/vendor/github.com/dsoprea/go-iptc/tag.go
new file mode 100644
index 000000000..18afbb897
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-iptc/tag.go
@@ -0,0 +1,277 @@
+package iptc
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"unicode"
+
+	"encoding/binary"
+
+	"github.com/dsoprea/go-logging"
+)
+
+var (
+	// TODO(dustin): We're still not sure if this is the right endianness. Neither the IPTC nor the IIM specification seems to state one or the other.
+
+	// defaultEncoding is the standard encoding for the IPTC format.
+	defaultEncoding = binary.BigEndian
+)
+
+var (
+	// ErrInvalidTagMarker indicates that the tag can not be parsed because the
+	// tag boundary marker is not the expected value.
+	ErrInvalidTagMarker = errors.New("invalid tag marker")
+)
+
+// Tag describes one tag read from the stream.
+type Tag struct {
+	recordNumber  uint8
+	datasetNumber uint8
+	dataSize      uint64
+}
+
+// String expresses state as a string.
+func (tag *Tag) String() string {
+	return fmt.Sprintf(
+		"Tag<DATASET=(%d:%d) DATA-SIZE=(%d)>",
+		tag.recordNumber, tag.datasetNumber, tag.dataSize)
+}
+
+// DecodeTag parses one tag from the stream.
+func DecodeTag(r io.Reader) (tag Tag, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	tagMarker := uint8(0)
+	err = binary.Read(r, defaultEncoding, &tagMarker)
+	if err != nil {
+		if err == io.EOF {
+			return tag, err
+		}
+
+		log.Panic(err)
+	}
+
+	if tagMarker != 0x1c {
+		return tag, ErrInvalidTagMarker
+	}
+
+	recordNumber := uint8(0)
+	err = binary.Read(r, defaultEncoding, &recordNumber)
+	log.PanicIf(err)
+
+	datasetNumber := uint8(0)
+	err = binary.Read(r, defaultEncoding, &datasetNumber)
+	log.PanicIf(err)
+
+	dataSize16Raw := uint16(0)
+	err = binary.Read(r, defaultEncoding, &dataSize16Raw)
+	log.PanicIf(err)
+
+	var dataSize uint64
+
+	if dataSize16Raw < 32768 {
+		// We only had 16-bits (has the MSB set to (0)).
+		dataSize = uint64(dataSize16Raw)
+	} else {
+		// This field is just the length of the length (has the MSB set to (1)).
+
+		// Clear the MSB.
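+		// For example, a raw field of 0x8004 has the MSB set and masks down to
+		// a length-of-length of 4, meaning the next four bytes carry the
+		// actual 32-bit data size.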
+		lengthLength := dataSize16Raw & 32767
+
+		if lengthLength == 4 {
+			dataSize32Raw := uint32(0)
+			err := binary.Read(r, defaultEncoding, &dataSize32Raw)
+			log.PanicIf(err)
+
+			dataSize = uint64(dataSize32Raw)
+		} else if lengthLength == 8 {
+			err := binary.Read(r, defaultEncoding, &dataSize)
+			log.PanicIf(err)
+		} else {
+			// No specific sizes or limits are given in the specification, so
+			// we need to impose our own limits in order to implement this.
+
+			log.Panicf("extended data-set tag size is not supported: (%d)", lengthLength)
+		}
+	}
+
+	tag = Tag{
+		recordNumber:  recordNumber,
+		datasetNumber: datasetNumber,
+		dataSize:      dataSize,
+	}
+
+	return tag, nil
+}
+
+// StreamTagKey is a convenience type that lets us key our index with a high-
+// level type.
+type StreamTagKey struct {
+	// RecordNumber is the major classification of the dataset.
+	RecordNumber uint8
+
+	// DatasetNumber is the minor classification of the dataset.
+	DatasetNumber uint8
+}
+
+// String returns a descriptive string.
+func (stk StreamTagKey) String() string {
+	return fmt.Sprintf("%d:%d", stk.RecordNumber, stk.DatasetNumber)
+}
+
+// TagData is a convenience wrapper around a byte-slice.
+type TagData []byte
+
+// IsPrintable returns true if all characters are printable.
+func (tg TagData) IsPrintable() bool {
+	for _, b := range tg {
+		r := rune(b)
+
+		// Newline characters aren't considered printable.
+		if r == 0x0d || r == 0x0a {
+			continue
+		}
+
+		if unicode.IsGraphic(r) == false || unicode.IsPrint(r) == false {
+			return false
+		}
+	}
+
+	return true
+}
+
+// String returns a descriptive string. If the data doesn't include any non-
+// printable characters, it will include the value itself.
+func (tg TagData) String() string {
+	if tg.IsPrintable() == true {
+		return string(tg)
+	} else {
+		return fmt.Sprintf("BINARY<(%d) bytes>", len(tg))
+	}
+}
+
+// ParsedTags is the complete, unordered set of tags parsed from the stream.
+type ParsedTags map[StreamTagKey][]TagData
+
+// ParseStream parses a serial sequence of tags and tag data out of the stream.
+func ParseStream(r io.Reader) (tags map[StreamTagKey][]TagData, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	tags = make(ParsedTags)
+
+	for {
+		tag, err := DecodeTag(r)
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+
+			log.Panic(err)
+		}
+
+		raw := make([]byte, tag.dataSize)
+
+		_, err = io.ReadFull(r, raw)
+		log.PanicIf(err)
+
+		data := TagData(raw)
+
+		stk := StreamTagKey{
+			RecordNumber:  tag.recordNumber,
+			DatasetNumber: tag.datasetNumber,
+		}
+
+		if existing, found := tags[stk]; found == true {
+			tags[stk] = append(existing, data)
+		} else {
+			tags[stk] = []TagData{data}
+		}
+	}
+
+	return tags, nil
+}
+
+// GetSimpleDictionaryFromParsedTags returns a dictionary of tag names to tag
+// values, where all values are strings and any tag that had a non-printable
+// value is omitted. It will also only return the first value, therefore
+// dropping any follow-up values for repeatable tags. Non-standard tags are
+// ignored, and whitespace is trimmed from the ends of strings.
+//
+// This is a convenience function for quickly displaying only the summary IPTC
+// metadata that a user might actually be interested in at first glance.
+func GetSimpleDictionaryFromParsedTags(pt ParsedTags) (distilled map[string]string) {
+	distilled = make(map[string]string)
+
+	for stk, dataSlice := range pt {
+		sti, err := GetTagInfo(int(stk.RecordNumber), int(stk.DatasetNumber))
+		if err != nil {
+			if err == ErrTagNotStandard {
+				continue
+			} else {
+				log.Panic(err)
+			}
+		}
+
+		data := dataSlice[0]
+
+		if data.IsPrintable() == false {
+			continue
+		}
+
+		// TODO(dustin): Trim leading whitespace, too.
+		distilled[sti.Description] = strings.Trim(string(data), "\r\n")
+	}
+
+	return distilled
+}
+
+// GetDictionaryFromParsedTags returns all tags. It will keep non-printable
+// values, though it will print a hex-dump placeholder rather than the raw
+// bytes. It will keep non-standard tags (and print the fully-qualified
+// dataset ID rather than the name). It will keep repeated values (with the
+// counter value appended to the end).
+func GetDictionaryFromParsedTags(pt ParsedTags) (distilled map[string]string) {
+	distilled = make(map[string]string)
+	for stk, dataSlice := range pt {
+		var keyPhrase string
+
+		sti, err := GetTagInfo(int(stk.RecordNumber), int(stk.DatasetNumber))
+		if err != nil {
+			if err == ErrTagNotStandard {
+				keyPhrase = fmt.Sprintf("%s (not a standard tag)", stk.String())
+			} else {
+				log.Panic(err)
+			}
+		} else {
+			keyPhrase = sti.Description
+		}
+
+		for i, data := range dataSlice {
+			currentKeyPhrase := keyPhrase
+			if len(dataSlice) > 1 {
+				currentKeyPhrase = fmt.Sprintf("%s (%d)", currentKeyPhrase, i+1)
+			}
+
+			var presentable string
+			if data.IsPrintable() == false {
+				presentable = fmt.Sprintf("[BINARY] %s", DumpBytesToString(data))
+			} else {
+				presentable = string(data)
+			}
+
+			distilled[currentKeyPhrase] = presentable
+		}
+	}
+
+	return distilled
+}
diff --git a/vendor/github.com/dsoprea/go-iptc/testing_common.go b/vendor/github.com/dsoprea/go-iptc/testing_common.go
new file mode 100644
index 000000000..bb5903fa1
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-iptc/testing_common.go
@@ -0,0 +1,70 @@
+package iptc
+
+import (
+	"os"
+	"path"
+
+	"github.com/dsoprea/go-logging"
+)
+
+var (
+	testDataRelFilepath = "iptc.data"
+)
+
+var (
+	moduleRootPath = ""
+	assetsPath     = ""
+)
+
+func GetModuleRootPath() string {
+	if moduleRootPath == "" {
+		moduleRootPath = os.Getenv("IPTC_MODULE_ROOT_PATH")
+		if moduleRootPath != "" {
+			return moduleRootPath
+		}
+
+		currentWd, err := os.Getwd()
+		log.PanicIf(err)
+
+		currentPath := currentWd
+		visited := make([]string, 0)
+
+		for {
+			tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT")
+
+			_, err := os.Stat(tryStampFilepath)
+			if err != nil && os.IsNotExist(err) != true {
+				log.Panic(err)
+			} else if err == nil {
+				break
+			}
+
+			visited = append(visited, tryStampFilepath)
+
+			currentPath = path.Dir(currentPath)
+			if currentPath == "/" {
+				log.Panicf("could not find module-root: %v", visited)
+			}
+		}
+
+		moduleRootPath = currentPath
+	}
+
+	return moduleRootPath
+}
+
+func GetTestAssetsPath() string {
+	if assetsPath == "" {
+		moduleRootPath := GetModuleRootPath()
+		assetsPath = path.Join(moduleRootPath, "assets")
+	}
+
+	return assetsPath
+}
+
+func GetTestDataFilepath() string {
+	assetsPath := GetTestAssetsPath()
+	filepath := path.Join(assetsPath, testDataRelFilepath)
+
+	return filepath
+}
diff --git a/vendor/github.com/dsoprea/go-iptc/utility.go b/vendor/github.com/dsoprea/go-iptc/utility.go
new file mode 100644
index 000000000..5a4a10ad3
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-iptc/utility.go
@@ -0,0 +1,25 @@
+package iptc
+
+import (
+	"bytes"
+	"fmt"
+
+	
"github.com/dsoprea/go-logging" +) + +// DumpBytesToString returns a stringified list of hex-encoded bytes. +func DumpBytesToString(data []byte) string { + b := new(bytes.Buffer) + + for i, x := range data { + _, err := b.WriteString(fmt.Sprintf("%02x", x)) + log.PanicIf(err) + + if i < len(data)-1 { + _, err := b.WriteRune(' ') + log.PanicIf(err) + } + } + + return b.String() +} diff --git a/vendor/github.com/dsoprea/go-logging/.travis.yml b/vendor/github.com/dsoprea/go-logging/.travis.yml new file mode 100644 index 000000000..e37da4ba8 --- /dev/null +++ b/vendor/github.com/dsoprea/go-logging/.travis.yml @@ -0,0 +1,12 @@ +language: go +go: + - tip +install: + - go get -t ./... + - go get github.com/mattn/goveralls +script: +# v1 + - go test -v . +# v2 + - cd v2 + - goveralls -v -service=travis-ci diff --git a/vendor/github.com/dsoprea/go-logging/LICENSE b/vendor/github.com/dsoprea/go-logging/LICENSE new file mode 100644 index 000000000..163291ed6 --- /dev/null +++ b/vendor/github.com/dsoprea/go-logging/LICENSE @@ -0,0 +1,9 @@ +MIT LICENSE + +Copyright 2020 Dustin Oprea + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/dsoprea/go-logging/README.md b/vendor/github.com/dsoprea/go-logging/README.md new file mode 100644 index 000000000..820cd9dc0 --- /dev/null +++ b/vendor/github.com/dsoprea/go-logging/README.md @@ -0,0 +1,223 @@ +[![Build Status](https://travis-ci.org/dsoprea/go-logging.svg?branch=master)](https://travis-ci.org/dsoprea/go-logging) +[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-logging/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-logging?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-logging/v2)](https://goreportcard.com/report/github.com/dsoprea/go-logging/v2) +[![GoDoc](https://godoc.org/github.com/dsoprea/go-logging/v2?status.svg)](https://godoc.org/github.com/dsoprea/go-logging/v2) + +## Introduction + +This project bridges several gaps that are present in the standard logging support in Go: + +- Equips errors with stacktraces and provides a facility for printing them +- Inherently supports the ability for each Go file to print its messages with a prefix representing that file/package +- Adds some functions to specifically log messages of different levels (e.g. 
debug, error)
+- Adds a `PanicIf()` function that can be used to conditionally manage errors depending on whether an error variable is `nil` or actually has an error
+- Adds support for pluggable logging adapters (so the output can be sent somewhere other than the console)
+- Adds configuration (such as the logging level or adapter) that can be driven from the environment
+- Supports filtering to show/hide the logging of certain places of the application
+- The loggers can be defined at the package level, so you can determine which Go file any log message came from.
+
+When used with the Panic-Defer-Recover pattern in Go, even panics arising from the Go runtime will be caught and wrapped with a stacktrace. This narrows down which function they could have originated from, which is otherwise potentially non-trivial to figure out.
+
+## AppEngine
+
+Go under AppEngine is very stripped down; for example, there is no logging type (such as `Logger` in native Go) and no support for prefixing. As each logging call from this project takes a `Context`, this works cooperatively to bridge the additional gaps in AppEngine's logging support.
+
+With standard console logging outside of this context, that parameter will take a `nil`.
+
+
+## Getting Started
+
+The simplest possible example:
+
+```go
+package thispackage
+
+import (
+    "context"
+    "errors"
+
+    "github.com/dsoprea/go-logging/v2"
+)
+
+var (
+    thisfileLog = log.NewLogger("thispackage.thisfile")
+)
+
+func a_cry_for_help(ctx context.Context) {
+    err := errors.New("a big error")
+    thisfileLog.Errorf(ctx, err, "How big is my problem: %s", "pretty big")
+}
+
+func init() {
+    cla := log.NewConsoleLogAdapter()
+    log.AddAdapter("console", cla)
+}
+```
+
+Notice two things:
+
+1. We register the "console" adapter at the bottom. The first adapter registered will be used by default.
+2. We pass in a prefix (what we refer to as a "noun") to `log.NewLogger()`. This is a simple, descriptive name that represents the subject of the file. By convention, we construct this by dot-separating the current package and the name of the file. We recommend that you define a different log for every file at the package level, but it is your choice whether you want to do this, share the same logger over the entire package, define one in each struct, etc.
+
+
+### Example Output
+
+Example output from a real application (not from the above):
+
+```
+2016/09/09 12:57:44 DEBUG: user: User revisiting: [test@example.com]
+2016/09/09 12:57:44 DEBUG: context: Session already inited: [DCRBDGRY6RMWANCSJXVLD7GULDH4NZEB6SBAQ3KSFIGA2LP45IIQ]
+2016/09/09 12:57:44 DEBUG: session_data: Session save not necessary: [DCRBDGRY6RMWANCSJXVLD7GULDH4NZEB6SBAQ3KSFIGA2LP45IIQ]
+2016/09/09 12:57:44 DEBUG: context: Got session: [DCRBDGRY6RMWANCSJXVLD7GULDH4NZEB6SBAQ3KSFIGA2LP45IIQ]
+2016/09/09 12:57:44 DEBUG: session_data: Found user in session.
+2016/09/09 12:57:44 DEBUG: cache: Cache miss: [geo.geocode.reverse:dhxp15x]
+```
+
+
+## Adapters
+
+This project provides one built-in logging adapter, "console", which prints to the screen. To register it:
+
+```go
+cla := log.NewConsoleLogAdapter()
+log.AddAdapter("console", cla)
+```
+
+### Custom Adapters
+
+If you would like to implement your own logger, just create a struct type that satisfies the LogAdapter interface.
+
+```go
+type LogAdapter interface {
+    Debugf(lc *LogContext, message *string) error
+    Infof(lc *LogContext, message *string) error
+    Warningf(lc *LogContext, message *string) error
+    Errorf(lc *LogContext, message *string) error
+}
+```
+
+The *LogContext* struct passed in provides additional information that you may need in order to do what you need to do:
+
+```go
+type LogContext struct {
+    Logger *Logger
+    Ctx context.Context
+}
+```
+
+`Logger` represents your Logger instance.
+
+Adapter example:
+
+```go
+type DummyLogAdapter struct {
+
+}
+
+func (dla *DummyLogAdapter) Debugf(lc *LogContext, message *string) error {
+
+}
+
+func (dla *DummyLogAdapter) Infof(lc *LogContext, message *string) error {
+
+}
+
+func (dla *DummyLogAdapter) Warningf(lc *LogContext, message *string) error {
+
+}
+
+func (dla *DummyLogAdapter) Errorf(lc *LogContext, message *string) error {
+
+}
+```
+
+Then, register it:
+
+```go
+func init() {
+    log.AddAdapter("dummy", new(DummyLogAdapter))
+}
+```
+
+If this is a task-specific implementation, just register it from the `init()` of the file that defines it.
+
+If this is the first adapter you've registered, it will be the default one used. Otherwise, you'll have to deliberately specify it when you are creating a logger: instead of calling `log.NewLogger(noun string)`, call `log.NewLoggerWithAdapterName(noun string, adapterName string)`.
+
+We discuss how to configure the adapter from configuration in the "Configuration" section below.
+
+
+### Adapter Notes
+
+- The `Logger` instance exports `Noun()` in the event you want to discriminate where your log entries go in your adapter. It also exports `Adapter()` in case you need to access the adapter instance from your application.
+- If no adapter is registered (specifically, the default adapter-name remains empty), logging calls will be a no-op. This allows libraries to implement *go-logging* where the larger application doesn't.
+
+
+## Filters
+
+We support the ability to exclusively log for a specific set of nouns (we'll exclude any not specified):
+
+```go
+log.AddIncludeFilter("nountoshow1")
+log.AddIncludeFilter("nountoshow2")
+```
+
+Depending on your needs, you might just want to exclude a couple and include the rest:
+
+```go
+log.AddExcludeFilter("nountohide1")
+log.AddExcludeFilter("nountohide2")
+```
+
+We'll first hit the include-filters. If it's in there, we'll forward the log item to the adapter. If not, and there is at least one include filter in the list, we won't do anything. If the list of include filters is empty but the noun appears in the exclude list, we won't do anything.
+
+It is a good convention to exclude the nouns of any library you are writing whose logging you do not want to generally be aware of unless you are debugging. You might call `AddExcludeFilter()` from the `init()` function at the bottom of those files unless there is some configuration variable, such as "(LibraryNameHere)DoShowLogging", that has been defined and set to TRUE.
+
+
+## Configuration
+
+The following configuration items are available:
+
+- *Format*: The default format used to build the message that gets sent to the adapter. It is assumed that the adapter already prefixes the message with time and log-level (since the default AppEngine logger does). The default value is: `{{.Noun}}: [{{.Level}}] {{if eq .ExcludeBypass true}} [BYPASS]{{end}} {{.Message}}`. The available tokens are "Level", "Noun", "ExcludeBypass", and "Message".
+- *DefaultAdapterName*: The default name of the adapter to use when NewLogger() is called (if this isn't defined then the name of the first registered adapter will be used).
+- *LevelName*: The priority-level of messages permitted to be logged (all others will be discarded). By default, it is "info". The other levels are: "debug", "warning", "error", and "critical".
+- *IncludeNouns*: Comma-separated list of nouns to log for. All others will be ignored.
+- *ExcludeNouns*: Comma-separated list of nouns to exclude from logging.
+- *ExcludeBypassLevelName*: The log-level at which we will show logging for nouns that have been excluded. Allows you to hide excessive, unimportant logging for nouns but to still see their warnings, errors, etc.
+
+
+### Configuration Providers
+
+You provide the configuration by setting a configuration-provider. Configuration providers must satisfy the `ConfigurationProvider` interface. The following are provided with the project:
+
+- `EnvironmentConfigurationProvider`: Read values from the environment.
+- `StaticConfigurationProvider`: Set values directly on the struct.
+
+**The configuration provider must be applied before doing any logging (otherwise it will have no effect).**
+
+Environments such as AppEngine work best with `EnvironmentConfigurationProvider` as this is generally how configuration is exposed *by* AppEngine *to* the application. You can define this configuration directly in *that* configuration.
+
+By default, no configuration-provider is applied; the level defaults to INFO and the format defaults to "{{.Noun}}:{{if eq .ExcludeBypass true}} [BYPASS]{{end}} {{.Message}}".
+
+Again, if a configuration-provider does not provide a log-level or format, they will be defaulted (or left alone, if already set). If it does not provide an adapter-name, the adapter-name of the first registered adapter will be used.
+
+Usage instructions for both follow.
+
+
+### Environment-Based Configuration
+
+```go
+ecp := log.NewEnvironmentConfigurationProvider()
+log.LoadConfiguration(ecp)
+```
+
+Each of the items listed at the top of the "Configuration" section can be specified in the environment using a prefix of "Log" (e.g. LogDefaultAdapterName).
+
+
+### Static Configuration
+
+```go
+scp := log.NewStaticConfigurationProvider()
+scp.SetLevelName(log.LevelNameWarning)
+
+log.LoadConfiguration(scp)
+```
diff --git a/vendor/github.com/dsoprea/go-logging/config.go b/vendor/github.com/dsoprea/go-logging/config.go
new file mode 100644
index 000000000..20896e342
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-logging/config.go
@@ -0,0 +1,246 @@
+package log
+
+import (
+	"fmt"
+	"os"
+)
+
+// Config keys.
+const (
+	ckFormat                 = "LogFormat"
+	ckDefaultAdapterName     = "LogDefaultAdapterName"
+	ckLevelName              = "LogLevelName"
+	ckIncludeNouns           = "LogIncludeNouns"
+	ckExcludeNouns           = "LogExcludeNouns"
+	ckExcludeBypassLevelName = "LogExcludeBypassLevelName"
+)
+
+// Other constants
+const (
+	defaultFormat    = "{{.Noun}}: [{{.Level}}] {{if eq .ExcludeBypass true}} [BYPASS]{{end}} {{.Message}}"
+	defaultLevelName = LevelNameInfo
+)
+
+// Config
+var (
+	// Alternative format.
+	format = defaultFormat
+
+	// Alternative adapter.
+	defaultAdapterName = ""
+
+	// Alternative level at which to display log-items
+	levelName = defaultLevelName
+
+	// Configuration-driven comma-separated list of nouns to include.
+	includeNouns = ""
+
+	// Configuration-driven comma-separated list of nouns to exclude.
+	excludeNouns = ""
+
+	// Level at which to disregard exclusion (if the severity of a message
+	// meets or exceeds this, always display).
+	excludeBypassLevelName = ""
+)
+
+// Other
+var (
+	configurationLoaded = false
+)
+
+// Return the current default adapter name.
+func GetDefaultAdapterName() string {
+	return defaultAdapterName
+}
+
+// The default adapter is automatically the first one registered. This
+// overrides that.
+func SetDefaultAdapterName(name string) {
+	defaultAdapterName = name
+}
+
+func LoadConfiguration(cp ConfigurationProvider) {
+	configuredDefaultAdapterName := cp.DefaultAdapterName()
+
+	if configuredDefaultAdapterName != "" {
+		defaultAdapterName = configuredDefaultAdapterName
+	}
+
+	includeNouns = cp.IncludeNouns()
+	excludeNouns = cp.ExcludeNouns()
+	excludeBypassLevelName = cp.ExcludeBypassLevelName()
+
+	f := cp.Format()
+	if f != "" {
+		format = f
+	}
+
+	ln := cp.LevelName()
+	if ln != "" {
+		levelName = ln
+	}
+
+	configurationLoaded = true
+}
+
+func getConfigState() map[string]interface{} {
+	return map[string]interface{}{
+		"format":                 format,
+		"defaultAdapterName":     defaultAdapterName,
+		"levelName":              levelName,
+		"includeNouns":           includeNouns,
+		"excludeNouns":           excludeNouns,
+		"excludeBypassLevelName": excludeBypassLevelName,
+	}
+}
+
+func setConfigState(config map[string]interface{}) {
+	format = config["format"].(string)
+
+	defaultAdapterName = config["defaultAdapterName"].(string)
+	levelName = config["levelName"].(string)
+	includeNouns = config["includeNouns"].(string)
+	excludeNouns = config["excludeNouns"].(string)
+	excludeBypassLevelName = config["excludeBypassLevelName"].(string)
+}
+
+func getConfigDump() string {
+	return fmt.Sprintf(
+		"Current configuration:\n"+
+			"  FORMAT=[%s]\n"+
+			"  DEFAULT-ADAPTER-NAME=[%s]\n"+
+			"  LEVEL-NAME=[%s]\n"+
+			"  INCLUDE-NOUNS=[%s]\n"+
+			"  EXCLUDE-NOUNS=[%s]\n"+
+			"  EXCLUDE-BYPASS-LEVEL-NAME=[%s]",
+		format, defaultAdapterName, levelName, includeNouns, excludeNouns, excludeBypassLevelName)
+}
+
+func IsConfigurationLoaded() bool {
+	return configurationLoaded
+}
+
+type ConfigurationProvider interface {
+	// Alternative format (defaults to the standard format).
+	Format() string
+
+	// Alternative adapter (defaults to "appengine").
+	DefaultAdapterName() string
+
+	// Alternative level at which to display log-items (defaults to
+	// "info").
+	LevelName() string
+
+	// Configuration-driven comma-separated list of nouns to include. Defaults
+	// to empty.
+	IncludeNouns() string
+
+	// Configuration-driven comma-separated list of nouns to exclude. Defaults
+	// to empty.
+	ExcludeNouns() string
+
+	// Level at which to disregard exclusion (if the severity of a message
+	// meets or exceeds this, always display). Defaults to empty.
+	ExcludeBypassLevelName() string
+}
+
+// Environment configuration-provider.
+type EnvironmentConfigurationProvider struct { +} + +func NewEnvironmentConfigurationProvider() *EnvironmentConfigurationProvider { + return new(EnvironmentConfigurationProvider) +} + +func (ecp *EnvironmentConfigurationProvider) Format() string { + return os.Getenv(ckFormat) +} + +func (ecp *EnvironmentConfigurationProvider) DefaultAdapterName() string { + return os.Getenv(ckDefaultAdapterName) +} + +func (ecp *EnvironmentConfigurationProvider) LevelName() string { + return os.Getenv(ckLevelName) +} + +func (ecp *EnvironmentConfigurationProvider) IncludeNouns() string { + return os.Getenv(ckIncludeNouns) +} + +func (ecp *EnvironmentConfigurationProvider) ExcludeNouns() string { + return os.Getenv(ckExcludeNouns) +} + +func (ecp *EnvironmentConfigurationProvider) ExcludeBypassLevelName() string { + return os.Getenv(ckExcludeBypassLevelName) +} + +// Static configuration-provider. +type StaticConfigurationProvider struct { + format string + defaultAdapterName string + levelName string + includeNouns string + excludeNouns string + excludeBypassLevelName string +} + +func NewStaticConfigurationProvider() *StaticConfigurationProvider { + return new(StaticConfigurationProvider) +} + +func (scp *StaticConfigurationProvider) SetFormat(format string) { + scp.format = format +} + +func (scp *StaticConfigurationProvider) SetDefaultAdapterName(adapterName string) { + scp.defaultAdapterName = adapterName +} + +func (scp *StaticConfigurationProvider) SetLevelName(levelName string) { + scp.levelName = levelName +} + +func (scp *StaticConfigurationProvider) SetIncludeNouns(includeNouns string) { + scp.includeNouns = includeNouns +} + +func (scp *StaticConfigurationProvider) SetExcludeNouns(excludeNouns string) { + scp.excludeNouns = excludeNouns +} + +func (scp *StaticConfigurationProvider) SetExcludeBypassLevelName(excludeBypassLevelName string) { + scp.excludeBypassLevelName = excludeBypassLevelName +} + +func (scp *StaticConfigurationProvider) Format() string { + return scp.format +} + +func (scp *StaticConfigurationProvider) DefaultAdapterName() string { + return scp.defaultAdapterName +} + +func (scp *StaticConfigurationProvider) LevelName() string { + return scp.levelName +} + +func (scp *StaticConfigurationProvider) IncludeNouns() string { + return scp.includeNouns +} + +func (scp *StaticConfigurationProvider) ExcludeNouns() string { + return scp.excludeNouns +} + +func (scp *StaticConfigurationProvider) ExcludeBypassLevelName() string { + return scp.excludeBypassLevelName +} + +func init() { + // Do the initial configuration-load from the environment. We gotta seed it + // with something for simplicity's sake. 
+	ecp := NewEnvironmentConfigurationProvider()
+	LoadConfiguration(ecp)
+}
diff --git a/vendor/github.com/dsoprea/go-logging/console_adapter.go b/vendor/github.com/dsoprea/go-logging/console_adapter.go
new file mode 100644
index 000000000..c63a2911c
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-logging/console_adapter.go
@@ -0,0 +1,36 @@
+package log
+
+import (
+	golog "log"
+)
+
+type ConsoleLogAdapter struct {
+}
+
+func NewConsoleLogAdapter() LogAdapter {
+	return new(ConsoleLogAdapter)
+}
+
+func (cla *ConsoleLogAdapter) Debugf(lc *LogContext, message *string) error {
+	golog.Println(*message)
+
+	return nil
+}
+
+func (cla *ConsoleLogAdapter) Infof(lc *LogContext, message *string) error {
+	golog.Println(*message)
+
+	return nil
+}
+
+func (cla *ConsoleLogAdapter) Warningf(lc *LogContext, message *string) error {
+	golog.Println(*message)
+
+	return nil
+}
+
+func (cla *ConsoleLogAdapter) Errorf(lc *LogContext, message *string) error {
+	golog.Println(*message)
+
+	return nil
+}
diff --git a/vendor/github.com/dsoprea/go-logging/log.go b/vendor/github.com/dsoprea/go-logging/log.go
new file mode 100644
index 000000000..84117a92e
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-logging/log.go
@@ -0,0 +1,537 @@
+package log
+
+import (
+	"bytes"
+	e "errors"
+	"fmt"
+	"strings"
+	"sync"
+
+	"text/template"
+
+	"github.com/go-errors/errors"
+	"golang.org/x/net/context"
+)
+
+// TODO(dustin): Finish symbol documentation
+
+// Config severity integers.
+const (
+	LevelDebug   = iota
+	LevelInfo    = iota
+	LevelWarning = iota
+	LevelError   = iota
+)
+
+// Config severity names.
+const (
+	LevelNameDebug   = "debug"
+	LevelNameInfo    = "info"
+	LevelNameWarning = "warning"
+	LevelNameError   = "error"
+)
+
+// Severity name->integer map.
+var (
+	LevelNameMap = map[string]int{
+		LevelNameDebug:   LevelDebug,
+		LevelNameInfo:    LevelInfo,
+		LevelNameWarning: LevelWarning,
+		LevelNameError:   LevelError,
+	}
+
+	LevelNameMapR = map[int]string{
+		LevelDebug:   LevelNameDebug,
+		LevelInfo:    LevelNameInfo,
+		LevelWarning: LevelNameWarning,
+		LevelError:   LevelNameError,
+	}
+)
+
+// Errors
+var (
+	ErrAdapterAlreadyRegistered = e.New("adapter already registered")
+	ErrFormatEmpty              = e.New("format is empty")
+	ErrExcludeLevelNameInvalid  = e.New("exclude bypass-level is invalid")
+	ErrNoAdapterConfigured      = e.New("no default adapter configured")
+	ErrAdapterIsNil             = e.New("adapter is nil")
+	ErrConfigurationNotLoaded   = e.New("can not configure because configuration is not loaded")
+)
+
+// Other
+var (
+	includeFilters    = make(map[string]bool)
+	useIncludeFilters = false
+	excludeFilters    = make(map[string]bool)
+	useExcludeFilters = false
+
+	adapters = make(map[string]LogAdapter)
+
+	// TODO(dustin): !! Finish implementing this.
+	excludeBypassLevel = -1
+)
+
+// Add global include filter.
+func AddIncludeFilter(noun string) {
+	includeFilters[noun] = true
+	useIncludeFilters = true
+}
+
+// Remove global include filter.
+func RemoveIncludeFilter(noun string) {
+	delete(includeFilters, noun)
+	if len(includeFilters) == 0 {
+		useIncludeFilters = false
+	}
+}
+
+// Add global exclude filter.
+func AddExcludeFilter(noun string) {
+	excludeFilters[noun] = true
+	useExcludeFilters = true
+}
+
+// Remove global exclude filter.
+func RemoveExcludeFilter(noun string) { + delete(excludeFilters, noun) + if len(excludeFilters) == 0 { + useExcludeFilters = false + } +} + +func AddAdapter(name string, la LogAdapter) { + if _, found := adapters[name]; found == true { + Panic(ErrAdapterAlreadyRegistered) + } + + if la == nil { + Panic(ErrAdapterIsNil) + } + + adapters[name] = la + + if GetDefaultAdapterName() == "" { + SetDefaultAdapterName(name) + } +} + +func ClearAdapters() { + adapters = make(map[string]LogAdapter) + SetDefaultAdapterName("") +} + +type LogAdapter interface { + Debugf(lc *LogContext, message *string) error + Infof(lc *LogContext, message *string) error + Warningf(lc *LogContext, message *string) error + Errorf(lc *LogContext, message *string) error +} + +// TODO(dustin): !! Also populate whether we've bypassed an exception so that +// we can add a template macro to prefix an exclamation of +// some sort. +type MessageContext struct { + Level *string + Noun *string + Message *string + ExcludeBypass bool +} + +type LogContext struct { + Logger *Logger + Ctx context.Context +} + +type Logger struct { + isConfigured bool + an string + la LogAdapter + t *template.Template + systemLevel int + noun string +} + +func NewLoggerWithAdapterName(noun string, adapterName string) (l *Logger) { + l = &Logger{ + noun: noun, + an: adapterName, + } + + return l +} + +func NewLogger(noun string) (l *Logger) { + l = NewLoggerWithAdapterName(noun, "") + + return l +} + +func (l *Logger) Noun() string { + return l.noun +} + +func (l *Logger) Adapter() LogAdapter { + return l.la +} + +var ( + configureMutex sync.Mutex +) + +func (l *Logger) doConfigure(force bool) { + configureMutex.Lock() + defer configureMutex.Unlock() + + if l.isConfigured == true && force == false { + return + } + + if IsConfigurationLoaded() == false { + Panic(ErrConfigurationNotLoaded) + } + + if l.an == "" { + l.an = GetDefaultAdapterName() + } + + // If this is empty, then no specific adapter was given or no system + // default was configured (which implies that no adapters were registered). + // All of our logging will be skipped. + if l.an != "" { + la, found := adapters[l.an] + if found == false { + Panic(fmt.Errorf("adapter is not valid: %s", l.an)) + } + + l.la = la + } + + // Set the level. + + systemLevel, found := LevelNameMap[levelName] + if found == false { + Panic(fmt.Errorf("log-level not valid: [%s]", levelName)) + } + + l.systemLevel = systemLevel + + // Set the form. + + if format == "" { + Panic(ErrFormatEmpty) + } + + if t, err := template.New("logItem").Parse(format); err != nil { + Panic(err) + } else { + l.t = t + } + + l.isConfigured = true +} + +func (l *Logger) flattenMessage(lc *MessageContext, format *string, args []interface{}) (string, error) { + m := fmt.Sprintf(*format, args...) + + lc.Message = &m + + var b bytes.Buffer + if err := l.t.Execute(&b, *lc); err != nil { + return "", err + } + + return b.String(), nil +} + +func (l *Logger) allowMessage(noun string, level int) bool { + if _, found := includeFilters[noun]; found == true { + return true + } + + // If we didn't hit an include filter and we *had* include filters, filter + // it out. 
+ if useIncludeFilters == true { + return false + } + + if _, found := excludeFilters[noun]; found == true { + return false + } + + return true +} + +func (l *Logger) makeLogContext(ctx context.Context) *LogContext { + return &LogContext{ + Ctx: ctx, + Logger: l, + } +} + +type LogMethod func(lc *LogContext, message *string) error + +func (l *Logger) log(ctx context.Context, level int, lm LogMethod, format string, args []interface{}) error { + if l.systemLevel > level { + return nil + } + + // Preempt the normal filter checks if we can unconditionally allow at a + // certain level and we've hit that level. + // + // Notice that this is only relevant if the system-log level is letting + // *anything* show logs at the level we came in with. + canExcludeBypass := level >= excludeBypassLevel && excludeBypassLevel != -1 + didExcludeBypass := false + + n := l.Noun() + + if l.allowMessage(n, level) == false { + if canExcludeBypass == false { + return nil + } else { + didExcludeBypass = true + } + } + + levelName, found := LevelNameMapR[level] + if found == false { + Panic(fmt.Errorf("level not valid: (%d)", level)) + } + + levelName = strings.ToUpper(levelName) + + lc := &MessageContext{ + Level: &levelName, + Noun: &n, + ExcludeBypass: didExcludeBypass, + } + + if s, err := l.flattenMessage(lc, &format, args); err != nil { + return err + } else { + lc := l.makeLogContext(ctx) + if err := lm(lc, &s); err != nil { + panic(err) + } + + return e.New(s) + } +} + +func (l *Logger) Debugf(ctx context.Context, format string, args ...interface{}) { + l.doConfigure(false) + + if l.la != nil { + l.log(ctx, LevelDebug, l.la.Debugf, format, args) + } +} + +func (l *Logger) Infof(ctx context.Context, format string, args ...interface{}) { + l.doConfigure(false) + + if l.la != nil { + l.log(ctx, LevelInfo, l.la.Infof, format, args) + } +} + +func (l *Logger) Warningf(ctx context.Context, format string, args ...interface{}) { + l.doConfigure(false) + + if l.la != nil { + l.log(ctx, LevelWarning, l.la.Warningf, format, args) + } +} + +func (l *Logger) mergeStack(err interface{}, format string, args []interface{}) (string, []interface{}) { + if format != "" { + format += "\n%s" + } else { + format = "%s" + } + + var stackified *errors.Error + stackified, ok := err.(*errors.Error) + if ok == false { + stackified = errors.Wrap(err, 2) + } + + args = append(args, stackified.ErrorStack()) + + return format, args +} + +func (l *Logger) Errorf(ctx context.Context, errRaw interface{}, format string, args ...interface{}) { + l.doConfigure(false) + + var err interface{} + + if errRaw != nil { + _, ok := errRaw.(*errors.Error) + if ok == true { + err = errRaw + } else { + err = errors.Wrap(errRaw, 1) + } + } + + if l.la != nil { + if errRaw != nil { + format, args = l.mergeStack(err, format, args) + } + + l.log(ctx, LevelError, l.la.Errorf, format, args) + } +} + +func (l *Logger) ErrorIff(ctx context.Context, errRaw interface{}, format string, args ...interface{}) { + if errRaw == nil { + return + } + + var err interface{} + + _, ok := errRaw.(*errors.Error) + if ok == true { + err = errRaw + } else { + err = errors.Wrap(errRaw, 1) + } + + l.Errorf(ctx, err, format, args...) 
+}
+
+func (l *Logger) Panicf(ctx context.Context, errRaw interface{}, format string, args ...interface{}) {
+	l.doConfigure(false)
+
+	var err interface{}
+
+	_, ok := errRaw.(*errors.Error)
+	if ok == true {
+		err = errRaw
+	} else {
+		err = errors.Wrap(errRaw, 1)
+	}
+
+	if l.la != nil {
+		format, args = l.mergeStack(err, format, args)
+		err = l.log(ctx, LevelError, l.la.Errorf, format, args)
+	}
+
+	Panic(err.(error))
+}
+
+func (l *Logger) PanicIff(ctx context.Context, errRaw interface{}, format string, args ...interface{}) {
+	if errRaw == nil {
+		return
+	}
+
+	var err interface{}
+
+	_, ok := errRaw.(*errors.Error)
+	if ok == true {
+		err = errRaw
+	} else {
+		err = errors.Wrap(errRaw, 1)
+	}
+
+	l.Panicf(ctx, err.(error), format, args...)
+}
+
+func Wrap(err interface{}) *errors.Error {
+	es, ok := err.(*errors.Error)
+	if ok == true {
+		return es
+	} else {
+		return errors.Wrap(err, 1)
+	}
+}
+
+func Errorf(message string, args ...interface{}) *errors.Error {
+	err := fmt.Errorf(message, args...)
+	return errors.Wrap(err, 1)
+}
+
+func Panic(err interface{}) {
+	_, ok := err.(*errors.Error)
+	if ok == true {
+		panic(err)
+	} else {
+		panic(errors.Wrap(err, 1))
+	}
+}
+
+func Panicf(message string, args ...interface{}) {
+	err := Errorf(message, args...)
+	Panic(err)
+}
+
+func PanicIf(err interface{}) {
+	if err == nil {
+		return
+	}
+
+	_, ok := err.(*errors.Error)
+	if ok == true {
+		panic(err)
+	} else {
+		panic(errors.Wrap(err, 1))
+	}
+}
+
+// Is checks if the left ("actual") error equals the right ("against") error.
+// The right must be an unwrapped error (the kind that you'd initialize as a
+// global variable). The left can be a wrapped or unwrapped error.
+func Is(actual, against error) bool {
+	// If it's an unwrapped error.
+	if _, ok := actual.(*errors.Error); ok == false {
+		return actual == against
+	}
+
+	return errors.Is(actual, against)
+}
+
+// PrintError is a utility function to prevent the caller from having to
+// import the third-party library.
+func PrintError(err error) {
+	wrapped := Wrap(err)
+	fmt.Printf("Stack:\n\n%s\n", wrapped.ErrorStack())
+}
+
+// PrintErrorf is a utility function to prevent the caller from having to
+// import the third-party library.
+func PrintErrorf(err error, format string, args ...interface{}) {
+	wrapped := Wrap(err)
+
+	fmt.Printf(format, args...)
+ fmt.Printf("\n") + fmt.Printf("Stack:\n\n%s\n", wrapped.ErrorStack()) +} + +func init() { + if format == "" { + format = defaultFormat + } + + if levelName == "" { + levelName = defaultLevelName + } + + if includeNouns != "" { + for _, noun := range strings.Split(includeNouns, ",") { + AddIncludeFilter(noun) + } + } + + if excludeNouns != "" { + for _, noun := range strings.Split(excludeNouns, ",") { + AddExcludeFilter(noun) + } + } + + if excludeBypassLevelName != "" { + var found bool + if excludeBypassLevel, found = LevelNameMap[excludeBypassLevelName]; found == false { + panic(ErrExcludeLevelNameInvalid) + } + } +} diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/.MODULE_ROOT b/vendor/github.com/dsoprea/go-photoshop-info-format/.MODULE_ROOT new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/LICENSE b/vendor/github.com/dsoprea/go-photoshop-info-format/LICENSE new file mode 100644 index 000000000..d92c04268 --- /dev/null +++ b/vendor/github.com/dsoprea/go-photoshop-info-format/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Dustin Oprea + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/README.md b/vendor/github.com/dsoprea/go-photoshop-info-format/README.md new file mode 100644 index 000000000..6219ba020 --- /dev/null +++ b/vendor/github.com/dsoprea/go-photoshop-info-format/README.md @@ -0,0 +1,3 @@ +# Overview + +This is a minimal Photoshop format implementation to allow IPTC data to be extracted from a JPEG image. This project primarily services [go-jpeg-image-structure](https://github.com/dsoprea/go-jpeg-image-structure). diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/info.go b/vendor/github.com/dsoprea/go-photoshop-info-format/info.go new file mode 100644 index 000000000..7f17fa6c0 --- /dev/null +++ b/vendor/github.com/dsoprea/go-photoshop-info-format/info.go @@ -0,0 +1,119 @@ +package photoshopinfo + +import ( + "fmt" + "io" + + "encoding/binary" + + "github.com/dsoprea/go-logging" +) + +var ( + defaultByteOrder = binary.BigEndian +) + +// Photoshop30InfoRecord is the data for one parsed Photoshop-info record. +type Photoshop30InfoRecord struct { + // RecordType is the record-type. + RecordType string + + // ImageResourceId is the image resource-ID. + ImageResourceId uint16 + + // Name is the name of the record. It is optional and will be an empty- + // string if not present. + Name string + + // Data is the raw record data. 
+	Data []byte
+}
+
+// String returns a descriptive string.
+func (pir Photoshop30InfoRecord) String() string {
+	return fmt.Sprintf("RECORD-TYPE=[%s] IMAGE-RESOURCE-ID=[0x%04x] NAME=[%s] DATA-SIZE=(%d)", pir.RecordType, pir.ImageResourceId, pir.Name, len(pir.Data))
+}
+
+// ReadPhotoshop30InfoRecord parses a single photoshop-info record.
+func ReadPhotoshop30InfoRecord(r io.Reader) (pir Photoshop30InfoRecord, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	recordType := make([]byte, 4)
+	_, err = io.ReadFull(r, recordType)
+	if err != nil {
+		if err == io.EOF {
+			return pir, err
+		}
+
+		log.Panic(err)
+	}
+
+	// TODO(dustin): Move BigEndian to constant/config.
+
+	irId := uint16(0)
+	err = binary.Read(r, defaultByteOrder, &irId)
+	log.PanicIf(err)
+
+	nameSize := uint8(0)
+	err = binary.Read(r, defaultByteOrder, &nameSize)
+	log.PanicIf(err)
+
+	// Add an extra byte if the length byte plus the name is an odd number of
+	// bytes, to keep the total bytes read even.
+	doAddPadding := (1+nameSize)%2 == 1
+	if doAddPadding == true {
+		nameSize++
+	}
+
+	name := make([]byte, nameSize)
+	_, err = io.ReadFull(r, name)
+	log.PanicIf(err)
+
+	// If the last byte is padding, truncate it.
+	if doAddPadding == true {
+		name = name[:nameSize-1]
+	}
+
+	dataSize := uint32(0)
+	err = binary.Read(r, defaultByteOrder, &dataSize)
+	log.PanicIf(err)
+
+	data := make([]byte, dataSize+dataSize%2)
+	_, err = io.ReadFull(r, data)
+	log.PanicIf(err)
+
+	data = data[:dataSize]
+
+	pir = Photoshop30InfoRecord{
+		RecordType:      string(recordType),
+		ImageResourceId: irId,
+		Name:            string(name),
+		Data:            data,
+	}
+
+	return pir, nil
+}
+
+// ReadPhotoshop30Info parses a sequence of photoshop-info records from the stream.
+func ReadPhotoshop30Info(r io.Reader) (pirIndex map[uint16]Photoshop30InfoRecord, err error) {
+	pirIndex = make(map[uint16]Photoshop30InfoRecord)
+
+	for {
+		pir, err := ReadPhotoshop30InfoRecord(r)
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+
+			log.Panic(err)
+		}
+
+		pirIndex[pir.ImageResourceId] = pir
+	}
+
+	return pirIndex, nil
+}
diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/testing_common.go b/vendor/github.com/dsoprea/go-photoshop-info-format/testing_common.go
new file mode 100644
index 000000000..1bc03139a
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-photoshop-info-format/testing_common.go
@@ -0,0 +1,70 @@
+package photoshopinfo
+
+import (
+	"os"
+	"path"
+
+	"github.com/dsoprea/go-logging"
+)
+
+var (
+	testDataRelFilepath = "photoshop.data"
+)
+
+var (
+	moduleRootPath = ""
+	assetsPath     = ""
+)
+
+func GetModuleRootPath() string {
+	if moduleRootPath == "" {
+		moduleRootPath = os.Getenv("PHOTOSHOPINFO_MODULE_ROOT_PATH")
+		if moduleRootPath != "" {
+			return moduleRootPath
+		}
+
+		currentWd, err := os.Getwd()
+		log.PanicIf(err)
+
+		currentPath := currentWd
+		visited := make([]string, 0)
+
+		for {
+			tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT")
+
+			_, err := os.Stat(tryStampFilepath)
+			if err != nil && os.IsNotExist(err) != true {
+				log.Panic(err)
+			} else if err == nil {
+				break
+			}
+
+			visited = append(visited, tryStampFilepath)
+
+			currentPath = path.Dir(currentPath)
+			if currentPath == "/" {
+				log.Panicf("could not find module-root: %v", visited)
+			}
+		}
+
+		moduleRootPath = currentPath
+	}
+
+	return moduleRootPath
+}
+
+func GetTestAssetsPath() string {
+	if assetsPath == "" {
+		moduleRootPath := GetModuleRootPath()
+		assetsPath = path.Join(moduleRootPath, "assets")
+	}
+
+	return assetsPath
+}
+
+func
GetTestDataFilepath() string { + assetsPath := GetTestAssetsPath() + filepath := path.Join(assetsPath, testDataRelFilepath) + + return filepath +} diff --git a/vendor/github.com/dsoprea/go-utility/v2/LICENSE b/vendor/github.com/dsoprea/go-utility/v2/LICENSE new file mode 100644 index 000000000..8941063e1 --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/LICENSE @@ -0,0 +1,7 @@ +Copyright 2019 Random Ingenuity InformationWorks + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/README.md b/vendor/github.com/dsoprea/go-utility/v2/filesystem/README.md new file mode 100644 index 000000000..eb03fea7c --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/README.md @@ -0,0 +1,64 @@ +[![GoDoc](https://godoc.org/github.com/dsoprea/go-utility/filesystem?status.svg)](https://godoc.org/github.com/dsoprea/go-utility/filesystem) +[![Build Status](https://travis-ci.org/dsoprea/go-utility.svg?branch=master)](https://travis-ci.org/dsoprea/go-utility) +[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-utility/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-utility?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-utility)](https://goreportcard.com/report/github.com/dsoprea/go-utility) + +# bounceback + +An `io.ReadSeeker` and `io.WriteSeeker` that returns to the right place before +reading or writing. Useful when the same file resource is being reused for reads +or writes throughout that file. + +# list_files + +A recursive path walker that supports filters. + +# seekable_buffer + +A memory structure that satisfies `io.ReadWriteSeeker`. + +# copy_bytes_between_positions + +Given an `io.ReadWriteSeeker`, copy N bytes from one position to an earlier +position. + +# read_counter, write_counter + +Wrap `io.Reader` and `io.Writer` structs in order to report how many bytes were +transferred. + +# readseekwritecloser + +Provides the ReadWriteSeekCloser interface that combines a RWS and a Closer. +Also provides a no-op wrapper to augment a plain RWS with a closer. + +# boundedreadwriteseek + +Wraps a ReadWriteSeeker such that no seeks can be at an offset less than a +specific-offset. + +# calculateseek + +Provides a reusable function with which to calculate seek offsets. + +# progress_wrapper + +Provides `io.Reader` and `io.Writer` wrappers that also trigger callbacks after +each call. The reader wrapper also invokes the callback upon EOF. 
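
As a quick orientation, here is a minimal sketch of how a couple of these
pieces compose. It assumes nothing beyond what this module itself exports
(package `rifs`, import path `github.com/dsoprea/go-utility/v2/filesystem`):

```go
package main

import (
	"fmt"
	"io"

	rifs "github.com/dsoprea/go-utility/v2/filesystem"
)

func main() {
	// An in-memory io.ReadWriteSeeker seeded with some data.
	sb := rifs.NewSeekableBufferWithBytes([]byte("hello, world"))

	// BouncebackReader re-seeks to its own tracked position before every
	// read, so another consumer moving the underlying offset is harmless.
	br, err := rifs.NewBouncebackReader(sb)
	if err != nil {
		panic(err)
	}

	buf := make([]byte, 5)
	if _, err := br.Read(buf); err != nil && err != io.EOF {
		panic(err)
	}

	fmt.Printf("%s\n", buf) // "hello"
}
```

The same pattern applies to `BouncebackWriter` for a prolonged series of
writes against a shared stream.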
+
+# does_exist
+
+Check whether a file/directory exists using a file-path.
+
+# graceful_copy
+
+Do a copy but correctly handle short-writes and reads that might return a non-
+zero read count *and* EOF.
+
+# readseeker_to_readerat
+
+A wrapper that allows an `io.ReadSeeker` to be used as an `io.ReaderAt`.
+
+# simplefileinfo
+
+An implementation of `os.FileInfo` to support testing.
diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/bounceback.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/bounceback.go
new file mode 100644
index 000000000..1112a10ef
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/bounceback.go
@@ -0,0 +1,273 @@
+package rifs
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/dsoprea/go-logging"
+)
+
+// BouncebackStats describes operation counts.
+type BouncebackStats struct {
+	reads  int
+	writes int
+	seeks  int
+	syncs  int
+}
+
+func (bbs BouncebackStats) String() string {
+	return fmt.Sprintf(
+		"BouncebackStats<READS=(%d) WRITES=(%d) SEEKS=(%d) SYNCS=(%d)>",
+		bbs.reads, bbs.writes, bbs.seeks, bbs.syncs)
+}
+
+type bouncebackBase struct {
+	currentPosition int64
+
+	stats BouncebackStats
+}
+
+// Position returns the position that we're supposed to be at.
+func (bb *bouncebackBase) Position() int64 {
+
+	// TODO(dustin): Add test
+
+	return bb.currentPosition
+}
+
+// StatsReads returns the number of reads that have been attempted.
+func (bb *bouncebackBase) StatsReads() int {
+
+	// TODO(dustin): Add test
+
+	return bb.stats.reads
+}
+
+// StatsWrites returns the number of write operations.
+func (bb *bouncebackBase) StatsWrites() int {
+
+	// TODO(dustin): Add test
+
+	return bb.stats.writes
+}
+
+// StatsSeeks returns the number of seeks.
+func (bb *bouncebackBase) StatsSeeks() int {
+
+	// TODO(dustin): Add test
+
+	return bb.stats.seeks
+}
+
+// StatsSyncs returns the number of corrective seeks ("bounce-backs").
+func (bb *bouncebackBase) StatsSyncs() int {
+
+	// TODO(dustin): Add test
+
+	return bb.stats.syncs
+}
+
+// seek does a seek to an arbitrary place in the `io.ReadSeeker`.
+func (bb *bouncebackBase) seek(s io.Seeker, offset int64, whence int) (newPosition int64, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	// If the seek is relative, make sure we're where we're supposed to be *first*.
+	if whence != io.SeekStart {
+		err = bb.checkPosition(s)
+		log.PanicIf(err)
+	}
+
+	bb.stats.seeks++
+
+	newPosition, err = s.Seek(offset, whence)
+	log.PanicIf(err)
+
+	// Update our internal tracking.
+	bb.currentPosition = newPosition
+
+	return newPosition, nil
+}
+
+func (bb *bouncebackBase) checkPosition(s io.Seeker) (err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	// Make sure we're where we're supposed to be.
+
+	// This should have no overhead, and enables us to collect stats.
+	realCurrentPosition, err := s.Seek(0, io.SeekCurrent)
+	log.PanicIf(err)
+
+	if realCurrentPosition != bb.currentPosition {
+		bb.stats.syncs++
+
+		_, err = s.Seek(bb.currentPosition, io.SeekStart)
+		log.PanicIf(err)
+	}
+
+	return nil
+}
+
+// BouncebackReader wraps a ReadSeeker, keeps track of our position, and
+// seeks back to it before reading. This allows an underlying ReadSeeker with
+// an unstable position to still be used for a prolonged series of reads.
+type BouncebackReader struct {
+	rs io.ReadSeeker
+
+	bouncebackBase
+}
+
+// NewBouncebackReader returns a `*BouncebackReader` struct.
+func NewBouncebackReader(rs io.ReadSeeker) (br *BouncebackReader, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	initialPosition, err := rs.Seek(0, io.SeekCurrent)
+	log.PanicIf(err)
+
+	bb := bouncebackBase{
+		currentPosition: initialPosition,
+	}
+
+	br = &BouncebackReader{
+		rs:             rs,
+		bouncebackBase: bb,
+	}
+
+	return br, nil
+}
+
+// Seek does a seek to an arbitrary place in the `io.ReadSeeker`.
+func (br *BouncebackReader) Seek(offset int64, whence int) (newPosition int64, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	newPosition, err = br.bouncebackBase.seek(br.rs, offset, whence)
+	log.PanicIf(err)
+
+	return newPosition, nil
+}
+
+// Read does a standard read.
+func (br *BouncebackReader) Read(p []byte) (n int, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	br.bouncebackBase.stats.reads++
+
+	err = br.bouncebackBase.checkPosition(br.rs)
+	log.PanicIf(err)
+
+	// Do read.
+
+	n, err = br.rs.Read(p)
+	if err != nil {
+		if err == io.EOF {
+			return 0, io.EOF
+		}
+
+		log.Panic(err)
+	}
+
+	// Update our internal tracking.
+	br.bouncebackBase.currentPosition += int64(n)
+
+	return n, nil
+}
+
+// BouncebackWriter wraps a WriteSeeker, keeps track of our position, and
+// seeks back to it before writing. This allows an underlying WriteSeeker with
+// an unstable position to still be used for a prolonged series of writes.
+type BouncebackWriter struct {
+	ws io.WriteSeeker
+
+	bouncebackBase
+}
+
+// NewBouncebackWriter returns a new `BouncebackWriter` struct.
+func NewBouncebackWriter(ws io.WriteSeeker) (bw *BouncebackWriter, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	initialPosition, err := ws.Seek(0, io.SeekCurrent)
+	log.PanicIf(err)
+
+	bb := bouncebackBase{
+		currentPosition: initialPosition,
+	}
+
+	bw = &BouncebackWriter{
+		ws:             ws,
+		bouncebackBase: bb,
+	}
+
+	return bw, nil
+}
+
+// Seek puts us at a specific position in the internal writer for the next
+// write/seek.
+func (bw *BouncebackWriter) Seek(offset int64, whence int) (newPosition int64, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	newPosition, err = bw.bouncebackBase.seek(bw.ws, offset, whence)
+	log.PanicIf(err)
+
+	return newPosition, nil
+}
+
+// Write performs a write against the internal `WriteSeeker` starting at the
+// position that we're supposed to be at.
+func (bw *BouncebackWriter) Write(p []byte) (n int, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	bw.bouncebackBase.stats.writes++
+
+	// Make sure we're where we're supposed to be.
+
+	realCurrentPosition, err := bw.ws.Seek(0, io.SeekCurrent)
+	log.PanicIf(err)
+
+	if realCurrentPosition != bw.bouncebackBase.currentPosition {
+		bw.bouncebackBase.stats.seeks++
+
+		_, err = bw.ws.Seek(bw.bouncebackBase.currentPosition, io.SeekStart)
+		log.PanicIf(err)
+	}
+
+	// Do write.
+
+	n, err = bw.ws.Write(p)
+	log.PanicIf(err)
+
+	// Update our internal tracking.
+ bw.bouncebackBase.currentPosition += int64(n) + + return n, nil +} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseekcloser.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseekcloser.go new file mode 100644 index 000000000..3d2e840fa --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseekcloser.go @@ -0,0 +1,95 @@ +package rifs + +import ( + "io" + + "github.com/dsoprea/go-logging" +) + +// BoundedReadWriteSeekCloser wraps a RWS that is also a closer with boundaries. +// This proxies the RWS methods to the inner BRWS inside. +type BoundedReadWriteSeekCloser struct { + io.Closer + *BoundedReadWriteSeeker +} + +// NewBoundedReadWriteSeekCloser returns a new BoundedReadWriteSeekCloser. +func NewBoundedReadWriteSeekCloser(rwsc ReadWriteSeekCloser, minimumOffset int64, staticFileSize int64) (brwsc *BoundedReadWriteSeekCloser, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + bs, err := NewBoundedReadWriteSeeker(rwsc, minimumOffset, staticFileSize) + log.PanicIf(err) + + brwsc = &BoundedReadWriteSeekCloser{ + Closer: rwsc, + BoundedReadWriteSeeker: bs, + } + + return brwsc, nil +} + +// Seek forwards calls to the inner RWS. +func (rwsc *BoundedReadWriteSeekCloser) Seek(offset int64, whence int) (newOffset int64, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + newOffset, err = rwsc.BoundedReadWriteSeeker.Seek(offset, whence) + log.PanicIf(err) + + return newOffset, nil +} + +// Read forwards calls to the inner RWS. +func (rwsc *BoundedReadWriteSeekCloser) Read(buffer []byte) (readCount int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + readCount, err = rwsc.BoundedReadWriteSeeker.Read(buffer) + if err != nil { + if err == io.EOF { + return 0, err + } + + log.Panic(err) + } + + return readCount, nil +} + +// Write forwards calls to the inner RWS. +func (rwsc *BoundedReadWriteSeekCloser) Write(buffer []byte) (writtenCount int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + writtenCount, err = rwsc.BoundedReadWriteSeeker.Write(buffer) + log.PanicIf(err) + + return writtenCount, nil +} + +// Close forwards calls to the inner RWS. +func (rwsc *BoundedReadWriteSeekCloser) Close() (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + err = rwsc.Closer.Close() + log.PanicIf(err) + + return nil +} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseeker.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseeker.go new file mode 100644 index 000000000..d29657b05 --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseeker.go @@ -0,0 +1,156 @@ +package rifs + +import ( + "errors" + "io" + "os" + + "github.com/dsoprea/go-logging" +) + +var ( + // ErrSeekBeyondBound is returned when a seek is requested beyond the + // statically-given file-size. No writes or seeks beyond boundaries are + // supported with a statically-given file size. + ErrSeekBeyondBound = errors.New("seek beyond boundary") +) + +// BoundedReadWriteSeeker is a thin filter that ensures that no seeks can be done +// to offsets smaller than the one we were given. 
This supports libraries that
+// might be expecting to read from the front of the stream while actually
+// being handed data that sits in the middle of a larger stream.
+type BoundedReadWriteSeeker struct {
+	io.ReadWriteSeeker
+
+	currentOffset int64
+	minimumOffset int64
+
+	staticFileSize int64
+}
+
+// NewBoundedReadWriteSeeker returns a new BoundedReadWriteSeeker instance.
+func NewBoundedReadWriteSeeker(rws io.ReadWriteSeeker, minimumOffset int64, staticFileSize int64) (brws *BoundedReadWriteSeeker, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	if minimumOffset < 0 {
+		log.Panicf("BoundedReadWriteSeeker minimum offset must be zero or larger: (%d)", minimumOffset)
+	}
+
+	// We'll always start at a relative offset of zero.
+	_, err = rws.Seek(minimumOffset, os.SEEK_SET)
+	log.PanicIf(err)
+
+	brws = &BoundedReadWriteSeeker{
+		ReadWriteSeeker: rws,
+
+		currentOffset: 0,
+		minimumOffset: minimumOffset,
+
+		staticFileSize: staticFileSize,
+	}
+
+	return brws, nil
+}
+
+// Seek moves the offset to the given offset. Prevents offset from ever being
+// moved left of `brws.minimumOffset`.
+func (brws *BoundedReadWriteSeeker) Seek(offset int64, whence int) (updatedOffset int64, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	fileSize := brws.staticFileSize
+
+	// If we weren't given a static file-size, look it up whenever it is needed.
+	if whence == os.SEEK_END && fileSize == 0 {
+		realFileSizeRaw, err := brws.ReadWriteSeeker.Seek(0, os.SEEK_END)
+		log.PanicIf(err)
+
+		fileSize = realFileSizeRaw - brws.minimumOffset
+	}
+
+	updatedOffset, err = CalculateSeek(brws.currentOffset, offset, whence, fileSize)
+	log.PanicIf(err)
+
+	if brws.staticFileSize != 0 && updatedOffset > brws.staticFileSize {
+		//updatedOffset = int64(brws.staticFileSize)
+
+		// NOTE(dustin): Presumably, this will only be disruptive to writes
+		// that are beyond the boundaries, which, if we're being used at all,
+		// should already account for the boundary and prevent this error
+		// from ever happening. So, time will tell how disruptive this is.
+		return 0, ErrSeekBeyondBound
+	}
+
+	if updatedOffset != brws.currentOffset {
+		updatedRealOffset := updatedOffset + brws.minimumOffset
+
+		_, err = brws.ReadWriteSeeker.Seek(updatedRealOffset, os.SEEK_SET)
+		log.PanicIf(err)
+
+		brws.currentOffset = updatedOffset
+	}
+
+	return updatedOffset, nil
+}
+
+// Read forwards reads to the inner RWS.
+func (brws *BoundedReadWriteSeeker) Read(buffer []byte) (readCount int, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	if brws.staticFileSize != 0 {
+		availableCount := brws.staticFileSize - brws.currentOffset
+		if availableCount == 0 {
+			return 0, io.EOF
+		}
+
+		if int64(len(buffer)) > availableCount {
+			buffer = buffer[:availableCount]
+		}
+	}
+
+	readCount, err = brws.ReadWriteSeeker.Read(buffer)
+	brws.currentOffset += int64(readCount)
+
+	if err != nil {
+		if err == io.EOF {
+			return 0, err
+		}
+
+		log.Panic(err)
+	}
+
+	return readCount, nil
+}
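
A rough usage sketch of the bounded view, under the same `rifs` import
assumption as the rest of this module: offsets are re-based so the minimum
offset becomes position zero, and a zero static file-size means the real size
is measured on demand.

```go
package main

import (
	"fmt"

	rifs "github.com/dsoprea/go-utility/v2/filesystem"
)

func main() {
	sb := rifs.NewSeekableBufferWithBytes([]byte("skip:rest of the stream"))

	// Offset 0 of the bounded view maps to offset 5 of the underlying
	// buffer; everything left of offset 5 becomes invisible.
	brws, err := rifs.NewBoundedReadWriteSeeker(sb, 5, 0)
	if err != nil {
		panic(err)
	}

	buf := make([]byte, 4)
	if _, err := brws.Read(buf); err != nil {
		panic(err)
	}

	fmt.Printf("%s\n", buf) // "rest"
}
```

+
+// Write forwards writes to the inner RWS.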
+func (brws *BoundedReadWriteSeeker) Write(buffer []byte) (writtenCount int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if brws.staticFileSize != 0 { + log.Panicf("writes can not be performed if a static file-size was given") + } + + writtenCount, err = brws.ReadWriteSeeker.Write(buffer) + brws.currentOffset += int64(writtenCount) + + log.PanicIf(err) + + return writtenCount, nil +} + +// MinimumOffset returns the configured minimum-offset. +func (brws *BoundedReadWriteSeeker) MinimumOffset() int64 { + return brws.minimumOffset +} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/calculate_seek.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/calculate_seek.go new file mode 100644 index 000000000..cd59d727c --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/calculate_seek.go @@ -0,0 +1,52 @@ +package rifs + +import ( + "io" + "os" + + "github.com/dsoprea/go-logging" +) + +// SeekType is a convenience type to associate the different seek-types with +// printable descriptions. +type SeekType int + +// String returns a descriptive string. +func (n SeekType) String() string { + if n == io.SeekCurrent { + return "SEEK-CURRENT" + } else if n == io.SeekEnd { + return "SEEK-END" + } else if n == io.SeekStart { + return "SEEK-START" + } + + log.Panicf("unknown seek-type: (%d)", n) + return "" +} + +// CalculateSeek calculates an offset in a file-stream given the parameters. +func CalculateSeek(currentOffset int64, delta int64, whence int, fileSize int64) (finalOffset int64, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + finalOffset = 0 + } + }() + + if whence == os.SEEK_SET { + finalOffset = delta + } else if whence == os.SEEK_CUR { + finalOffset = currentOffset + delta + } else if whence == os.SEEK_END { + finalOffset = fileSize + delta + } else { + log.Panicf("whence not valid: (%d)", whence) + } + + if finalOffset < 0 { + finalOffset = 0 + } + + return finalOffset, nil +} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/common.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/common.go new file mode 100644 index 000000000..256333d40 --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/common.go @@ -0,0 +1,15 @@ +package rifs + +import ( + "os" + "path" +) + +var ( + appPath string +) + +func init() { + goPath := os.Getenv("GOPATH") + appPath = path.Join(goPath, "src", "github.com", "dsoprea", "go-utility", "filesystem") +} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/copy_bytes_between_positions.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/copy_bytes_between_positions.go new file mode 100644 index 000000000..89ee9a92c --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/copy_bytes_between_positions.go @@ -0,0 +1,40 @@ +package rifs + +import ( + "io" + "os" + + "github.com/dsoprea/go-logging" +) + +// CopyBytesBetweenPositions will copy bytes from one position in the given RWS +// to an earlier position in the same RWS. 
+func CopyBytesBetweenPositions(rws io.ReadWriteSeeker, fromPosition, toPosition int64, count int) (n int, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	if fromPosition <= toPosition {
+		log.Panicf("from position (%d) must be larger than to position (%d)", fromPosition, toPosition)
+	}
+
+	br, err := NewBouncebackReader(rws)
+	log.PanicIf(err)
+
+	_, err = br.Seek(fromPosition, os.SEEK_SET)
+	log.PanicIf(err)
+
+	bw, err := NewBouncebackWriter(rws)
+	log.PanicIf(err)
+
+	_, err = bw.Seek(toPosition, os.SEEK_SET)
+	log.PanicIf(err)
+
+	written, err := io.CopyN(bw, br, int64(count))
+	log.PanicIf(err)
+
+	n = int(written)
+	return n, nil
+}
diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/does_exist.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/does_exist.go
new file mode 100644
index 000000000..f5e6cd20a
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/does_exist.go
@@ -0,0 +1,19 @@
+package rifs
+
+import (
+	"os"
+)
+
+// DoesExist returns true if we can open the given file/path without error. We
+// can't simply use `os.IsNotExist()` because we'll get a different error when
+// the parent directory doesn't exist, and really the only important thing is if
+// it exists *and* it's readable.
+func DoesExist(filepath string) bool {
+	f, err := os.Open(filepath)
+	if err != nil {
+		return false
+	}
+
+	f.Close()
+	return true
+}
diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/graceful_copy.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/graceful_copy.go
new file mode 100644
index 000000000..8705e5fe0
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/graceful_copy.go
@@ -0,0 +1,54 @@
+package rifs
+
+import (
+	"fmt"
+	"io"
+)
+
+const (
+	defaultCopyBufferSize = 1024 * 1024
+)
+
+// GracefulCopy will copy while enduring lesser, normal issues.
+//
+// - We'll ignore EOF if the read byte-count is more than zero. Only an EOF when
+// zero bytes were read will terminate the loop.
+//
+// - Ignore short-writes. If fewer bytes were written than the bytes that were
+// given, we'll keep trying until done.
+func GracefulCopy(w io.Writer, r io.Reader, buffer []byte) (copyCount int, err error) {
+	if buffer == nil {
+		buffer = make([]byte, defaultCopyBufferSize)
+	}
+
+	for {
+		readCount, err := r.Read(buffer)
+		if err != nil {
+			if err != io.EOF {
+				err = fmt.Errorf("read error: %s", err.Error())
+				return 0, err
+			}
+
+			// Only break on EOF if no bytes were actually read.
+			if readCount == 0 {
+				break
+			}
+		}
+
+		writeBuffer := buffer[:readCount]
+
+		for len(writeBuffer) > 0 {
+			writtenCount, err := w.Write(writeBuffer)
+			if err != nil {
+				err = fmt.Errorf("write error: %s", err.Error())
+				return 0, err
+			}
+
+			writeBuffer = writeBuffer[writtenCount:]
+		}
+
+		copyCount += readCount
+	}
+
+	return copyCount, nil
+}
diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/list_files.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/list_files.go
new file mode 100644
index 000000000..bcdbd67cb
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/list_files.go
@@ -0,0 +1,143 @@
+package rifs
+
+import (
+	"io"
+	"os"
+	"path"
+
+	"github.com/dsoprea/go-logging"
+)
+
+// FileListFilterPredicate is the callback predicate used for filtering.
+type FileListFilterPredicate func(parent string, child os.FileInfo) (hit bool, err error)
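
A short sketch of the `GracefulCopy` helper above, again assuming only this
module's own `rifs` package; passing a nil buffer lets the helper allocate its
own one-megabyte scratch buffer.

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	rifs "github.com/dsoprea/go-utility/v2/filesystem"
)

func main() {
	r := strings.NewReader("some payload")
	w := new(bytes.Buffer)

	// Copy until a zero-byte read with EOF; short writes are retried.
	n, err := rifs.GracefulCopy(w, r, nil)
	if err != nil {
		panic(err)
	}

	fmt.Println(n, w.String()) // 12 some payload
}
```

+
+// VisitedFile is one visited file.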
+type VisitedFile struct {
+	Filepath string
+	Info     os.FileInfo
+	Index    int
+}
+
+// ListFiles feeds a continuous list of files from a recursive folder scan. An
+// optional predicate can be provided in order to filter. When done, the
+// `filesC` channel is closed. If there's an error, the `errC` channel will
+// receive it.
+func ListFiles(rootPath string, cb FileListFilterPredicate) (filesC chan VisitedFile, count int, errC chan error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err := log.Wrap(state.(error))
+			log.Panic(err)
+		}
+	}()
+
+	// Make sure the path exists.
+
+	f, err := os.Open(rootPath)
+	log.PanicIf(err)
+
+	f.Close()
+
+	// Do our thing.
+
+	filesC = make(chan VisitedFile, 100)
+	errC = make(chan error, 1)
+	index := 0
+
+	go func() {
+		defer func() {
+			if state := recover(); state != nil {
+				err := log.Wrap(state.(error))
+				errC <- err
+			}
+		}()
+
+		queue := []string{rootPath}
+		for len(queue) > 0 {
+			// Pop the next folder to process off the queue.
+			var thisPath string
+			thisPath, queue = queue[0], queue[1:]
+
+			// Skip path if a symlink.
+
+			fi, err := os.Lstat(thisPath)
+			log.PanicIf(err)
+
+			if (fi.Mode() & os.ModeSymlink) > 0 {
+				continue
+			}
+
+			// Read information.
+
+			folderF, err := os.Open(thisPath)
+			if err != nil {
+				errC <- log.Wrap(err)
+				return
+			}
+
+			// Iterate through children.
+
+			for {
+				children, err := folderF.Readdir(1000)
+				if err == io.EOF {
+					break
+				} else if err != nil {
+					errC <- log.Wrap(err)
+					return
+				}
+
+				for _, child := range children {
+					filepath := path.Join(thisPath, child.Name())
+
+					// Skip if a file symlink.
+
+					fi, err := os.Lstat(filepath)
+					log.PanicIf(err)
+
+					if (fi.Mode() & os.ModeSymlink) > 0 {
+						continue
+					}
+
+					// If a predicate was given, determine if this child will be
+					// left behind.
+					if cb != nil {
+						hit, err := cb(thisPath, child)
+
+						if err != nil {
+							errC <- log.Wrap(err)
+							return
+						}
+
+						if hit == false {
+							continue
+						}
+					}
+
+					index++
+
+					// Push file to channel.
+
+					vf := VisitedFile{
+						Filepath: filepath,
+						Info:     child,
+						Index:    index,
+					}
+
+					filesC <- vf
+
+					// If a folder, queue for later processing.
+
+					if child.IsDir() == true {
+						queue = append(queue, filepath)
+					}
+				}
+			}
+
+			folderF.Close()
+		}
+
+		close(filesC)
+		close(errC)
+	}()
+
+	return filesC, index, errC
+}
diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/progress_wrapper.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/progress_wrapper.go
new file mode 100644
index 000000000..0a064c53d
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/progress_wrapper.go
@@ -0,0 +1,93 @@
+package rifs
+
+import (
+	"io"
+	"time"
+
+	"github.com/dsoprea/go-logging"
+)
+
+// ProgressFunc receives progress updates.
+type ProgressFunc func(n int, duration time.Duration, isEof bool) error
+
+// WriteProgressWrapper wraps a writer and calls a callback after each write
+// with count and duration info.
+type WriteProgressWrapper struct {
+	w          io.Writer
+	progressCb ProgressFunc
+}
+
+// NewWriteProgressWrapper returns a new WPW instance.
+func NewWriteProgressWrapper(w io.Writer, progressCb ProgressFunc) io.Writer {
+	return &WriteProgressWrapper{
+		w:          w,
+		progressCb: progressCb,
+	}
+}
+
+// Write does a write and calls the callback.
+func (wpw *WriteProgressWrapper) Write(buffer []byte) (n int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + startAt := time.Now() + + n, err = wpw.w.Write(buffer) + log.PanicIf(err) + + duration := time.Since(startAt) + + err = wpw.progressCb(n, duration, false) + log.PanicIf(err) + + return n, nil +} + +// ReadProgressWrapper wraps a reader and calls a callback after each read with +// count and duration info. +type ReadProgressWrapper struct { + r io.Reader + progressCb ProgressFunc +} + +// NewReadProgressWrapper returns a new RPW instance. +func NewReadProgressWrapper(r io.Reader, progressCb ProgressFunc) io.Reader { + return &ReadProgressWrapper{ + r: r, + progressCb: progressCb, + } +} + +// Read reads data and calls the callback. +func (rpw *ReadProgressWrapper) Read(buffer []byte) (n int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + startAt := time.Now() + + n, err = rpw.r.Read(buffer) + + duration := time.Since(startAt) + + if err != nil { + if err == io.EOF { + errInner := rpw.progressCb(n, duration, true) + log.PanicIf(errInner) + + return n, err + } + + log.Panic(err) + } + + err = rpw.progressCb(n, duration, false) + log.PanicIf(err) + + return n, nil +} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/read_counter.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/read_counter.go new file mode 100644 index 000000000..d878ca4e6 --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/read_counter.go @@ -0,0 +1,36 @@ +package rifs + +import ( + "io" +) + +// ReadCounter proxies read requests and maintains a counter of bytes read. +type ReadCounter struct { + r io.Reader + counter int +} + +// NewReadCounter returns a new `ReadCounter` struct wrapping a `Reader`. +func NewReadCounter(r io.Reader) *ReadCounter { + return &ReadCounter{ + r: r, + } +} + +// Count returns the total number of bytes read. +func (rc *ReadCounter) Count() int { + return rc.counter +} + +// Reset resets the counter to zero. +func (rc *ReadCounter) Reset() { + rc.counter = 0 +} + +// Read forwards a read to the underlying `Reader` while bumping the counter. +func (rc *ReadCounter) Read(b []byte) (n int, err error) { + n, err = rc.r.Read(b) + rc.counter += n + + return n, err +} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/readseeker_to_readerat.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/readseeker_to_readerat.go new file mode 100644 index 000000000..3f3ec44dd --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/readseeker_to_readerat.go @@ -0,0 +1,63 @@ +package rifs + +import ( + "io" + + "github.com/dsoprea/go-logging" +) + +// ReadSeekerToReaderAt is a wrapper that allows a ReadSeeker to masquerade as a +// ReaderAt. +type ReadSeekerToReaderAt struct { + rs io.ReadSeeker +} + +// NewReadSeekerToReaderAt returns a new ReadSeekerToReaderAt instance. +func NewReadSeekerToReaderAt(rs io.ReadSeeker) *ReadSeekerToReaderAt { + return &ReadSeekerToReaderAt{ + rs: rs, + } +} + +// ReadAt is a wrapper that satisfies the ReaderAt interface. +// +// Note that a requirement of ReadAt is that it doesn't have an effect on the +// offset in the underlying resource as well as that concurrent calls can be +// made to it. Since we're capturing the current offset in the underlying +// resource and then seeking back to it before returning, it is the +// responsibility of the caller to serialize (i.e. 
use a mutex with) these
+// requests in order to eliminate race-conditions in the parallel-usage
+// scenario.
+//
+// Note also that, since ReadAt() is going to be called on a particular
+// instance, that instance is going to internalize a file resource, that file-
+// resource is provided by the OS, and [most] OSs are only going to support
+// one file-position per resource, locking is already going to be a necessary
+// internal semantic of a ReaderAt implementation.
+func (rstra *ReadSeekerToReaderAt) ReadAt(p []byte, offset int64) (n int, err error) {
+	defer func() {
+		if state := recover(); state != nil {
+			err = log.Wrap(state.(error))
+		}
+	}()
+
+	originalOffset, err := rstra.rs.Seek(0, io.SeekCurrent)
+	log.PanicIf(err)
+
+	defer func() {
+		_, err := rstra.rs.Seek(originalOffset, io.SeekStart)
+		log.PanicIf(err)
+	}()
+
+	_, err = rstra.rs.Seek(offset, io.SeekStart)
+	log.PanicIf(err)
+
+	// Note that all errors will be wrapped, here. The usage of this method is
+	// such that typically no specific errors would be expected as part of
+	// normal operation (in which case we'd check for those first and return
+	// them directly).
+	n, err = io.ReadFull(rstra.rs, p)
+	log.PanicIf(err)
+
+	return n, nil
+}
diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/readwriteseekcloser.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/readwriteseekcloser.go
new file mode 100644
index 000000000..c583a8024
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/readwriteseekcloser.go
@@ -0,0 +1,29 @@
+package rifs
+
+import (
+	"io"
+)
+
+// ReadWriteSeekCloser satisfies `io.ReadWriteSeeker` and `io.Closer`
+// interfaces.
+type ReadWriteSeekCloser interface {
+	io.ReadWriteSeeker
+	io.Closer
+}
+
+type readWriteSeekNoopCloser struct {
+	io.ReadWriteSeeker
+}
+
+// ReadWriteSeekNoopCloser wraps a `io.ReadWriteSeeker` with a no-op Close()
+// call.
+func ReadWriteSeekNoopCloser(rws io.ReadWriteSeeker) ReadWriteSeekCloser {
+	return readWriteSeekNoopCloser{
+		ReadWriteSeeker: rws,
+	}
+}
+
+// Close does nothing but allows the RWS to satisfy `io.Closer`.
+func (readWriteSeekNoopCloser) Close() (err error) {
+	return nil
+}
diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/seekable_buffer.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/seekable_buffer.go
new file mode 100644
index 000000000..5d41bb5df
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/seekable_buffer.go
@@ -0,0 +1,146 @@
+package rifs
+
+import (
+	"io"
+	"os"
+
+	"github.com/dsoprea/go-logging"
+)
+
+// SeekableBuffer is a simple memory structure that satisfies
+// `io.ReadWriteSeeker`.
+type SeekableBuffer struct {
+	data     []byte
+	position int64
+}
+
+// NewSeekableBuffer is a factory that returns a `*SeekableBuffer`.
+func NewSeekableBuffer() *SeekableBuffer {
+	data := make([]byte, 0)
+
+	return &SeekableBuffer{
+		data: data,
+	}
+}
+
+// NewSeekableBufferWithBytes is a factory that returns a `*SeekableBuffer`.
+func NewSeekableBufferWithBytes(originalData []byte) *SeekableBuffer {
+	data := make([]byte, len(originalData))
+	copy(data, originalData)
+
+	return &SeekableBuffer{
+		data: data,
+	}
+}
+
+func len64(data []byte) int64 {
+	return int64(len(data))
+}
+
+// Bytes returns the underlying slice.
+func (sb *SeekableBuffer) Bytes() []byte {
+	return sb.data
+}
+
+// Len returns the number of bytes currently stored.
+func (sb *SeekableBuffer) Len() int {
+	return len(sb.data)
+}
+
+// Write does a standard write to the internal slice.
+func (sb *SeekableBuffer) Write(p []byte) (n int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // The current position we're already at is past the end of the data we + // actually have. Extend our buffer up to our current position. + if sb.position > len64(sb.data) { + extra := make([]byte, sb.position-len64(sb.data)) + sb.data = append(sb.data, extra...) + } + + positionFromEnd := len64(sb.data) - sb.position + tailCount := positionFromEnd - len64(p) + + var tailBytes []byte + if tailCount > 0 { + tailBytes = sb.data[len64(sb.data)-tailCount:] + sb.data = append(sb.data[:sb.position], p...) + } else { + sb.data = append(sb.data[:sb.position], p...) + } + + if tailBytes != nil { + sb.data = append(sb.data, tailBytes...) + } + + dataSize := len64(p) + sb.position += dataSize + + return int(dataSize), nil +} + +// Read does a standard read against the internal slice. +func (sb *SeekableBuffer) Read(p []byte) (n int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if sb.position >= len64(sb.data) { + return 0, io.EOF + } + + n = copy(p, sb.data[sb.position:]) + sb.position += int64(n) + + return n, nil +} + +// Truncate either chops or extends the internal buffer. +func (sb *SeekableBuffer) Truncate(size int64) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + sizeInt := int(size) + if sizeInt < len(sb.data)-1 { + sb.data = sb.data[:sizeInt] + } else { + new := make([]byte, sizeInt-len(sb.data)) + sb.data = append(sb.data, new...) + } + + return nil +} + +// Seek does a standard seek on the internal slice. +func (sb *SeekableBuffer) Seek(offset int64, whence int) (n int64, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if whence == os.SEEK_SET { + sb.position = offset + } else if whence == os.SEEK_END { + sb.position = len64(sb.data) + offset + } else if whence == os.SEEK_CUR { + sb.position += offset + } else { + log.Panicf("seek whence is not valid: (%d)", whence) + } + + if sb.position < 0 { + sb.position = 0 + } + + return sb.position, nil +} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/simplefileinfo.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/simplefileinfo.go new file mode 100644 index 000000000..a227b0b00 --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/simplefileinfo.go @@ -0,0 +1,69 @@ +package rifs + +import ( + "os" + "time" +) + +// SimpleFileInfo is a simple `os.FileInfo` implementation useful for testing +// with the bare minimum. +type SimpleFileInfo struct { + filename string + isDir bool + size int64 + mode os.FileMode + modTime time.Time +} + +// NewSimpleFileInfoWithFile returns a new file-specific SimpleFileInfo. +func NewSimpleFileInfoWithFile(filename string, size int64, mode os.FileMode, modTime time.Time) *SimpleFileInfo { + return &SimpleFileInfo{ + filename: filename, + isDir: false, + size: size, + mode: mode, + modTime: modTime, + } +} + +// NewSimpleFileInfoWithDirectory returns a new directory-specific +// SimpleFileInfo. +func NewSimpleFileInfoWithDirectory(filename string, modTime time.Time) *SimpleFileInfo { + return &SimpleFileInfo{ + filename: filename, + isDir: true, + mode: os.ModeDir, + modTime: modTime, + } +} + +// Name returns the base name of the file. 
+func (sfi *SimpleFileInfo) Name() string {
+	return sfi.filename
+}
+
+// Size returns the length in bytes for regular files; system-dependent for
+// others.
+func (sfi *SimpleFileInfo) Size() int64 {
+	return sfi.size
+}
+
+// Mode returns the file mode bits.
+func (sfi *SimpleFileInfo) Mode() os.FileMode {
+	return sfi.mode
+}
+
+// ModTime returns the modification time.
+func (sfi *SimpleFileInfo) ModTime() time.Time {
+	return sfi.modTime
+}
+
+// IsDir returns true if a directory.
+func (sfi *SimpleFileInfo) IsDir() bool {
+	return sfi.isDir
+}
+
+// Sys returns internal state.
+func (sfi *SimpleFileInfo) Sys() interface{} {
+	return nil
+}
diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/utility.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/utility.go
new file mode 100644
index 000000000..4b33b41a9
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/utility.go
@@ -0,0 +1,17 @@
+package rifs
+
+import (
+	"io"
+	"os"
+
+	"github.com/dsoprea/go-logging"
+)
+
+// GetOffset returns the current offset of the Seeker and just panics if unable
+// to find it.
+func GetOffset(s io.Seeker) int64 {
+	offsetRaw, err := s.Seek(0, os.SEEK_CUR)
+	log.PanicIf(err)
+
+	return offsetRaw
+}
diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/write_counter.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/write_counter.go
new file mode 100644
index 000000000..dc39901d5
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-utility/v2/filesystem/write_counter.go
@@ -0,0 +1,36 @@
+package rifs
+
+import (
+	"io"
+)
+
+// WriteCounter proxies write requests and maintains a counter of bytes written.
+type WriteCounter struct {
+	w       io.Writer
+	counter int
+}
+
+// NewWriteCounter returns a new `WriteCounter` struct wrapping a `Writer`.
+func NewWriteCounter(w io.Writer) *WriteCounter {
+	return &WriteCounter{
+		w: w,
+	}
+}
+
+// Count returns the total number of bytes written.
+func (wc *WriteCounter) Count() int {
+	return wc.counter
+}
+
+// Reset resets the counter to zero.
+func (wc *WriteCounter) Reset() {
+	wc.counter = 0
+}
+
+// Write forwards a write to the underlying `Writer` while bumping the counter.
+func (wc *WriteCounter) Write(b []byte) (n int, err error) {
+	n, err = wc.w.Write(b)
+	wc.counter += n
+
+	return n, err
+}
diff --git a/vendor/github.com/dsoprea/go-utility/v2/image/README.md b/vendor/github.com/dsoprea/go-utility/v2/image/README.md
new file mode 100644
index 000000000..1509ff666
--- /dev/null
+++ b/vendor/github.com/dsoprea/go-utility/v2/image/README.md
@@ -0,0 +1,9 @@
+[![GoDoc](https://godoc.org/github.com/dsoprea/go-utility/image?status.svg)](https://godoc.org/github.com/dsoprea/go-utility/image)
+[![Build Status](https://travis-ci.org/dsoprea/go-utility.svg?branch=master)](https://travis-ci.org/dsoprea/go-utility)
+[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-utility/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-utility?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-utility)](https://goreportcard.com/report/github.com/dsoprea/go-utility)
+
+# media_parser_type
+
+Common image-parsing interfaces. Used by the JPEG, PNG, and HEIC parsers that
+back go-exif-knife.
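
Rounding out the filesystem helpers above, a brief sketch of the
byte-counting wrappers, again assuming only this module's own `rifs` package:

```go
package main

import (
	"bytes"
	"fmt"

	rifs "github.com/dsoprea/go-utility/v2/filesystem"
)

func main() {
	b := new(bytes.Buffer)

	// Count every byte that passes through to the underlying writer.
	wc := rifs.NewWriteCounter(b)

	fmt.Fprintf(wc, "%d bytes and counting", 21)

	fmt.Println(wc.Count()) // 21
}
```

`ReadCounter` mirrors this for the read side.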
diff --git a/vendor/github.com/dsoprea/go-utility/v2/image/media_parser_type.go b/vendor/github.com/dsoprea/go-utility/v2/image/media_parser_type.go new file mode 100644 index 000000000..8776a1fdd --- /dev/null +++ b/vendor/github.com/dsoprea/go-utility/v2/image/media_parser_type.go @@ -0,0 +1,34 @@ +package riimage + +import ( + "io" + + "github.com/dsoprea/go-exif/v3" +) + +// MediaContext is an accessor that knows how to extract specific metadata from +// the media. +type MediaContext interface { + // Exif returns the EXIF's root IFD. + Exif() (rootIfd *exif.Ifd, data []byte, err error) +} + +// MediaParser prescribes a specific structure for the parser types that are +// imported from other projects. We don't use it directly, but we use this to +// impose structure. +type MediaParser interface { + // Parse parses a stream using an `io.ReadSeeker`. `mc` should *actually* be + // a `ExifContext`. + Parse(r io.ReadSeeker, size int) (mc MediaContext, err error) + + // ParseFile parses a stream using a file. `mc` should *actually* be a + // `ExifContext`. + ParseFile(filepath string) (mc MediaContext, err error) + + // ParseBytes parses a stream direct from bytes. `mc` should *actually* be + // a `ExifContext`. + ParseBytes(data []byte) (mc MediaContext, err error) + + // Parses the data to determine if it's a compatible format. + LooksLikeFormat(data []byte) bool +} diff --git a/vendor/github.com/go-errors/errors/.travis.yml b/vendor/github.com/go-errors/errors/.travis.yml new file mode 100644 index 000000000..fddfc4e3a --- /dev/null +++ b/vendor/github.com/go-errors/errors/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - "1.8.x" + - "1.10.x" + - "1.13.x" + - "1.14.x" diff --git a/vendor/github.com/go-errors/errors/LICENSE.MIT b/vendor/github.com/go-errors/errors/LICENSE.MIT new file mode 100644 index 000000000..c9a5b2eeb --- /dev/null +++ b/vendor/github.com/go-errors/errors/LICENSE.MIT @@ -0,0 +1,7 @@ +Copyright (c) 2015 Conrad Irwin + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/go-errors/errors/README.md b/vendor/github.com/go-errors/errors/README.md new file mode 100644 index 000000000..a13004c33 --- /dev/null +++ b/vendor/github.com/go-errors/errors/README.md @@ -0,0 +1,69 @@ +go-errors/errors +================ + +[![Build Status](https://travis-ci.org/go-errors/errors.svg?branch=master)](https://travis-ci.org/go-errors/errors) + +Package errors adds stacktrace support to errors in go. 
+ +This is particularly useful when you want to understand the state of execution +when an error was returned unexpectedly. + +It provides the type \*Error which implements the standard golang error +interface, so you can use this library interchangably with code that is +expecting a normal error return. + +Usage +----- + +Full documentation is available on +[godoc](https://godoc.org/github.com/go-errors/errors), but here's a simple +example: + +```go +package crashy + +import "github.com/go-errors/errors" + +var Crashed = errors.Errorf("oh dear") + +func Crash() error { + return errors.New(Crashed) +} +``` + +This can be called as follows: + +```go +package main + +import ( + "crashy" + "fmt" + "github.com/go-errors/errors" +) + +func main() { + err := crashy.Crash() + if err != nil { + if errors.Is(err, crashy.Crashed) { + fmt.Println(err.(*errors.Error).ErrorStack()) + } else { + panic(err) + } + } +} +``` + +Meta-fu +------- + +This package was original written to allow reporting to +[Bugsnag](https://bugsnag.com/) from +[bugsnag-go](https://github.com/bugsnag/bugsnag-go), but after I found similar +packages by Facebook and Dropbox, it was moved to one canonical location so +everyone can benefit. + +This package is licensed under the MIT license, see LICENSE.MIT for details. + +## Changelog +* v1.1.0 updated to use go1.13's standard-library errors.Is method instead of == in errors.Is \ No newline at end of file diff --git a/vendor/github.com/go-errors/errors/cover.out b/vendor/github.com/go-errors/errors/cover.out new file mode 100644 index 000000000..ab18b0519 --- /dev/null +++ b/vendor/github.com/go-errors/errors/cover.out @@ -0,0 +1,89 @@ +mode: set +github.com/go-errors/errors/stackframe.go:27.51,30.25 2 1 +github.com/go-errors/errors/stackframe.go:33.2,38.8 3 1 +github.com/go-errors/errors/stackframe.go:30.25,32.3 1 0 +github.com/go-errors/errors/stackframe.go:43.47,44.31 1 1 +github.com/go-errors/errors/stackframe.go:47.2,47.48 1 1 +github.com/go-errors/errors/stackframe.go:44.31,46.3 1 1 +github.com/go-errors/errors/stackframe.go:52.42,56.16 3 1 +github.com/go-errors/errors/stackframe.go:60.2,60.60 1 1 +github.com/go-errors/errors/stackframe.go:56.16,58.3 1 0 +github.com/go-errors/errors/stackframe.go:64.55,67.16 2 1 +github.com/go-errors/errors/stackframe.go:71.2,72.61 2 1 +github.com/go-errors/errors/stackframe.go:76.2,76.66 1 1 +github.com/go-errors/errors/stackframe.go:67.16,69.3 1 0 +github.com/go-errors/errors/stackframe.go:72.61,74.3 1 0 +github.com/go-errors/errors/stackframe.go:79.56,91.63 3 1 +github.com/go-errors/errors/stackframe.go:95.2,95.53 1 1 +github.com/go-errors/errors/stackframe.go:100.2,101.18 2 1 +github.com/go-errors/errors/stackframe.go:91.63,94.3 2 1 +github.com/go-errors/errors/stackframe.go:95.53,98.3 2 1 +github.com/go-errors/errors/error.go:70.32,73.23 2 1 +github.com/go-errors/errors/error.go:80.2,85.3 3 1 +github.com/go-errors/errors/error.go:74.2,75.10 1 1 +github.com/go-errors/errors/error.go:76.2,77.28 1 1 +github.com/go-errors/errors/error.go:92.43,95.23 2 1 +github.com/go-errors/errors/error.go:104.2,109.3 3 1 +github.com/go-errors/errors/error.go:96.2,97.11 1 1 +github.com/go-errors/errors/error.go:98.2,99.10 1 1 +github.com/go-errors/errors/error.go:100.2,101.28 1 1 +github.com/go-errors/errors/error.go:115.39,117.19 1 1 +github.com/go-errors/errors/error.go:121.2,121.29 1 1 +github.com/go-errors/errors/error.go:125.2,125.43 1 1 +github.com/go-errors/errors/error.go:129.2,129.14 1 1 +github.com/go-errors/errors/error.go:117.19,119.3 1 1 
+github.com/go-errors/errors/error.go:121.29,123.3 1 1 +github.com/go-errors/errors/error.go:125.43,127.3 1 1 +github.com/go-errors/errors/error.go:135.53,137.2 1 1 +github.com/go-errors/errors/error.go:140.34,142.2 1 1 +github.com/go-errors/errors/error.go:146.34,149.42 2 1 +github.com/go-errors/errors/error.go:153.2,153.20 1 1 +github.com/go-errors/errors/error.go:149.42,151.3 1 1 +github.com/go-errors/errors/error.go:158.39,160.2 1 1 +github.com/go-errors/errors/error.go:164.46,165.23 1 1 +github.com/go-errors/errors/error.go:173.2,173.19 1 1 +github.com/go-errors/errors/error.go:165.23,168.32 2 1 +github.com/go-errors/errors/error.go:168.32,170.4 1 1 +github.com/go-errors/errors/error.go:177.37,178.42 1 1 +github.com/go-errors/errors/error.go:181.2,181.41 1 1 +github.com/go-errors/errors/error.go:178.42,180.3 1 1 +github.com/go-errors/errors/parse_panic.go:10.39,12.2 1 1 +github.com/go-errors/errors/parse_panic.go:16.46,24.34 5 1 +github.com/go-errors/errors/parse_panic.go:70.2,70.43 1 1 +github.com/go-errors/errors/parse_panic.go:73.2,73.55 1 0 +github.com/go-errors/errors/parse_panic.go:24.34,27.23 2 1 +github.com/go-errors/errors/parse_panic.go:27.23,28.42 1 1 +github.com/go-errors/errors/parse_panic.go:28.42,31.5 2 1 +github.com/go-errors/errors/parse_panic.go:31.6,33.5 1 0 +github.com/go-errors/errors/parse_panic.go:35.5,35.29 1 1 +github.com/go-errors/errors/parse_panic.go:35.29,36.86 1 1 +github.com/go-errors/errors/parse_panic.go:36.86,38.5 1 1 +github.com/go-errors/errors/parse_panic.go:40.5,40.32 1 1 +github.com/go-errors/errors/parse_panic.go:40.32,41.18 1 1 +github.com/go-errors/errors/parse_panic.go:45.4,46.46 2 1 +github.com/go-errors/errors/parse_panic.go:51.4,53.23 2 1 +github.com/go-errors/errors/parse_panic.go:57.4,58.18 2 1 +github.com/go-errors/errors/parse_panic.go:62.4,63.17 2 1 +github.com/go-errors/errors/parse_panic.go:41.18,43.10 2 1 +github.com/go-errors/errors/parse_panic.go:46.46,49.5 2 1 +github.com/go-errors/errors/parse_panic.go:53.23,55.5 1 0 +github.com/go-errors/errors/parse_panic.go:58.18,60.5 1 0 +github.com/go-errors/errors/parse_panic.go:63.17,65.10 2 1 +github.com/go-errors/errors/parse_panic.go:70.43,72.3 1 1 +github.com/go-errors/errors/parse_panic.go:80.85,82.29 2 1 +github.com/go-errors/errors/parse_panic.go:85.2,85.15 1 1 +github.com/go-errors/errors/parse_panic.go:88.2,90.63 2 1 +github.com/go-errors/errors/parse_panic.go:94.2,94.53 1 1 +github.com/go-errors/errors/parse_panic.go:99.2,101.36 2 1 +github.com/go-errors/errors/parse_panic.go:105.2,106.15 2 1 +github.com/go-errors/errors/parse_panic.go:109.2,112.49 3 1 +github.com/go-errors/errors/parse_panic.go:116.2,117.16 2 1 +github.com/go-errors/errors/parse_panic.go:121.2,126.8 1 1 +github.com/go-errors/errors/parse_panic.go:82.29,84.3 1 0 +github.com/go-errors/errors/parse_panic.go:85.15,87.3 1 1 +github.com/go-errors/errors/parse_panic.go:90.63,93.3 2 1 +github.com/go-errors/errors/parse_panic.go:94.53,97.3 2 1 +github.com/go-errors/errors/parse_panic.go:101.36,103.3 1 0 +github.com/go-errors/errors/parse_panic.go:106.15,108.3 1 0 +github.com/go-errors/errors/parse_panic.go:112.49,114.3 1 1 +github.com/go-errors/errors/parse_panic.go:117.16,119.3 1 0 diff --git a/vendor/github.com/go-errors/errors/error.go b/vendor/github.com/go-errors/errors/error.go new file mode 100644 index 000000000..13aa35ac8 --- /dev/null +++ b/vendor/github.com/go-errors/errors/error.go @@ -0,0 +1,205 @@ +// Package errors provides errors that have stack-traces. 
+//
+// This is particularly useful when you want to understand the
+// state of execution when an error was returned unexpectedly.
+//
+// It provides the type *Error which implements the standard
+// golang error interface, so you can use this library interchangeably
+// with code that is expecting a normal error return.
+//
+// For example:
+//
+//	package crashy
+//
+//	import "github.com/go-errors/errors"
+//
+//	var Crashed = errors.Errorf("oh dear")
+//
+//	func Crash() error {
+//		return errors.New(Crashed)
+//	}
+//
+// This can be called as follows:
+//
+//	package main
+//
+//	import (
+//		"crashy"
+//		"fmt"
+//		"github.com/go-errors/errors"
+//	)
+//
+//	func main() {
+//		err := crashy.Crash()
+//		if err != nil {
+//			if errors.Is(err, crashy.Crashed) {
+//				fmt.Println(err.(*errors.Error).ErrorStack())
+//			} else {
+//				panic(err)
+//			}
+//		}
+//	}
+//
+// This package was originally written to allow reporting to Bugsnag,
+// but after I found similar packages by Facebook and Dropbox, it
+// was moved to one canonical location so everyone can benefit.
+package errors

+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"runtime"
+)
+
+// MaxStackDepth is the maximum number of stackframes on any error.
+var MaxStackDepth = 50
+
+// Error is an error with an attached stacktrace. It can be used
+// wherever the builtin error interface is expected.
+type Error struct {
+	Err    error
+	stack  []uintptr
+	frames []StackFrame
+	prefix string
+}
+
+// New makes an Error from the given value. If that value is already an
+// error then it will be used directly; if not, it will be passed to
+// fmt.Errorf("%v"). The stacktrace will point to the line of code that
+// called New.
+func New(e interface{}) *Error {
+	var err error
+
+	switch e := e.(type) {
+	case error:
+		err = e
+	default:
+		err = fmt.Errorf("%v", e)
+	}
+
+	stack := make([]uintptr, MaxStackDepth)
+	length := runtime.Callers(2, stack[:])
+	return &Error{
+		Err:   err,
+		stack: stack[:length],
+	}
+}
+
+// Wrap makes an Error from the given value. If that value is already an
+// error then it will be used directly; if not, it will be passed to
+// fmt.Errorf("%v"). The skip parameter indicates how far up the stack
+// to start the stacktrace. 0 is from the current call, 1 from its caller, etc.
+func Wrap(e interface{}, skip int) *Error {
+	if e == nil {
+		return nil
+	}
+
+	var err error
+
+	switch e := e.(type) {
+	case *Error:
+		return e
+	case error:
+		err = e
+	default:
+		err = fmt.Errorf("%v", e)
+	}
+
+	stack := make([]uintptr, MaxStackDepth)
+	length := runtime.Callers(2+skip, stack[:])
+	return &Error{
+		Err:   err,
+		stack: stack[:length],
+	}
+}
+
+// WrapPrefix makes an Error from the given value. If that value is already an
+// error then it will be used directly; if not, it will be passed to
+// fmt.Errorf("%v"). The prefix parameter is used to add a prefix to the
+// error message when calling Error(). The skip parameter indicates how far
+// up the stack to start the stacktrace. 0 is from the current call,
+// 1 from its caller, etc.
+func WrapPrefix(e interface{}, prefix string, skip int) *Error {
+	if e == nil {
+		return nil
+	}
+
+	err := Wrap(e, 1+skip)
+
+	if err.prefix != "" {
+		prefix = fmt.Sprintf("%s: %s", prefix, err.prefix)
+	}
+
+	return &Error{
+		Err:    err.Err,
+		stack:  err.stack,
+		prefix: prefix,
+	}
+}
+
+// Errorf creates a new error with the given message. You can use it
+// as a drop-in replacement for fmt.Errorf() to provide descriptive
+// errors in return values.
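+//
+// For example (with a hypothetical doSomething helper):
+//
+//	if err := doSomething(); err != nil {
+//		return errors.Errorf("doing something: %v", err)
+//	}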
+func Errorf(format string, a ...interface{}) *Error { + return Wrap(fmt.Errorf(format, a...), 1) +} + +// Error returns the underlying error's message. +func (err *Error) Error() string { + + msg := err.Err.Error() + if err.prefix != "" { + msg = fmt.Sprintf("%s: %s", err.prefix, msg) + } + + return msg +} + +// Stack returns the callstack formatted the same way that go does +// in runtime/debug.Stack() +func (err *Error) Stack() []byte { + buf := bytes.Buffer{} + + for _, frame := range err.StackFrames() { + buf.WriteString(frame.String()) + } + + return buf.Bytes() +} + +// Callers satisfies the bugsnag ErrorWithCallerS() interface +// so that the stack can be read out. +func (err *Error) Callers() []uintptr { + return err.stack +} + +// ErrorStack returns a string that contains both the +// error message and the callstack. +func (err *Error) ErrorStack() string { + return err.TypeName() + " " + err.Error() + "\n" + string(err.Stack()) +} + +// StackFrames returns an array of frames containing information about the +// stack. +func (err *Error) StackFrames() []StackFrame { + if err.frames == nil { + err.frames = make([]StackFrame, len(err.stack)) + + for i, pc := range err.stack { + err.frames[i] = NewStackFrame(pc) + } + } + + return err.frames +} + +// TypeName returns the type this error. e.g. *errors.stringError. +func (err *Error) TypeName() string { + if _, ok := err.Err.(uncaughtPanic); ok { + return "panic" + } + return reflect.TypeOf(err.Err).String() +} diff --git a/vendor/github.com/go-errors/errors/error_1_13.go b/vendor/github.com/go-errors/errors/error_1_13.go new file mode 100644 index 000000000..a81e24eae --- /dev/null +++ b/vendor/github.com/go-errors/errors/error_1_13.go @@ -0,0 +1,26 @@ +// +build go1.13 + +package errors + +import ( + baseErrors "errors" +) + +// Is detects whether the error is equal to a given error. Errors +// are considered equal by this function if they are matched by errors.Is +// or if their contained errors are matched through errors.Is +func Is(e error, original error) bool { + if baseErrors.Is(e, original) { + return true + } + + if e, ok := e.(*Error); ok { + return Is(e.Err, original) + } + + if original, ok := original.(*Error); ok { + return Is(e, original.Err) + } + + return false +} diff --git a/vendor/github.com/go-errors/errors/error_backward.go b/vendor/github.com/go-errors/errors/error_backward.go new file mode 100644 index 000000000..d7e09a824 --- /dev/null +++ b/vendor/github.com/go-errors/errors/error_backward.go @@ -0,0 +1,22 @@ +// +build !go1.13 + +package errors + +// Is detects whether the error is equal to a given error. Errors +// are considered equal by this function if they are the same object, +// or if they both contain the same error inside an errors.Error. 
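+//
+// For example, Is(New(io.EOF), io.EOF) reports true: the *Error created by
+// New contains io.EOF.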
+func Is(e error, original error) bool { + if e == original { + return true + } + + if e, ok := e.(*Error); ok { + return Is(e.Err, original) + } + + if original, ok := original.(*Error); ok { + return Is(e, original.Err) + } + + return false +} \ No newline at end of file diff --git a/vendor/github.com/go-errors/errors/parse_panic.go b/vendor/github.com/go-errors/errors/parse_panic.go new file mode 100644 index 000000000..cc37052d7 --- /dev/null +++ b/vendor/github.com/go-errors/errors/parse_panic.go @@ -0,0 +1,127 @@ +package errors + +import ( + "strconv" + "strings" +) + +type uncaughtPanic struct{ message string } + +func (p uncaughtPanic) Error() string { + return p.message +} + +// ParsePanic allows you to get an error object from the output of a go program +// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap. +func ParsePanic(text string) (*Error, error) { + lines := strings.Split(text, "\n") + + state := "start" + + var message string + var stack []StackFrame + + for i := 0; i < len(lines); i++ { + line := lines[i] + + if state == "start" { + if strings.HasPrefix(line, "panic: ") { + message = strings.TrimPrefix(line, "panic: ") + state = "seek" + } else { + return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line) + } + + } else if state == "seek" { + if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") { + state = "parsing" + } + + } else if state == "parsing" { + if line == "" { + state = "done" + break + } + createdBy := false + if strings.HasPrefix(line, "created by ") { + line = strings.TrimPrefix(line, "created by ") + createdBy = true + } + + i++ + + if i >= len(lines) { + return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line) + } + + frame, err := parsePanicFrame(line, lines[i], createdBy) + if err != nil { + return nil, err + } + + stack = append(stack, *frame) + if createdBy { + state = "done" + break + } + } + } + + if state == "done" || state == "parsing" { + return &Error{Err: uncaughtPanic{message}, frames: stack}, nil + } + return nil, Errorf("could not parse panic: %v", text) +} + +// The lines we're passing look like this: +// +// main.(*foo).destruct(0xc208067e98) +// /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151 +func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) { + idx := strings.LastIndex(name, "(") + if idx == -1 && !createdBy { + return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name) + } + if idx != -1 { + name = name[:idx] + } + pkg := "" + + if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { + pkg += name[:lastslash] + "/" + name = name[lastslash+1:] + } + if period := strings.Index(name, "."); period >= 0 { + pkg += name[:period] + name = name[period+1:] + } + + name = strings.Replace(name, "·", ".", -1) + + if !strings.HasPrefix(line, "\t") { + return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line) + } + + idx = strings.LastIndex(line, ":") + if idx == -1 { + return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", line) + } + file := line[1:idx] + + number := line[idx+1:] + if idx = strings.Index(number, " +"); idx > -1 { + number = number[:idx] + } + + lno, err := strconv.ParseInt(number, 10, 32) + if err != nil { + return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line) + } + + return &StackFrame{ + File: file, + LineNumber: int(lno), + Package: pkg, + Name: name, + }, nil +} diff --git 
a/vendor/github.com/go-errors/errors/stackframe.go b/vendor/github.com/go-errors/errors/stackframe.go
new file mode 100644
index 000000000..f420849d2
--- /dev/null
+++ b/vendor/github.com/go-errors/errors/stackframe.go
@@ -0,0 +1,114 @@
+package errors
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+)
+
+// A StackFrame contains all the information needed to generate a line
+// in a callstack.
+type StackFrame struct {
+	// The path to the file containing this ProgramCounter
+	File string
+	// The LineNumber in that file
+	LineNumber int
+	// The Name of the function that contains this ProgramCounter
+	Name string
+	// The Package that contains this function
+	Package string
+	// The underlying ProgramCounter
+	ProgramCounter uintptr
+}
+
+// NewStackFrame populates a stack frame object from the program counter.
+func NewStackFrame(pc uintptr) (frame StackFrame) {
+	frame = StackFrame{ProgramCounter: pc}
+	if frame.Func() == nil {
+		return
+	}
+	frame.Package, frame.Name = packageAndName(frame.Func())
+
+	// pc -1 because the program counters we use are usually return addresses,
+	// and we want to show the line that corresponds to the function call
+	frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1)
+	return
+}
+
+// Func returns the function that contained this frame.
+func (frame *StackFrame) Func() *runtime.Func {
+	if frame.ProgramCounter == 0 {
+		return nil
+	}
+	return runtime.FuncForPC(frame.ProgramCounter)
+}
+
+// String returns the stackframe formatted in the same way as go does
+// in runtime/debug.Stack()
+func (frame *StackFrame) String() string {
+	str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter)
+
+	source, err := frame.SourceLine()
+	if err != nil {
+		return str
+	}
+
+	return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source)
+}
+
+// SourceLine gets the line of code (from File and Line) of the original source if possible.
+func (frame *StackFrame) SourceLine() (string, error) {
+	if frame.LineNumber <= 0 {
+		return "???", nil
+	}
+
+	file, err := os.Open(frame.File)
+	if err != nil {
+		return "", New(err)
+	}
+	defer file.Close()
+
+	scanner := bufio.NewScanner(file)
+	currentLine := 1
+	for scanner.Scan() {
+		if currentLine == frame.LineNumber {
+			return string(bytes.Trim(scanner.Bytes(), " \t")), nil
+		}
+		currentLine++
+	}
+	if err := scanner.Err(); err != nil {
+		return "", New(err)
+	}
+
+	return "???", nil
+}
+
+func packageAndName(fn *runtime.Func) (string, string) {
+	name := fn.Name()
+	pkg := ""
+
+	// The name includes the path name to the package, which is unnecessary
+	// since the file name is already included. Plus, it has center dots.
+	// That is, we see
+	//	runtime/debug.*T·ptrmethod
+	// and want
+	//	*T.ptrmethod
+	// Since the package path might contain dots (e.g. code.google.com/...),
+	// we first remove the path prefix if there is one.
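+	// For example, "github.com/bugsnag/bugsnag-go.(*Notifier).Notify" splits
+	// into package "github.com/bugsnag/bugsnag-go" and name "(*Notifier).Notify".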
+	if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 {
+		pkg += name[:lastslash] + "/"
+		name = name[lastslash+1:]
+	}
+	if period := strings.Index(name, "."); period >= 0 {
+		pkg += name[:period]
+		name = name[period+1:]
+	}
+
+	name = strings.Replace(name, "·", ".", -1)
+	return pkg, name
+}
diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE b/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE
new file mode 100644
index 000000000..890776ab7
--- /dev/null
+++ b/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 go-xmlfmt
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/README.md b/vendor/github.com/go-xmlfmt/xmlfmt/README.md
new file mode 100644
index 000000000..4eb6d69a0
--- /dev/null
+++ b/vendor/github.com/go-xmlfmt/xmlfmt/README.md
@@ -0,0 +1,178 @@
+# Go XML Formatter
+
+[![MIT License](http://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
+[![Go Doc](https://img.shields.io/badge/godoc-reference-4b68a3.svg)](https://godoc.org/github.com/go-xmlfmt/xmlfmt)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-xmlfmt/xmlfmt)](https://goreportcard.com/report/github.com/go-xmlfmt/xmlfmt)
+[![Codeship Status](https://codeship.com/projects/c49f02b0-a384-0134-fb20-2e0351080565/status?branch=master)](https://codeship.com/projects/190297)
+
+## Synopsis
+
+The Go XML Formatter, xmlfmt, will format the XML string in a readable way.
+
+```go
+package main
+
+import "github.com/go-xmlfmt/xmlfmt"
+
+func main() {
+	// (The sample's XML markup was lost in extraction; only its text content,
+	// "a", "Some org-or-other", "Wouldnt you like to know", "Pat", and
+	// "Califia", survives.)
+	xml1 := `aSome org-or-otherWouldnt you like to knowPatCalifia`
+	x := xmlfmt.FormatXML(xml1, "\t", "  ")
+	print(x)
+}
+```
+
+Output:
+
+```xml
+<!-- formatted output elided: markup lost in extraction -->
+```
+
+There is no XML decoding and encoding involved, only pure regular expression
+matching and replacing. So it is much faster than going through decoding and
+encoding procedures. Moreover, the exact XML source string is preserved,
+instead of being changed by the encoder. This is why this package exists in
+the first place.
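+
+As a minimal, self-contained sketch (not the original sample above; it assumes
+nothing beyond the exported `FormatXML` function):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/go-xmlfmt/xmlfmt"
+)
+
+func main() {
+	// Single-line input; FormatXML only re-indents, it never rewrites tags.
+	in := `<a><b>1</b><c/></a>`
+	fmt.Print(xmlfmt.FormatXML(in, "", "  "))
+}
+```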
+
+## Command
+
+To use it on the command line, check out [xmlfmt](https://github.com/AntonioSun/xmlfmt):
+
+```
+$ xmlfmt
+XML Formatter
+built on 2019-12-08
+
+The xmlfmt will format the XML string without rewriting the document
+
+Options:
+
+  -h, --help        display help information
+  -f, --file       *The xml file to read from (or stdin)
+  -p, --prefix      each element begins on a new line and this prefix
+  -i, --indent[= ]  indent string for nested elements
+```
+
+## Justification
+
+### The format
+
+The Go XML Formatter is not called an XML Beautifier because the result is not
+*exactly* what people would expect -- some, but not all, closing tags stay on
+the same line, just as shown above. Having looked at the result and thought it
+over, I now think it is actually a better way to present it, as in my opinion
+those closing tags are better off staying on the same line. I.e.,
+
+When it comes to very big XML strings, which is what I deal with every day,
+saving space by not letting those closing tags take extra lines is a plus
+rather than a negative to me.
+
+### The alternative
+
+Formatting it "properly", i.e., the way people would normally see it, is very
+hard using pure regular expressions. In fact, according to Sam Whited from the
+go-nuts mlist,
+
+> Regular expression is, well, regular. This means that they can parse regular
+> grammars, but can't parse context free grammars (like XML). It is actually
+> impossible to use a regex to do this task; it will always be fragile,
+> unfortunately.
+
+So if the output format is that important to you, then unfortunately you have
+to go through decoding and encoding procedures. But there are some drawbacks
+as well, besides such a method being slow, as put by James McGill in
+http://stackoverflow.com/questions/21117161:
+
+> I like this solution, but am still in search of a Golang XML
+> formatter/prettyprinter that doesn't rewrite the document (other than
+> formatting whitespace). Marshalling or using the Encoder will change
+> namespace declarations.
+>
+> For example an element like "< ns1:Element />" will be translated to
+> something like '< Element xmlns="http://bla...bla/ns1" >< /Element >' which
+> seems harmless enough except when the intent is to not alter the xml other
+> than formatting. -- James McGill Nov 12 '15
+
+Using Sam's code as an example,
+
+https://play.golang.org/p/JUqQY3WpW5
+
+The above code formats the following XML
+
+```xml
+<!-- input sample elided: markup lost in extraction; its text content was
+"123" and "John Brown" -->
+```
+
+into this:
+
+```xml
+<!-- formatted output elided: markup lost in extraction; its text content was
+"123" and "John Brown" -->
+```
+
+I know they are syntactically the same; the problem, however, is that they
+*look* totally different.
+
+That's why there is this package, an XML Beautifier that doesn't rewrite the
+document.
+
+## Credit
+
+The credit goes to **diotalevi** for his post at
+http://www.perlmonks.org/?node_id=261292.
+
+However, it does not work for all cases. For example,
+
+```sh
+$ echo '<!-- sample input elided: markup lost in extraction; its text content
+was "123" and "John Brown" -->' | perl -pe 's/(?<=>)\s+(?=<)//g; s(<(/?)([^/>]+)(/?)>\s*(?=(".($1&&($4 eq"
+```
+
+```xml
+123
+John Brown
+<!-- remaining output markup lost in extraction -->
+```
+
+I simplified the algorithm, and now it should work for all cases:
+
+```sh
+echo '<!-- same sample input as above; markup lost in extraction -->' | perl -pe 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge'
+```
+
+```xml
+123
+John Brown
+<!-- remaining output markup lost in extraction -->
+```
+
+This package is a direct translation of the above Perl code into Go,
+then further enhanced by @ruandao.
diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go
new file mode 100644
index 000000000..b744f5b35
--- /dev/null
+++ b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go
@@ -0,0 +1,56 @@
+////////////////////////////////////////////////////////////////////////////
+// Program: xmlfmt.go
+// Purpose: Go XML Beautify from XML string using pure string manipulation
+// Authors: Antonio Sun (c) 2016-2019, All rights reserved
+////////////////////////////////////////////////////////////////////////////
+
+package xmlfmt
+
+import (
+	"regexp"
+	"strings"
+)
+
+var (
+	reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`)
+	// NL is the newline string used in XML output, defined for DOS convenience.
+	NL = "\r\n"
+)
+
+// FormatXML will (purely) reformat the XML string in a readable way, without
+// any rewriting/altering of the structure.
+func FormatXML(xmls, prefix, indent string) string {
+	src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><")
+
+	rf := replaceTag(prefix, indent)
+	return (prefix + reg.ReplaceAllStringFunc(src, rf))
+}
+
+// replaceTag returns a closure function to do
+// 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+?)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge'
+// as in Perl, and deal with comments as well.
+func replaceTag(prefix, indent string) func(string) string {
+	indentLevel := 0
+	return func(m string) string {
+		// head elem
+		if strings.HasPrefix(m, "<?xml") {
+			return NL + prefix + strings.Repeat(indent, indentLevel) + m
+		}
+		// comment elem
+		if strings.HasPrefix(m, "<!--") {

[remainder of xmlfmt.go, the vendor/github.com/golang/geo LICENSE, r1/doc.go, and the opening of r1/interval.go were lost in extraction]

+// If Lo > Hi then the interval is empty.
+type Interval struct {
+	Lo, Hi float64
+}
+
+// EmptyInterval returns an empty interval.
+func EmptyInterval() Interval { return Interval{1, 0} }
+
+// IntervalFromPoint returns an interval representing a single point.
+func IntervalFromPoint(p float64) Interval { return Interval{p, p} }
+
+// IsEmpty reports whether the interval is empty.
+func (i Interval) IsEmpty() bool { return i.Lo > i.Hi }
+
+// Equal returns true iff the interval contains the same points as oi.
+func (i Interval) Equal(oi Interval) bool {
+	return i == oi || i.IsEmpty() && oi.IsEmpty()
+}
+
+// Center returns the midpoint of the interval.
+// It is undefined for empty intervals.
+func (i Interval) Center() float64 { return 0.5 * (i.Lo + i.Hi) }
+
+// Length returns the length of the interval.
+// The length of an empty interval is negative.
+func (i Interval) Length() float64 { return i.Hi - i.Lo }
+
+// Contains returns true iff the interval contains p.
+func (i Interval) Contains(p float64) bool { return i.Lo <= p && p <= i.Hi }
+
+// ContainsInterval returns true iff the interval contains oi.
+func (i Interval) ContainsInterval(oi Interval) bool {
+	if oi.IsEmpty() {
+		return true
+	}
+	return i.Lo <= oi.Lo && oi.Hi <= i.Hi
+}
+
+// InteriorContains returns true iff the interval strictly contains p.
+func (i Interval) InteriorContains(p float64) bool {
+	return i.Lo < p && p < i.Hi
+}
+
+// InteriorContainsInterval returns true iff the interval strictly contains oi.
+func (i Interval) InteriorContainsInterval(oi Interval) bool {
+	if oi.IsEmpty() {
+		return true
+	}
+	return i.Lo < oi.Lo && oi.Hi < i.Hi
+}
+
+// Intersects returns true iff the interval contains any points in common with oi.
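+// For example, [0, 2] intersects [2, 3], since they share the point 2,
+// while [0, 1] and [3, 4] do not intersect.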
+func (i Interval) Intersects(oi Interval) bool { + if i.Lo <= oi.Lo { + return oi.Lo <= i.Hi && oi.Lo <= oi.Hi // oi.Lo ∈ i and oi is not empty + } + return i.Lo <= oi.Hi && i.Lo <= i.Hi // i.Lo ∈ oi and i is not empty +} + +// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary. +func (i Interval) InteriorIntersects(oi Interval) bool { + return oi.Lo < i.Hi && i.Lo < oi.Hi && i.Lo < i.Hi && oi.Lo <= oi.Hi +} + +// Intersection returns the interval containing all points common to i and j. +func (i Interval) Intersection(j Interval) Interval { + // Empty intervals do not need to be special-cased. + return Interval{ + Lo: math.Max(i.Lo, j.Lo), + Hi: math.Min(i.Hi, j.Hi), + } +} + +// AddPoint returns the interval expanded so that it contains the given point. +func (i Interval) AddPoint(p float64) Interval { + if i.IsEmpty() { + return Interval{p, p} + } + if p < i.Lo { + return Interval{p, i.Hi} + } + if p > i.Hi { + return Interval{i.Lo, p} + } + return i +} + +// ClampPoint returns the closest point in the interval to the given point "p". +// The interval must be non-empty. +func (i Interval) ClampPoint(p float64) float64 { + return math.Max(i.Lo, math.Min(i.Hi, p)) +} + +// Expanded returns an interval that has been expanded on each side by margin. +// If margin is negative, then the function shrinks the interval on +// each side by margin instead. The resulting interval may be empty. Any +// expansion of an empty interval remains empty. +func (i Interval) Expanded(margin float64) Interval { + if i.IsEmpty() { + return i + } + return Interval{i.Lo - margin, i.Hi + margin} +} + +// Union returns the smallest interval that contains this interval and the given interval. +func (i Interval) Union(other Interval) Interval { + if i.IsEmpty() { + return other + } + if other.IsEmpty() { + return i + } + return Interval{math.Min(i.Lo, other.Lo), math.Max(i.Hi, other.Hi)} +} + +func (i Interval) String() string { return fmt.Sprintf("[%.7f, %.7f]", i.Lo, i.Hi) } + +const ( + // epsilon is a small number that represents a reasonable level of noise between two + // values that can be considered to be equal. + epsilon = 1e-15 + // dblEpsilon is a smaller number for values that require more precision. + // This is the C++ DBL_EPSILON equivalent. + dblEpsilon = 2.220446049250313e-16 +) + +// ApproxEqual reports whether the interval can be transformed into the +// given interval by moving each endpoint a small distance. +// The empty interval is considered to be positioned arbitrarily on the +// real line, so any interval with a small enough length will match +// the empty interval. +func (i Interval) ApproxEqual(other Interval) bool { + if i.IsEmpty() { + return other.Length() <= 2*epsilon + } + if other.IsEmpty() { + return i.Length() <= 2*epsilon + } + return math.Abs(other.Lo-i.Lo) <= epsilon && + math.Abs(other.Hi-i.Hi) <= epsilon +} + +// DirectedHausdorffDistance returns the Hausdorff distance to the given interval. For two +// intervals x and y, this distance is defined as +// h(x, y) = max_{p in x} min_{q in y} d(p, q). 
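+//
+// For example, the directed Hausdorff distance from [0, 4] to [1, 2] is 2:
+// it is realized at p = 4, whose closest point in [1, 2] is q = 2.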
+func (i Interval) DirectedHausdorffDistance(other Interval) float64 { + if i.IsEmpty() { + return 0 + } + if other.IsEmpty() { + return math.Inf(1) + } + return math.Max(0, math.Max(i.Hi-other.Hi, other.Lo-i.Lo)) +} diff --git a/vendor/github.com/golang/geo/r2/doc.go b/vendor/github.com/golang/geo/r2/doc.go new file mode 100644 index 000000000..05b155543 --- /dev/null +++ b/vendor/github.com/golang/geo/r2/doc.go @@ -0,0 +1,20 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package r2 implements types and functions for working with geometry in ℝ². + +See package s2 for a more detailed overview. +*/ +package r2 diff --git a/vendor/github.com/golang/geo/r2/rect.go b/vendor/github.com/golang/geo/r2/rect.go new file mode 100644 index 000000000..495545bba --- /dev/null +++ b/vendor/github.com/golang/geo/r2/rect.go @@ -0,0 +1,255 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package r2 + +import ( + "fmt" + "math" + + "github.com/golang/geo/r1" +) + +// Point represents a point in ℝ². +type Point struct { + X, Y float64 +} + +// Add returns the sum of p and op. +func (p Point) Add(op Point) Point { return Point{p.X + op.X, p.Y + op.Y} } + +// Sub returns the difference of p and op. +func (p Point) Sub(op Point) Point { return Point{p.X - op.X, p.Y - op.Y} } + +// Mul returns the scalar product of p and m. +func (p Point) Mul(m float64) Point { return Point{m * p.X, m * p.Y} } + +// Ortho returns a counterclockwise orthogonal point with the same norm. +func (p Point) Ortho() Point { return Point{-p.Y, p.X} } + +// Dot returns the dot product between p and op. +func (p Point) Dot(op Point) float64 { return p.X*op.X + p.Y*op.Y } + +// Cross returns the cross product of p and op. +func (p Point) Cross(op Point) float64 { return p.X*op.Y - p.Y*op.X } + +// Norm returns the vector's norm. +func (p Point) Norm() float64 { return math.Hypot(p.X, p.Y) } + +// Normalize returns a unit point in the same direction as p. +func (p Point) Normalize() Point { + if p.X == 0 && p.Y == 0 { + return p + } + return p.Mul(1 / p.Norm()) +} + +func (p Point) String() string { return fmt.Sprintf("(%.12f, %.12f)", p.X, p.Y) } + +// Rect represents a closed axis-aligned rectangle in the (x,y) plane. +type Rect struct { + X, Y r1.Interval +} + +// RectFromPoints constructs a rect that contains the given points. 
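+// For example, RectFromPoints(Point{1, 4}, Point{3, 2}) yields the rect
+// X = [1, 3], Y = [2, 4].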
+func RectFromPoints(pts ...Point) Rect { + // Because the default value on interval is 0,0, we need to manually + // define the interval from the first point passed in as our starting + // interval, otherwise we end up with the case of passing in + // Point{0.2, 0.3} and getting the starting Rect of {0, 0.2}, {0, 0.3} + // instead of the Rect {0.2, 0.2}, {0.3, 0.3} which is not correct. + if len(pts) == 0 { + return Rect{} + } + + r := Rect{ + X: r1.Interval{Lo: pts[0].X, Hi: pts[0].X}, + Y: r1.Interval{Lo: pts[0].Y, Hi: pts[0].Y}, + } + + for _, p := range pts[1:] { + r = r.AddPoint(p) + } + return r +} + +// RectFromCenterSize constructs a rectangle with the given center and size. +// Both dimensions of size must be non-negative. +func RectFromCenterSize(center, size Point) Rect { + return Rect{ + r1.Interval{Lo: center.X - size.X/2, Hi: center.X + size.X/2}, + r1.Interval{Lo: center.Y - size.Y/2, Hi: center.Y + size.Y/2}, + } +} + +// EmptyRect constructs the canonical empty rectangle. Use IsEmpty() to test +// for empty rectangles, since they have more than one representation. A Rect{} +// is not the same as the EmptyRect. +func EmptyRect() Rect { + return Rect{r1.EmptyInterval(), r1.EmptyInterval()} +} + +// IsValid reports whether the rectangle is valid. +// This requires the width to be empty iff the height is empty. +func (r Rect) IsValid() bool { + return r.X.IsEmpty() == r.Y.IsEmpty() +} + +// IsEmpty reports whether the rectangle is empty. +func (r Rect) IsEmpty() bool { + return r.X.IsEmpty() +} + +// Vertices returns all four vertices of the rectangle. Vertices are returned in +// CCW direction starting with the lower left corner. +func (r Rect) Vertices() [4]Point { + return [4]Point{ + {r.X.Lo, r.Y.Lo}, + {r.X.Hi, r.Y.Lo}, + {r.X.Hi, r.Y.Hi}, + {r.X.Lo, r.Y.Hi}, + } +} + +// VertexIJ returns the vertex in direction i along the X-axis (0=left, 1=right) and +// direction j along the Y-axis (0=down, 1=up). +func (r Rect) VertexIJ(i, j int) Point { + x := r.X.Lo + if i == 1 { + x = r.X.Hi + } + y := r.Y.Lo + if j == 1 { + y = r.Y.Hi + } + return Point{x, y} +} + +// Lo returns the low corner of the rect. +func (r Rect) Lo() Point { + return Point{r.X.Lo, r.Y.Lo} +} + +// Hi returns the high corner of the rect. +func (r Rect) Hi() Point { + return Point{r.X.Hi, r.Y.Hi} +} + +// Center returns the center of the rectangle in (x,y)-space +func (r Rect) Center() Point { + return Point{r.X.Center(), r.Y.Center()} +} + +// Size returns the width and height of this rectangle in (x,y)-space. Empty +// rectangles have a negative width and height. +func (r Rect) Size() Point { + return Point{r.X.Length(), r.Y.Length()} +} + +// ContainsPoint reports whether the rectangle contains the given point. +// Rectangles are closed regions, i.e. they contain their boundary. +func (r Rect) ContainsPoint(p Point) bool { + return r.X.Contains(p.X) && r.Y.Contains(p.Y) +} + +// InteriorContainsPoint returns true iff the given point is contained in the interior +// of the region (i.e. the region excluding its boundary). +func (r Rect) InteriorContainsPoint(p Point) bool { + return r.X.InteriorContains(p.X) && r.Y.InteriorContains(p.Y) +} + +// Contains reports whether the rectangle contains the given rectangle. +func (r Rect) Contains(other Rect) bool { + return r.X.ContainsInterval(other.X) && r.Y.ContainsInterval(other.Y) +} + +// InteriorContains reports whether the interior of this rectangle contains all of the +// points of the given other rectangle (including its boundary). 
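+// For example, the interior of [0, 3]×[0, 3] contains [1, 2]×[1, 2], but not
+// [0, 2]×[0, 2], which touches the boundary at x = 0.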
+func (r Rect) InteriorContains(other Rect) bool { + return r.X.InteriorContainsInterval(other.X) && r.Y.InteriorContainsInterval(other.Y) +} + +// Intersects reports whether this rectangle and the other rectangle have any points in common. +func (r Rect) Intersects(other Rect) bool { + return r.X.Intersects(other.X) && r.Y.Intersects(other.Y) +} + +// InteriorIntersects reports whether the interior of this rectangle intersects +// any point (including the boundary) of the given other rectangle. +func (r Rect) InteriorIntersects(other Rect) bool { + return r.X.InteriorIntersects(other.X) && r.Y.InteriorIntersects(other.Y) +} + +// AddPoint expands the rectangle to include the given point. The rectangle is +// expanded by the minimum amount possible. +func (r Rect) AddPoint(p Point) Rect { + return Rect{r.X.AddPoint(p.X), r.Y.AddPoint(p.Y)} +} + +// AddRect expands the rectangle to include the given rectangle. This is the +// same as replacing the rectangle by the union of the two rectangles, but +// is more efficient. +func (r Rect) AddRect(other Rect) Rect { + return Rect{r.X.Union(other.X), r.Y.Union(other.Y)} +} + +// ClampPoint returns the closest point in the rectangle to the given point. +// The rectangle must be non-empty. +func (r Rect) ClampPoint(p Point) Point { + return Point{r.X.ClampPoint(p.X), r.Y.ClampPoint(p.Y)} +} + +// Expanded returns a rectangle that has been expanded in the x-direction +// by margin.X, and in y-direction by margin.Y. If either margin is empty, +// then shrink the interval on the corresponding sides instead. The resulting +// rectangle may be empty. Any expansion of an empty rectangle remains empty. +func (r Rect) Expanded(margin Point) Rect { + xx := r.X.Expanded(margin.X) + yy := r.Y.Expanded(margin.Y) + if xx.IsEmpty() || yy.IsEmpty() { + return EmptyRect() + } + return Rect{xx, yy} +} + +// ExpandedByMargin returns a Rect that has been expanded by the amount on all sides. +func (r Rect) ExpandedByMargin(margin float64) Rect { + return r.Expanded(Point{margin, margin}) +} + +// Union returns the smallest rectangle containing the union of this rectangle and +// the given rectangle. +func (r Rect) Union(other Rect) Rect { + return Rect{r.X.Union(other.X), r.Y.Union(other.Y)} +} + +// Intersection returns the smallest rectangle containing the intersection of this +// rectangle and the given rectangle. +func (r Rect) Intersection(other Rect) Rect { + xx := r.X.Intersection(other.X) + yy := r.Y.Intersection(other.Y) + if xx.IsEmpty() || yy.IsEmpty() { + return EmptyRect() + } + + return Rect{xx, yy} +} + +// ApproxEqual returns true if the x- and y-intervals of the two rectangles are +// the same up to the given tolerance. +func (r Rect) ApproxEqual(r2 Rect) bool { + return r.X.ApproxEqual(r2.X) && r.Y.ApproxEqual(r2.Y) +} + +func (r Rect) String() string { return fmt.Sprintf("[Lo%s, Hi%s]", r.Lo(), r.Hi()) } diff --git a/vendor/github.com/golang/geo/r3/doc.go b/vendor/github.com/golang/geo/r3/doc.go new file mode 100644 index 000000000..1eb4710c8 --- /dev/null +++ b/vendor/github.com/golang/geo/r3/doc.go @@ -0,0 +1,20 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package r3 implements types and functions for working with geometry in ℝ³. + +See ../s2 for a more detailed overview. +*/ +package r3 diff --git a/vendor/github.com/golang/geo/r3/precisevector.go b/vendor/github.com/golang/geo/r3/precisevector.go new file mode 100644 index 000000000..b13393dbc --- /dev/null +++ b/vendor/github.com/golang/geo/r3/precisevector.go @@ -0,0 +1,198 @@ +// Copyright 2016 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package r3 + +import ( + "fmt" + "math/big" +) + +const ( + // prec is the number of bits of precision to use for the Float values. + // To keep things simple, we use the maximum allowable precision on big + // values. This allows us to handle all values we expect in the s2 library. + prec = big.MaxPrec +) + +// define some commonly referenced values. +var ( + precise0 = precInt(0) + precise1 = precInt(1) +) + +// precStr wraps the conversion from a string into a big.Float. For results that +// actually can be represented exactly, this should only be used on values that +// are integer multiples of integer powers of 2. +func precStr(s string) *big.Float { + // Explicitly ignoring the bool return for this usage. + f, _ := new(big.Float).SetPrec(prec).SetString(s) + return f +} + +func precInt(i int64) *big.Float { + return new(big.Float).SetPrec(prec).SetInt64(i) +} + +func precFloat(f float64) *big.Float { + return new(big.Float).SetPrec(prec).SetFloat64(f) +} + +func precAdd(a, b *big.Float) *big.Float { + return new(big.Float).SetPrec(prec).Add(a, b) +} + +func precSub(a, b *big.Float) *big.Float { + return new(big.Float).SetPrec(prec).Sub(a, b) +} + +func precMul(a, b *big.Float) *big.Float { + return new(big.Float).SetPrec(prec).Mul(a, b) +} + +// PreciseVector represents a point in ℝ³ using high-precision values. +// Note that this is NOT a complete implementation because there are some +// operations that Vector supports that are not feasible with arbitrary precision +// math. (e.g., methods that need division like Normalize, or methods needing a +// square root operation such as Norm) +type PreciseVector struct { + X, Y, Z *big.Float +} + +// PreciseVectorFromVector creates a high precision vector from the given Vector. +func PreciseVectorFromVector(v Vector) PreciseVector { + return NewPreciseVector(v.X, v.Y, v.Z) +} + +// NewPreciseVector creates a high precision vector from the given floating point values. 
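+// The conversion is exact: e.g., NewPreciseVector(0.1, 0, 0) stores the
+// precise value of the float64 nearest to 0.1 (not the decimal 0.1),
+// since big.Float's SetFloat64 is exact.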
+func NewPreciseVector(x, y, z float64) PreciseVector { + return PreciseVector{ + X: precFloat(x), + Y: precFloat(y), + Z: precFloat(z), + } +} + +// Vector returns this precise vector converted to a Vector. +func (v PreciseVector) Vector() Vector { + // The accuracy flag is ignored on these conversions back to float64. + x, _ := v.X.Float64() + y, _ := v.Y.Float64() + z, _ := v.Z.Float64() + return Vector{x, y, z}.Normalize() +} + +// Equal reports whether v and ov are equal. +func (v PreciseVector) Equal(ov PreciseVector) bool { + return v.X.Cmp(ov.X) == 0 && v.Y.Cmp(ov.Y) == 0 && v.Z.Cmp(ov.Z) == 0 +} + +func (v PreciseVector) String() string { + return fmt.Sprintf("(%10g, %10g, %10g)", v.X, v.Y, v.Z) +} + +// Norm2 returns the square of the norm. +func (v PreciseVector) Norm2() *big.Float { return v.Dot(v) } + +// IsUnit reports whether this vector is of unit length. +func (v PreciseVector) IsUnit() bool { + return v.Norm2().Cmp(precise1) == 0 +} + +// Abs returns the vector with nonnegative components. +func (v PreciseVector) Abs() PreciseVector { + return PreciseVector{ + X: new(big.Float).Abs(v.X), + Y: new(big.Float).Abs(v.Y), + Z: new(big.Float).Abs(v.Z), + } +} + +// Add returns the standard vector sum of v and ov. +func (v PreciseVector) Add(ov PreciseVector) PreciseVector { + return PreciseVector{ + X: precAdd(v.X, ov.X), + Y: precAdd(v.Y, ov.Y), + Z: precAdd(v.Z, ov.Z), + } +} + +// Sub returns the standard vector difference of v and ov. +func (v PreciseVector) Sub(ov PreciseVector) PreciseVector { + return PreciseVector{ + X: precSub(v.X, ov.X), + Y: precSub(v.Y, ov.Y), + Z: precSub(v.Z, ov.Z), + } +} + +// Mul returns the standard scalar product of v and f. +func (v PreciseVector) Mul(f *big.Float) PreciseVector { + return PreciseVector{ + X: precMul(v.X, f), + Y: precMul(v.Y, f), + Z: precMul(v.Z, f), + } +} + +// MulByFloat64 returns the standard scalar product of v and f. +func (v PreciseVector) MulByFloat64(f float64) PreciseVector { + return v.Mul(precFloat(f)) +} + +// Dot returns the standard dot product of v and ov. +func (v PreciseVector) Dot(ov PreciseVector) *big.Float { + return precAdd(precMul(v.X, ov.X), precAdd(precMul(v.Y, ov.Y), precMul(v.Z, ov.Z))) +} + +// Cross returns the standard cross product of v and ov. +func (v PreciseVector) Cross(ov PreciseVector) PreciseVector { + return PreciseVector{ + X: precSub(precMul(v.Y, ov.Z), precMul(v.Z, ov.Y)), + Y: precSub(precMul(v.Z, ov.X), precMul(v.X, ov.Z)), + Z: precSub(precMul(v.X, ov.Y), precMul(v.Y, ov.X)), + } +} + +// LargestComponent returns the axis that represents the largest component in this vector. +func (v PreciseVector) LargestComponent() Axis { + t := v.Abs() + + if t.X.Cmp(t.Y) > 0 { + if t.X.Cmp(t.Z) > 0 { + return XAxis + } + return ZAxis + } + if t.Y.Cmp(t.Z) > 0 { + return YAxis + } + return ZAxis +} + +// SmallestComponent returns the axis that represents the smallest component in this vector. +func (v PreciseVector) SmallestComponent() Axis { + t := v.Abs() + + if t.X.Cmp(t.Y) < 0 { + if t.X.Cmp(t.Z) < 0 { + return XAxis + } + return ZAxis + } + if t.Y.Cmp(t.Z) < 0 { + return YAxis + } + return ZAxis +} diff --git a/vendor/github.com/golang/geo/r3/vector.go b/vendor/github.com/golang/geo/r3/vector.go new file mode 100644 index 000000000..ccda622f4 --- /dev/null +++ b/vendor/github.com/golang/geo/r3/vector.go @@ -0,0 +1,183 @@ +// Copyright 2014 Google Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package r3 + +import ( + "fmt" + "math" + + "github.com/golang/geo/s1" +) + +// Vector represents a point in ℝ³. +type Vector struct { + X, Y, Z float64 +} + +// ApproxEqual reports whether v and ov are equal within a small epsilon. +func (v Vector) ApproxEqual(ov Vector) bool { + const epsilon = 1e-16 + return math.Abs(v.X-ov.X) < epsilon && math.Abs(v.Y-ov.Y) < epsilon && math.Abs(v.Z-ov.Z) < epsilon +} + +func (v Vector) String() string { return fmt.Sprintf("(%0.24f, %0.24f, %0.24f)", v.X, v.Y, v.Z) } + +// Norm returns the vector's norm. +func (v Vector) Norm() float64 { return math.Sqrt(v.Dot(v)) } + +// Norm2 returns the square of the norm. +func (v Vector) Norm2() float64 { return v.Dot(v) } + +// Normalize returns a unit vector in the same direction as v. +func (v Vector) Normalize() Vector { + n2 := v.Norm2() + if n2 == 0 { + return Vector{0, 0, 0} + } + return v.Mul(1 / math.Sqrt(n2)) +} + +// IsUnit returns whether this vector is of approximately unit length. +func (v Vector) IsUnit() bool { + const epsilon = 5e-14 + return math.Abs(v.Norm2()-1) <= epsilon +} + +// Abs returns the vector with nonnegative components. +func (v Vector) Abs() Vector { return Vector{math.Abs(v.X), math.Abs(v.Y), math.Abs(v.Z)} } + +// Add returns the standard vector sum of v and ov. +func (v Vector) Add(ov Vector) Vector { return Vector{v.X + ov.X, v.Y + ov.Y, v.Z + ov.Z} } + +// Sub returns the standard vector difference of v and ov. +func (v Vector) Sub(ov Vector) Vector { return Vector{v.X - ov.X, v.Y - ov.Y, v.Z - ov.Z} } + +// Mul returns the standard scalar product of v and m. +func (v Vector) Mul(m float64) Vector { return Vector{m * v.X, m * v.Y, m * v.Z} } + +// Dot returns the standard dot product of v and ov. +func (v Vector) Dot(ov Vector) float64 { return v.X*ov.X + v.Y*ov.Y + v.Z*ov.Z } + +// Cross returns the standard cross product of v and ov. +func (v Vector) Cross(ov Vector) Vector { + return Vector{ + v.Y*ov.Z - v.Z*ov.Y, + v.Z*ov.X - v.X*ov.Z, + v.X*ov.Y - v.Y*ov.X, + } +} + +// Distance returns the Euclidean distance between v and ov. +func (v Vector) Distance(ov Vector) float64 { return v.Sub(ov).Norm() } + +// Angle returns the angle between v and ov. +func (v Vector) Angle(ov Vector) s1.Angle { + return s1.Angle(math.Atan2(v.Cross(ov).Norm(), v.Dot(ov))) * s1.Radian +} + +// Axis enumerates the 3 axes of ℝ³. +type Axis int + +// The three axes of ℝ³. +const ( + XAxis Axis = iota + YAxis + ZAxis +) + +// Ortho returns a unit vector that is orthogonal to v. +// Ortho(-v) = -Ortho(v) for all v. +func (v Vector) Ortho() Vector { + ov := Vector{0.012, 0.0053, 0.00457} + switch v.LargestComponent() { + case XAxis: + ov.Z = 1 + case YAxis: + ov.X = 1 + default: + ov.Y = 1 + } + return v.Cross(ov).Normalize() +} + +// LargestComponent returns the axis that represents the largest component in this vector. 
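+// Components are compared by absolute value: e.g., Vector{1, -3, 2} has
+// largest component YAxis, since |-3| > |2| > |1|.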
+func (v Vector) LargestComponent() Axis { + t := v.Abs() + + if t.X > t.Y { + if t.X > t.Z { + return XAxis + } + return ZAxis + } + if t.Y > t.Z { + return YAxis + } + return ZAxis +} + +// SmallestComponent returns the axis that represents the smallest component in this vector. +func (v Vector) SmallestComponent() Axis { + t := v.Abs() + + if t.X < t.Y { + if t.X < t.Z { + return XAxis + } + return ZAxis + } + if t.Y < t.Z { + return YAxis + } + return ZAxis +} + +// Cmp compares v and ov lexicographically and returns: +// +// -1 if v < ov +// 0 if v == ov +// +1 if v > ov +// +// This method is based on C++'s std::lexicographical_compare. Two entities +// are compared element by element with the given operator. The first mismatch +// defines which is less (or greater) than the other. If both have equivalent +// values they are lexicographically equal. +func (v Vector) Cmp(ov Vector) int { + if v.X < ov.X { + return -1 + } + if v.X > ov.X { + return 1 + } + + // First elements were the same, try the next. + if v.Y < ov.Y { + return -1 + } + if v.Y > ov.Y { + return 1 + } + + // Second elements were the same return the final compare. + if v.Z < ov.Z { + return -1 + } + if v.Z > ov.Z { + return 1 + } + + // Both are equal + return 0 +} diff --git a/vendor/github.com/golang/geo/s1/angle.go b/vendor/github.com/golang/geo/s1/angle.go new file mode 100644 index 000000000..747b23dea --- /dev/null +++ b/vendor/github.com/golang/geo/s1/angle.go @@ -0,0 +1,120 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s1 + +import ( + "math" + "strconv" +) + +// Angle represents a 1D angle. The internal representation is a double precision +// value in radians, so conversion to and from radians is exact. +// Conversions between E5, E6, E7, and Degrees are not always +// exact. For example, Degrees(3.1) is different from E6(3100000) or E7(31000000). +// +// The following conversions between degrees and radians are exact: +// +// Degree*180 == Radian*math.Pi +// Degree*(180/n) == Radian*(math.Pi/n) for n == 0..8 +// +// These identities hold when the arguments are scaled up or down by any power +// of 2. Some similar identities are also true, for example, +// +// Degree*60 == Radian*(math.Pi/3) +// +// But be aware that this type of identity does not hold in general. For example, +// +// Degree*3 != Radian*(math.Pi/60) +// +// Similarly, the conversion to radians means that (Angle(x)*Degree).Degrees() +// does not always equal x. For example, +// +// (Angle(45*n)*Degree).Degrees() == 45*n for n == 0..8 +// +// but +// +// (60*Degree).Degrees() != 60 +// +// When testing for equality, you should allow for numerical errors (ApproxEqual) +// or convert to discrete E5/E6/E7 values first. +type Angle float64 + +// Angle units. +const ( + Radian Angle = 1 + Degree = (math.Pi / 180) * Radian + + E5 = 1e-5 * Degree + E6 = 1e-6 * Degree + E7 = 1e-7 * Degree +) + +// Radians returns the angle in radians. 
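+// For example, (90 * Degree).Radians() == math.Pi / 2 exactly, per the
+// conversion identities listed in the Angle comment above.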
+func (a Angle) Radians() float64 { return float64(a) } + +// Degrees returns the angle in degrees. +func (a Angle) Degrees() float64 { return float64(a / Degree) } + +// round returns the value rounded to nearest as an int32. +// This does not match C++ exactly for the case of x.5. +func round(val float64) int32 { + if val < 0 { + return int32(val - 0.5) + } + return int32(val + 0.5) +} + +// InfAngle returns an angle larger than any finite angle. +func InfAngle() Angle { + return Angle(math.Inf(1)) +} + +// isInf reports whether this Angle is infinite. +func (a Angle) isInf() bool { + return math.IsInf(float64(a), 0) +} + +// E5 returns the angle in hundred thousandths of degrees. +func (a Angle) E5() int32 { return round(a.Degrees() * 1e5) } + +// E6 returns the angle in millionths of degrees. +func (a Angle) E6() int32 { return round(a.Degrees() * 1e6) } + +// E7 returns the angle in ten millionths of degrees. +func (a Angle) E7() int32 { return round(a.Degrees() * 1e7) } + +// Abs returns the absolute value of the angle. +func (a Angle) Abs() Angle { return Angle(math.Abs(float64(a))) } + +// Normalized returns an equivalent angle in (-π, π]. +func (a Angle) Normalized() Angle { + rad := math.Remainder(float64(a), 2*math.Pi) + if rad <= -math.Pi { + rad = math.Pi + } + return Angle(rad) +} + +func (a Angle) String() string { + return strconv.FormatFloat(a.Degrees(), 'f', 7, 64) // like "%.7f" +} + +// ApproxEqual reports whether the two angles are the same up to a small tolerance. +func (a Angle) ApproxEqual(other Angle) bool { + return math.Abs(float64(a)-float64(other)) <= epsilon +} + +// BUG(dsymonds): The major differences from the C++ version are: +// - no unsigned E5/E6/E7 methods diff --git a/vendor/github.com/golang/geo/s1/chordangle.go b/vendor/github.com/golang/geo/s1/chordangle.go new file mode 100644 index 000000000..406c69ef1 --- /dev/null +++ b/vendor/github.com/golang/geo/s1/chordangle.go @@ -0,0 +1,250 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s1 + +import ( + "math" +) + +// ChordAngle represents the angle subtended by a chord (i.e., the straight +// line segment connecting two points on the sphere). Its representation +// makes it very efficient for computing and comparing distances, but unlike +// Angle it is only capable of representing angles between 0 and π radians. +// Generally, ChordAngle should only be used in loops where many angles need +// to be calculated and compared. Otherwise it is simpler to use Angle. +// +// ChordAngle loses some accuracy as the angle approaches π radians. +// Specifically, the representation of (π - x) radians has an error of about +// (1e-15 / x), with a maximum error of about 2e-8 radians (about 13cm on the +// Earth's surface). For comparison, for angles up to π/2 radians (10000km) +// the worst-case representation error is about 2e-16 radians (1 nanonmeter), +// which is about the same as Angle. 
+// +// ChordAngles are represented by the squared chord length, which can +// range from 0 to 4. Positive infinity represents an infinite squared length. +type ChordAngle float64 + +const ( + // NegativeChordAngle represents a chord angle smaller than the zero angle. + // The only valid operations on a NegativeChordAngle are comparisons, + // Angle conversions, and Successor/Predecessor. + NegativeChordAngle = ChordAngle(-1) + + // RightChordAngle represents a chord angle of 90 degrees (a "right angle"). + RightChordAngle = ChordAngle(2) + + // StraightChordAngle represents a chord angle of 180 degrees (a "straight angle"). + // This is the maximum finite chord angle. + StraightChordAngle = ChordAngle(4) + + // maxLength2 is the square of the maximum length allowed in a ChordAngle. + maxLength2 = 4.0 +) + +// ChordAngleFromAngle returns a ChordAngle from the given Angle. +func ChordAngleFromAngle(a Angle) ChordAngle { + if a < 0 { + return NegativeChordAngle + } + if a.isInf() { + return InfChordAngle() + } + l := 2 * math.Sin(0.5*math.Min(math.Pi, a.Radians())) + return ChordAngle(l * l) +} + +// ChordAngleFromSquaredLength returns a ChordAngle from the squared chord length. +// Note that the argument is automatically clamped to a maximum of 4 to +// handle possible roundoff errors. The argument must be non-negative. +func ChordAngleFromSquaredLength(length2 float64) ChordAngle { + if length2 > maxLength2 { + return StraightChordAngle + } + return ChordAngle(length2) +} + +// Expanded returns a new ChordAngle that has been adjusted by the given error +// bound (which can be positive or negative). Error should be the value +// returned by either MaxPointError or MaxAngleError. For example: +// a := ChordAngleFromPoints(x, y) +// a1 := a.Expanded(a.MaxPointError()) +func (c ChordAngle) Expanded(e float64) ChordAngle { + // If the angle is special, don't change it. Otherwise clamp it to the valid range. + if c.isSpecial() { + return c + } + return ChordAngle(math.Max(0.0, math.Min(maxLength2, float64(c)+e))) +} + +// Angle converts this ChordAngle to an Angle. +func (c ChordAngle) Angle() Angle { + if c < 0 { + return -1 * Radian + } + if c.isInf() { + return InfAngle() + } + return Angle(2 * math.Asin(0.5*math.Sqrt(float64(c)))) +} + +// InfChordAngle returns a chord angle larger than any finite chord angle. +// The only valid operations on an InfChordAngle are comparisons, Angle +// conversions, and Successor/Predecessor. +func InfChordAngle() ChordAngle { + return ChordAngle(math.Inf(1)) +} + +// isInf reports whether this ChordAngle is infinite. +func (c ChordAngle) isInf() bool { + return math.IsInf(float64(c), 1) +} + +// isSpecial reports whether this ChordAngle is one of the special cases. +func (c ChordAngle) isSpecial() bool { + return c < 0 || c.isInf() +} + +// isValid reports whether this ChordAngle is valid or not. +func (c ChordAngle) isValid() bool { + return (c >= 0 && c <= maxLength2) || c.isSpecial() +} + +// Successor returns the smallest representable ChordAngle larger than this one. +// This can be used to convert a "<" comparison to a "<=" comparison. 
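+// That is, for ChordAngles x and bound, (x < bound.Successor()) is
+// equivalent to (x <= bound).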
+// +// Note the following special cases: +// NegativeChordAngle.Successor == 0 +// StraightChordAngle.Successor == InfChordAngle +// InfChordAngle.Successor == InfChordAngle +func (c ChordAngle) Successor() ChordAngle { + if c >= maxLength2 { + return InfChordAngle() + } + if c < 0 { + return 0 + } + return ChordAngle(math.Nextafter(float64(c), 10.0)) +} + +// Predecessor returns the largest representable ChordAngle less than this one. +// +// Note the following special cases: +// InfChordAngle.Predecessor == StraightChordAngle +// ChordAngle(0).Predecessor == NegativeChordAngle +// NegativeChordAngle.Predecessor == NegativeChordAngle +func (c ChordAngle) Predecessor() ChordAngle { + if c <= 0 { + return NegativeChordAngle + } + if c > maxLength2 { + return StraightChordAngle + } + + return ChordAngle(math.Nextafter(float64(c), -10.0)) +} + +// MaxPointError returns the maximum error size for a ChordAngle constructed +// from 2 Points x and y, assuming that x and y are normalized to within the +// bounds guaranteed by s2.Point.Normalize. The error is defined with respect to +// the true distance after the points are projected to lie exactly on the sphere. +func (c ChordAngle) MaxPointError() float64 { + // There is a relative error of (2.5*dblEpsilon) when computing the squared + // distance, plus a relative error of 2 * dblEpsilon, plus an absolute error + // of (16 * dblEpsilon**2) because the lengths of the input points may differ + // from 1 by up to (2*dblEpsilon) each. (This is the maximum error in Normalize). + return 4.5*dblEpsilon*float64(c) + 16*dblEpsilon*dblEpsilon +} + +// MaxAngleError returns the maximum error for a ChordAngle constructed +// as an Angle distance. +func (c ChordAngle) MaxAngleError() float64 { + return dblEpsilon * float64(c) +} + +// Add adds the other ChordAngle to this one and returns the resulting value. +// This method assumes the ChordAngles are not special. +func (c ChordAngle) Add(other ChordAngle) ChordAngle { + // Note that this method (and Sub) is much more efficient than converting + // the ChordAngle to an Angle and adding those and converting back. It + // requires only one square root plus a few additions and multiplications. + + // Optimization for the common case where b is an error tolerance + // parameter that happens to be set to zero. + if other == 0 { + return c + } + + // Clamp the angle sum to at most 180 degrees. + if c+other >= maxLength2 { + return StraightChordAngle + } + + // Let a and b be the (non-squared) chord lengths, and let c = a+b. + // Let A, B, and C be the corresponding half-angles (a = 2*sin(A), etc). + // Then the formula below can be derived from c = 2 * sin(A+B) and the + // relationships sin(A+B) = sin(A)*cos(B) + sin(B)*cos(A) + // cos(X) = sqrt(1 - sin^2(X)) + x := float64(c * (1 - 0.25*other)) + y := float64(other * (1 - 0.25*c)) + return ChordAngle(math.Min(maxLength2, x+y+2*math.Sqrt(x*y))) +} + +// Sub subtracts the other ChordAngle from this one and returns the resulting +// value. This method assumes the ChordAngles are not special. +func (c ChordAngle) Sub(other ChordAngle) ChordAngle { + if other == 0 { + return c + } + if c <= other { + return 0 + } + x := float64(c * (1 - 0.25*other)) + y := float64(other * (1 - 0.25*c)) + return ChordAngle(math.Max(0.0, x+y-2*math.Sqrt(x*y))) +} + +// Sin returns the sine of this chord angle. This method is more efficient +// than converting to Angle and performing the computation. 
+func (c ChordAngle) Sin() float64 { + return math.Sqrt(c.Sin2()) +} + +// Sin2 returns the square of the sine of this chord angle. +// It is more efficient than Sin. +func (c ChordAngle) Sin2() float64 { + // Let a be the (non-squared) chord length, and let A be the corresponding + // half-angle (a = 2*sin(A)). The formula below can be derived from: + // sin(2*A) = 2 * sin(A) * cos(A) + // cos^2(A) = 1 - sin^2(A) + // This is much faster than converting to an angle and computing its sine. + return float64(c * (1 - 0.25*c)) +} + +// Cos returns the cosine of this chord angle. This method is more efficient +// than converting to Angle and performing the computation. +func (c ChordAngle) Cos() float64 { + // cos(2*A) = cos^2(A) - sin^2(A) = 1 - 2*sin^2(A) + return float64(1 - 0.5*c) +} + +// Tan returns the tangent of this chord angle. +func (c ChordAngle) Tan() float64 { + return c.Sin() / c.Cos() +} + +// TODO(roberts): Differences from C++: +// Helpers to/from E5/E6/E7 +// Helpers to/from degrees and radians directly. +// FastUpperBoundFrom(angle Angle) diff --git a/vendor/github.com/golang/geo/s1/doc.go b/vendor/github.com/golang/geo/s1/doc.go new file mode 100644 index 000000000..52a2c526d --- /dev/null +++ b/vendor/github.com/golang/geo/s1/doc.go @@ -0,0 +1,20 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package s1 implements types and functions for working with geometry in S¹ (circular geometry). + +See ../s2 for a more detailed overview. +*/ +package s1 diff --git a/vendor/github.com/golang/geo/s1/interval.go b/vendor/github.com/golang/geo/s1/interval.go new file mode 100644 index 000000000..6fea5221f --- /dev/null +++ b/vendor/github.com/golang/geo/s1/interval.go @@ -0,0 +1,462 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s1 + +import ( + "math" + "strconv" +) + +// An Interval represents a closed interval on a unit circle (also known +// as a 1-dimensional sphere). It is capable of representing the empty +// interval (containing no points), the full interval (containing all +// points), and zero-length intervals (containing a single point). +// +// Points are represented by the angle they make with the positive x-axis in +// the range [-π, π]. An interval is represented by its lower and upper +// bounds (both inclusive, since the interval is closed). 
The lower bound may +// be greater than the upper bound, in which case the interval is "inverted" +// (i.e. it passes through the point (-1, 0)). +// +// The point (-1, 0) has two valid representations, π and -π. The +// normalized representation of this point is π, so that endpoints +// of normal intervals are in the range (-π, π]. We normalize the latter to +// the former in IntervalFromEndpoints. However, we take advantage of the point +// -π to construct two special intervals: +// The full interval is [-π, π] +// The empty interval is [π, -π]. +// +// Treat the exported fields as read-only. +type Interval struct { + Lo, Hi float64 +} + +// IntervalFromEndpoints constructs a new interval from endpoints. +// Both arguments must be in the range [-π,π]. This function allows inverted intervals +// to be created. +func IntervalFromEndpoints(lo, hi float64) Interval { + i := Interval{lo, hi} + if lo == -math.Pi && hi != math.Pi { + i.Lo = math.Pi + } + if hi == -math.Pi && lo != math.Pi { + i.Hi = math.Pi + } + return i +} + +// IntervalFromPointPair returns the minimal interval containing the two given points. +// Both arguments must be in [-π,π]. +func IntervalFromPointPair(a, b float64) Interval { + if a == -math.Pi { + a = math.Pi + } + if b == -math.Pi { + b = math.Pi + } + if positiveDistance(a, b) <= math.Pi { + return Interval{a, b} + } + return Interval{b, a} +} + +// EmptyInterval returns an empty interval. +func EmptyInterval() Interval { return Interval{math.Pi, -math.Pi} } + +// FullInterval returns a full interval. +func FullInterval() Interval { return Interval{-math.Pi, math.Pi} } + +// IsValid reports whether the interval is valid. +func (i Interval) IsValid() bool { + return (math.Abs(i.Lo) <= math.Pi && math.Abs(i.Hi) <= math.Pi && + !(i.Lo == -math.Pi && i.Hi != math.Pi) && + !(i.Hi == -math.Pi && i.Lo != math.Pi)) +} + +// IsFull reports whether the interval is full. +func (i Interval) IsFull() bool { return i.Lo == -math.Pi && i.Hi == math.Pi } + +// IsEmpty reports whether the interval is empty. +func (i Interval) IsEmpty() bool { return i.Lo == math.Pi && i.Hi == -math.Pi } + +// IsInverted reports whether the interval is inverted; that is, whether Lo > Hi. +func (i Interval) IsInverted() bool { return i.Lo > i.Hi } + +// Invert returns the interval with endpoints swapped. +func (i Interval) Invert() Interval { + return Interval{i.Hi, i.Lo} +} + +// Center returns the midpoint of the interval. +// It is undefined for full and empty intervals. +func (i Interval) Center() float64 { + c := 0.5 * (i.Lo + i.Hi) + if !i.IsInverted() { + return c + } + if c <= 0 { + return c + math.Pi + } + return c - math.Pi +} + +// Length returns the length of the interval. +// The length of an empty interval is negative. +func (i Interval) Length() float64 { + l := i.Hi - i.Lo + if l >= 0 { + return l + } + l += 2 * math.Pi + if l > 0 { + return l + } + return -1 +} + +// Assumes p ∈ (-π,π]. +func (i Interval) fastContains(p float64) bool { + if i.IsInverted() { + return (p >= i.Lo || p <= i.Hi) && !i.IsEmpty() + } + return p >= i.Lo && p <= i.Hi +} + +// Contains returns true iff the interval contains p. +// Assumes p ∈ [-π,π]. +func (i Interval) Contains(p float64) bool { + if p == -math.Pi { + p = math.Pi + } + return i.fastContains(p) +} + +// ContainsInterval returns true iff the interval contains oi. 
+func (i Interval) ContainsInterval(oi Interval) bool { + if i.IsInverted() { + if oi.IsInverted() { + return oi.Lo >= i.Lo && oi.Hi <= i.Hi + } + return (oi.Lo >= i.Lo || oi.Hi <= i.Hi) && !i.IsEmpty() + } + if oi.IsInverted() { + return i.IsFull() || oi.IsEmpty() + } + return oi.Lo >= i.Lo && oi.Hi <= i.Hi +} + +// InteriorContains returns true iff the interior of the interval contains p. +// Assumes p ∈ [-π,π]. +func (i Interval) InteriorContains(p float64) bool { + if p == -math.Pi { + p = math.Pi + } + if i.IsInverted() { + return p > i.Lo || p < i.Hi + } + return (p > i.Lo && p < i.Hi) || i.IsFull() +} + +// InteriorContainsInterval returns true iff the interior of the interval contains oi. +func (i Interval) InteriorContainsInterval(oi Interval) bool { + if i.IsInverted() { + if oi.IsInverted() { + return (oi.Lo > i.Lo && oi.Hi < i.Hi) || oi.IsEmpty() + } + return oi.Lo > i.Lo || oi.Hi < i.Hi + } + if oi.IsInverted() { + return i.IsFull() || oi.IsEmpty() + } + return (oi.Lo > i.Lo && oi.Hi < i.Hi) || i.IsFull() +} + +// Intersects returns true iff the interval contains any points in common with oi. +func (i Interval) Intersects(oi Interval) bool { + if i.IsEmpty() || oi.IsEmpty() { + return false + } + if i.IsInverted() { + return oi.IsInverted() || oi.Lo <= i.Hi || oi.Hi >= i.Lo + } + if oi.IsInverted() { + return oi.Lo <= i.Hi || oi.Hi >= i.Lo + } + return oi.Lo <= i.Hi && oi.Hi >= i.Lo +} + +// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary. +func (i Interval) InteriorIntersects(oi Interval) bool { + if i.IsEmpty() || oi.IsEmpty() || i.Lo == i.Hi { + return false + } + if i.IsInverted() { + return oi.IsInverted() || oi.Lo < i.Hi || oi.Hi > i.Lo + } + if oi.IsInverted() { + return oi.Lo < i.Hi || oi.Hi > i.Lo + } + return (oi.Lo < i.Hi && oi.Hi > i.Lo) || i.IsFull() +} + +// Compute distance from a to b in [0,2π], in a numerically stable way. +func positiveDistance(a, b float64) float64 { + d := b - a + if d >= 0 { + return d + } + return (b + math.Pi) - (a - math.Pi) +} + +// Union returns the smallest interval that contains both the interval and oi. +func (i Interval) Union(oi Interval) Interval { + if oi.IsEmpty() { + return i + } + if i.fastContains(oi.Lo) { + if i.fastContains(oi.Hi) { + // Either oi ⊂ i, or i ∪ oi is the full interval. + if i.ContainsInterval(oi) { + return i + } + return FullInterval() + } + return Interval{i.Lo, oi.Hi} + } + if i.fastContains(oi.Hi) { + return Interval{oi.Lo, i.Hi} + } + + // Neither endpoint of oi is in i. Either i ⊂ oi, or i and oi are disjoint. + if i.IsEmpty() || oi.fastContains(i.Lo) { + return oi + } + + // This is the only hard case where we need to find the closest pair of endpoints. + if positiveDistance(oi.Hi, i.Lo) < positiveDistance(i.Hi, oi.Lo) { + return Interval{oi.Lo, i.Hi} + } + return Interval{i.Lo, oi.Hi} +} + +// Intersection returns the smallest interval that contains the intersection of the interval and oi. +func (i Interval) Intersection(oi Interval) Interval { + if oi.IsEmpty() { + return EmptyInterval() + } + if i.fastContains(oi.Lo) { + if i.fastContains(oi.Hi) { + // Either oi ⊂ i, or i and oi intersect twice. Neither are empty. + // In the first case we want to return i (which is shorter than oi). + // In the second case one of them is inverted, and the smallest interval + // that covers the two disjoint pieces is the shorter of i and oi. + // We thus want to pick the shorter of i and oi in both cases. 
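+ // For example, with i = [-π/2, π/2] and the inverted oi = [π/4, -π/4],
+ // both endpoints of oi lie in i, and the intersection is the two disjoint
+ // arcs [π/4, π/2] and [-π/2, -π/4]. The shortest interval covering both
+ // arcs is the shorter input, here i (length π versus 3π/2 for oi).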
+ if oi.Length() < i.Length() { + return oi + } + return i + } + return Interval{oi.Lo, i.Hi} + } + if i.fastContains(oi.Hi) { + return Interval{i.Lo, oi.Hi} + } + + // Neither endpoint of oi is in i. Either i ⊂ oi, or i and oi are disjoint. + if oi.fastContains(i.Lo) { + return i + } + return EmptyInterval() +} + +// AddPoint returns the interval expanded by the minimum amount necessary such +// that it contains the given point "p" (an angle in the range [-π, π]). +func (i Interval) AddPoint(p float64) Interval { + if math.Abs(p) > math.Pi { + return i + } + if p == -math.Pi { + p = math.Pi + } + if i.fastContains(p) { + return i + } + if i.IsEmpty() { + return Interval{p, p} + } + if positiveDistance(p, i.Lo) < positiveDistance(i.Hi, p) { + return Interval{p, i.Hi} + } + return Interval{i.Lo, p} +} + +// Define the maximum rounding error for arithmetic operations. Depending on the +// platform the mantissa precision may be different than others, so we choose to +// use specific values to be consistent across all. +// The values come from the C++ implementation. +var ( + // epsilon is a small number that represents a reasonable level of noise between two + // values that can be considered to be equal. + epsilon = 1e-15 + // dblEpsilon is a smaller number for values that require more precision. + dblEpsilon = 2.220446049e-16 +) + +// Expanded returns an interval that has been expanded on each side by margin. +// If margin is negative, then the function shrinks the interval on +// each side by margin instead. The resulting interval may be empty or +// full. Any expansion (positive or negative) of a full interval remains +// full, and any expansion of an empty interval remains empty. +func (i Interval) Expanded(margin float64) Interval { + if margin >= 0 { + if i.IsEmpty() { + return i + } + // Check whether this interval will be full after expansion, allowing + // for a rounding error when computing each endpoint. + if i.Length()+2*margin+2*dblEpsilon >= 2*math.Pi { + return FullInterval() + } + } else { + if i.IsFull() { + return i + } + // Check whether this interval will be empty after expansion, allowing + // for a rounding error when computing each endpoint. + if i.Length()+2*margin-2*dblEpsilon <= 0 { + return EmptyInterval() + } + } + result := IntervalFromEndpoints( + math.Remainder(i.Lo-margin, 2*math.Pi), + math.Remainder(i.Hi+margin, 2*math.Pi), + ) + if result.Lo <= -math.Pi { + result.Lo = math.Pi + } + return result +} + +// ApproxEqual reports whether this interval can be transformed into the given +// interval by moving each endpoint by at most ε, without the +// endpoints crossing (which would invert the interval). Empty and full +// intervals are considered to start at an arbitrary point on the unit circle, +// so any interval with (length <= 2*ε) matches the empty interval, and +// any interval with (length >= 2*π - 2*ε) matches the full interval. +func (i Interval) ApproxEqual(other Interval) bool { + // Full and empty intervals require special cases because the endpoints + // are considered to be positioned arbitrarily. + if i.IsEmpty() { + return other.Length() <= 2*epsilon + } + if other.IsEmpty() { + return i.Length() <= 2*epsilon + } + if i.IsFull() { + return other.Length() >= 2*(math.Pi-epsilon) + } + if other.IsFull() { + return i.Length() >= 2*(math.Pi-epsilon) + } + + // The purpose of the last test below is to verify that moving the endpoints + // does not invert the interval, e.g. [-1e20, 1e20] vs. [1e20, -1e20]. 
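+ // For instance, i = [0, 1e-30] and other = [1e-30, 0] have endpoints
+ // within ε of each other, but i is nearly a singleton while other is
+ // nearly the full circle (length ≈ 2π); only the length comparison
+ // below tells them apart.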
+ return (math.Abs(math.Remainder(other.Lo-i.Lo, 2*math.Pi)) <= epsilon && + math.Abs(math.Remainder(other.Hi-i.Hi, 2*math.Pi)) <= epsilon && + math.Abs(i.Length()-other.Length()) <= 2*epsilon) + +} + +func (i Interval) String() string { + // like "[%.7f, %.7f]" + return "[" + strconv.FormatFloat(i.Lo, 'f', 7, 64) + ", " + strconv.FormatFloat(i.Hi, 'f', 7, 64) + "]" +} + +// Complement returns the complement of the interior of the interval. An interval and +// its complement have the same boundary but do not share any interior +// values. The complement operator is not a bijection, since the complement +// of a singleton interval (containing a single value) is the same as the +// complement of an empty interval. +func (i Interval) Complement() Interval { + if i.Lo == i.Hi { + // Singleton. The interval just contains a single point. + return FullInterval() + } + // Handles empty and full. + return Interval{i.Hi, i.Lo} +} + +// ComplementCenter returns the midpoint of the complement of the interval. For full and empty +// intervals, the result is arbitrary. For a singleton interval (containing a +// single point), the result is its antipodal point on S1. +func (i Interval) ComplementCenter() float64 { + if i.Lo != i.Hi { + return i.Complement().Center() + } + // Singleton. The interval just contains a single point. + if i.Hi <= 0 { + return i.Hi + math.Pi + } + return i.Hi - math.Pi +} + +// DirectedHausdorffDistance returns the Hausdorff distance to the given interval. +// For two intervals i and y, this distance is defined by +// h(i, y) = max_{p in i} min_{q in y} d(p, q), +// where d(.,.) is measured along S1. +func (i Interval) DirectedHausdorffDistance(y Interval) Angle { + if y.ContainsInterval(i) { + return 0 // This includes the case i is empty. + } + if y.IsEmpty() { + return Angle(math.Pi) // maximum possible distance on s1. + } + yComplementCenter := y.ComplementCenter() + if i.Contains(yComplementCenter) { + return Angle(positiveDistance(y.Hi, yComplementCenter)) + } + + // The Hausdorff distance is realized by either two i.Hi endpoints or two + // i.Lo endpoints, whichever is farther apart. + hiHi := 0.0 + if IntervalFromEndpoints(y.Hi, yComplementCenter).Contains(i.Hi) { + hiHi = positiveDistance(y.Hi, i.Hi) + } + + loLo := 0.0 + if IntervalFromEndpoints(yComplementCenter, y.Lo).Contains(i.Lo) { + loLo = positiveDistance(i.Lo, y.Lo) + } + + return Angle(math.Max(hiHi, loLo)) +} + +// Project returns the closest point in the interval to the given point p. +// The interval must be non-empty. +func (i Interval) Project(p float64) float64 { + if p == -math.Pi { + p = math.Pi + } + if i.fastContains(p) { + return p + } + // Compute distance from p to each endpoint. + dlo := positiveDistance(p, i.Lo) + dhi := positiveDistance(i.Hi, p) + if dlo < dhi { + return i.Lo + } + return i.Hi +} diff --git a/vendor/github.com/golang/geo/s2/bits_go18.go b/vendor/github.com/golang/geo/s2/bits_go18.go new file mode 100644 index 000000000..10a674da5 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/bits_go18.go @@ -0,0 +1,53 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.9
+
+package s2
+
+// This file is for the bit manipulation code pre-Go 1.9.
+
+// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
+// significant set bit. Passing zero to this function returns zero.
+func findMSBSetNonZero64(x uint64) int {
+ val := []uint64{0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000, 0xFFFFFFFF00000000}
+ shift := []uint64{1, 2, 4, 8, 16, 32}
+ var msbPos uint64
+ for i := 5; i >= 0; i-- {
+ if x&val[i] != 0 {
+ x >>= shift[i]
+ msbPos |= shift[i]
+ }
+ }
+ return int(msbPos)
+}
+
+const deBruijn64 = 0x03f79d71b4ca8b09
+const digitMask = uint64(1<<64 - 1)
+
+var deBruijn64Lookup = []byte{
+ 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+ 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+ 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+ 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
+// significant set bit. Passing zero to this function returns zero.
+//
+// This code comes from trailingZeroBits in https://golang.org/src/math/big/nat.go
+// which references (Knuth, volume 4, section 7.3.1).
+func findLSBSetNonZero64(x uint64) int {
+ return int(deBruijn64Lookup[((x&-x)*(deBruijn64&digitMask))>>58])
+}
diff --git a/vendor/github.com/golang/geo/s2/bits_go19.go b/vendor/github.com/golang/geo/s2/bits_go19.go
new file mode 100644
index 000000000..9532b377d
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/bits_go19.go
@@ -0,0 +1,39 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.9
+
+package s2
+
+// This file is for the bit manipulation code post-Go 1.9.
+
+import "math/bits"
+
+// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
+// significant set bit. Passing zero to this function returns zero.
+func findMSBSetNonZero64(x uint64) int {
+ if x == 0 {
+ return 0
+ }
+ return 63 - bits.LeadingZeros64(x)
+}
+
+// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
+// significant set bit. Passing zero to this function returns zero.
+func findLSBSetNonZero64(x uint64) int {
+ if x == 0 {
+ return 0
+ }
+ return bits.TrailingZeros64(x)
+}
diff --git a/vendor/github.com/golang/geo/s2/cap.go b/vendor/github.com/golang/geo/s2/cap.go
new file mode 100644
index 000000000..c4fb2e1e0
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/cap.go
@@ -0,0 +1,519 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "fmt" + "io" + "math" + + "github.com/golang/geo/r1" + "github.com/golang/geo/s1" +) + +var ( + // centerPoint is the default center for Caps + centerPoint = PointFromCoords(1.0, 0, 0) +) + +// Cap represents a disc-shaped region defined by a center and radius. +// Technically this shape is called a "spherical cap" (rather than disc) +// because it is not planar; the cap represents a portion of the sphere that +// has been cut off by a plane. The boundary of the cap is the circle defined +// by the intersection of the sphere and the plane. For containment purposes, +// the cap is a closed set, i.e. it contains its boundary. +// +// For the most part, you can use a spherical cap wherever you would use a +// disc in planar geometry. The radius of the cap is measured along the +// surface of the sphere (rather than the straight-line distance through the +// interior). Thus a cap of radius π/2 is a hemisphere, and a cap of radius +// π covers the entire sphere. +// +// The center is a point on the surface of the unit sphere. (Hence the need for +// it to be of unit length.) +// +// A cap can also be defined by its center point and height. The height is the +// distance from the center point to the cutoff plane. There is also support for +// "empty" and "full" caps, which contain no points and all points respectively. +// +// Here are some useful relationships between the cap height (h), the cap +// radius (r), the maximum chord length from the cap's center (d), and the +// radius of cap's base (a). +// +// h = 1 - cos(r) +// = 2 * sin^2(r/2) +// d^2 = 2 * h +// = a^2 + h^2 +// +// The zero value of Cap is an invalid cap. Use EmptyCap to get a valid empty cap. +type Cap struct { + center Point + radius s1.ChordAngle +} + +// CapFromPoint constructs a cap containing a single point. +func CapFromPoint(p Point) Cap { + return CapFromCenterChordAngle(p, 0) +} + +// CapFromCenterAngle constructs a cap with the given center and angle. +func CapFromCenterAngle(center Point, angle s1.Angle) Cap { + return CapFromCenterChordAngle(center, s1.ChordAngleFromAngle(angle)) +} + +// CapFromCenterChordAngle constructs a cap where the angle is expressed as an +// s1.ChordAngle. This constructor is more efficient than using an s1.Angle. +func CapFromCenterChordAngle(center Point, radius s1.ChordAngle) Cap { + return Cap{ + center: center, + radius: radius, + } +} + +// CapFromCenterHeight constructs a cap with the given center and height. A +// negative height yields an empty cap; a height of 2 or more yields a full cap. +// The center should be unit length. +func CapFromCenterHeight(center Point, height float64) Cap { + return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(2*height)) +} + +// CapFromCenterArea constructs a cap with the given center and surface area. +// Note that the area can also be interpreted as the solid angle subtended by the +// cap (because the sphere has unit radius). 
A negative area yields an empty cap;
+// an area of 4*π or more yields a full cap.
+func CapFromCenterArea(center Point, area float64) Cap {
+ return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(area/math.Pi))
+}
+
+// EmptyCap returns a cap that contains no points.
+func EmptyCap() Cap {
+ return CapFromCenterChordAngle(centerPoint, s1.NegativeChordAngle)
+}
+
+// FullCap returns a cap that contains all points.
+func FullCap() Cap {
+ return CapFromCenterChordAngle(centerPoint, s1.StraightChordAngle)
+}
+
+// IsValid reports whether the Cap is considered valid.
+func (c Cap) IsValid() bool {
+ return c.center.Vector.IsUnit() && c.radius <= s1.StraightChordAngle
+}
+
+// IsEmpty reports whether the cap is empty, i.e. it contains no points.
+func (c Cap) IsEmpty() bool {
+ return c.radius < 0
+}
+
+// IsFull reports whether the cap is full, i.e. it contains all points.
+func (c Cap) IsFull() bool {
+ return c.radius == s1.StraightChordAngle
+}
+
+// Center returns the cap's center point.
+func (c Cap) Center() Point {
+ return c.center
+}
+
+// Height returns the height of the cap. This is the distance from the center
+// point to the cutoff plane.
+func (c Cap) Height() float64 {
+ return float64(0.5 * c.radius)
+}
+
+// Radius returns the cap radius as an s1.Angle. (Note that the cap angle
+// is stored internally as a ChordAngle, so this method requires a trigonometric
+// operation and may yield a slightly different result than the value passed
+// to CapFromCenterAngle).
+func (c Cap) Radius() s1.Angle {
+ return c.radius.Angle()
+}
+
+// Area returns the surface area of the Cap on the unit sphere.
+func (c Cap) Area() float64 {
+ return 2.0 * math.Pi * math.Max(0, c.Height())
+}
+
+// Contains reports whether this cap contains the other.
+func (c Cap) Contains(other Cap) bool {
+ // In a set containment sense, every cap contains the empty cap.
+ if c.IsFull() || other.IsEmpty() {
+ return true
+ }
+ return c.radius >= ChordAngleBetweenPoints(c.center, other.center).Add(other.radius)
+}
+
+// Intersects reports whether this cap intersects the other cap.
+// i.e. whether they have any points in common.
+func (c Cap) Intersects(other Cap) bool {
+ if c.IsEmpty() || other.IsEmpty() {
+ return false
+ }
+
+ return c.radius.Add(other.radius) >= ChordAngleBetweenPoints(c.center, other.center)
+}
+
+// InteriorIntersects reports whether this cap's interior intersects the other cap.
+func (c Cap) InteriorIntersects(other Cap) bool {
+ // Make sure this cap has an interior and the other cap is non-empty.
+ if c.radius <= 0 || other.IsEmpty() {
+ return false
+ }
+
+ return c.radius.Add(other.radius) > ChordAngleBetweenPoints(c.center, other.center)
+}
+
+// ContainsPoint reports whether this cap contains the point.
+func (c Cap) ContainsPoint(p Point) bool {
+ return ChordAngleBetweenPoints(c.center, p) <= c.radius
+}
+
+// InteriorContainsPoint reports whether the point is within the interior of this cap.
+func (c Cap) InteriorContainsPoint(p Point) bool {
+ return c.IsFull() || ChordAngleBetweenPoints(c.center, p) < c.radius
+}
+
+// Complement returns the complement of the interior of the cap. A cap and its
+// complement have the same boundary but do not share any interior points.
+// The complement operator is not a bijection because the complement of a
+// singleton cap (containing a single point) is the same as the complement
+// of an empty cap.
+func (c Cap) Complement() Cap { + if c.IsFull() { + return EmptyCap() + } + if c.IsEmpty() { + return FullCap() + } + + return CapFromCenterChordAngle(Point{c.center.Mul(-1)}, s1.StraightChordAngle.Sub(c.radius)) +} + +// CapBound returns a bounding spherical cap. This is not guaranteed to be exact. +func (c Cap) CapBound() Cap { + return c +} + +// RectBound returns a bounding latitude-longitude rectangle. +// The bounds are not guaranteed to be tight. +func (c Cap) RectBound() Rect { + if c.IsEmpty() { + return EmptyRect() + } + + capAngle := c.Radius().Radians() + allLongitudes := false + lat := r1.Interval{ + Lo: latitude(c.center).Radians() - capAngle, + Hi: latitude(c.center).Radians() + capAngle, + } + lng := s1.FullInterval() + + // Check whether cap includes the south pole. + if lat.Lo <= -math.Pi/2 { + lat.Lo = -math.Pi / 2 + allLongitudes = true + } + + // Check whether cap includes the north pole. + if lat.Hi >= math.Pi/2 { + lat.Hi = math.Pi / 2 + allLongitudes = true + } + + if !allLongitudes { + // Compute the range of longitudes covered by the cap. We use the law + // of sines for spherical triangles. Consider the triangle ABC where + // A is the north pole, B is the center of the cap, and C is the point + // of tangency between the cap boundary and a line of longitude. Then + // C is a right angle, and letting a,b,c denote the sides opposite A,B,C, + // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c). + // Here "a" is the cap angle, and "c" is the colatitude (90 degrees + // minus the latitude). This formula also works for negative latitudes. + // + // The formula for sin(a) follows from the relationship h = 1 - cos(a). + sinA := c.radius.Sin() + sinC := math.Cos(latitude(c.center).Radians()) + if sinA <= sinC { + angleA := math.Asin(sinA / sinC) + lng.Lo = math.Remainder(longitude(c.center).Radians()-angleA, math.Pi*2) + lng.Hi = math.Remainder(longitude(c.center).Radians()+angleA, math.Pi*2) + } + } + return Rect{lat, lng} +} + +// Equal reports whether this cap is equal to the other cap. +func (c Cap) Equal(other Cap) bool { + return (c.radius == other.radius && c.center == other.center) || + (c.IsEmpty() && other.IsEmpty()) || + (c.IsFull() && other.IsFull()) +} + +// ApproxEqual reports whether this cap is equal to the other cap within the given tolerance. +func (c Cap) ApproxEqual(other Cap) bool { + const epsilon = 1e-14 + r2 := float64(c.radius) + otherR2 := float64(other.radius) + return c.center.ApproxEqual(other.center) && + math.Abs(r2-otherR2) <= epsilon || + c.IsEmpty() && otherR2 <= epsilon || + other.IsEmpty() && r2 <= epsilon || + c.IsFull() && otherR2 >= 2-epsilon || + other.IsFull() && r2 >= 2-epsilon +} + +// AddPoint increases the cap if necessary to include the given point. If this cap is empty, +// then the center is set to the point with a zero height. p must be unit-length. +func (c Cap) AddPoint(p Point) Cap { + if c.IsEmpty() { + c.center = p + c.radius = 0 + return c + } + + // After calling cap.AddPoint(p), cap.Contains(p) must be true. However + // we don't need to do anything special to achieve this because Contains() + // does exactly the same distance calculation that we do here. + if newRad := ChordAngleBetweenPoints(c.center, p); newRad > c.radius { + c.radius = newRad + } + return c +} + +// AddCap increases the cap height if necessary to include the other cap. If this cap is empty, +// it is set to the other cap. 
+func (c Cap) AddCap(other Cap) Cap {
+ if c.IsEmpty() {
+ return other
+ }
+ if other.IsEmpty() {
+ return c
+ }
+
+ // We round up the distance to ensure that the cap is actually contained.
+ // TODO(roberts): Do some error analysis in order to guarantee this.
+ dist := ChordAngleBetweenPoints(c.center, other.center).Add(other.radius)
+ if newRad := dist.Expanded(dblEpsilon * float64(dist)); newRad > c.radius {
+ c.radius = newRad
+ }
+ return c
+}
+
+// Expanded returns a new cap expanded by the given angle. If the cap is empty,
+// it returns an empty cap.
+func (c Cap) Expanded(distance s1.Angle) Cap {
+ if c.IsEmpty() {
+ return EmptyCap()
+ }
+ return CapFromCenterChordAngle(c.center, c.radius.Add(s1.ChordAngleFromAngle(distance)))
+}
+
+func (c Cap) String() string {
+ return fmt.Sprintf("[Center=%v, Radius=%f]", c.center.Vector, c.Radius().Degrees())
+}
+
+// radiusToHeight converts an s1.Angle into the height of the cap.
+func radiusToHeight(r s1.Angle) float64 {
+ if r.Radians() < 0 {
+ return float64(s1.NegativeChordAngle)
+ }
+ if r.Radians() >= math.Pi {
+ return float64(s1.RightChordAngle)
+ }
+ return float64(0.5 * s1.ChordAngleFromAngle(r))
+
+}
+
+// ContainsCell reports whether the cap contains the given cell.
+func (c Cap) ContainsCell(cell Cell) bool {
+ // If the cap does not contain all cell vertices, return false.
+ var vertices [4]Point
+ for k := 0; k < 4; k++ {
+ vertices[k] = cell.Vertex(k)
+ if !c.ContainsPoint(vertices[k]) {
+ return false
+ }
+ }
+ // Otherwise, return true if the complement of the cap does not intersect the cell.
+ return !c.Complement().intersects(cell, vertices)
+}
+
+// IntersectsCell reports whether the cap intersects the cell.
+func (c Cap) IntersectsCell(cell Cell) bool {
+ // If the cap contains any cell vertex, return true.
+ var vertices [4]Point
+ for k := 0; k < 4; k++ {
+ vertices[k] = cell.Vertex(k)
+ if c.ContainsPoint(vertices[k]) {
+ return true
+ }
+ }
+ return c.intersects(cell, vertices)
+}
+
+// intersects reports whether the cap intersects any point of the cell excluding
+// its vertices (which are assumed to already have been checked).
+func (c Cap) intersects(cell Cell, vertices [4]Point) bool {
+ // If the cap is a hemisphere or larger, the cell and the complement of the cap
+ // are both convex. Therefore since no vertex of the cell is contained, no other
+ // interior point of the cell is contained either.
+ if c.radius >= s1.RightChordAngle {
+ return false
+ }
+
+ // We need to check for empty caps due to the center check just below.
+ if c.IsEmpty() {
+ return false
+ }
+
+ // Optimization: return true if the cell contains the cap center. This allows half
+ // of the edge checks below to be skipped.
+ if cell.ContainsPoint(c.center) {
+ return true
+ }
+
+ // At this point we know that the cell does not contain the cap center, and the cap
+ // does not contain any cell vertex. The only way that they can intersect is if the
+ // cap intersects the interior of some edge.
+ sin2Angle := c.radius.Sin2()
+ for k := 0; k < 4; k++ {
+ edge := cell.Edge(k).Vector
+ dot := c.center.Vector.Dot(edge)
+ if dot > 0 {
+ // The center is in the interior half-space defined by the edge. We do not need
+ // to consider these edges, since if the cap intersects this edge then it also
+ // intersects the edge on the opposite side of the cell, because the center is
+ // not contained within the cell.
+ continue
+ }
+
+ // The Norm2() factor is necessary because "edge" is not normalized.
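+ // To see why the following test is correct: with a unit-length center C,
+ // the angular distance d from C to the great circle through this edge
+ // satisfies sin(d) = |C·edge|/|edge|. Since dot <= 0 here, C lies on the
+ // exterior side, and dot² > sin²(radius)·|edge|² means sin(d) > sin(radius),
+ // so the entire cap stays on the exterior side of this edge and cannot
+ // meet the cell.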
+ if dot*dot > sin2Angle*edge.Norm2() { + return false + } + + // Otherwise, the great circle containing this edge intersects the interior of the cap. We just + // need to check whether the point of closest approach occurs between the two edge endpoints. + dir := edge.Cross(c.center.Vector) + if dir.Dot(vertices[k].Vector) < 0 && dir.Dot(vertices[(k+1)&3].Vector) > 0 { + return true + } + } + return false +} + +// CellUnionBound computes a covering of the Cap. In general the covering +// consists of at most 4 cells except for very large caps, which may need +// up to 6 cells. The output is not sorted. +func (c Cap) CellUnionBound() []CellID { + // TODO(roberts): The covering could be made quite a bit tighter by mapping + // the cap to a rectangle in (i,j)-space and finding a covering for that. + + // Find the maximum level such that the cap contains at most one cell vertex + // and such that CellID.AppendVertexNeighbors() can be called. + level := MinWidthMetric.MaxLevel(c.Radius().Radians()) - 1 + + // If level < 0, more than three face cells are required. + if level < 0 { + cellIDs := make([]CellID, 6) + for face := 0; face < 6; face++ { + cellIDs[face] = CellIDFromFace(face) + } + return cellIDs + } + // The covering consists of the 4 cells at the given level that share the + // cell vertex that is closest to the cap center. + return cellIDFromPoint(c.center).VertexNeighbors(level) +} + +// Centroid returns the true centroid of the cap multiplied by its surface area +// The result lies on the ray from the origin through the cap's center, but it +// is not unit length. Note that if you just want the "surface centroid", i.e. +// the normalized result, then it is simpler to call Center. +// +// The reason for multiplying the result by the cap area is to make it +// easier to compute the centroid of more complicated shapes. The centroid +// of a union of disjoint regions can be computed simply by adding their +// Centroid() results. Caveat: for caps that contain a single point +// (i.e., zero radius), this method always returns the origin (0, 0, 0). +// This is because shapes with no area don't affect the centroid of a +// union whose total area is positive. +func (c Cap) Centroid() Point { + // From symmetry, the centroid of the cap must be somewhere on the line + // from the origin to the center of the cap on the surface of the sphere. + // When a sphere is divided into slices of constant thickness by a set of + // parallel planes, all slices have the same surface area. This implies + // that the radial component of the centroid is simply the midpoint of the + // range of radial distances spanned by the cap. That is easily computed + // from the cap height. + if c.IsEmpty() { + return Point{} + } + r := 1 - 0.5*c.Height() + return Point{c.center.Mul(r * c.Area())} +} + +// Union returns the smallest cap which encloses this cap and other. +func (c Cap) Union(other Cap) Cap { + // If the other cap is larger, swap c and other for the rest of the computations. + if c.radius < other.radius { + c, other = other, c + } + + if c.IsFull() || other.IsEmpty() { + return c + } + + // TODO: This calculation would be more efficient using s1.ChordAngles. 
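+ // Geometrically, the union cap must reach the far extreme of each cap
+ // along the geodesic through the two centers: one extreme lies cRadius
+ // behind c.center, the other distance+otherRadius beyond it. The result
+ // therefore spans distance+cRadius+otherRadius in total, with its center
+ // at the midpoint of that span, 0.5*(distance-cRadius+otherRadius) from
+ // c.center towards other.center, as computed below.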
+ cRadius := c.Radius() + otherRadius := other.Radius() + distance := c.center.Distance(other.center) + if cRadius >= distance+otherRadius { + return c + } + + resRadius := 0.5 * (distance + cRadius + otherRadius) + resCenter := InterpolateAtDistance(0.5*(distance-cRadius+otherRadius), c.center, other.center) + return CapFromCenterAngle(resCenter, resRadius) +} + +// Encode encodes the Cap. +func (c Cap) Encode(w io.Writer) error { + e := &encoder{w: w} + c.encode(e) + return e.err +} + +func (c Cap) encode(e *encoder) { + e.writeFloat64(c.center.X) + e.writeFloat64(c.center.Y) + e.writeFloat64(c.center.Z) + e.writeFloat64(float64(c.radius)) +} + +// Decode decodes the Cap. +func (c *Cap) Decode(r io.Reader) error { + d := &decoder{r: asByteReader(r)} + c.decode(d) + return d.err +} + +func (c *Cap) decode(d *decoder) { + c.center.X = d.readFloat64() + c.center.Y = d.readFloat64() + c.center.Z = d.readFloat64() + c.radius = s1.ChordAngle(d.readFloat64()) +} diff --git a/vendor/github.com/golang/geo/s2/cell.go b/vendor/github.com/golang/geo/s2/cell.go new file mode 100644 index 000000000..0a01a4f1f --- /dev/null +++ b/vendor/github.com/golang/geo/s2/cell.go @@ -0,0 +1,698 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "io" + "math" + + "github.com/golang/geo/r1" + "github.com/golang/geo/r2" + "github.com/golang/geo/r3" + "github.com/golang/geo/s1" +) + +// Cell is an S2 region object that represents a cell. Unlike CellIDs, +// it supports efficient containment and intersection tests. However, it is +// also a more expensive representation. +type Cell struct { + face int8 + level int8 + orientation int8 + id CellID + uv r2.Rect +} + +// CellFromCellID constructs a Cell corresponding to the given CellID. +func CellFromCellID(id CellID) Cell { + c := Cell{} + c.id = id + f, i, j, o := c.id.faceIJOrientation() + c.face = int8(f) + c.level = int8(c.id.Level()) + c.orientation = int8(o) + c.uv = ijLevelToBoundUV(i, j, int(c.level)) + return c +} + +// CellFromPoint constructs a cell for the given Point. +func CellFromPoint(p Point) Cell { + return CellFromCellID(cellIDFromPoint(p)) +} + +// CellFromLatLng constructs a cell for the given LatLng. +func CellFromLatLng(ll LatLng) Cell { + return CellFromCellID(CellIDFromLatLng(ll)) +} + +// Face returns the face this cell is on. +func (c Cell) Face() int { + return int(c.face) +} + +// oppositeFace returns the face opposite the given face. +func oppositeFace(face int) int { + return (face + 3) % 6 +} + +// Level returns the level of this cell. +func (c Cell) Level() int { + return int(c.level) +} + +// ID returns the CellID this cell represents. +func (c Cell) ID() CellID { + return c.id +} + +// IsLeaf returns whether this Cell is a leaf or not. +func (c Cell) IsLeaf() bool { + return c.level == maxLevel +} + +// SizeIJ returns the edge length of this cell in (i,j)-space. 
+func (c Cell) SizeIJ() int {
+ return sizeIJ(int(c.level))
+}
+
+// SizeST returns the edge length of this cell in (s,t)-space.
+func (c Cell) SizeST() float64 {
+ return c.id.sizeST(int(c.level))
+}
+
+// Vertex returns the k-th vertex of the cell (k = 0,1,2,3) in CCW order
+// (lower left, lower right, upper right, upper left in the UV plane).
+func (c Cell) Vertex(k int) Point {
+ return Point{faceUVToXYZ(int(c.face), c.uv.Vertices()[k].X, c.uv.Vertices()[k].Y).Normalize()}
+}
+
+// Edge returns the inward-facing normal of the great circle passing through
+// the CCW ordered edge from vertex k to vertex k+1 (mod 4) (for k = 0,1,2,3).
+func (c Cell) Edge(k int) Point {
+ switch k {
+ case 0:
+ return Point{vNorm(int(c.face), c.uv.Y.Lo).Normalize()} // Bottom
+ case 1:
+ return Point{uNorm(int(c.face), c.uv.X.Hi).Normalize()} // Right
+ case 2:
+ return Point{vNorm(int(c.face), c.uv.Y.Hi).Mul(-1.0).Normalize()} // Top
+ default:
+ return Point{uNorm(int(c.face), c.uv.X.Lo).Mul(-1.0).Normalize()} // Left
+ }
+}
+
+// BoundUV returns the bounds of this cell in (u,v)-space.
+func (c Cell) BoundUV() r2.Rect {
+ return c.uv
+}
+
+// Center returns the direction vector corresponding to the center in
+// (s,t)-space of the given cell. This is the point at which the cell is
+// divided into four subcells; it is not necessarily the centroid of the
+// cell in (u,v)-space or (x,y,z)-space.
+func (c Cell) Center() Point {
+ return Point{c.id.rawPoint().Normalize()}
+}
+
+// Children returns the four direct children of this cell in traversal order
+// and returns true. If this is a leaf cell, or the children could not be created,
+// false is returned.
+// The C++ method is called Subdivide.
+func (c Cell) Children() ([4]Cell, bool) {
+ var children [4]Cell
+
+ if c.id.IsLeaf() {
+ return children, false
+ }
+
+ // Compute the cell midpoint in uv-space.
+ uvMid := c.id.centerUV()
+
+ // Create four children with the appropriate bounds.
+ cid := c.id.ChildBegin()
+ for pos := 0; pos < 4; pos++ {
+ children[pos] = Cell{
+ face: c.face,
+ level: c.level + 1,
+ orientation: c.orientation ^ int8(posToOrientation[pos]),
+ id: cid,
+ }
+
+ // We want to split the cell in half in u and v. To decide which
+ // side to set equal to the midpoint value, we look at cell's (i,j)
+ // position within its parent. The index for i is in bit 1 of ij.
+ ij := posToIJ[c.orientation][pos]
+ i := ij >> 1
+ j := ij & 1
+ if i == 1 {
+ children[pos].uv.X.Hi = c.uv.X.Hi
+ children[pos].uv.X.Lo = uvMid.X
+ } else {
+ children[pos].uv.X.Lo = c.uv.X.Lo
+ children[pos].uv.X.Hi = uvMid.X
+ }
+ if j == 1 {
+ children[pos].uv.Y.Hi = c.uv.Y.Hi
+ children[pos].uv.Y.Lo = uvMid.Y
+ } else {
+ children[pos].uv.Y.Lo = c.uv.Y.Lo
+ children[pos].uv.Y.Hi = uvMid.Y
+ }
+ cid = cid.Next()
+ }
+ return children, true
+}
+
+// ExactArea returns the area of this cell as accurately as possible.
+func (c Cell) ExactArea() float64 {
+ v0, v1, v2, v3 := c.Vertex(0), c.Vertex(1), c.Vertex(2), c.Vertex(3)
+ return PointArea(v0, v1, v2) + PointArea(v0, v2, v3)
+}
+
+// ApproxArea returns the approximate area of this cell. This method is accurate
+// to within 3% for all cell sizes and accurate to within 0.1% for cells
+// at level 5 or higher (i.e. squares 350km to a side or smaller on the Earth's
+// surface). It is moderately cheap to compute.
+func (c Cell) ApproxArea() float64 {
+ // All cells at the first two levels have the same area.
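+ // (All six faces are congruent, and the four level-1 children of a face
+ // are congruent by the symmetry of the face projection, so every level-0
+ // cell has area 4π/6 and every level-1 cell has area 4π/24, making
+ // AverageArea exact for levels 0 and 1.)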
+ if c.level < 2 { + return c.AverageArea() + } + + // First, compute the approximate area of the cell when projected + // perpendicular to its normal. The cross product of its diagonals gives + // the normal, and the length of the normal is twice the projected area. + flatArea := 0.5 * (c.Vertex(2).Sub(c.Vertex(0).Vector). + Cross(c.Vertex(3).Sub(c.Vertex(1).Vector)).Norm()) + + // Now, compensate for the curvature of the cell surface by pretending + // that the cell is shaped like a spherical cap. The ratio of the + // area of a spherical cap to the area of its projected disc turns out + // to be 2 / (1 + sqrt(1 - r*r)) where r is the radius of the disc. + // For example, when r=0 the ratio is 1, and when r=1 the ratio is 2. + // Here we set Pi*r*r == flatArea to find the equivalent disc. + return flatArea * 2 / (1 + math.Sqrt(1-math.Min(1/math.Pi*flatArea, 1))) +} + +// AverageArea returns the average area of cells at the level of this cell. +// This is accurate to within a factor of 1.7. +func (c Cell) AverageArea() float64 { + return AvgAreaMetric.Value(int(c.level)) +} + +// IntersectsCell reports whether the intersection of this cell and the other cell is not nil. +func (c Cell) IntersectsCell(oc Cell) bool { + return c.id.Intersects(oc.id) +} + +// ContainsCell reports whether this cell contains the other cell. +func (c Cell) ContainsCell(oc Cell) bool { + return c.id.Contains(oc.id) +} + +// CellUnionBound computes a covering of the Cell. +func (c Cell) CellUnionBound() []CellID { + return c.CapBound().CellUnionBound() +} + +// latitude returns the latitude of the cell vertex in radians given by (i,j), +// where i and j indicate the Hi (1) or Lo (0) corner. +func (c Cell) latitude(i, j int) float64 { + var u, v float64 + switch { + case i == 0 && j == 0: + u = c.uv.X.Lo + v = c.uv.Y.Lo + case i == 0 && j == 1: + u = c.uv.X.Lo + v = c.uv.Y.Hi + case i == 1 && j == 0: + u = c.uv.X.Hi + v = c.uv.Y.Lo + case i == 1 && j == 1: + u = c.uv.X.Hi + v = c.uv.Y.Hi + default: + panic("i and/or j is out of bounds") + } + return latitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians() +} + +// longitude returns the longitude of the cell vertex in radians given by (i,j), +// where i and j indicate the Hi (1) or Lo (0) corner. +func (c Cell) longitude(i, j int) float64 { + var u, v float64 + switch { + case i == 0 && j == 0: + u = c.uv.X.Lo + v = c.uv.Y.Lo + case i == 0 && j == 1: + u = c.uv.X.Lo + v = c.uv.Y.Hi + case i == 1 && j == 0: + u = c.uv.X.Hi + v = c.uv.Y.Lo + case i == 1 && j == 1: + u = c.uv.X.Hi + v = c.uv.Y.Hi + default: + panic("i and/or j is out of bounds") + } + return longitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians() +} + +var ( + poleMinLat = math.Asin(math.Sqrt(1.0/3)) - 0.5*dblEpsilon +) + +// RectBound returns the bounding rectangle of this cell. +func (c Cell) RectBound() Rect { + if c.level > 0 { + // Except for cells at level 0, the latitude and longitude extremes are + // attained at the vertices. Furthermore, the latitude range is + // determined by one pair of diagonally opposite vertices and the + // longitude range is determined by the other pair. + // + // We first determine which corner (i,j) of the cell has the largest + // absolute latitude. To maximize latitude, we want to find the point in + // the cell that has the largest absolute z-coordinate and the smallest + // absolute x- and y-coordinates. 
To do this we look at each coordinate + // (u and v), and determine whether we want to minimize or maximize that + // coordinate based on the axis direction and the cell's (u,v) quadrant. + u := c.uv.X.Lo + c.uv.X.Hi + v := c.uv.Y.Lo + c.uv.Y.Hi + var i, j int + if uAxis(int(c.face)).Z == 0 { + if u < 0 { + i = 1 + } + } else if u > 0 { + i = 1 + } + if vAxis(int(c.face)).Z == 0 { + if v < 0 { + j = 1 + } + } else if v > 0 { + j = 1 + } + lat := r1.IntervalFromPoint(c.latitude(i, j)).AddPoint(c.latitude(1-i, 1-j)) + lng := s1.EmptyInterval().AddPoint(c.longitude(i, 1-j)).AddPoint(c.longitude(1-i, j)) + + // We grow the bounds slightly to make sure that the bounding rectangle + // contains LatLngFromPoint(P) for any point P inside the loop L defined by the + // four *normalized* vertices. Note that normalization of a vector can + // change its direction by up to 0.5 * dblEpsilon radians, and it is not + // enough just to add Normalize calls to the code above because the + // latitude/longitude ranges are not necessarily determined by diagonally + // opposite vertex pairs after normalization. + // + // We would like to bound the amount by which the latitude/longitude of a + // contained point P can exceed the bounds computed above. In the case of + // longitude, the normalization error can change the direction of rounding + // leading to a maximum difference in longitude of 2 * dblEpsilon. In + // the case of latitude, the normalization error can shift the latitude by + // up to 0.5 * dblEpsilon and the other sources of error can cause the + // two latitudes to differ by up to another 1.5 * dblEpsilon, which also + // leads to a maximum difference of 2 * dblEpsilon. + return Rect{lat, lng}.expanded(LatLng{s1.Angle(2 * dblEpsilon), s1.Angle(2 * dblEpsilon)}).PolarClosure() + } + + // The 4 cells around the equator extend to +/-45 degrees latitude at the + // midpoints of their top and bottom edges. The two cells covering the + // poles extend down to +/-35.26 degrees at their vertices. The maximum + // error in this calculation is 0.5 * dblEpsilon. + var bound Rect + switch c.face { + case 0: + bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-math.Pi / 4, math.Pi / 4}} + case 1: + bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{math.Pi / 4, 3 * math.Pi / 4}} + case 2: + bound = Rect{r1.Interval{poleMinLat, math.Pi / 2}, s1.FullInterval()} + case 3: + bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{3 * math.Pi / 4, -3 * math.Pi / 4}} + case 4: + bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-3 * math.Pi / 4, -math.Pi / 4}} + default: + bound = Rect{r1.Interval{-math.Pi / 2, -poleMinLat}, s1.FullInterval()} + } + + // Finally, we expand the bound to account for the error when a point P is + // converted to an LatLng to test for containment. (The bound should be + // large enough so that it contains the computed LatLng of any contained + // point, not just the infinite-precision version.) We don't need to expand + // longitude because longitude is calculated via a single call to math.Atan2, + // which is guaranteed to be semi-monotonic. + return bound.expanded(LatLng{s1.Angle(dblEpsilon), s1.Angle(0)}) +} + +// CapBound returns the bounding cap of this cell. +func (c Cell) CapBound() Cap { + // We use the cell center in (u,v)-space as the cap axis. This vector is very close + // to GetCenter() and faster to compute. 
Neither one of these vectors yields the
+ // bounding cap with minimal surface area, but they are both pretty close.
+ cap := CapFromPoint(Point{faceUVToXYZ(int(c.face), c.uv.Center().X, c.uv.Center().Y).Normalize()})
+ for k := 0; k < 4; k++ {
+ cap = cap.AddPoint(c.Vertex(k))
+ }
+ return cap
+}
+
+// ContainsPoint reports whether this cell contains the given point. Note that
+// unlike Loop/Polygon, a Cell is considered to be a closed set. This means
+// that a point on a Cell's edge or vertex belongs to the Cell and the relevant
+// adjacent Cells too.
+//
+// If you want every point to be contained by exactly one Cell,
+// you will need to convert the Cell to a Loop.
+func (c Cell) ContainsPoint(p Point) bool {
+ var uv r2.Point
+ var ok bool
+ if uv.X, uv.Y, ok = faceXYZToUV(int(c.face), p); !ok {
+ return false
+ }
+
+ // Expand the (u,v) bound to ensure that
+ //
+ // CellFromPoint(p).ContainsPoint(p)
+ //
+ // is always true. To do this, we need to account for the error when
+ // converting from (u,v) coordinates to (s,t) coordinates. In the
+ // normal case the total error is at most dblEpsilon.
+ return c.uv.ExpandedByMargin(dblEpsilon).ContainsPoint(uv)
+}
+
+// Encode encodes the Cell.
+func (c Cell) Encode(w io.Writer) error {
+ e := &encoder{w: w}
+ c.encode(e)
+ return e.err
+}
+
+func (c Cell) encode(e *encoder) {
+ c.id.encode(e)
+}
+
+// Decode decodes the Cell.
+func (c *Cell) Decode(r io.Reader) error {
+ d := &decoder{r: asByteReader(r)}
+ c.decode(d)
+ return d.err
+}
+
+func (c *Cell) decode(d *decoder) {
+ c.id.decode(d)
+ *c = CellFromCellID(c.id)
+}
+
+// vertexChordDist2 returns the squared chord distance from point P to the
+// given corner vertex specified by the Hi or Lo values of each.
+func (c Cell) vertexChordDist2(p Point, xHi, yHi bool) s1.ChordAngle {
+ x := c.uv.X.Lo
+ y := c.uv.Y.Lo
+ if xHi {
+ x = c.uv.X.Hi
+ }
+ if yHi {
+ y = c.uv.Y.Hi
+ }
+
+ return ChordAngleBetweenPoints(p, PointFromCoords(x, y, 1))
+}
+
+// uEdgeIsClosest reports whether a point P is closer to the interior of the specified
+// Cell edge (either the lower or upper edge of the Cell) or to the endpoints.
+func (c Cell) uEdgeIsClosest(p Point, vHi bool) bool {
+ u0 := c.uv.X.Lo
+ u1 := c.uv.X.Hi
+ v := c.uv.Y.Lo
+ if vHi {
+ v = c.uv.Y.Hi
+ }
+ // These are the normals to the planes that are perpendicular to the edge
+ // and pass through one of its two endpoints.
+ dir0 := r3.Vector{v*v + 1, -u0 * v, -u0}
+ dir1 := r3.Vector{v*v + 1, -u1 * v, -u1}
+ return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
+}
+
+// vEdgeIsClosest reports whether a point P is closer to the interior of the specified
+// Cell edge (either the right or left edge of the Cell) or to the endpoints.
+func (c Cell) vEdgeIsClosest(p Point, uHi bool) bool {
+ v0 := c.uv.Y.Lo
+ v1 := c.uv.Y.Hi
+ u := c.uv.X.Lo
+ if uHi {
+ u = c.uv.X.Hi
+ }
+ dir0 := r3.Vector{-u * v0, u*u + 1, -v0}
+ dir1 := r3.Vector{-u * v1, u*u + 1, -v1}
+ return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
+}
+
+// edgeDistance reports the distance from a Point P to a given Cell edge. The point
+// P is given by its dot product, and the uv edge by its normal in the
+// given coordinate value.
+func edgeDistance(ij, uv float64) s1.ChordAngle {
+ // Let P be the target point and let R be the closest point on the given
+ // edge AB. The desired distance PR can be expressed as PR^2 = PQ^2 + QR^2
+ // where Q is the point P projected onto the plane through the great circle
+ // through AB.
We can compute the distance PQ^2 perpendicular to the plane
+	// from "dirIJ" (the dot product of the target point P with the edge
+	// normal) and the squared length of the edge normal (1 + uv**2).
+	pq2 := (ij * ij) / (1 + uv*uv)
+
+	// We can compute the distance QR as (1 - OQ) where O is the sphere origin,
+	// and we can compute OQ^2 = 1 - PQ^2 using the Pythagorean theorem.
+	// (This calculation loses accuracy as angle POQ approaches Pi/2.)
+	qr := 1 - math.Sqrt(1-pq2)
+	return s1.ChordAngleFromSquaredLength(pq2 + qr*qr)
+}
+
+// distanceInternal reports the distance from the given point to the interior of
+// the cell if toInterior is true or to the boundary of the cell otherwise.
+func (c Cell) distanceInternal(targetXYZ Point, toInterior bool) s1.ChordAngle {
+	// All calculations are done in the (u,v,w) coordinates of this cell's face.
+	target := faceXYZtoUVW(int(c.face), targetXYZ)
+
+	// Compute dot products with all four upward or rightward-facing edge
+	// normals. dirIJ is the dot product for the edge corresponding to axis
+	// I, endpoint J. For example, dir01 is the right edge of the Cell
+	// (corresponding to the upper endpoint of the u-axis).
+	dir00 := target.X - target.Z*c.uv.X.Lo
+	dir01 := target.X - target.Z*c.uv.X.Hi
+	dir10 := target.Y - target.Z*c.uv.Y.Lo
+	dir11 := target.Y - target.Z*c.uv.Y.Hi
+	inside := true
+	if dir00 < 0 {
+		inside = false // Target is to the left of the cell
+		if c.vEdgeIsClosest(target, false) {
+			return edgeDistance(-dir00, c.uv.X.Lo)
+		}
+	}
+	if dir01 > 0 {
+		inside = false // Target is to the right of the cell
+		if c.vEdgeIsClosest(target, true) {
+			return edgeDistance(dir01, c.uv.X.Hi)
+		}
+	}
+	if dir10 < 0 {
+		inside = false // Target is below the cell
+		if c.uEdgeIsClosest(target, false) {
+			return edgeDistance(-dir10, c.uv.Y.Lo)
+		}
+	}
+	if dir11 > 0 {
+		inside = false // Target is above the cell
+		if c.uEdgeIsClosest(target, true) {
+			return edgeDistance(dir11, c.uv.Y.Hi)
+		}
+	}
+	if inside {
+		if toInterior {
+			return s1.ChordAngle(0)
+		}
+		// Although you might think of Cells as rectangles, they are actually
+		// arbitrary quadrilaterals after they are projected onto the sphere.
+		// Therefore the simplest approach is just to find the minimum distance to
+		// any of the four edges.
+		return minChordAngle(edgeDistance(-dir00, c.uv.X.Lo),
+			edgeDistance(dir01, c.uv.X.Hi),
+			edgeDistance(-dir10, c.uv.Y.Lo),
+			edgeDistance(dir11, c.uv.Y.Hi))
+	}
+
+	// Otherwise, the closest point is one of the four cell vertices. Note that
+	// it is *not* trivial to narrow down the candidates based on the edge sign
+	// tests above, because (1) the edges don't meet at right angles and (2)
+	// there are points on the far side of the sphere that are both above *and*
+	// below the cell, etc.
+	return minChordAngle(c.vertexChordDist2(target, false, false),
+		c.vertexChordDist2(target, true, false),
+		c.vertexChordDist2(target, false, true),
+		c.vertexChordDist2(target, true, true))
+}
+
+// Distance reports the distance from the cell to the given point. Returns zero if
+// the point is inside the cell.
+func (c Cell) Distance(target Point) s1.ChordAngle {
+	return c.distanceInternal(target, true)
+}
+
+// MaxDistance reports the maximum distance from the cell (including its interior) to the
+// given point.
+func (c Cell) MaxDistance(target Point) s1.ChordAngle {
+	// First check the 4 cell vertices. If all are within the hemisphere
+	// centered around target, the max distance will be to one of these vertices.
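+	// (Editor's note, added for clarity: "within the hemisphere" means the
+	// chord distance to target is at most RightChordAngle, i.e. 90 degrees,
+	// which is the test applied below; otherwise the maximum is pi minus the
+	// minimum boundary distance to the antipodal point.)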
+ targetUVW := faceXYZtoUVW(int(c.face), target) + maxDist := maxChordAngle(c.vertexChordDist2(targetUVW, false, false), + c.vertexChordDist2(targetUVW, true, false), + c.vertexChordDist2(targetUVW, false, true), + c.vertexChordDist2(targetUVW, true, true)) + + if maxDist <= s1.RightChordAngle { + return maxDist + } + + // Otherwise, find the minimum distance dMin to the antipodal point and the + // maximum distance will be pi - dMin. + return s1.StraightChordAngle - c.BoundaryDistance(Point{target.Mul(-1)}) +} + +// BoundaryDistance reports the distance from the cell boundary to the given point. +func (c Cell) BoundaryDistance(target Point) s1.ChordAngle { + return c.distanceInternal(target, false) +} + +// DistanceToEdge returns the minimum distance from the cell to the given edge AB. Returns +// zero if the edge intersects the cell interior. +func (c Cell) DistanceToEdge(a, b Point) s1.ChordAngle { + // Possible optimizations: + // - Currently the (cell vertex, edge endpoint) distances are computed + // twice each, and the length of AB is computed 4 times. + // - To fix this, refactor GetDistance(target) so that it skips calculating + // the distance to each cell vertex. Instead, compute the cell vertices + // and distances in this function, and add a low-level UpdateMinDistance + // that allows the XA, XB, and AB distances to be passed in. + // - It might also be more efficient to do all calculations in UVW-space, + // since this would involve transforming 2 points rather than 4. + + // First, check the minimum distance to the edge endpoints A and B. + // (This also detects whether either endpoint is inside the cell.) + minDist := minChordAngle(c.Distance(a), c.Distance(b)) + if minDist == 0 { + return minDist + } + + // Otherwise, check whether the edge crosses the cell boundary. + crosser := NewChainEdgeCrosser(a, b, c.Vertex(3)) + for i := 0; i < 4; i++ { + if crosser.ChainCrossingSign(c.Vertex(i)) != DoNotCross { + return 0 + } + } + + // Finally, check whether the minimum distance occurs between a cell vertex + // and the interior of the edge AB. (Some of this work is redundant, since + // it also checks the distance to the endpoints A and B again.) + // + // Note that we don't need to check the distance from the interior of AB to + // the interior of a cell edge, because the only way that this distance can + // be minimal is if the two edges cross (already checked above). + for i := 0; i < 4; i++ { + minDist, _ = UpdateMinDistance(c.Vertex(i), a, b, minDist) + } + return minDist +} + +// MaxDistanceToEdge returns the maximum distance from the cell (including its interior) +// to the given edge AB. +func (c Cell) MaxDistanceToEdge(a, b Point) s1.ChordAngle { + // If the maximum distance from both endpoints to the cell is less than π/2 + // then the maximum distance from the edge to the cell is the maximum of the + // two endpoint distances. + maxDist := maxChordAngle(c.MaxDistance(a), c.MaxDistance(b)) + if maxDist <= s1.RightChordAngle { + return maxDist + } + + return s1.StraightChordAngle - c.DistanceToEdge(Point{a.Mul(-1)}, Point{b.Mul(-1)}) +} + +// DistanceToCell returns the minimum distance from this cell to the given cell. +// It returns zero if one cell contains the other. +func (c Cell) DistanceToCell(target Cell) s1.ChordAngle { + // If the cells intersect, the distance is zero. We use the (u,v) ranges + // rather than CellID intersects so that cells that share a partial edge or + // corner are considered to intersect. 
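+	// (Editor's note, added for clarity: two edge-adjacent cells have
+	// disjoint CellID ranges, so a range-based test would report them as
+	// non-intersecting, while their closed (u,v) rectangles do touch and
+	// correctly yield a distance of zero below.)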
+ if c.face == target.face && c.uv.Intersects(target.uv) { + return 0 + } + + // Otherwise, the minimum distance always occurs between a vertex of one + // cell and an edge of the other cell (including the edge endpoints). This + // represents a total of 32 possible (vertex, edge) pairs. + // + // TODO(roberts): This could be optimized to be at least 5x faster by pruning + // the set of possible closest vertex/edge pairs using the faces and (u,v) + // ranges of both cells. + var va, vb [4]Point + for i := 0; i < 4; i++ { + va[i] = c.Vertex(i) + vb[i] = target.Vertex(i) + } + minDist := s1.InfChordAngle() + for i := 0; i < 4; i++ { + for j := 0; j < 4; j++ { + minDist, _ = UpdateMinDistance(va[i], vb[j], vb[(j+1)&3], minDist) + minDist, _ = UpdateMinDistance(vb[i], va[j], va[(j+1)&3], minDist) + } + } + return minDist +} + +// MaxDistanceToCell returns the maximum distance from the cell (including its +// interior) to the given target cell. +func (c Cell) MaxDistanceToCell(target Cell) s1.ChordAngle { + // Need to check the antipodal target for intersection with the cell. If it + // intersects, the distance is the straight ChordAngle. + // antipodalUV is the transpose of the original UV, interpreted within the opposite face. + antipodalUV := r2.Rect{target.uv.Y, target.uv.X} + if int(c.face) == oppositeFace(int(target.face)) && c.uv.Intersects(antipodalUV) { + return s1.StraightChordAngle + } + + // Otherwise, the maximum distance always occurs between a vertex of one + // cell and an edge of the other cell (including the edge endpoints). This + // represents a total of 32 possible (vertex, edge) pairs. + // + // TODO(roberts): When the maximum distance is at most π/2, the maximum is + // always attained between a pair of vertices, and this could be made much + // faster by testing each vertex pair once rather than the current 4 times. + var va, vb [4]Point + for i := 0; i < 4; i++ { + va[i] = c.Vertex(i) + vb[i] = target.Vertex(i) + } + maxDist := s1.NegativeChordAngle + for i := 0; i < 4; i++ { + for j := 0; j < 4; j++ { + maxDist, _ = UpdateMaxDistance(va[i], vb[j], vb[(j+1)&3], maxDist) + maxDist, _ = UpdateMaxDistance(vb[i], va[j], va[(j+1)&3], maxDist) + } + } + return maxDist +} diff --git a/vendor/github.com/golang/geo/s2/cellid.go b/vendor/github.com/golang/geo/s2/cellid.go new file mode 100644 index 000000000..37d488685 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/cellid.go @@ -0,0 +1,942 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "bytes" + "fmt" + "io" + "math" + "sort" + "strconv" + "strings" + + "github.com/golang/geo/r1" + "github.com/golang/geo/r2" + "github.com/golang/geo/r3" + "github.com/golang/geo/s1" +) + +// CellID uniquely identifies a cell in the S2 cell decomposition. +// The most significant 3 bits encode the face number (0-5). The +// remaining 61 bits encode the position of the center of this cell +// along the Hilbert curve on that face. 
The zero value and the value
+// (1<<64)-1 are invalid cell IDs. The first compares less than any
+// valid cell ID, the second compares greater than any valid cell ID.
+//
+// Sequentially increasing cell IDs follow a continuous space-filling curve
+// over the entire sphere. They have the following properties:
+//
+//  - The ID of a cell at level k consists of a 3-bit face number followed
+//    by k bit pairs that recursively select one of the four children of
+//    each cell. The next bit is always 1, and all other bits are 0.
+//    Therefore, the level of a cell is determined by the position of its
+//    lowest-numbered bit that is turned on (for a cell at level k, this
+//    position is 2 * (maxLevel - k)).
+//
+//  - The ID of a parent cell is at the midpoint of the range of IDs spanned
+//    by its children (or by its descendants at any level).
+//
+// Leaf cells are often used to represent points on the unit sphere, and
+// this type provides methods for converting directly between these two
+// representations. For cells that represent 2D regions rather than
+// discrete points, it is better to use Cells.
+type CellID uint64
+
+// SentinelCellID is an invalid cell ID guaranteed to be larger than any
+// valid cell ID. It is used primarily by ShapeIndex. The value is also used
+// by some S2 types when encoding data.
+// Note that the sentinel's RangeMin == RangeMax == itself.
+const SentinelCellID = CellID(^uint64(0))
+
+// sortCellIDs sorts the slice of CellIDs in place.
+func sortCellIDs(ci []CellID) {
+	sort.Sort(cellIDs(ci))
+}
+
+// cellIDs implements the Sort interface for slices of CellIDs.
+type cellIDs []CellID
+
+func (c cellIDs) Len() int           { return len(c) }
+func (c cellIDs) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
+func (c cellIDs) Less(i, j int) bool { return c[i] < c[j] }
+
+// TODO(dsymonds): Some of these constants should probably be exported.
+const (
+	faceBits = 3
+	numFaces = 6
+
+	// This is the number of levels needed to specify a leaf cell.
+	maxLevel = 30
+
+	// The extra position bit (61 rather than 60) lets us encode each cell as its
+	// Hilbert curve position at the cell center (which is halfway along the
+	// portion of the Hilbert curve that fills that cell).
+	posBits = 2*maxLevel + 1
+
+	// The maximum index of a valid leaf cell plus one. The range of valid leaf
+	// cell indices is [0..maxSize-1].
+	maxSize = 1 << maxLevel
+
+	wrapOffset = uint64(numFaces) << posBits
+)
+
+// CellIDFromFacePosLevel returns a cell given its face in the range
+// [0,5], the 61-bit Hilbert curve position pos within that face, and
+// the level in the range [0,maxLevel]. The position in the cell ID
+// will be truncated to correspond to the Hilbert curve position at
+// the center of the returned cell.
+func CellIDFromFacePosLevel(face int, pos uint64, level int) CellID {
+	return CellID(uint64(face)<<posBits + pos | 1).Parent(level)
+}
+
+// CellIDFromFace returns the cell corresponding to a given S2 cube face.
+func CellIDFromFace(face int) CellID {
+	return CellID((uint64(face) << posBits) + lsbForLevel(0))
+}
+
+// CellIDFromLatLng returns the leaf cell containing ll.
+func CellIDFromLatLng(ll LatLng) CellID {
+	return cellIDFromPoint(PointFromLatLng(ll))
+}
+
+// CellIDFromToken returns a cell given a hex-encoded string of its uint64 ID.
+func CellIDFromToken(s string) CellID {
+	if len(s) > 16 {
+		return CellID(0)
+	}
+	n, err := strconv.ParseUint(s, 16, 64)
+	if err != nil {
+		return CellID(0)
+	}
+	// Equivalent to right-padding string with zeros to 16 characters.
+	if len(s) < 16 {
+		n = n << (4 * uint(16-len(s)))
+	}
+	return CellID(n)
+}
+
+// ToToken returns a hex-encoded string of the uint64 cell id, with leading
+// zeros included but trailing zeros stripped.
+func (ci CellID) ToToken() string {
+	s := strings.TrimRight(fmt.Sprintf("%016x", uint64(ci)), "0")
+	if len(s) == 0 {
+		return "X"
+	}
+	return s
+}
+
+// IsValid reports whether ci represents a valid cell.
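+// For example (editor's sketch, not upstream documentation):
+//
+//	CellIDFromFace(0).IsValid() // true: face 0 at level 0
+//	CellID(0).IsValid()         // false: no level bit is set
+//	SentinelCellID.IsValid()    // false: its face bits decode to 7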
+func (ci CellID) IsValid() bool {
+	return ci.Face() < numFaces && (ci.lsb()&0x1555555555555555 != 0)
+}
+
+// Face returns the cube face for this cell ID, in the range [0,5].
+func (ci CellID) Face() int { return int(uint64(ci) >> posBits) }
+
+// Pos returns the position along the Hilbert curve of this cell ID, in the range [0,2^posBits-1].
+func (ci CellID) Pos() uint64 { return uint64(ci) & (^uint64(0) >> faceBits) }
+
+// Level returns the subdivision level of this cell ID, in the range [0, maxLevel].
+func (ci CellID) Level() int {
+	return maxLevel - findLSBSetNonZero64(uint64(ci))>>1
+}
+
+// IsLeaf returns whether this cell ID is at the deepest level;
+// that is, the level at which the cells are smallest.
+func (ci CellID) IsLeaf() bool { return uint64(ci)&1 != 0 }
+
+// ChildPosition returns the child position (0..3) of this cell's
+// ancestor at the given level, relative to its parent. The argument
+// should be in the range 1..maxLevel. For example,
+// ChildPosition(1) returns the position of this cell's level-1
+// ancestor within its top-level face cell.
+func (ci CellID) ChildPosition(level int) int {
+	return int(uint64(ci)>>uint64(2*(maxLevel-level)+1)) & 3
+}
+
+// lsbForLevel returns the lowest-numbered bit that is on for cells at the given level.
+func lsbForLevel(level int) uint64 { return 1 << uint64(2*(maxLevel-level)) }
+
+// Parent returns the cell at the given level, which must be no greater than the current level.
+func (ci CellID) Parent(level int) CellID {
+	lsb := lsbForLevel(level)
+	return CellID((uint64(ci) & -lsb) | lsb)
+}
+
+// immediateParent is cheaper than Parent, but assumes !ci.isFace().
+func (ci CellID) immediateParent() CellID {
+	nlsb := CellID(ci.lsb() << 2)
+	return (ci & -nlsb) | nlsb
+}
+
+// isFace returns whether this is a top-level (face) cell.
+func (ci CellID) isFace() bool { return uint64(ci)&(lsbForLevel(0)-1) == 0 }
+
+// lsb returns the least significant bit that is set.
+func (ci CellID) lsb() uint64 { return uint64(ci) & -uint64(ci) }
+
+// Children returns the four immediate children of this cell.
+// If ci is a leaf cell, it returns four identical cells that are not the children.
+func (ci CellID) Children() [4]CellID {
+	var ch [4]CellID
+	lsb := CellID(ci.lsb())
+	ch[0] = ci - lsb + lsb>>2
+	lsb >>= 1
+	ch[1] = ch[0] + lsb
+	ch[2] = ch[1] + lsb
+	ch[3] = ch[2] + lsb
+	return ch
+}
+
+func sizeIJ(level int) int {
+	return 1 << uint(maxLevel-level)
+}
+
+// EdgeNeighbors returns the four cells that are adjacent across the cell's four edges.
+// Edges 0, 1, 2, 3 are in the down, right, up, left directions in the face space.
+// All neighbors are guaranteed to be distinct.
+func (ci CellID) EdgeNeighbors() [4]CellID {
+	level := ci.Level()
+	size := sizeIJ(level)
+	f, i, j, _ := ci.faceIJOrientation()
+	return [4]CellID{
+		cellIDFromFaceIJWrap(f, i, j-size).Parent(level),
+		cellIDFromFaceIJWrap(f, i+size, j).Parent(level),
+		cellIDFromFaceIJWrap(f, i, j+size).Parent(level),
+		cellIDFromFaceIJWrap(f, i-size, j).Parent(level),
+	}
+}
+
+// VertexNeighbors returns the neighboring cellIDs with vertex closest to this cell at the given level.
+// (Normally there are four neighbors, but the closest vertex may only have three neighbors if it is one of
+// the 8 cube vertices.)
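+//
+// A usage sketch (editor's illustration, not upstream documentation):
+//
+//	leaf := CellIDFromLatLng(LatLngFromDegrees(49.7, -123.1))
+//	nbrs := leaf.VertexNeighbors(20) // three or four level-20 cells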
+func (ci CellID) VertexNeighbors(level int) []CellID { + halfSize := sizeIJ(level + 1) + size := halfSize << 1 + f, i, j, _ := ci.faceIJOrientation() + + var isame, jsame bool + var ioffset, joffset int + if i&halfSize != 0 { + ioffset = size + isame = (i + size) < maxSize + } else { + ioffset = -size + isame = (i - size) >= 0 + } + if j&halfSize != 0 { + joffset = size + jsame = (j + size) < maxSize + } else { + joffset = -size + jsame = (j - size) >= 0 + } + + results := []CellID{ + ci.Parent(level), + cellIDFromFaceIJSame(f, i+ioffset, j, isame).Parent(level), + cellIDFromFaceIJSame(f, i, j+joffset, jsame).Parent(level), + } + + if isame || jsame { + results = append(results, cellIDFromFaceIJSame(f, i+ioffset, j+joffset, isame && jsame).Parent(level)) + } + + return results +} + +// AllNeighbors returns all neighbors of this cell at the given level. Two +// cells X and Y are neighbors if their boundaries intersect but their +// interiors do not. In particular, two cells that intersect at a single +// point are neighbors. Note that for cells adjacent to a face vertex, the +// same neighbor may be returned more than once. There could be up to eight +// neighbors including the diagonal ones that share the vertex. +// +// This requires level >= ci.Level(). +func (ci CellID) AllNeighbors(level int) []CellID { + var neighbors []CellID + + face, i, j, _ := ci.faceIJOrientation() + + // Find the coordinates of the lower left-hand leaf cell. We need to + // normalize (i,j) to a known position within the cell because level + // may be larger than this cell's level. + size := sizeIJ(ci.Level()) + i &= -size + j &= -size + + nbrSize := sizeIJ(level) + + // We compute the top-bottom, left-right, and diagonal neighbors in one + // pass. The loop test is at the end of the loop to avoid 32-bit overflow. + for k := -nbrSize; ; k += nbrSize { + var sameFace bool + if k < 0 { + sameFace = (j+k >= 0) + } else if k >= size { + sameFace = (j+k < maxSize) + } else { + sameFace = true + // Top and bottom neighbors. + neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j-nbrSize, + j-size >= 0).Parent(level)) + neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j+size, + j+size < maxSize).Parent(level)) + } + + // Left, right, and diagonal neighbors. + neighbors = append(neighbors, cellIDFromFaceIJSame(face, i-nbrSize, j+k, + sameFace && i-size >= 0).Parent(level)) + neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+size, j+k, + sameFace && i+size < maxSize).Parent(level)) + + if k >= size { + break + } + } + + return neighbors +} + +// RangeMin returns the minimum CellID that is contained within this cell. +func (ci CellID) RangeMin() CellID { return CellID(uint64(ci) - (ci.lsb() - 1)) } + +// RangeMax returns the maximum CellID that is contained within this cell. +func (ci CellID) RangeMax() CellID { return CellID(uint64(ci) + (ci.lsb() - 1)) } + +// Contains returns true iff the CellID contains oci. +func (ci CellID) Contains(oci CellID) bool { + return uint64(ci.RangeMin()) <= uint64(oci) && uint64(oci) <= uint64(ci.RangeMax()) +} + +// Intersects returns true iff the CellID intersects oci. +func (ci CellID) Intersects(oci CellID) bool { + return uint64(oci.RangeMin()) <= uint64(ci.RangeMax()) && uint64(oci.RangeMax()) >= uint64(ci.RangeMin()) +} + +// String returns the string representation of the cell ID in the form "1/3210". 
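+// For example (editor's sketch): a face cell prints as just the face digit,
+// and each further character is the base-4 child position at the next level:
+//
+//	CellIDFromFace(1).String()              // "1/"
+//	CellIDFromFace(1).ChildBegin().String() // "1/0"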
+func (ci CellID) String() string { + if !ci.IsValid() { + return "Invalid: " + strconv.FormatInt(int64(ci), 16) + } + var b bytes.Buffer + b.WriteByte("012345"[ci.Face()]) // values > 5 will have been picked off by !IsValid above + b.WriteByte('/') + for level := 1; level <= ci.Level(); level++ { + b.WriteByte("0123"[ci.ChildPosition(level)]) + } + return b.String() +} + +// cellIDFromString returns a CellID from a string in the form "1/3210". +func cellIDFromString(s string) CellID { + level := len(s) - 2 + if level < 0 || level > maxLevel { + return CellID(0) + } + face := int(s[0] - '0') + if face < 0 || face > 5 || s[1] != '/' { + return CellID(0) + } + id := CellIDFromFace(face) + for i := 2; i < len(s); i++ { + childPos := s[i] - '0' + if childPos < 0 || childPos > 3 { + return CellID(0) + } + id = id.Children()[childPos] + } + return id +} + +// Point returns the center of the s2 cell on the sphere as a Point. +// The maximum directional error in Point (compared to the exact +// mathematical result) is 1.5 * dblEpsilon radians, and the maximum length +// error is 2 * dblEpsilon (the same as Normalize). +func (ci CellID) Point() Point { return Point{ci.rawPoint().Normalize()} } + +// LatLng returns the center of the s2 cell on the sphere as a LatLng. +func (ci CellID) LatLng() LatLng { return LatLngFromPoint(Point{ci.rawPoint()}) } + +// ChildBegin returns the first child in a traversal of the children of this cell, in Hilbert curve order. +// +// for ci := c.ChildBegin(); ci != c.ChildEnd(); ci = ci.Next() { +// ... +// } +func (ci CellID) ChildBegin() CellID { + ol := ci.lsb() + return CellID(uint64(ci) - ol + ol>>2) +} + +// ChildBeginAtLevel returns the first cell in a traversal of children a given level deeper than this cell, in +// Hilbert curve order. The given level must be no smaller than the cell's level. +// See ChildBegin for example use. +func (ci CellID) ChildBeginAtLevel(level int) CellID { + return CellID(uint64(ci) - ci.lsb() + lsbForLevel(level)) +} + +// ChildEnd returns the first cell after a traversal of the children of this cell in Hilbert curve order. +// The returned cell may be invalid. +func (ci CellID) ChildEnd() CellID { + ol := ci.lsb() + return CellID(uint64(ci) + ol + ol>>2) +} + +// ChildEndAtLevel returns the first cell after the last child in a traversal of children a given level deeper +// than this cell, in Hilbert curve order. +// The given level must be no smaller than the cell's level. +// The returned cell may be invalid. +func (ci CellID) ChildEndAtLevel(level int) CellID { + return CellID(uint64(ci) + ci.lsb() + lsbForLevel(level)) +} + +// Next returns the next cell along the Hilbert curve. +// This is expected to be used with ChildBegin and ChildEnd, +// or ChildBeginAtLevel and ChildEndAtLevel. +func (ci CellID) Next() CellID { + return CellID(uint64(ci) + ci.lsb()<<1) +} + +// Prev returns the previous cell along the Hilbert curve. +func (ci CellID) Prev() CellID { + return CellID(uint64(ci) - ci.lsb()<<1) +} + +// NextWrap returns the next cell along the Hilbert curve, wrapping from last to +// first as necessary. This should not be used with ChildBegin and ChildEnd. +func (ci CellID) NextWrap() CellID { + n := ci.Next() + if uint64(n) < wrapOffset { + return n + } + return CellID(uint64(n) - wrapOffset) +} + +// PrevWrap returns the previous cell along the Hilbert curve, wrapping around from +// first to last as necessary. This should not be used with ChildBegin and ChildEnd. 
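+// For example (editor's sketch): stepping back from the first leaf cell on
+// face 0 wraps around to the last leaf cell on face 5:
+//
+//	first := CellIDFromFace(0).ChildBeginAtLevel(maxLevel)
+//	last := first.PrevWrap() // last leaf cell on face 5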
+func (ci CellID) PrevWrap() CellID { + p := ci.Prev() + if uint64(p) < wrapOffset { + return p + } + return CellID(uint64(p) + wrapOffset) +} + +// AdvanceWrap advances or retreats the indicated number of steps along the +// Hilbert curve at the current level and returns the new position. The +// position wraps between the first and last faces as necessary. +func (ci CellID) AdvanceWrap(steps int64) CellID { + if steps == 0 { + return ci + } + + // We clamp the number of steps if necessary to ensure that we do not + // advance past the End() or before the Begin() of this level. + shift := uint(2*(maxLevel-ci.Level()) + 1) + if steps < 0 { + if min := -int64(uint64(ci) >> shift); steps < min { + wrap := int64(wrapOffset >> shift) + steps %= wrap + if steps < min { + steps += wrap + } + } + } else { + // Unlike Advance(), we don't want to return End(level). + if max := int64((wrapOffset - uint64(ci)) >> shift); steps > max { + wrap := int64(wrapOffset >> shift) + steps %= wrap + if steps > max { + steps -= wrap + } + } + } + + // If steps is negative, then shifting it left has undefined behavior. + // Cast to uint64 for a 2's complement answer. + return CellID(uint64(ci) + (uint64(steps) << shift)) +} + +// Encode encodes the CellID. +func (ci CellID) Encode(w io.Writer) error { + e := &encoder{w: w} + ci.encode(e) + return e.err +} + +func (ci CellID) encode(e *encoder) { + e.writeUint64(uint64(ci)) +} + +// Decode decodes the CellID. +func (ci *CellID) Decode(r io.Reader) error { + d := &decoder{r: asByteReader(r)} + ci.decode(d) + return d.err +} + +func (ci *CellID) decode(d *decoder) { + *ci = CellID(d.readUint64()) +} + +// TODO: the methods below are not exported yet. Settle on the entire API design +// before doing this. Do we want to mirror the C++ one as closely as possible? + +// distanceFromBegin returns the number of steps that this cell is from the first +// node in the S2 hierarchy at our level. (i.e., FromFace(0).ChildBeginAtLevel(ci.Level())). +// The return value is always non-negative. +func (ci CellID) distanceFromBegin() int64 { + return int64(ci >> uint64(2*(maxLevel-ci.Level())+1)) +} + +// rawPoint returns an unnormalized r3 vector from the origin through the center +// of the s2 cell on the sphere. +func (ci CellID) rawPoint() r3.Vector { + face, si, ti := ci.faceSiTi() + return faceUVToXYZ(face, stToUV((0.5/maxSize)*float64(si)), stToUV((0.5/maxSize)*float64(ti))) +} + +// faceSiTi returns the Face/Si/Ti coordinates of the center of the cell. +func (ci CellID) faceSiTi() (face int, si, ti uint32) { + face, i, j, _ := ci.faceIJOrientation() + delta := 0 + if ci.IsLeaf() { + delta = 1 + } else { + if (i^(int(ci)>>2))&1 != 0 { + delta = 2 + } + } + return face, uint32(2*i + delta), uint32(2*j + delta) +} + +// faceIJOrientation uses the global lookupIJ table to unfiddle the bits of ci. +func (ci CellID) faceIJOrientation() (f, i, j, orientation int) { + f = ci.Face() + orientation = f & swapMask + nbits := maxLevel - 7*lookupBits // first iteration + + // Each iteration maps 8 bits of the Hilbert curve position into + // 4 bits of "i" and "j". The lookup table transforms a key of the + // form "ppppppppoo" to a value of the form "iiiijjjjoo", where the + // letters [ijpo] represents bits of "i", "j", the Hilbert curve + // position, and the Hilbert curve orientation respectively. + // + // On the first iteration we need to be careful to clear out the bits + // representing the cube face. 
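+	//
+	// (Editor's note, a worked check of that first iteration: nbits is
+	// maxLevel - 7*lookupBits = 30 - 28 = 2, so the mask below keeps only
+	// 2*nbits = 4 position bits, which is exactly what stops the 3 face
+	// bits from leaking into the lookup key.)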
+ for k := 7; k >= 0; k-- { + orientation += (int(uint64(ci)>>uint64(k*2*lookupBits+1)) & ((1 << uint(2*nbits)) - 1)) << 2 + orientation = lookupIJ[orientation] + i += (orientation >> (lookupBits + 2)) << uint(k*lookupBits) + j += ((orientation >> 2) & ((1 << lookupBits) - 1)) << uint(k*lookupBits) + orientation &= (swapMask | invertMask) + nbits = lookupBits // following iterations + } + + // The position of a non-leaf cell at level "n" consists of a prefix of + // 2*n bits that identifies the cell, followed by a suffix of + // 2*(maxLevel-n)+1 bits of the form 10*. If n==maxLevel, the suffix is + // just "1" and has no effect. Otherwise, it consists of "10", followed + // by (maxLevel-n-1) repetitions of "00", followed by "0". The "10" has + // no effect, while each occurrence of "00" has the effect of reversing + // the swapMask bit. + if ci.lsb()&0x1111111111111110 != 0 { + orientation ^= swapMask + } + + return +} + +// cellIDFromFaceIJ returns a leaf cell given its cube face (range 0..5) and IJ coordinates. +func cellIDFromFaceIJ(f, i, j int) CellID { + // Note that this value gets shifted one bit to the left at the end + // of the function. + n := uint64(f) << (posBits - 1) + // Alternating faces have opposite Hilbert curve orientations; this + // is necessary in order for all faces to have a right-handed + // coordinate system. + bits := f & swapMask + // Each iteration maps 4 bits of "i" and "j" into 8 bits of the Hilbert + // curve position. The lookup table transforms a 10-bit key of the form + // "iiiijjjjoo" to a 10-bit value of the form "ppppppppoo", where the + // letters [ijpo] denote bits of "i", "j", Hilbert curve position, and + // Hilbert curve orientation respectively. + for k := 7; k >= 0; k-- { + mask := (1 << lookupBits) - 1 + bits += ((i >> uint(k*lookupBits)) & mask) << (lookupBits + 2) + bits += ((j >> uint(k*lookupBits)) & mask) << 2 + bits = lookupPos[bits] + n |= uint64(bits>>2) << (uint(k) * 2 * lookupBits) + bits &= (swapMask | invertMask) + } + return CellID(n*2 + 1) +} + +func cellIDFromFaceIJWrap(f, i, j int) CellID { + // Convert i and j to the coordinates of a leaf cell just beyond the + // boundary of this face. This prevents 32-bit overflow in the case + // of finding the neighbors of a face cell. + i = clampInt(i, -1, maxSize) + j = clampInt(j, -1, maxSize) + + // We want to wrap these coordinates onto the appropriate adjacent face. + // The easiest way to do this is to convert the (i,j) coordinates to (x,y,z) + // (which yields a point outside the normal face boundary), and then call + // xyzToFaceUV to project back onto the correct face. + // + // The code below converts (i,j) to (si,ti), and then (si,ti) to (u,v) using + // the linear projection (u=2*s-1 and v=2*t-1). (The code further below + // converts back using the inverse projection, s=0.5*(u+1) and t=0.5*(v+1). + // Any projection would work here, so we use the simplest.) We also clamp + // the (u,v) coordinates so that the point is barely outside the + // [-1,1]x[-1,1] face rectangle, since otherwise the reprojection step + // (which divides by the new z coordinate) might change the other + // coordinates enough so that we end up in the wrong leaf cell. + const scale = 1.0 / maxSize + limit := math.Nextafter(1, 2) + u := math.Max(-limit, math.Min(limit, scale*float64((i<<1)+1-maxSize))) + v := math.Max(-limit, math.Min(limit, scale*float64((j<<1)+1-maxSize))) + + // Find the leaf cell coordinates on the adjacent face, and convert + // them to a cell id at the appropriate level. 
+ f, u, v = xyzToFaceUV(faceUVToXYZ(f, u, v)) + return cellIDFromFaceIJ(f, stToIJ(0.5*(u+1)), stToIJ(0.5*(v+1))) +} + +func cellIDFromFaceIJSame(f, i, j int, sameFace bool) CellID { + if sameFace { + return cellIDFromFaceIJ(f, i, j) + } + return cellIDFromFaceIJWrap(f, i, j) +} + +// ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding +// s- or t-value contained by that cell. The argument must be in the range +// [0..2**30], i.e. up to one position beyond the normal range of valid leaf +// cell indices. +func ijToSTMin(i int) float64 { + return float64(i) / float64(maxSize) +} + +// stToIJ converts value in ST coordinates to a value in IJ coordinates. +func stToIJ(s float64) int { + return clampInt(int(math.Floor(maxSize*s)), 0, maxSize-1) +} + +// cellIDFromPoint returns a leaf cell containing point p. Usually there is +// exactly one such cell, but for points along the edge of a cell, any +// adjacent cell may be (deterministically) chosen. This is because +// s2.CellIDs are considered to be closed sets. The returned cell will +// always contain the given point, i.e. +// +// CellFromPoint(p).ContainsPoint(p) +// +// is always true. +func cellIDFromPoint(p Point) CellID { + f, u, v := xyzToFaceUV(r3.Vector{p.X, p.Y, p.Z}) + i := stToIJ(uvToST(u)) + j := stToIJ(uvToST(v)) + return cellIDFromFaceIJ(f, i, j) +} + +// ijLevelToBoundUV returns the bounds in (u,v)-space for the cell at the given +// level containing the leaf cell with the given (i,j)-coordinates. +func ijLevelToBoundUV(i, j, level int) r2.Rect { + cellSize := sizeIJ(level) + xLo := i & -cellSize + yLo := j & -cellSize + + return r2.Rect{ + X: r1.Interval{ + Lo: stToUV(ijToSTMin(xLo)), + Hi: stToUV(ijToSTMin(xLo + cellSize)), + }, + Y: r1.Interval{ + Lo: stToUV(ijToSTMin(yLo)), + Hi: stToUV(ijToSTMin(yLo + cellSize)), + }, + } +} + +// Constants related to the bit mangling in the Cell ID. +const ( + lookupBits = 4 + swapMask = 0x01 + invertMask = 0x02 +) + +// The following lookup tables are used to convert efficiently between an +// (i,j) cell index and the corresponding position along the Hilbert curve. +// +// lookupPos maps 4 bits of "i", 4 bits of "j", and 2 bits representing the +// orientation of the current cell into 8 bits representing the order in which +// that subcell is visited by the Hilbert curve, plus 2 bits indicating the +// new orientation of the Hilbert curve within that subcell. (Cell +// orientations are represented as combination of swapMask and invertMask.) +// +// lookupIJ is an inverted table used for mapping in the opposite +// direction. +// +// We also experimented with looking up 16 bits at a time (14 bits of position +// plus 2 of orientation) but found that smaller lookup tables gave better +// performance. (2KB fits easily in the primary cache.) 
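+//
+// (Editor's worked note: each table below has 1 << (2*lookupBits + 2) =
+// 1 << 10 = 1024 int entries, keyed by 4 bits of i and 4 bits of j, or
+// equivalently 8 bits of Hilbert position, plus 2 bits of orientation.)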
+var (
+	ijToPos = [4][4]int{
+		{0, 1, 3, 2}, // canonical order
+		{0, 3, 1, 2}, // axes swapped
+		{2, 3, 1, 0}, // bits inverted
+		{2, 1, 3, 0}, // swapped & inverted
+	}
+	posToIJ = [4][4]int{
+		{0, 1, 3, 2}, // canonical order:    (0,0), (0,1), (1,1), (1,0)
+		{0, 2, 3, 1}, // axes swapped:       (0,0), (1,0), (1,1), (0,1)
+		{3, 2, 0, 1}, // bits inverted:      (1,1), (1,0), (0,0), (0,1)
+		{3, 1, 0, 2}, // swapped & inverted: (1,1), (0,1), (0,0), (1,0)
+	}
+	posToOrientation = [4]int{swapMask, 0, 0, invertMask | swapMask}
+	lookupIJ         [1 << (2*lookupBits + 2)]int
+	lookupPos        [1 << (2*lookupBits + 2)]int
+)
+
+func init() {
+	initLookupCell(0, 0, 0, 0, 0, 0)
+	initLookupCell(0, 0, 0, swapMask, 0, swapMask)
+	initLookupCell(0, 0, 0, invertMask, 0, invertMask)
+	initLookupCell(0, 0, 0, swapMask|invertMask, 0, swapMask|invertMask)
+}
+
+// initLookupCell initializes the lookupIJ and lookupPos tables at init time.
+func initLookupCell(level, i, j, origOrientation, pos, orientation int) {
+	if level == lookupBits {
+		ij := (i << lookupBits) + j
+		lookupPos[(ij<<2)+origOrientation] = (pos << 2) + orientation
+		lookupIJ[(pos<<2)+origOrientation] = (ij << 2) + orientation
+		return
+	}
+
+	level++
+	i <<= 1
+	j <<= 1
+	pos <<= 2
+	r := posToIJ[orientation]
+	initLookupCell(level, i+(r[0]>>1), j+(r[0]&1), origOrientation, pos, orientation^posToOrientation[0])
+	initLookupCell(level, i+(r[1]>>1), j+(r[1]&1), origOrientation, pos+1, orientation^posToOrientation[1])
+	initLookupCell(level, i+(r[2]>>1), j+(r[2]&1), origOrientation, pos+2, orientation^posToOrientation[2])
+	initLookupCell(level, i+(r[3]>>1), j+(r[3]&1), origOrientation, pos+3, orientation^posToOrientation[3])
+}
+
+// CommonAncestorLevel returns the level of the common ancestor of the two S2 CellIDs.
+func (ci CellID) CommonAncestorLevel(other CellID) (level int, ok bool) {
+	bits := uint64(ci ^ other)
+	if bits < ci.lsb() {
+		bits = ci.lsb()
+	}
+	if bits < other.lsb() {
+		bits = other.lsb()
+	}
+
+	msbPos := findMSBSetNonZero64(bits)
+	if msbPos > 60 {
+		return 0, false
+	}
+	return (60 - msbPos) >> 1, true
+}
+
+// Advance advances or retreats the indicated number of steps along the
+// Hilbert curve at the current level, and returns the new position. The
+// position is never advanced past End() or before Begin().
+func (ci CellID) Advance(steps int64) CellID {
+	if steps == 0 {
+		return ci
+	}
+
+	// We clamp the number of steps if necessary to ensure that we do not
+	// advance past the End() or before the Begin() of this level. Note that
+	// minSteps and maxSteps always fit in a signed 64-bit integer.
+	stepShift := uint(2*(maxLevel-ci.Level()) + 1)
+	if steps < 0 {
+		minSteps := -int64(uint64(ci) >> stepShift)
+		if steps < minSteps {
+			steps = minSteps
+		}
+	} else {
+		maxSteps := int64((wrapOffset + ci.lsb() - uint64(ci)) >> stepShift)
+		if steps > maxSteps {
+			steps = maxSteps
+		}
+	}
+	return ci + CellID(steps)<<stepShift
+}
+
+// MaxTile returns the largest cell with the same RangeMin such that
+// RangeMax < limit. It returns limit if no such cell exists.
+// This method can be used to generate a small set of CellIDs that covers
+// a given range (a tiling). This example shows how to generate a tiling
+// for a semi-open range of leaf cells [start, limit):
+//
+//	for id := start.MaxTile(limit); id != limit; id = id.Next().MaxTile(limit) {
+//		...
+//	}
+//
+// Note that in general the cells in the tiling will be of different sizes;
+// they gradually get larger (near the middle of the range) and then
+// gradually get smaller as limit is approached.
+func (ci CellID) MaxTile(limit CellID) CellID {
+	start := ci.RangeMin()
+	if start >= limit.RangeMin() {
+		return limit
+	}
+
+	if ci.RangeMax() >= limit {
+		// The cell is too large, shrink it. Note that when generating coverings
+		// of CellID ranges, this loop usually executes only once. Also because
+		// ci.RangeMin() < limit.RangeMin(), we will always exit the loop by the
+		// time we reach a leaf cell.
+		for {
+			ci = ci.Children()[0]
+			if ci.RangeMax() < limit {
+				break
+			}
+		}
+		return ci
+	}
+
+	// The cell may be too small. Grow it if necessary. Note that generally
+	// this loop only iterates once.
+	for !ci.isFace() {
+		parent := ci.immediateParent()
+		if parent.RangeMin() != start || parent.RangeMax() >= limit {
+			break
+		}
+		ci = parent
+	}
+	return ci
+}
+
+// centerFaceSiTi returns the (face, si, ti) coordinates of the center of the cell.
+// Note that although (si,ti) coordinates span the range [0,2**31] in general,
+// the cell center coordinates are always in the range [1,2**31-1] and
+// therefore can be represented using a signed 32-bit integer.
+func (ci CellID) centerFaceSiTi() (face, si, ti int) {
+	// First we compute the discrete (i,j) coordinates of a leaf cell contained
+	// within the given cell. Given that cells are represented by the Hilbert
+	// curve position corresponding at their center, it turns out that the cell
+	// returned by faceIJOrientation is always one of two leaf cells closest
+	// to the center of the cell (unless the given cell is a leaf cell itself,
+	// in which case there is only one possibility).
+	//
+	// Given a cell of size s >= 2 (i.e. not a leaf cell), and letting (imin,
+	// jmin) be the coordinates of its lower left-hand corner, the leaf cell
+	// returned by faceIJOrientation is either (imin + s/2, jmin + s/2) or
+	// (imin + s/2 - 1, jmin + s/2 - 1). The first case is the one we want.
+	// We can distinguish these two cases by looking at the low bit of i or
+	// j. In the second case the low bit is one, unless s == 2 (i.e. the
+	// level just above leaf cells) in which case the low bit is zero.
+	//
+	// In the code below, the expression ((i ^ (int(id) >> 2)) & 1) is true
+	// if we are in the second case described above.
+	face, i, j, _ := ci.faceIJOrientation()
+	delta := 0
+	if ci.IsLeaf() {
+		delta = 1
+	} else if (int64(i)^(int64(ci)>>2))&1 == 1 {
+		delta = 2
+	}
+
+	// Note that (2 * {i,j} + delta) will never overflow a 32-bit integer.
+	return face, 2*i + delta, 2*j + delta
+}
diff --git a/vendor/github.com/golang/geo/s2/cellunion.go b/vendor/github.com/golang/geo/s2/cellunion.go
new file mode 100644
index 000000000..0654de973
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/cellunion.go
@@ -0,0 +1,590 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"fmt"
+	"io"
+	"sort"
+
+	"github.com/golang/geo/s1"
+)
+
+// A CellUnion is a collection of CellIDs.
+//
+// It is normalized if it is sorted, and does not contain redundancy.
+// Specifically, it may not contain the same CellID twice, nor a CellID that
+// is contained by another, nor the four sibling CellIDs that are children of
+// a single higher level CellID.
+//
+// CellUnions are not required to be normalized, but certain operations will
+// return different results if they are not (e.g. Contains).
+type CellUnion []CellID
+
+// CellUnionFromRange creates a CellUnion that covers the half-open range
+// of leaf cells [begin, end). If begin == end the resulting union is empty.
+// This requires that begin and end are both leaves, and begin <= end.
+// To create a closed-ended range, pass in end.Next(). +func CellUnionFromRange(begin, end CellID) CellUnion { + // We repeatedly add the largest cell we can. + var cu CellUnion + for id := begin.MaxTile(end); id != end; id = id.Next().MaxTile(end) { + cu = append(cu, id) + } + // The output is normalized because the cells are added in order by the iteration. + return cu +} + +// CellUnionFromUnion creates a CellUnion from the union of the given CellUnions. +func CellUnionFromUnion(cellUnions ...CellUnion) CellUnion { + var cu CellUnion + for _, cellUnion := range cellUnions { + cu = append(cu, cellUnion...) + } + cu.Normalize() + return cu +} + +// CellUnionFromIntersection creates a CellUnion from the intersection of the given CellUnions. +func CellUnionFromIntersection(x, y CellUnion) CellUnion { + var cu CellUnion + + // This is a fairly efficient calculation that uses binary search to skip + // over sections of both input vectors. It takes constant time if all the + // cells of x come before or after all the cells of y in CellID order. + var i, j int + for i < len(x) && j < len(y) { + iMin := x[i].RangeMin() + jMin := y[j].RangeMin() + if iMin > jMin { + // Either j.Contains(i) or the two cells are disjoint. + if x[i] <= y[j].RangeMax() { + cu = append(cu, x[i]) + i++ + } else { + // Advance j to the first cell possibly contained by x[i]. + j = y.lowerBound(j+1, len(y), iMin) + // The previous cell y[j-1] may now contain x[i]. + if x[i] <= y[j-1].RangeMax() { + j-- + } + } + } else if jMin > iMin { + // Identical to the code above with i and j reversed. + if y[j] <= x[i].RangeMax() { + cu = append(cu, y[j]) + j++ + } else { + i = x.lowerBound(i+1, len(x), jMin) + if y[j] <= x[i-1].RangeMax() { + i-- + } + } + } else { + // i and j have the same RangeMin(), so one contains the other. + if x[i] < y[j] { + cu = append(cu, x[i]) + i++ + } else { + cu = append(cu, y[j]) + j++ + } + } + } + + // The output is generated in sorted order. + cu.Normalize() + return cu +} + +// CellUnionFromIntersectionWithCellID creates a CellUnion from the intersection +// of a CellUnion with the given CellID. This can be useful for splitting a +// CellUnion into chunks. +func CellUnionFromIntersectionWithCellID(x CellUnion, id CellID) CellUnion { + var cu CellUnion + if x.ContainsCellID(id) { + cu = append(cu, id) + cu.Normalize() + return cu + } + + idmax := id.RangeMax() + for i := x.lowerBound(0, len(x), id.RangeMin()); i < len(x) && x[i] <= idmax; i++ { + cu = append(cu, x[i]) + } + + cu.Normalize() + return cu +} + +// CellUnionFromDifference creates a CellUnion from the difference (x - y) +// of the given CellUnions. +func CellUnionFromDifference(x, y CellUnion) CellUnion { + // TODO(roberts): This is approximately O(N*log(N)), but could probably + // use similar techniques as CellUnionFromIntersectionWithCellID to be more efficient. + + var cu CellUnion + for _, xid := range x { + cu.cellUnionDifferenceInternal(xid, &y) + } + + // The output is generated in sorted order, and there should not be any + // cells that can be merged (provided that both inputs were normalized). + return cu +} + +// The C++ constructor methods FromNormalized and FromVerbatim are not necessary +// since they don't call Normalize, and just set the CellIDs directly on the object, +// so straight casting is sufficient in Go to replicate this behavior. + +// IsValid reports whether the cell union is valid, meaning that the CellIDs are +// valid, non-overlapping, and sorted in increasing order. 
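+// For example (editor's sketch):
+//
+//	a := CellUnion{CellIDFromFace(0), CellIDFromFace(2)}
+//	a.IsValid() // true: sorted and non-overlapping
+//	b := CellUnion{CellIDFromFace(2), CellIDFromFace(0)}
+//	b.IsValid() // false: not sorted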
+func (cu *CellUnion) IsValid() bool {
+	for i, cid := range *cu {
+		if !cid.IsValid() {
+			return false
+		}
+		if i == 0 {
+			continue
+		}
+		if (*cu)[i-1].RangeMax() >= cid.RangeMin() {
+			return false
+		}
+	}
+	return true
+}
+
+// IsNormalized reports whether the cell union is normalized, meaning that it
+// satisfies IsValid and that no four cells have a common parent.
+// Certain operations such as Contains will return a different
+// result if the cell union is not normalized.
+func (cu *CellUnion) IsNormalized() bool {
+	for i, cid := range *cu {
+		if !cid.IsValid() {
+			return false
+		}
+		if i == 0 {
+			continue
+		}
+		if (*cu)[i-1].RangeMax() >= cid.RangeMin() {
+			return false
+		}
+		if i < 3 {
+			continue
+		}
+		if areSiblings((*cu)[i-3], (*cu)[i-2], (*cu)[i-1], cid) {
+			return false
+		}
+	}
+	return true
+}
+
+// Normalize normalizes the CellUnion.
+func (cu *CellUnion) Normalize() {
+	sortCellIDs(*cu)
+
+	output := make([]CellID, 0, len(*cu)) // the list of accepted cells
+	// Loop invariant: output is a sorted list of cells with no redundancy.
+	for _, ci := range *cu {
+		// The first two passes here either ignore this new candidate,
+		// or remove previously accepted cells that are covered by this candidate.
+
+		// Ignore this cell if it is contained by the previous one.
+		// We only need to check the last accepted cell. The ordering of the
+		// cells implies containment (but not the converse), and output has no redundancy,
+		// so if this candidate is not contained by the last accepted cell
+		// then it cannot be contained by any previously accepted cell.
+		if len(output) > 0 && output[len(output)-1].Contains(ci) {
+			continue
+		}
+
+		// Discard any previously accepted cells contained by this one.
+		// This could be any contiguous trailing subsequence, but it can't be
+		// a discontiguous subsequence because of the containment property of
+		// sorted S2 cells mentioned above.
+		j := len(output) - 1 // last index to keep
+		for j >= 0 {
+			if !ci.Contains(output[j]) {
+				break
+			}
+			j--
+		}
+		output = output[:j+1]
+
+		// See if the last three cells plus this one can be collapsed.
+		// We loop because collapsing three accepted cells and adding a higher level cell
+		// could cascade into previously accepted cells.
+		for len(output) >= 3 && areSiblings(output[len(output)-3], output[len(output)-2], output[len(output)-1], ci) {
+			// Replace four children by their parent cell.
+			output = output[:len(output)-3]
+			ci = ci.immediateParent() // checked !ci.isFace above
+		}
+		output = append(output, ci)
+	}
+	*cu = output
+}
+
+// IntersectsCellID reports whether this CellUnion intersects the given cell ID.
+func (cu *CellUnion) IntersectsCellID(id CellID) bool {
+	// Find index of array item that occurs directly after our probe cell:
+	i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
+
+	if i != len(*cu) && (*cu)[i].RangeMin() <= id.RangeMax() {
+		return true
+	}
+	return i != 0 && (*cu)[i-1].RangeMax() >= id.RangeMin()
+}
+
+// ContainsCellID reports whether the CellUnion contains the given cell ID.
+// Containment is defined with respect to regions, e.g. a cell contains its 4 children.
+//
+// CAVEAT: If you have constructed a non-normalized CellUnion, note that groups
+// of 4 child cells are *not* considered to contain their parent cell. To get
+// this behavior you must call Normalize() explicitly.
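+// For example (editor's sketch):
+//
+//	cu := CellUnion{CellIDFromFace(3)}
+//	cu.ContainsCellID(CellIDFromFace(3).ChildBegin()) // true: the parent covers its child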
+func (cu *CellUnion) ContainsCellID(id CellID) bool { + // Find index of array item that occurs directly after our probe cell: + i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] }) + + if i != len(*cu) && (*cu)[i].RangeMin() <= id { + return true + } + return i != 0 && (*cu)[i-1].RangeMax() >= id +} + +// Denormalize replaces this CellUnion with an expanded version of the +// CellUnion where any cell whose level is less than minLevel or where +// (level - minLevel) is not a multiple of levelMod is replaced by its +// children, until either both of these conditions are satisfied or the +// maximum level is reached. +func (cu *CellUnion) Denormalize(minLevel, levelMod int) { + var denorm CellUnion + for _, id := range *cu { + level := id.Level() + newLevel := level + if newLevel < minLevel { + newLevel = minLevel + } + if levelMod > 1 { + newLevel += (maxLevel - (newLevel - minLevel)) % levelMod + if newLevel > maxLevel { + newLevel = maxLevel + } + } + if newLevel == level { + denorm = append(denorm, id) + } else { + end := id.ChildEndAtLevel(newLevel) + for ci := id.ChildBeginAtLevel(newLevel); ci != end; ci = ci.Next() { + denorm = append(denorm, ci) + } + } + } + *cu = denorm +} + +// RectBound returns a Rect that bounds this entity. +func (cu *CellUnion) RectBound() Rect { + bound := EmptyRect() + for _, c := range *cu { + bound = bound.Union(CellFromCellID(c).RectBound()) + } + return bound +} + +// CapBound returns a Cap that bounds this entity. +func (cu *CellUnion) CapBound() Cap { + if len(*cu) == 0 { + return EmptyCap() + } + + // Compute the approximate centroid of the region. This won't produce the + // bounding cap of minimal area, but it should be close enough. + var centroid Point + + for _, ci := range *cu { + area := AvgAreaMetric.Value(ci.Level()) + centroid = Point{centroid.Add(ci.Point().Mul(area))} + } + + if zero := (Point{}); centroid == zero { + centroid = PointFromCoords(1, 0, 0) + } else { + centroid = Point{centroid.Normalize()} + } + + // Use the centroid as the cap axis, and expand the cap angle so that it + // contains the bounding caps of all the individual cells. Note that it is + // *not* sufficient to just bound all the cell vertices because the bounding + // cap may be concave (i.e. cover more than one hemisphere). + c := CapFromPoint(centroid) + for _, ci := range *cu { + c = c.AddCap(CellFromCellID(ci).CapBound()) + } + + return c +} + +// ContainsCell reports whether this cell union contains the given cell. +func (cu *CellUnion) ContainsCell(c Cell) bool { + return cu.ContainsCellID(c.id) +} + +// IntersectsCell reports whether this cell union intersects the given cell. +func (cu *CellUnion) IntersectsCell(c Cell) bool { + return cu.IntersectsCellID(c.id) +} + +// ContainsPoint reports whether this cell union contains the given point. +func (cu *CellUnion) ContainsPoint(p Point) bool { + return cu.ContainsCell(CellFromPoint(p)) +} + +// CellUnionBound computes a covering of the CellUnion. +func (cu *CellUnion) CellUnionBound() []CellID { + return cu.CapBound().CellUnionBound() +} + +// LeafCellsCovered reports the number of leaf cells covered by this cell union. +// This will be no more than 6*2^60 for the whole sphere. +func (cu *CellUnion) LeafCellsCovered() int64 { + var numLeaves int64 + for _, c := range *cu { + numLeaves += 1 << uint64((maxLevel-int64(c.Level()))<<1) + } + return numLeaves +} + +// Returns true if the given four cells have a common parent. +// This requires that the four CellIDs are distinct. 
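+// For example (editor's sketch):
+//
+//	ch := CellIDFromFace(0).Children()
+//	areSiblings(ch[0], ch[1], ch[2], ch[3])             // true
+//	areSiblings(ch[0], ch[1], ch[2], CellIDFromFace(1)) // false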
+func areSiblings(a, b, c, d CellID) bool {
+	// A necessary (but not sufficient) condition is that the XOR of the
+	// four cell IDs must be zero. This is also very fast to test.
+	if (a ^ b ^ c) != d {
+		return false
+	}
+
+	// Now we do a slightly more expensive but exact test. First, compute a
+	// mask that blocks out the two bits that encode the child position of
+	// "id" with respect to its parent, then check that the other three
+	// children all agree with "mask".
+	mask := d.lsb() << 1
+	mask = ^(mask + (mask << 1))
+	idMasked := (uint64(d) & mask)
+	return ((uint64(a)&mask) == idMasked &&
+		(uint64(b)&mask) == idMasked &&
+		(uint64(c)&mask) == idMasked &&
+		!d.isFace())
+}
+
+// Contains reports whether this CellUnion contains all of the CellIDs of the given CellUnion.
+func (cu *CellUnion) Contains(o CellUnion) bool {
+	// TODO(roberts): Investigate alternatives such as divide-and-conquer
+	// or alternating-skip-search that may be significantly faster in both
+	// the average and worst case. This applies to Intersects as well.
+	for _, id := range o {
+		if !cu.ContainsCellID(id) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Intersects reports whether this CellUnion intersects any of the CellIDs of the given CellUnion.
+func (cu *CellUnion) Intersects(o CellUnion) bool {
+	for _, c := range *cu {
+		if o.IntersectsCellID(c) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// lowerBound returns the index in this CellUnion to the first element whose value
+// is not considered to go before the given cell id. (i.e., either it is equivalent
+// or comes after the given id.) If there is no match, then end is returned.
+func (cu *CellUnion) lowerBound(begin, end int, id CellID) int {
+	for i := begin; i < end; i++ {
+		if (*cu)[i] >= id {
+			return i
+		}
+	}
+
+	return end
+}
+
+// cellUnionDifferenceInternal adds the difference between the CellID and the union to
+// the result CellUnion. If they intersect but the difference is non-empty, it divides
+// and conquers.
+func (cu *CellUnion) cellUnionDifferenceInternal(id CellID, other *CellUnion) {
+	if !other.IntersectsCellID(id) {
+		(*cu) = append((*cu), id)
+		return
+	}
+
+	if !other.ContainsCellID(id) {
+		for _, child := range id.Children() {
+			cu.cellUnionDifferenceInternal(child, other)
+		}
+	}
+}
+
+// ExpandAtLevel expands this CellUnion by adding a rim of cells at expandLevel
+// around the union's boundary.
+//
+// For each cell c in the union, we add all cells at level
+// expandLevel that abut c. There are typically eight of those
+// (four edge-abutting and four sharing a vertex). However, if c is
+// finer than expandLevel, we add all cells abutting
+// c.Parent(expandLevel) as well as c.Parent(expandLevel) itself,
+// as an expandLevel cell rarely abuts a smaller cell.
+//
+// Note that the size of the output is exponential in
+// expandLevel. For example, if expandLevel == 20 and the input
+// has a cell at level 10, there will be on the order of 4000
+// adjacent cells in the output. For most applications the
+// ExpandByRadius method below is easier to use.
+func (cu *CellUnion) ExpandAtLevel(level int) {
+	var output CellUnion
+	levelLsb := lsbForLevel(level)
+	for i := len(*cu) - 1; i >= 0; i-- {
+		id := (*cu)[i]
+		if id.lsb() < levelLsb {
+			id = id.Parent(level)
+			// Optimization: skip over any cells contained by this one. This is
+			// especially important when very small regions are being expanded.
+			for i > 0 && id.Contains((*cu)[i-1]) {
+				i--
+			}
+		}
+		output = append(output, id)
+		output = append(output, id.AllNeighbors(level)...)
+	}
+	sortCellIDs(output)
+
+	*cu = output
+	cu.Normalize()
+}
+
+// ExpandByRadius expands this CellUnion such that it contains all points whose
+// distance to the CellUnion is at most minRadius, but does not use cells that
+// are more than maxLevelDiff levels higher than the largest cell in the input.
+// The second parameter controls the tradeoff between accuracy and output size
+// when a large region is being expanded by a small amount (e.g. expanding Canada
+// by 1km). For example, if maxLevelDiff == 4 the region will always be expanded
+// by approximately 1/16 the width of its largest cell. Note that in the worst case,
+// the number of cells in the output can be up to 4 * (1 + 2 ** maxLevelDiff) times
+// larger than the number of cells in the input.
+func (cu *CellUnion) ExpandByRadius(minRadius s1.Angle, maxLevelDiff int) {
+	minLevel := maxLevel
+	for _, cid := range *cu {
+		minLevel = minInt(minLevel, cid.Level())
+	}
+
+	// Find the maximum level such that all cells are at least "minRadius" wide.
+	radiusLevel := MinWidthMetric.MaxLevel(minRadius.Radians())
+	if radiusLevel == 0 && minRadius.Radians() > MinWidthMetric.Value(0) {
+		// The requested expansion is greater than the width of a face cell.
+		// The easiest way to handle this is to expand twice.
+		cu.ExpandAtLevel(0)
+	}
+	cu.ExpandAtLevel(minInt(minLevel+maxLevelDiff, radiusLevel))
+}
+
+// Equal reports whether the two CellUnions are equal.
+func (cu CellUnion) Equal(o CellUnion) bool {
+	if len(cu) != len(o) {
+		return false
+	}
+	for i := 0; i < len(cu); i++ {
+		if cu[i] != o[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// AverageArea returns the average area of this CellUnion.
+// This is accurate to within a factor of 1.7.
+func (cu *CellUnion) AverageArea() float64 {
+	return AvgAreaMetric.Value(maxLevel) * float64(cu.LeafCellsCovered())
+}
+
+// ApproxArea returns the approximate area of this CellUnion. This method is accurate
+// to within 3% for all cell sizes and accurate to within 0.1% for cells
+// at level 5 or higher within the union.
+func (cu *CellUnion) ApproxArea() float64 {
+	var area float64
+	for _, id := range *cu {
+		area += CellFromCellID(id).ApproxArea()
+	}
+	return area
+}
+
+// ExactArea returns the area of this CellUnion as accurately as possible.
+func (cu *CellUnion) ExactArea() float64 {
+	var area float64
+	for _, id := range *cu {
+		area += CellFromCellID(id).ExactArea()
+	}
+	return area
+}
+
+// Encode encodes the CellUnion.
+func (cu *CellUnion) Encode(w io.Writer) error {
+	e := &encoder{w: w}
+	cu.encode(e)
+	return e.err
+}
+
+func (cu *CellUnion) encode(e *encoder) {
+	e.writeInt8(encodingVersion)
+	e.writeInt64(int64(len(*cu)))
+	for _, ci := range *cu {
+		ci.encode(e)
+	}
+}
+
+// Decode decodes the CellUnion.
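+// A round-trip sketch (editor's illustration):
+//
+//	var buf bytes.Buffer
+//	orig := CellUnion{CellIDFromFace(1)}
+//	_ = orig.Encode(&buf)
+//	var got CellUnion
+//	_ = got.Decode(&buf) // got now equals orig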
+func (cu *CellUnion) Decode(r io.Reader) error { + d := &decoder{r: asByteReader(r)} + cu.decode(d) + return d.err +} + +func (cu *CellUnion) decode(d *decoder) { + version := d.readInt8() + if d.err != nil { + return + } + if version != encodingVersion { + d.err = fmt.Errorf("only version %d is supported", encodingVersion) + return + } + n := d.readInt64() + if d.err != nil { + return + } + const maxCells = 1000000 + if n > maxCells { + d.err = fmt.Errorf("too many cells (%d; max is %d)", n, maxCells) + return + } + *cu = make([]CellID, n) + for i := range *cu { + (*cu)[i].decode(d) + } +} diff --git a/vendor/github.com/golang/geo/s2/centroids.go b/vendor/github.com/golang/geo/s2/centroids.go new file mode 100644 index 000000000..e8a91c442 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/centroids.go @@ -0,0 +1,133 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/r3" +) + +// There are several notions of the "centroid" of a triangle. First, there +// is the planar centroid, which is simply the centroid of the ordinary +// (non-spherical) triangle defined by the three vertices. Second, there is +// the surface centroid, which is defined as the intersection of the three +// medians of the spherical triangle. It is possible to show that this +// point is simply the planar centroid projected to the surface of the +// sphere. Finally, there is the true centroid (mass centroid), which is +// defined as the surface integral over the spherical triangle of (x,y,z) +// divided by the triangle area. This is the point that the triangle would +// rotate around if it was spinning in empty space. +// +// The best centroid for most purposes is the true centroid. Unlike the +// planar and surface centroids, the true centroid behaves linearly as +// regions are added or subtracted. That is, if you split a triangle into +// pieces and compute the average of their centroids (weighted by triangle +// area), the result equals the centroid of the original triangle. This is +// not true of the other centroids. +// +// Also note that the surface centroid may be nowhere near the intuitive +// "center" of a spherical triangle. For example, consider the triangle +// with vertices A=(1,eps,0), B=(0,0,1), C=(-1,eps,0) (a quarter-sphere). +// The surface centroid of this triangle is at S=(0, 2*eps, 1), which is +// within a distance of 2*eps of the vertex B. Note that the median from A +// (the segment connecting A to the midpoint of BC) passes through S, since +// this is the shortest path connecting the two endpoints. On the other +// hand, the true centroid is at M=(0, 0.5, 0.5), which when projected onto +// the surface is a much more reasonable interpretation of the "center" of +// this triangle. +// + +// TrueCentroid returns the true centroid of the spherical triangle ABC +// multiplied by the signed area of spherical triangle ABC. 
The reasons for +// multiplying by the signed area are (1) this is the quantity that needs to be +// summed to compute the centroid of a union or difference of triangles, and +// (2) it's actually easier to calculate this way. All points must have unit length. +// +// Note that the result of this function is defined to be Point(0, 0, 0) if +// the triangle is degenerate. +func TrueCentroid(a, b, c Point) Point { + // Use Distance to get accurate results for small triangles. + ra := float64(1) + if sa := float64(b.Distance(c)); sa != 0 { + ra = sa / math.Sin(sa) + } + rb := float64(1) + if sb := float64(c.Distance(a)); sb != 0 { + rb = sb / math.Sin(sb) + } + rc := float64(1) + if sc := float64(a.Distance(b)); sc != 0 { + rc = sc / math.Sin(sc) + } + + // Now compute a point M such that: + // + // [Ax Ay Az] [Mx] [ra] + // [Bx By Bz] [My] = 0.5 * det(A,B,C) * [rb] + // [Cx Cy Cz] [Mz] [rc] + // + // To improve the numerical stability we subtract the first row (A) from the + // other two rows; this reduces the cancellation error when A, B, and C are + // very close together. Then we solve it using Cramer's rule. + // + // The result is the true centroid of the triangle multiplied by the + // triangle's area. + // + // This code still isn't as numerically stable as it could be. + // The biggest potential improvement is to compute B-A and C-A more + // accurately so that (B-A)x(C-A) is always inside triangle ABC. + x := r3.Vector{a.X, b.X - a.X, c.X - a.X} + y := r3.Vector{a.Y, b.Y - a.Y, c.Y - a.Y} + z := r3.Vector{a.Z, b.Z - a.Z, c.Z - a.Z} + r := r3.Vector{ra, rb - ra, rc - ra} + + return Point{r3.Vector{y.Cross(z).Dot(r), z.Cross(x).Dot(r), x.Cross(y).Dot(r)}.Mul(0.5)} +} + +// EdgeTrueCentroid returns the true centroid of the spherical geodesic edge AB +// multiplied by the length of the edge AB. As with triangles, the true centroid +// of a collection of line segments may be computed simply by summing the result +// of this method for each segment. +// +// Note that the planar centroid of a line segment is simply 0.5 * (a + b), +// while the surface centroid is (a + b).Normalize(). However neither of +// these values is appropriate for computing the centroid of a collection of +// edges (such as a polyline). +// +// Also note that the result of this function is defined to be Point(0, 0, 0) +// if the edge is degenerate. +func EdgeTrueCentroid(a, b Point) Point { + // The centroid (multiplied by length) is a vector toward the midpoint + // of the edge, whose length is twice the sine of half the angle between + // the two vertices. Defining theta to be this angle, we have: + vDiff := a.Sub(b.Vector) // Length == 2*sin(theta) + vSum := a.Add(b.Vector) // Length == 2*cos(theta) + sin2 := vDiff.Norm2() + cos2 := vSum.Norm2() + if cos2 == 0 { + return Point{} // Ignore antipodal edges. + } + return Point{vSum.Mul(math.Sqrt(sin2 / cos2))} // Length == 2*sin(theta) +} + +// PlanarCentroid returns the centroid of the planar triangle ABC. This can be +// normalized to unit length to obtain the "surface centroid" of the corresponding +// spherical triangle, i.e. the intersection of the three medians. However, note +// that for large spherical triangles the surface centroid may be nowhere near +// the intuitive "center". +func PlanarCentroid(a, b, c Point) Point { + return Point{a.Add(b.Vector).Add(c.Vector).Mul(1. 
/ 3)}
+}
diff --git a/vendor/github.com/golang/geo/s2/contains_point_query.go b/vendor/github.com/golang/geo/s2/contains_point_query.go
new file mode 100644
index 000000000..3026f3601
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/contains_point_query.go
@@ -0,0 +1,190 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// VertexModel defines whether shapes are considered to contain their vertices.
+// Note that these definitions differ from the ones used by BooleanOperation.
+//
+// Note that points other than vertices are never contained by polylines.
+// If you need this behavior, use ClosestEdgeQuery's IsDistanceLess
+// with a suitable distance threshold instead.
+type VertexModel int
+
+const (
+	// VertexModelOpen means no shapes contain their vertices (not even
+	// points). Therefore Contains(Point) returns true if and only if the
+	// point is in the interior of some polygon.
+	VertexModelOpen VertexModel = iota
+
+	// VertexModelSemiOpen means that polygon point containment is defined
+	// such that if several polygons tile the region around a vertex, then
+	// exactly one of those polygons contains that vertex. Points and
+	// polylines still do not contain any vertices.
+	VertexModelSemiOpen
+
+	// VertexModelClosed means all shapes contain their vertices (including
+	// points and polylines).
+	VertexModelClosed
+)
+
+// ContainsPointQuery determines whether one or more shapes in a ShapeIndex
+// contain a given Point. The ShapeIndex may contain any number of points,
+// polylines, and/or polygons (possibly overlapping). Shape boundaries may be
+// modeled as Open, SemiOpen, or Closed (this affects whether or not shapes are
+// considered to contain their vertices).
+//
+// This type is not safe for concurrent use.
+//
+// However, note that if you need to do a large number of point containment
+// tests, it is more efficient to re-use the query rather than creating a new
+// one each time.
+type ContainsPointQuery struct {
+	model VertexModel
+	index *ShapeIndex
+	iter  *ShapeIndexIterator
+}
+
+// NewContainsPointQuery creates a new instance of the ContainsPointQuery for the index
+// and given vertex model choice.
+func NewContainsPointQuery(index *ShapeIndex, model VertexModel) *ContainsPointQuery {
+	return &ContainsPointQuery{
+		index: index,
+		model: model,
+		iter:  index.Iterator(),
+	}
+}
+
+// Contains reports whether any shape in the query's index contains the point p
+// under the query's vertex model (Open, SemiOpen, or Closed).
+func (q *ContainsPointQuery) Contains(p Point) bool {
+	if !q.iter.LocatePoint(p) {
+		return false
+	}
+
+	cell := q.iter.IndexCell()
+	for _, clipped := range cell.shapes {
+		if q.shapeContains(clipped, q.iter.Center(), p) {
+			return true
+		}
+	}
+	return false
+}
+
+// shapeContains reports whether the clippedShape from the iterator's center position contains
+// the given point.
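+//
+// It uses the classic even-odd (parity) rule: starting from the clipped
+// shape's containsCenter bit, the result is toggled once for every edge
+// that crosses the segment from the cell center to p.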
+func (q *ContainsPointQuery) shapeContains(clipped *clippedShape, center, p Point) bool {
+	inside := clipped.containsCenter
+	numEdges := clipped.numEdges()
+	if numEdges <= 0 {
+		return inside
+	}
+
+	shape := q.index.Shape(clipped.shapeID)
+	if shape.Dimension() != 2 {
+		// Points and polylines can be ignored unless the vertex model is Closed.
+		if q.model != VertexModelClosed {
+			return false
+		}
+
+		// Otherwise, the point is contained if and only if it matches a vertex.
+		for _, edgeID := range clipped.edges {
+			edge := shape.Edge(edgeID)
+			if edge.V0 == p || edge.V1 == p {
+				return true
+			}
+		}
+		return false
+	}
+
+	// Test containment by drawing a line segment from the cell center to the
+	// given point and counting edge crossings.
+	crosser := NewEdgeCrosser(center, p)
+	for _, edgeID := range clipped.edges {
+		edge := shape.Edge(edgeID)
+		sign := crosser.CrossingSign(edge.V0, edge.V1)
+		if sign == DoNotCross {
+			continue
+		}
+		if sign == MaybeCross {
+			// For the Open and Closed models, check whether p is a vertex.
+			if q.model != VertexModelSemiOpen && (edge.V0 == p || edge.V1 == p) {
+				return (q.model == VertexModelClosed)
+			}
+			// C++ plays fast and loose with the int <-> bool conversions here.
+			if VertexCrossing(crosser.a, crosser.b, edge.V0, edge.V1) {
+				sign = Cross
+			} else {
+				sign = DoNotCross
+			}
+		}
+		inside = inside != (sign == Cross)
+	}
+
+	return inside
+}
+
+// ShapeContains reports whether the given shape contains the point under this
+// query's vertex model (Open, SemiOpen, or Closed).
+//
+// This requires that the shape belong to this query's index.
+func (q *ContainsPointQuery) ShapeContains(shape Shape, p Point) bool {
+	if !q.iter.LocatePoint(p) {
+		return false
+	}
+
+	clipped := q.iter.IndexCell().findByShapeID(q.index.idForShape(shape))
+	if clipped == nil {
+		return false
+	}
+	return q.shapeContains(clipped, q.iter.Center(), p)
+}
+
+// shapeVisitorFunc is a type of function that can be called against shapes in an index.
+type shapeVisitorFunc func(shape Shape) bool
+
+// visitContainingShapes visits all shapes in the given index that contain the
+// given point p, terminating early if the given visitor function returns false,
+// in which case visitContainingShapes returns false. Each shape is
+// visited at most once.
+func (q *ContainsPointQuery) visitContainingShapes(p Point, f shapeVisitorFunc) bool {
+	// This function returns false only if the algorithm terminates early
+	// because the visitor function returned false.
+	if !q.iter.LocatePoint(p) {
+		return true
+	}
+
+	cell := q.iter.IndexCell()
+	for _, clipped := range cell.shapes {
+		if q.shapeContains(clipped, q.iter.Center(), p) &&
+			!f(q.index.Shape(clipped.shapeID)) {
+			return false
+		}
+	}
+	return true
+}
+
+// ContainingShapes returns a slice of all shapes that contain the given point.
+func (q *ContainsPointQuery) ContainingShapes(p Point) []Shape {
+	var shapes []Shape
+	q.visitContainingShapes(p, func(shape Shape) bool {
+		shapes = append(shapes, shape)
+		return true
+	})
+	return shapes
+}
+
+// TODO(roberts): Remaining methods from C++
+// type edgeVisitorFunc func(shape ShapeEdge) bool
+// func (q *ContainsPointQuery) visitIncidentEdges(p Point, v edgeVisitorFunc) bool
diff --git a/vendor/github.com/golang/geo/s2/contains_vertex_query.go b/vendor/github.com/golang/geo/s2/contains_vertex_query.go
new file mode 100644
index 000000000..8e74f9e5b
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/contains_vertex_query.go
@@ -0,0 +1,63 @@
+// Copyright 2017 Google Inc.
All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// ContainsVertexQuery is used to track the edges entering and leaving the
+// given vertex of a Polygon in order to determine whether that vertex is
+// contained by the Polygon.
+//
+// Point containment is defined according to the semi-open boundary model
+// which means that if several polygons tile the region around a vertex,
+// then exactly one of those polygons contains that vertex.
+type ContainsVertexQuery struct {
+	target  Point
+	edgeMap map[Point]int
+}
+
+// NewContainsVertexQuery returns a new query for the given vertex whose
+// containment will be determined.
+func NewContainsVertexQuery(target Point) *ContainsVertexQuery {
+	return &ContainsVertexQuery{
+		target:  target,
+		edgeMap: make(map[Point]int),
+	}
+}
+
+// AddEdge adds the edge between target and v with the given direction.
+// (+1 = outgoing, -1 = incoming, 0 = degenerate).
+func (q *ContainsVertexQuery) AddEdge(v Point, direction int) {
+	q.edgeMap[v] += direction
+}
+
+// ContainsVertex reports +1 if the target vertex is contained, -1 if it is
+// not contained, and 0 if the incident edges consist of matched sibling pairs.
+func (q *ContainsVertexQuery) ContainsVertex() int {
+	// Find the unmatched edge that is immediately clockwise from Ortho(P).
+	referenceDir := Point{q.target.Ortho()}
+
+	bestPoint := referenceDir
+	bestDir := 0
+
+	for k, v := range q.edgeMap {
+		if v == 0 {
+			continue // This is a "matched" edge.
+		}
+		if OrderedCCW(referenceDir, bestPoint, k, q.target) {
+			bestPoint = k
+			bestDir = v
+		}
+	}
+	return bestDir
+}
diff --git a/vendor/github.com/golang/geo/s2/convex_hull_query.go b/vendor/github.com/golang/geo/s2/convex_hull_query.go
new file mode 100644
index 000000000..d1e79d0c1
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/convex_hull_query.go
@@ -0,0 +1,239 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"sort"
+)
+
+// ConvexHullQuery builds the convex hull of any collection of points,
+// polylines, loops, and polygons. It returns a single convex loop.
+//
+// The convex hull is defined as the smallest convex region on the sphere that
+// contains all of your input geometry. Recall that a region is "convex" if
+// for every pair of points inside the region, the straight edge between them
+// is also inside the region. In our case, a "straight" edge is a geodesic,
+// i.e.
the shortest path on the sphere between two points.
+//
+// Containment of input geometry is defined as follows:
+//
+// - Each input loop and polygon is contained by the convex hull exactly
+// (i.e., according to Polygon's Contains(Polygon)).
+//
+// - Each input point is either contained by the convex hull or is a vertex
+// of the convex hull. (Recall that S2Loops do not necessarily contain their
+// vertices.)
+//
+// - For each input polyline, the convex hull contains all of its vertices
+// according to the rule for points above. (The definition of convexity
+// then ensures that the convex hull also contains the polyline edges.)
+//
+// To use this type, call the various Add... methods to add your input geometry, and
+// then call ConvexHull. Note that ConvexHull does *not* reset the
+// state; you can continue adding geometry if desired and compute the convex
+// hull again. If you want to start from scratch, simply create a new
+// ConvexHullQuery value.
+//
+// This implements Andrew's monotone chain algorithm, which is a variant of the
+// Graham scan (see https://en.wikipedia.org/wiki/Graham_scan). The time
+// complexity is O(n log n), and the space required is O(n). In fact only the
+// call to "sort" takes O(n log n) time; the rest of the algorithm is linear.
+//
+// Demonstration of the algorithm and code:
+// en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain
+//
+// This type is not safe for concurrent use.
+type ConvexHullQuery struct {
+	bound  Rect
+	points []Point
+}
+
+// NewConvexHullQuery creates a new ConvexHullQuery.
+func NewConvexHullQuery() *ConvexHullQuery {
+	return &ConvexHullQuery{
+		bound: EmptyRect(),
+	}
+}
+
+// AddPoint adds the given point to the input geometry.
+func (q *ConvexHullQuery) AddPoint(p Point) {
+	q.bound = q.bound.AddPoint(LatLngFromPoint(p))
+	q.points = append(q.points, p)
+}
+
+// AddPolyline adds the given polyline to the input geometry.
+func (q *ConvexHullQuery) AddPolyline(p *Polyline) {
+	q.bound = q.bound.Union(p.RectBound())
+	q.points = append(q.points, (*p)...)
+}
+
+// AddLoop adds the given loop to the input geometry.
+func (q *ConvexHullQuery) AddLoop(l *Loop) {
+	q.bound = q.bound.Union(l.RectBound())
+	if l.isEmptyOrFull() {
+		return
+	}
+	q.points = append(q.points, l.vertices...)
+}
+
+// AddPolygon adds the given polygon to the input geometry.
+func (q *ConvexHullQuery) AddPolygon(p *Polygon) {
+	q.bound = q.bound.Union(p.RectBound())
+	for _, l := range p.loops {
+		// Only loops at depth 0 can contribute to the convex hull.
+		if l.depth == 0 {
+			q.AddLoop(l)
+		}
+	}
+}
+
+// CapBound returns a bounding cap for the input geometry provided.
+//
+// Note that this method does not clear the geometry; you can continue
+// adding to it and call this method again if desired.
+func (q *ConvexHullQuery) CapBound() Cap {
+	// We keep track of a rectangular bound rather than a spherical cap because
+	// it is easy to compute a tight bound for a union of rectangles, whereas it
+	// is quite difficult to compute a tight bound around a union of caps.
+	// Also, polygons and polylines implement CapBound() in terms of
+	// RectBound() for this same reason, so it is much better to keep track
+	// of a rectangular bound as we go along and convert it at the end.
+	//
+	// TODO(roberts): We could compute an optimal bound by implementing Welzl's
+	// algorithm.
However we would still need to have special handling of loops + // and polygons, since if a loop spans more than 180 degrees in any + // direction (i.e., if it contains two antipodal points), then it is not + // enough just to bound its vertices. In this case the only convex bounding + // cap is FullCap(), and the only convex bounding loop is the full loop. + return q.bound.CapBound() +} + +// ConvexHull returns a Loop representing the convex hull of the input geometry provided. +// +// If there is no geometry, this method returns an empty loop containing no +// points. +// +// If the geometry spans more than half of the sphere, this method returns a +// full loop containing the entire sphere. +// +// If the geometry contains 1 or 2 points, or a single edge, this method +// returns a very small loop consisting of three vertices (which are a +// superset of the input vertices). +// +// Note that this method does not clear the geometry; you can continue +// adding to the query and call this method again. +func (q *ConvexHullQuery) ConvexHull() *Loop { + c := q.CapBound() + if c.Height() >= 1 { + // The bounding cap is not convex. The current bounding cap + // implementation is not optimal, but nevertheless it is likely that the + // input geometry itself is not contained by any convex polygon. In any + // case, we need a convex bounding cap to proceed with the algorithm below + // (in order to construct a point "origin" that is definitely outside the + // convex hull). + return FullLoop() + } + + // Remove duplicates. We need to do this before checking whether there are + // fewer than 3 points. + x := make(map[Point]bool) + r, w := 0, 0 // read/write indexes + for ; r < len(q.points); r++ { + if x[q.points[r]] { + continue + } + q.points[w] = q.points[r] + x[q.points[r]] = true + w++ + } + q.points = q.points[:w] + + // This code implements Andrew's monotone chain algorithm, which is a simple + // variant of the Graham scan. Rather than sorting by x-coordinate, instead + // we sort the points in CCW order around an origin O such that all points + // are guaranteed to be on one side of some geodesic through O. This + // ensures that as we scan through the points, each new point can only + // belong at the end of the chain (i.e., the chain is monotone in terms of + // the angle around O from the starting point). + origin := Point{c.Center().Ortho()} + sort.Slice(q.points, func(i, j int) bool { + return RobustSign(origin, q.points[i], q.points[j]) == CounterClockwise + }) + + // Special cases for fewer than 3 points. + switch len(q.points) { + case 0: + return EmptyLoop() + case 1: + return singlePointLoop(q.points[0]) + case 2: + return singleEdgeLoop(q.points[0], q.points[1]) + } + + // Generate the lower and upper halves of the convex hull. Each half + // consists of the maximal subset of vertices such that the edge chain + // makes only left (CCW) turns. + lower := q.monotoneChain() + + // reverse the points + for left, right := 0, len(q.points)-1; left < right; left, right = left+1, right-1 { + q.points[left], q.points[right] = q.points[right], q.points[left] + } + upper := q.monotoneChain() + + // Remove the duplicate vertices and combine the chains. + lower = lower[:len(lower)-1] + upper = upper[:len(upper)-1] + lower = append(lower, upper...) + + return LoopFromPoints(lower) +} + +// monotoneChain iterates through the points, selecting the maximal subset of points +// such that the edge chain makes only left (CCW) turns. 
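+// For example (a worked trace of the loop below, under no extra assumptions):
+// with current chain [..., u, w] and next point p, w is popped while
+// RobustSign(u, w, p) is not CounterClockwise, so every consecutive triple
+// of the returned chain turns left.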
+func (q *ConvexHullQuery) monotoneChain() []Point {
+	var output []Point
+	for _, p := range q.points {
+		// Remove any points that would cause the chain to make a clockwise turn.
+		for len(output) >= 2 && RobustSign(output[len(output)-2], output[len(output)-1], p) != CounterClockwise {
+			output = output[:len(output)-1]
+		}
+		output = append(output, p)
+	}
+	return output
+}
+
+// singlePointLoop constructs a 3-vertex polygon consisting of "p" and two nearby
+// vertices. Note that ContainsPoint(p) may be false for the resulting loop.
+func singlePointLoop(p Point) *Loop {
+	const offset = 1e-15
+	d0 := p.Ortho()
+	d1 := p.Cross(d0)
+	vertices := []Point{
+		p,
+		{p.Add(d0.Mul(offset)).Normalize()},
+		{p.Add(d1.Mul(offset)).Normalize()},
+	}
+	return LoopFromPoints(vertices)
+}
+
+// singleEdgeLoop constructs a loop consisting of the two vertices and their midpoint.
+func singleEdgeLoop(a, b Point) *Loop {
+	vertices := []Point{a, b, {a.Add(b.Vector).Normalize()}}
+	loop := LoopFromPoints(vertices)
+	// The resulting loop may be clockwise, so invert it if necessary.
+	loop.Normalize()
+	return loop
+}
diff --git a/vendor/github.com/golang/geo/s2/crossing_edge_query.go b/vendor/github.com/golang/geo/s2/crossing_edge_query.go
new file mode 100644
index 000000000..51852dab4
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/crossing_edge_query.go
@@ -0,0 +1,409 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"sort"
+
+	"github.com/golang/geo/r2"
+)
+
+// CrossingEdgeQuery is used to find the Edge IDs of Shapes that are crossed by
+// a given edge (or edges).
+//
+// Note that if you need to query many edges, it is more efficient to declare
+// a single CrossingEdgeQuery instance and reuse it.
+//
+// If you want to find *all* the pairs of crossing edges, it is more efficient to
+// use the not yet implemented VisitCrossings in shapeutil.
+type CrossingEdgeQuery struct {
+	index *ShapeIndex
+
+	// temporary values used while processing a query.
+	a, b r2.Point
+	iter *ShapeIndexIterator
+
+	// candidate cells generated when finding crossings.
+	cells []*ShapeIndexCell
+}
+
+// NewCrossingEdgeQuery creates a CrossingEdgeQuery for the given index.
+func NewCrossingEdgeQuery(index *ShapeIndex) *CrossingEdgeQuery {
+	c := &CrossingEdgeQuery{
+		index: index,
+		iter:  index.Iterator(),
+	}
+	return c
+}
+
+// Crossings returns the set of edges of the shape S that intersect the given edge AB.
+// If the CrossingType is CrossingTypeInterior, then only intersections at a point
+// interior to both edges are reported, while if it is CrossingTypeAll then edges
+// that share a vertex are also reported.
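+//
+// A hedged usage sketch (index, shape, and the unit-length Points a and b are
+// assumed to exist already):
+//
+//	q := NewCrossingEdgeQuery(index)
+//	for _, edgeID := range q.Crossings(a, b, shape, CrossingTypeInterior) {
+//		edge := shape.Edge(edgeID) // an edge of shape that AB crosses
+//		_ = edge
+//	}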
+func (c *CrossingEdgeQuery) Crossings(a, b Point, shape Shape, crossType CrossingType) []int {
+	edges := c.candidates(a, b, shape)
+	if len(edges) == 0 {
+		return nil
+	}
+
+	crosser := NewEdgeCrosser(a, b)
+	out := 0
+	n := len(edges)
+
+	for in := 0; in < n; in++ {
+		b := shape.Edge(edges[in])
+		sign := crosser.CrossingSign(b.V0, b.V1)
+		if crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross) || crossType != CrossingTypeAll && sign == Cross {
+			edges[out] = edges[in]
+			out++
+		}
+	}
+
+	if out < n {
+		edges = edges[0:out]
+	}
+	return edges
+}
+
+// EdgeMap stores a sorted set of edge ids for each shape.
+type EdgeMap map[Shape][]int
+
+// CrossingsEdgeMap returns the set of all edges in the index that intersect the given
+// edge AB. If crossType is CrossingTypeInterior, then only intersections at a
+// point interior to both edges are reported, while if it is CrossingTypeAll
+// then edges that share a vertex are also reported.
+//
+// The edges are returned as a mapping from shape to the edges of that shape
+// that intersect AB. Every returned shape has at least one crossing edge.
+func (c *CrossingEdgeQuery) CrossingsEdgeMap(a, b Point, crossType CrossingType) EdgeMap {
+	edgeMap := c.candidatesEdgeMap(a, b)
+	if len(edgeMap) == 0 {
+		return nil
+	}
+
+	crosser := NewEdgeCrosser(a, b)
+	for shape, edges := range edgeMap {
+		out := 0
+		n := len(edges)
+		for in := 0; in < n; in++ {
+			edge := shape.Edge(edges[in])
+			sign := crosser.CrossingSign(edge.V0, edge.V1)
+			if (crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross)) || (crossType != CrossingTypeAll && sign == Cross) {
+				edgeMap[shape][out] = edges[in]
+				out++
+			}
+		}
+
+		if out == 0 {
+			delete(edgeMap, shape)
+		} else {
+			if out < n {
+				edgeMap[shape] = edgeMap[shape][0:out]
+			}
+		}
+	}
+	return edgeMap
+}
+
+// candidates returns a superset of the edges of the given shape that intersect
+// the edge AB.
+func (c *CrossingEdgeQuery) candidates(a, b Point, shape Shape) []int {
+	var edges []int
+
+	// For small loops it is faster to use brute force. The threshold below was
+	// determined using benchmarks.
+	const maxBruteForceEdges = 27
+	maxEdges := shape.NumEdges()
+	if maxEdges <= maxBruteForceEdges {
+		edges = make([]int, maxEdges)
+		for i := 0; i < maxEdges; i++ {
+			edges[i] = i
+		}
+		return edges
+	}
+
+	// Compute the set of index cells intersected by the query edge.
+	c.getCellsForEdge(a, b)
+	if len(c.cells) == 0 {
+		return nil
+	}
+
+	// Gather all the edges that intersect those cells and sort them.
+	// TODO(roberts): Shapes don't track their ID, so we need to range over
+	// the index to find the ID manually.
+	var shapeID int32
+	for k, v := range c.index.shapes {
+		if v == shape {
+			shapeID = k
+		}
+	}
+
+	for _, cell := range c.cells {
+		if cell == nil {
+			continue
+		}
+		clipped := cell.findByShapeID(shapeID)
+		if clipped == nil {
+			continue
+		}
+		edges = append(edges, clipped.edges...)
+	}
+
+	if len(c.cells) > 1 {
+		edges = uniqueInts(edges)
+	}
+
+	return edges
+}
+
+// uniqueInts returns the sorted unique values from the given input.
+func uniqueInts(in []int) []int {
+	var edges []int
+	m := make(map[int]bool)
+	for _, i := range in {
+		if m[i] {
+			continue
+		}
+		m[i] = true
+		edges = append(edges, i)
+	}
+	sort.Ints(edges)
+	return edges
+}
+
+// candidatesEdgeMap returns a map from shapes to the superset of edges for that
+// shape that intersect the edge AB.
+//
+// CAVEAT: This method may return shapes that have an empty set of candidate edges.
+// However the return value is non-empty only if at least one shape has a candidate edge.
+func (c *CrossingEdgeQuery) candidatesEdgeMap(a, b Point) EdgeMap {
+	edgeMap := make(EdgeMap)
+
+	// If there are only a few edges then it's faster to use brute force. We
+	// only bother with this optimization when there is a single shape.
+	if len(c.index.shapes) == 1 {
+		// Typically this method is called many times, so it is worth checking
+		// whether the edge map is empty or already consists of a single entry for
+		// this shape, and skip clearing edge map in that case.
+		shape := c.index.Shape(0)
+
+		// Note that we leave the edge map non-empty even if there are no candidates
+		// (i.e., there is a single entry with an empty set of edges).
+		edgeMap[shape] = c.candidates(a, b, shape)
+		return edgeMap
+	}
+
+	// Compute the set of index cells intersected by the query edge.
+	c.getCellsForEdge(a, b)
+	if len(c.cells) == 0 {
+		return edgeMap
+	}
+
+	// Gather all the edges that intersect those cells and sort them.
+	for _, cell := range c.cells {
+		for _, clipped := range cell.shapes {
+			s := c.index.Shape(clipped.shapeID)
+			for j := 0; j < clipped.numEdges(); j++ {
+				edgeMap[s] = append(edgeMap[s], clipped.edges[j])
+			}
+		}
+	}
+
+	if len(c.cells) > 1 {
+		for s, edges := range edgeMap {
+			edgeMap[s] = uniqueInts(edges)
+		}
+	}
+
+	return edgeMap
+}
+
+// getCells returns the set of ShapeIndexCells that might contain edges intersecting
+// the edge AB in the given cell root. This method is used primarily by loop and shapeutil.
+func (c *CrossingEdgeQuery) getCells(a, b Point, root *PaddedCell) []*ShapeIndexCell {
+	aUV, bUV, ok := ClipToFace(a, b, root.id.Face())
+	if ok {
+		c.a = aUV
+		c.b = bUV
+		edgeBound := r2.RectFromPoints(c.a, c.b)
+		if root.Bound().Intersects(edgeBound) {
+			c.computeCellsIntersected(root, edgeBound)
+		}
+	}
+
+	if len(c.cells) == 0 {
+		return nil
+	}
+
+	return c.cells
+}
+
+// getCellsForEdge populates the cells field with the set of index cells intersected by an edge AB.
+func (c *CrossingEdgeQuery) getCellsForEdge(a, b Point) {
+	c.cells = nil
+
+	segments := FaceSegments(a, b)
+	for _, segment := range segments {
+		c.a = segment.a
+		c.b = segment.b
+
+		// Optimization: rather than always starting the recursive subdivision at
+		// the top level face cell, instead we start at the smallest CellID that
+		// contains the edge (the edge root cell). This typically lets us skip
+		// quite a few levels of recursion since most edges are short.
+		edgeBound := r2.RectFromPoints(c.a, c.b)
+		pcell := PaddedCellFromCellID(CellIDFromFace(segment.face), 0)
+		edgeRoot := pcell.ShrinkToFit(edgeBound)
+
+		// Now we need to determine how the edge root cell is related to the cells
+		// in the spatial index (cellMap). There are three cases:
+		//
+		// 1. edgeRoot is an index cell or is contained within an index cell.
+		//    In this case we only need to look at the contents of that cell.
+		// 2. edgeRoot is subdivided into one or more index cells. In this case
+		//    we recursively subdivide to find the cells intersected by AB.
+		// 3. edgeRoot does not intersect any index cells. In this case there
+		//    is nothing to do.
+		relation := c.iter.LocateCellID(edgeRoot)
+		if relation == Indexed {
+			// edgeRoot is an index cell or is contained by an index cell (case 1).
+			c.cells = append(c.cells, c.iter.IndexCell())
+		} else if relation == Subdivided {
+			// edgeRoot is subdivided into one or more index cells (case 2). We
+			// find the cells intersected by AB using recursive subdivision.
+			if !edgeRoot.isFace() {
+				pcell = PaddedCellFromCellID(edgeRoot, 0)
+			}
+			c.computeCellsIntersected(pcell, edgeBound)
+		}
+	}
+}
+
+// computeCellsIntersected computes the index cells intersected by the current
+// edge that are descendants of pcell and adds them to this query's set of cells.
+func (c *CrossingEdgeQuery) computeCellsIntersected(pcell *PaddedCell, edgeBound r2.Rect) {
+
+	c.iter.seek(pcell.id.RangeMin())
+	if c.iter.Done() || c.iter.CellID() > pcell.id.RangeMax() {
+		// The index does not contain pcell or any of its descendants.
+		return
+	}
+	if c.iter.CellID() == pcell.id {
+		// The index contains this cell exactly.
+		c.cells = append(c.cells, c.iter.IndexCell())
+		return
+	}
+
+	// Otherwise, split the edge among the four children of pcell.
+	center := pcell.Middle().Lo()
+
+	if edgeBound.X.Hi < center.X {
+		// Edge is entirely contained in the two left children.
+		c.clipVAxis(edgeBound, center.Y, 0, pcell)
+		return
+	} else if edgeBound.X.Lo >= center.X {
+		// Edge is entirely contained in the two right children.
+		c.clipVAxis(edgeBound, center.Y, 1, pcell)
+		return
+	}
+
+	childBounds := c.splitUBound(edgeBound, center.X)
+	if edgeBound.Y.Hi < center.Y {
+		// Edge is entirely contained in the two lower children.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 0), childBounds[0])
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 0), childBounds[1])
+	} else if edgeBound.Y.Lo >= center.Y {
+		// Edge is entirely contained in the two upper children.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 1), childBounds[0])
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 1), childBounds[1])
+	} else {
+		// The edge bound spans all four children. The edge itself intersects
+		// at most three children (since no padding is being used).
+		c.clipVAxis(childBounds[0], center.Y, 0, pcell)
+		c.clipVAxis(childBounds[1], center.Y, 1, pcell)
+	}
+}
+
+// clipVAxis computes the intersected cells recursively for a given padded cell.
+// Given either the left (i=0) or right (i=1) side of a padded cell pcell,
+// determine whether the current edge intersects the lower child, upper child,
+// or both children, and call c.computeCellsIntersected recursively on those children.
+// The center is the v-coordinate at the center of pcell.
+func (c *CrossingEdgeQuery) clipVAxis(edgeBound r2.Rect, center float64, i int, pcell *PaddedCell) {
+	if edgeBound.Y.Hi < center {
+		// Edge is entirely contained in the lower child.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), edgeBound)
+	} else if edgeBound.Y.Lo >= center {
+		// Edge is entirely contained in the upper child.
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), edgeBound)
+	} else {
+		// The edge intersects both children.
+		childBounds := c.splitVBound(edgeBound, center)
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), childBounds[0])
+		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), childBounds[1])
+	}
+}
+
+// splitUBound returns the bound for two children as a result of splitting the
+// current edge at the given value U.
+func (c *CrossingEdgeQuery) splitUBound(edgeBound r2.Rect, u float64) [2]r2.Rect {
+	v := edgeBound.Y.ClampPoint(interpolateFloat64(u, c.a.X, c.b.X, c.a.Y, c.b.Y))
+	// diag indicates which diagonal of the bounding box is spanned by AB:
+	// it is 0 if AB has positive slope, and 1 if AB has negative slope.
+	var diag int
+	if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
+		diag = 1
+	}
+	return splitBound(edgeBound, 0, diag, u, v)
+}
+
+// splitVBound returns the bound for two children as a result of splitting the
+// current edge into two child edges at the given value V.
+func (c *CrossingEdgeQuery) splitVBound(edgeBound r2.Rect, v float64) [2]r2.Rect {
+	u := edgeBound.X.ClampPoint(interpolateFloat64(v, c.a.Y, c.b.Y, c.a.X, c.b.X))
+	var diag int
+	if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
+		diag = 1
+	}
+	return splitBound(edgeBound, diag, 0, u, v)
+}
+
+// splitBound returns the bounds for the two children as a result of splitting
+// the current edge into two child edges at the given point (u,v). uEnd and vEnd
+// indicate which bound endpoints of the first child will be updated.
+func splitBound(edgeBound r2.Rect, uEnd, vEnd int, u, v float64) [2]r2.Rect {
+	var childBounds = [2]r2.Rect{
+		edgeBound,
+		edgeBound,
+	}
+
+	if uEnd == 1 {
+		childBounds[0].X.Lo = u
+		childBounds[1].X.Hi = u
+	} else {
+		childBounds[0].X.Hi = u
+		childBounds[1].X.Lo = u
+	}
+
+	if vEnd == 1 {
+		childBounds[0].Y.Lo = v
+		childBounds[1].Y.Hi = v
+	} else {
+		childBounds[0].Y.Hi = v
+		childBounds[1].Y.Lo = v
+	}
+
+	return childBounds
+}
diff --git a/vendor/github.com/golang/geo/s2/distance_target.go b/vendor/github.com/golang/geo/s2/distance_target.go
new file mode 100644
index 000000000..066bbacfa
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/distance_target.go
@@ -0,0 +1,149 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"github.com/golang/geo/s1"
+)
+
+// The distance interface represents a set of common methods used by algorithms
+// that compute distances between various S2 types.
+type distance interface {
+	// chordAngle returns this type as a ChordAngle.
+	chordAngle() s1.ChordAngle
+
+	// fromChordAngle is used to type convert a ChordAngle to this type.
+	// This is to work around needing to be clever in parts of the code
+	// where a distanceTarget interface method expects distances, but the
+	// user only supplies a ChordAngle, and we need to dynamically cast it
+	// to an appropriate distance interface type.
+	fromChordAngle(o s1.ChordAngle) distance
+
+	// zero returns a zero distance.
+	zero() distance
+	// negative returns a value smaller than any valid value.
+	negative() distance
+	// infinity returns a value larger than any valid value.
+	infinity() distance
+
+	// less is similar to the Less method in Sort. To get minimum values,
+	// this would be a less than type operation. For maximum, this would
+	// be a greater than type operation.
+	less(other distance) bool
+
+	// sub subtracts the other value from this one and returns the new value.
+	// This is done as a method and not a simple mathematical operation to
+	// allow closest and furthest to implement this in opposite ways.
+	sub(other distance) distance
+
+	// chordAngleBound reports the upper bound on a ChordAngle corresponding
+	// to this distance. For example, if distance measures WGS84 ellipsoid
+	// distance then the corresponding angle needs to be 0.56% larger.
+	chordAngleBound() s1.ChordAngle
+
+	// updateDistance may update the value this distance represents
+	// based on the given input. The updated value and a boolean reporting
+	// if the value was changed are returned.
+	updateDistance(other distance) (distance, bool)
+}
+
+// distanceTarget is an interface that represents a geometric type to which distances
+// are measured.
+//
+// For example, there are implementations that measure distances to a Point,
+// an Edge, a Cell, a CellUnion, and even to an arbitrary collection of geometry
+// stored in ShapeIndex.
+//
+// The distanceTarget types are provided for the benefit of types that measure
+// distances and/or find nearby geometry, such as ClosestEdgeQuery, FurthestEdgeQuery,
+// ClosestPointQuery, and ClosestCellQuery, etc.
+type distanceTarget interface {
+	// capBound returns a Cap that bounds the set of points whose distance to the
+	// target is distance.zero().
+	capBound() Cap
+
+	// updateDistanceToPoint updates the distance if the distance to
+	// the point P is within the given dist.
+	// The boolean reports if the value was updated.
+	updateDistanceToPoint(p Point, dist distance) (distance, bool)
+
+	// updateDistanceToEdge updates the distance if the distance to
+	// the edge E is within the given dist.
+	// The boolean reports if the value was updated.
+	updateDistanceToEdge(e Edge, dist distance) (distance, bool)
+
+	// updateDistanceToCell updates the distance if the distance to the cell C
+	// (including its interior) is within the given dist.
+	// The boolean reports if the value was updated.
+	updateDistanceToCell(c Cell, dist distance) (distance, bool)
+
+	// setMaxError potentially updates the value of MaxError, and reports if
+	// the specific type supports altering it. Whenever one of the
+	// updateDistanceTo... methods above returns true, the returned distance
+	// is allowed to be up to maxError larger than the true minimum distance.
+	// In other words, it gives this target object permission to terminate its
+	// distance calculation as soon as it has determined that (1) the minimum
+	// distance is less than minDist and (2) the best possible further
+	// improvement is less than maxError.
+	//
+	// If the target takes advantage of maxError to optimize its distance
+	// calculation, this method must return true. (Most target types will
+	// default to return false.)
+	setMaxError(maxErr s1.ChordAngle) bool
+
+	// maxBruteForceIndexSize reports the maximum number of indexed objects for
+	// which it is faster to compute the distance by brute force (e.g., by testing
+	// every edge) rather than by using an index.
+	//
+	// The following method is provided as a convenience for types that compute
+	// distances to a collection of indexed geometry, such as ClosestEdgeQuery
+	// and ClosestPointQuery.
+	//
+	// Types that do not support this should return a -1.
+	maxBruteForceIndexSize() int
+
+	// distance returns an instance of the underlying distance type this
+	// target uses. This is to work around the use of Templates in the C++.
+	distance() distance
+
+	// visitContainingShapes finds all polygons in the given index that
+	// completely contain a connected component of the target geometry.
(For + // example, if the target consists of 10 points, this method finds + // polygons that contain any of those 10 points.) For each such polygon, + // the visit function is called with the Shape of the polygon along with + // a point of the target geometry that is contained by that polygon. + // + // Optionally, any polygon that intersects the target geometry may also be + // returned. In other words, this method returns all polygons that + // contain any connected component of the target, along with an arbitrary + // subset of the polygons that intersect the target. + // + // For example, suppose that the index contains two abutting polygons + // A and B. If the target consists of two points "a" contained by A and + // "b" contained by B, then both A and B are returned. But if the target + // consists of the edge "ab", then any subset of {A, B} could be returned + // (because both polygons intersect the target but neither one contains + // the edge "ab"). + // + // If the visit function returns false, this method terminates early and + // returns false as well. Otherwise returns true. + // + // NOTE(roberts): This method exists only for the purpose of implementing + // edgeQuery IncludeInteriors efficiently. + visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool +} + +// shapePointVisitorFunc defines a type of function the visitContainingShapes can call. +type shapePointVisitorFunc func(containingShape Shape, targetPoint Point) bool diff --git a/vendor/github.com/golang/geo/s2/doc.go b/vendor/github.com/golang/geo/s2/doc.go new file mode 100644 index 000000000..43e7a6344 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/doc.go @@ -0,0 +1,29 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package s2 is a library for working with geometry in S² (spherical geometry). + +Its related packages, parallel to this one, are s1 (operates on S¹), r1 (operates on ℝ¹), +r2 (operates on ℝ²) and r3 (operates on ℝ³). + +This package provides types and functions for the S2 cell hierarchy and coordinate systems. +The S2 cell hierarchy is a hierarchical decomposition of the surface of a unit sphere (S²) +into ``cells''; it is highly efficient, scales from continental size to under 1 cm² +and preserves spatial locality (nearby cells have close IDs). + +More information including an in-depth introduction to S2 can be found on the +S2 website https://s2geometry.io/ +*/ +package s2 diff --git a/vendor/github.com/golang/geo/s2/edge_clipping.go b/vendor/github.com/golang/geo/s2/edge_clipping.go new file mode 100644 index 000000000..57a53bf0f --- /dev/null +++ b/vendor/github.com/golang/geo/s2/edge_clipping.go @@ -0,0 +1,672 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file contains a collection of methods for:
+//
+// (1) Robustly clipping geodesic edges to the faces of the S2 biunit cube
+// (see s2stuv), and
+//
+// (2) Robustly clipping 2D edges against 2D rectangles.
+//
+// These functions can be used to efficiently find the set of CellIDs that
+// are intersected by a geodesic edge (e.g., see CrossingEdgeQuery).

+import (
+	"math"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r2"
+	"github.com/golang/geo/r3"
+)
+
+const (
+	// edgeClipErrorUVCoord is the maximum error in a u- or v-coordinate
+	// compared to the exact result, assuming that the points A and B are in
+	// the rectangle [-1,1]x[-1,1] or slightly outside it (by 1e-10 or less).
+	edgeClipErrorUVCoord = 2.25 * dblEpsilon
+
+	// edgeClipErrorUVDist is the maximum distance from a clipped point to
+	// the corresponding exact result. It is equal to the error in a single
+	// coordinate because at most one coordinate is subject to error.
+	edgeClipErrorUVDist = 2.25 * dblEpsilon
+
+	// faceClipErrorRadians is the maximum angle between a returned vertex
+	// and the nearest point on the exact edge AB. It is equal to the
+	// maximum directional error in PointCross, plus the error when
+	// projecting points onto a cube face.
+	faceClipErrorRadians = 3 * dblEpsilon
+
+	// faceClipErrorUVDist is the same angle expressed as a maximum distance
+	// in (u,v)-space. In other words, a returned vertex is at most this far
+	// from the exact edge AB projected into (u,v)-space.
+	faceClipErrorUVDist = 9 * dblEpsilon
+
+	// faceClipErrorUVCoord is the maximum angle between a returned vertex
+	// and the nearest point on the exact edge AB expressed as the maximum error
+	// in an individual u- or v-coordinate. In other words, for each
+	// returned vertex there is a point on the exact edge AB whose u- and
+	// v-coordinates differ from the vertex by at most this amount.
+	faceClipErrorUVCoord = 9.0 * (1.0 / math.Sqrt2) * dblEpsilon
+
+	// intersectsRectErrorUVDist is the maximum error when computing if a point
+	// intersects with a given Rect. If some point of AB is inside the
+	// rectangle by at least this distance, the result is guaranteed to be true;
+	// if all points of AB are outside the rectangle by at least this distance,
+	// the result is guaranteed to be false. This bound assumes that rect is
+	// a subset of the rectangle [-1,1]x[-1,1] or extends slightly outside it
+	// (e.g., by 1e-10 or less).
+	intersectsRectErrorUVDist = 3 * math.Sqrt2 * dblEpsilon
+)
+
+// ClipToFace returns the (u,v) coordinates for the portion of the edge AB that
+// intersects the given face, or false if the edge AB does not intersect.
+// This method guarantees that the clipped vertices lie within the [-1,1]x[-1,1]
+// cube face rectangle and are within faceClipErrorUVDist of the line AB, but
+// the results may differ from those produced by FaceSegments.
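+//
+// A minimal sketch (hedged; a and b are assumed to be unit-length Points, and
+// face index 0 is used purely as an example):
+//
+//	if aUV, bUV, ok := ClipToFace(a, b, 0); ok {
+//		uvBound := r2.RectFromPoints(aUV, bUV) // (u,v) bound of the clipped edge
+//		_ = uvBound
+//	}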
+func ClipToFace(a, b Point, face int) (aUV, bUV r2.Point, intersects bool) { + return ClipToPaddedFace(a, b, face, 0.0) +} + +// ClipToPaddedFace returns the (u,v) coordinates for the portion of the edge AB that +// intersects the given face, but rather than clipping to the square [-1,1]x[-1,1] +// in (u,v) space, this method clips to [-R,R]x[-R,R] where R=(1+padding). +// Padding must be non-negative. +func ClipToPaddedFace(a, b Point, f int, padding float64) (aUV, bUV r2.Point, intersects bool) { + // Fast path: both endpoints are on the given face. + if face(a.Vector) == f && face(b.Vector) == f { + au, av := validFaceXYZToUV(f, a.Vector) + bu, bv := validFaceXYZToUV(f, b.Vector) + return r2.Point{au, av}, r2.Point{bu, bv}, true + } + + // Convert everything into the (u,v,w) coordinates of the given face. Note + // that the cross product *must* be computed in the original (x,y,z) + // coordinate system because PointCross (unlike the mathematical cross + // product) can produce different results in different coordinate systems + // when one argument is a linear multiple of the other, due to the use of + // symbolic perturbations. + normUVW := pointUVW(faceXYZtoUVW(f, a.PointCross(b))) + aUVW := pointUVW(faceXYZtoUVW(f, a)) + bUVW := pointUVW(faceXYZtoUVW(f, b)) + + // Padding is handled by scaling the u- and v-components of the normal. + // Letting R=1+padding, this means that when we compute the dot product of + // the normal with a cube face vertex (such as (-1,-1,1)), we will actually + // compute the dot product with the scaled vertex (-R,-R,1). This allows + // methods such as intersectsFace, exitAxis, etc, to handle padding + // with no further modifications. + scaleUV := 1 + padding + scaledN := pointUVW{r3.Vector{X: scaleUV * normUVW.X, Y: scaleUV * normUVW.Y, Z: normUVW.Z}} + if !scaledN.intersectsFace() { + return aUV, bUV, false + } + + // TODO(roberts): This is a workaround for extremely small vectors where some + // loss of precision can occur in Normalize causing underflow. When PointCross + // is updated to work around this, this can be removed. + if math.Max(math.Abs(normUVW.X), math.Max(math.Abs(normUVW.Y), math.Abs(normUVW.Z))) < math.Ldexp(1, -511) { + normUVW = pointUVW{normUVW.Mul(math.Ldexp(1, 563))} + } + + normUVW = pointUVW{normUVW.Normalize()} + + aTan := pointUVW{normUVW.Cross(aUVW.Vector)} + bTan := pointUVW{bUVW.Cross(normUVW.Vector)} + + // As described in clipDestination, if the sum of the scores from clipping the two + // endpoints is 3 or more, then the segment does not intersect this face. + aUV, aScore := clipDestination(bUVW, aUVW, pointUVW{scaledN.Mul(-1)}, bTan, aTan, scaleUV) + bUV, bScore := clipDestination(aUVW, bUVW, scaledN, aTan, bTan, scaleUV) + + return aUV, bUV, aScore+bScore < 3 +} + +// ClipEdge returns the portion of the edge defined by AB that is contained by the +// given rectangle. If there is no intersection, false is returned and aClip and bClip +// are undefined. +func ClipEdge(a, b r2.Point, clip r2.Rect) (aClip, bClip r2.Point, intersects bool) { + // Compute the bounding rectangle of AB, clip it, and then extract the new + // endpoints from the clipped bound. 
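+	// (The ai/aj values computed below select which vertex of the clipped
+	// bound corresponds to A: the high corner on an axis exactly when A has
+	// the larger coordinate on that axis; B takes the opposite vertex.)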
+	bound := r2.RectFromPoints(a, b)
+	if bound, intersects = clipEdgeBound(a, b, clip, bound); !intersects {
+		return aClip, bClip, false
+	}
+	ai := 0
+	if a.X > b.X {
+		ai = 1
+	}
+	aj := 0
+	if a.Y > b.Y {
+		aj = 1
+	}
+
+	return bound.VertexIJ(ai, aj), bound.VertexIJ(1-ai, 1-aj), true
+}
+
+// The three functions below (sumEqual, intersectsFace, intersectsOppositeEdges)
+// all compare a sum (u + v) to a third value w. They are implemented in such a
+// way that they produce an exact result even though all calculations are done
+// with ordinary floating-point operations. Here are the principles on which these
+// functions are based:
+//
+// A. If u + v < w in floating-point, then u + v < w in exact arithmetic.
+//
+// B. If u + v < w in exact arithmetic, then at least one of the following
+//    expressions is true in floating-point:
+//       u + v < w
+//       u < w - v
+//       v < w - u
+//
+// Proof: By rearranging terms and substituting ">" for "<", we can assume
+// that all values are non-negative. Now clearly "w" is not the smallest
+// value, so assume WLOG that "u" is the smallest. We want to show that
+// u < w - v in floating-point. If v >= w/2, the calculation of w - v is
+// exact since the result is smaller in magnitude than either input value,
+// so the result holds. Otherwise we have u <= v < w/2 and w - v >= w/2
+// (even in floating point), so the result also holds.

+// sumEqual reports whether u + v == w exactly.
+func sumEqual(u, v, w float64) bool {
+	return (u+v == w) && (u == w-v) && (v == w-u)
+}
+
+// pointUVW represents a Point in (u,v,w) coordinate space of a cube face.
+type pointUVW Point
+
+// intersectsFace reports whether a given directed line L intersects the cube face F.
+// The line L is defined by its normal N in the (u,v,w) coordinates of F.
+func (p pointUVW) intersectsFace() bool {
+	// L intersects the [-1,1]x[-1,1] square in (u,v) if and only if the dot
+	// products of N with the four corner vertices (-1,-1,1), (1,-1,1), (1,1,1),
+	// and (-1,1,1) do not all have the same sign. This is true exactly when
+	// |Nu| + |Nv| >= |Nw|. The code below evaluates this expression exactly.
+	u := math.Abs(p.X)
+	v := math.Abs(p.Y)
+	w := math.Abs(p.Z)
+
+	// We only need to consider the cases where u or v is the smallest value,
+	// since if w is the smallest then both expressions below will have a
+	// positive LHS and a negative RHS.
+	return (v >= w-u) && (u >= w-v)
+}
+
+// intersectsOppositeEdges reports whether a directed line L intersects two
+// opposite edges of a cube face F. This includes the case where L passes
+// exactly through a corner vertex of F. The directed line L is defined
+// by its normal N in the (u,v,w) coordinates of F.
+func (p pointUVW) intersectsOppositeEdges() bool {
+	// The line L intersects opposite edges of the [-1,1]x[-1,1] (u,v) square if
+	// and only if exactly two of the corner vertices lie on each side of L. This
+	// is true exactly when ||Nu| - |Nv|| >= |Nw|. The code below evaluates this
+	// expression exactly.
+	u := math.Abs(p.X)
+	v := math.Abs(p.Y)
+	w := math.Abs(p.Z)
+
+	// If w is the smallest, the following line returns an exact result.
+	if math.Abs(u-v) != w {
+		return math.Abs(u-v) >= w
+	}
+
+	// Otherwise u - v = w exactly, or w is not the smallest value. In either
+	// case the following returns the correct result.
+	if u >= v {
+		return u-w >= v
+	}
+	return v-w >= u
+}
+
+// axis represents the possible results of exitAxis.
+type axis int
+
+const (
+	axisU axis = iota
+	axisV
+)
+
+// exitAxis reports which axis the directed line L exits the cube face F on.
+// The directed line L is represented by its CCW normal N in the (u,v,w) coordinates
+// of F. It returns axisU if L exits through the u=-1 or u=+1 edge, and axisV if L exits
+// through the v=-1 or v=+1 edge. Either result is acceptable if L exits exactly
+// through a corner vertex of the cube face.
+func (p pointUVW) exitAxis() axis {
+	if p.intersectsOppositeEdges() {
+		// The line passes through opposite edges of the face.
+		// It exits through the v=+1 or v=-1 edge if the u-component of N has a
+		// larger absolute magnitude than the v-component.
+		if math.Abs(p.X) >= math.Abs(p.Y) {
+			return axisV
+		}
+		return axisU
+	}
+
+	// The line passes through two adjacent edges of the face.
+	// It exits the v=+1 or v=-1 edge if an even number of the components of N
+	// are negative. We test this using signbit() rather than multiplication
+	// to avoid the possibility of underflow.
+	var x, y, z int
+	if math.Signbit(p.X) {
+		x = 1
+	}
+	if math.Signbit(p.Y) {
+		y = 1
+	}
+	if math.Signbit(p.Z) {
+		z = 1
+	}
+
+	if x^y^z == 0 {
+		return axisV
+	}
+	return axisU
+}
+
+// exitPoint returns the UV coordinates of the point where a directed line L (represented
+// by the CCW normal of this point) exits the cube face this point is derived from along
+// the given axis.
+func (p pointUVW) exitPoint(a axis) r2.Point {
+	if a == axisU {
+		u := -1.0
+		if p.Y > 0 {
+			u = 1.0
+		}
+		return r2.Point{u, (-u*p.X - p.Z) / p.Y}
+	}
+
+	v := -1.0
+	if p.X < 0 {
+		v = 1.0
+	}
+	return r2.Point{(-v*p.Y - p.Z) / p.X, v}
+}
+
+// clipDestination returns a score which is used to indicate if the clipped edge AB
+// on the given face intersects the face at all. This function returns the score for
+// the given endpoint, which is an integer ranging from 0 to 3. If the sum of the scores
+// from both of the endpoints is 3 or more, then edge AB does not intersect this face.
+//
+// First, it clips the line segment AB to find the clipped destination B' on a given
+// face. (The face is specified implicitly by expressing *all arguments* in the (u,v,w)
+// coordinates of that face.) Second, it partially computes whether the segment AB
+// intersects this face at all. The actual condition is fairly complicated, but it
+// turns out that it can be expressed as a "score" that can be computed independently
+// when clipping the two endpoints A and B.
+func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Point, int) {
+	var uv r2.Point
+
+	// Optimization: if B is within the safe region of the face, use it.
+	maxSafeUVCoord := 1 - faceClipErrorUVCoord
+	if b.Z > 0 {
+		uv = r2.Point{b.X / b.Z, b.Y / b.Z}
+		if math.Max(math.Abs(uv.X), math.Abs(uv.Y)) <= maxSafeUVCoord {
+			return uv, 0
+		}
+	}
+
+	// Otherwise find the point B' where the line AB exits the face.
+	uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV)
+
+	p := pointUVW(Point{r3.Vector{uv.X, uv.Y, 1.0}})
+
+	// Determine if the exit point B' is contained within the segment. We do this
+	// by computing the dot products with two inward-facing tangent vectors at A
+	// and B. If either dot product is negative, we say that B' is on the "wrong
+	// side" of that point.
As the point B' moves around the great circle AB past
+	// the segment endpoint B, it is initially on the wrong side of B only; as it
+	// moves further it is on the wrong side of both endpoints; and then it is on
+	// the wrong side of A only. If the exit point B' is on the wrong side of
+	// either endpoint, we can't use it; instead the segment is clipped at the
+	// original endpoint B.
+	//
+	// We reject the segment if the sum of the scores of the two endpoints is 3
+	// or more. Here is what that rule encodes:
+	//  - If B' is on the wrong side of A, then the other clipped endpoint A'
+	//    must be in the interior of AB (otherwise AB' would go the wrong way
+	//    around the circle). There is a similar rule for A'.
+	//  - If B' is on the wrong side of either endpoint (and therefore we must
+	//    use the original endpoint B instead), then it must be possible to
+	//    project B onto this face (i.e., its w-coordinate must be positive).
+	//    This rule is only necessary to handle certain zero-length edges (A=B).
+	score := 0
+	if p.Sub(a.Vector).Dot(aTan.Vector) < 0 {
+		score = 2 // B' is on wrong side of A.
+	} else if p.Sub(b.Vector).Dot(bTan.Vector) < 0 {
+		score = 1 // B' is on wrong side of B.
+	}
+
+	if score > 0 { // B' is not in the interior of AB.
+		if b.Z <= 0 {
+			score = 3 // B cannot be projected onto this face.
+		} else {
+			uv = r2.Point{b.X / b.Z, b.Y / b.Z}
+		}
+	}
+
+	return uv, score
+}
+
+// updateEndpoint returns the interval with the specified endpoint updated to
+// the given value. If the value lies beyond the opposite endpoint, nothing is
+// changed and false is returned.
+func updateEndpoint(bound r1.Interval, highEndpoint bool, value float64) (r1.Interval, bool) {
+	if !highEndpoint {
+		if bound.Hi < value {
+			return bound, false
+		}
+		if bound.Lo < value {
+			bound.Lo = value
+		}
+		return bound, true
+	}
+
+	if bound.Lo > value {
+		return bound, false
+	}
+	if bound.Hi > value {
+		bound.Hi = value
+	}
+	return bound, true
+}
+
+// clipBoundAxis returns the clipped versions of the bounding intervals for the given
+// axes for the line segment from (a0,a1) to (b0,b1) so that neither extends beyond the
+// given clip interval. negSlope is a precomputed helper variable that indicates which
+// diagonal of the bounding box is spanned by AB; it is false if AB has positive slope,
+// and true if AB has negative slope. If the clipping interval doesn't overlap the bounds,
+// false is returned.
+func clipBoundAxis(a0, b0 float64, bound0 r1.Interval, a1, b1 float64, bound1 r1.Interval,
+	negSlope bool, clip r1.Interval) (bound0c, bound1c r1.Interval, updated bool) {
+
+	if bound0.Lo < clip.Lo {
+		// If the upper bound is below the clip's lower bound, there is nothing to do.
+		if bound0.Hi < clip.Lo {
+			return bound0, bound1, false
+		}
+		// narrow the interval's lower bound to the clip bound.
+		bound0.Lo = clip.Lo
+		if bound1, updated = updateEndpoint(bound1, negSlope, interpolateFloat64(clip.Lo, a0, b0, a1, b1)); !updated {
+			return bound0, bound1, false
+		}
+	}
+
+	if bound0.Hi > clip.Hi {
+		// If the lower bound is above the clip's upper bound, there is nothing to do.
+		if bound0.Lo > clip.Hi {
+			return bound0, bound1, false
+		}
+		// narrow the interval's upper bound to the clip bound.
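+		// (As in the lower-bound case above, the other axis interval is then
+		// tightened by interpolating the crossing point.)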
+		bound0.Hi = clip.Hi
+		if bound1, updated = updateEndpoint(bound1, !negSlope, interpolateFloat64(clip.Hi, a0, b0, a1, b1)); !updated {
+			return bound0, bound1, false
+		}
+	}
+	return bound0, bound1, true
+}
+
+// edgeIntersectsRect reports whether the edge defined by AB intersects the
+// given closed rectangle to within the error bound.
+func edgeIntersectsRect(a, b r2.Point, r r2.Rect) bool {
+	// First check whether the bounding Rect of AB intersects the given rect.
+	if !r.Intersects(r2.RectFromPoints(a, b)) {
+		return false
+	}
+
+	// Otherwise AB intersects the rect if and only if all four vertices of rect
+	// do not lie on the same side of the extended line AB. We test this by finding
+	// the two vertices of rect with minimum and maximum projections onto the normal
+	// of AB, and computing their dot products with the edge normal.
+	n := b.Sub(a).Ortho()
+
+	i := 0
+	if n.X >= 0 {
+		i = 1
+	}
+	j := 0
+	if n.Y >= 0 {
+		j = 1
+	}
+
+	max := n.Dot(r.VertexIJ(i, j).Sub(a))
+	min := n.Dot(r.VertexIJ(1-i, 1-j).Sub(a))
+
+	return (max >= 0) && (min <= 0)
+}
+
+// clippedEdgeBound returns the bounding rectangle of the portion of the edge defined
+// by AB intersected by clip. The resulting bound may be empty. This is a convenience
+// function built on top of clipEdgeBound.
+func clippedEdgeBound(a, b r2.Point, clip r2.Rect) r2.Rect {
+	bound := r2.RectFromPoints(a, b)
+	if b1, intersects := clipEdgeBound(a, b, clip, bound); intersects {
+		return b1
+	}
+	return r2.EmptyRect()
+}
+
+// clipEdgeBound clips an edge AB to a sequence of rectangles efficiently.
+// It represents the clipped edges by their bounding boxes rather than as a pair of
+// endpoints. Specifically, let A'B' be some portion of an edge AB, and let bound be
+// a tight bound of A'B'. This function returns the bound that is a tight bound
+// of A'B' intersected with a given rectangle. If A'B' does not intersect clip,
+// it returns false and the original bound.
+func clipEdgeBound(a, b r2.Point, clip, bound r2.Rect) (r2.Rect, bool) {
+	// negSlope indicates which diagonal of the bounding box is spanned by AB: it
+	// is false if AB has positive slope, and true if AB has negative slope. This is
+	// used to determine which interval endpoints need to be updated each time
+	// the edge is clipped.
+	negSlope := (a.X > b.X) != (a.Y > b.Y)
+
+	b0x, b0y, up1 := clipBoundAxis(a.X, b.X, bound.X, a.Y, b.Y, bound.Y, negSlope, clip.X)
+	if !up1 {
+		return bound, false
+	}
+	b1y, b1x, up2 := clipBoundAxis(a.Y, b.Y, b0y, a.X, b.X, b0x, negSlope, clip.Y)
+	if !up2 {
+		return r2.Rect{X: b0x, Y: b0y}, false
+	}
+	return r2.Rect{X: b1x, Y: b1y}, true
+}
+
+// interpolateFloat64 returns a value with the same combination of a1 and b1 as the
+// given value x is of a and b. This function makes the following guarantees:
+//  - If x == a, then x1 = a1 (exactly).
+//  - If x == b, then x1 = b1 (exactly).
+//  - If a <= x <= b, then a1 <= x1 <= b1 (even if a1 == b1).
+// This requires a != b.
+func interpolateFloat64(x, a, b, a1, b1 float64) float64 {
+	// To get results that are accurate near both A and B, we interpolate
+	// starting from the closer of the two points.
+	if math.Abs(a-x) <= math.Abs(b-x) {
+		return a1 + (b1-a1)*(x-a)/(b-a)
+	}
+	return b1 + (a1-b1)*(x-b)/(a-b)
+}
+
+// FaceSegment represents an edge AB clipped to an S2 cube face. It is
+// represented by a face index and a pair of (u,v) coordinates.
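+//
+// A hypothetical sketch of iterating over the clipped pieces of an edge
+// (a and b are assumed to be unit-length Points):
+//
+//	for _, seg := range FaceSegments(a, b) {
+//		// seg describes the portion of AB lying on one cube face.
+//	}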
+type FaceSegment struct {
+	face int
+	a, b r2.Point
+}
+
+// FaceSegments subdivides the given edge AB at every point where it crosses the
+// boundary between two S2 cube faces and returns the corresponding FaceSegments.
+// The segments are returned in order from A toward B. The input points must be
+// unit length.
+//
+// This function guarantees that the returned segments form a continuous path
+// from A to B, and that all vertices are within faceClipErrorUVDist of the
+// line AB. All vertices lie within the [-1,1]x[-1,1] cube face rectangles.
+// The results are consistent with Sign, i.e. the edge is well-defined even if
+// its endpoints are antipodal.
+// TODO(roberts): Extend the implementation of PointCross so that this is true.
+func FaceSegments(a, b Point) []FaceSegment {
+	var segment FaceSegment
+
+	// Fast path: both endpoints are on the same face.
+	var aFace, bFace int
+	aFace, segment.a.X, segment.a.Y = xyzToFaceUV(a.Vector)
+	bFace, segment.b.X, segment.b.Y = xyzToFaceUV(b.Vector)
+	if aFace == bFace {
+		segment.face = aFace
+		return []FaceSegment{segment}
+	}
+
+	// Starting at A, we follow AB from face to face until we reach the face
+	// containing B. The following code is designed to ensure that we always
+	// reach B, even in the presence of numerical errors.
+	//
+	// First we compute the normal to the plane containing A and B. This normal
+	// becomes the ultimate definition of the line AB; it is used to resolve all
+	// questions regarding where exactly the line goes. Unfortunately due to
+	// numerical errors, the line may not quite intersect the faces containing
+	// the original endpoints. We handle this by moving A and/or B slightly if
+	// necessary so that they are on faces intersected by the line AB.
+	ab := a.PointCross(b)
+
+	aFace, segment.a = moveOriginToValidFace(aFace, a, ab, segment.a)
+	bFace, segment.b = moveOriginToValidFace(bFace, b, Point{ab.Mul(-1)}, segment.b)
+
+	// Now we simply follow AB from face to face until we reach B.
+	var segments []FaceSegment
+	segment.face = aFace
+	bSaved := segment.b
+
+	for face := aFace; face != bFace; {
+		// Complete the current segment by finding the point where AB
+		// exits the current face.
+		z := faceXYZtoUVW(face, ab)
+		n := pointUVW{z.Vector}
+
+		exitAxis := n.exitAxis()
+		segment.b = n.exitPoint(exitAxis)
+		segments = append(segments, segment)
+
+		// Compute the next face intersected by AB, and translate the exit
+		// point of the current segment into the (u,v) coordinates of the
+		// next face. This becomes the first point of the next segment.
+		exitXyz := faceUVToXYZ(face, segment.b.X, segment.b.Y)
+		face = nextFace(face, segment.b, exitAxis, n, bFace)
+		exitUvw := faceXYZtoUVW(face, Point{exitXyz})
+		segment.face = face
+		segment.a = r2.Point{exitUvw.X, exitUvw.Y}
+	}
+	// Finish the last segment.
+	segment.b = bSaved
+	return append(segments, segment)
+}
+
+// moveOriginToValidFace updates the origin point to a valid face if necessary.
+// Given a line segment AB whose origin A has been projected onto a given cube
+// face, determine whether it is necessary to project A onto a different face
+// instead. This can happen because the normal of the line AB is not computed
+// exactly, so that the line AB (defined as the set of points perpendicular to
+// the normal) may not intersect the cube face containing A. Even if it does
+// intersect the face, the exit point of the line from that face may be on
+// the wrong side of A (i.e., in the direction away from B).
If this happens, +// we reproject A onto the adjacent face where the line AB approaches A most +// closely. This moves the origin by a small amount, but never more than the +// error tolerances. +func moveOriginToValidFace(face int, a, ab Point, aUV r2.Point) (int, r2.Point) { + // Fast path: if the origin is sufficiently far inside the face, it is + // always safe to use it. + const maxSafeUVCoord = 1 - faceClipErrorUVCoord + if math.Max(math.Abs((aUV).X), math.Abs((aUV).Y)) <= maxSafeUVCoord { + return face, aUV + } + + // Otherwise check whether the normal AB even intersects this face. + z := faceXYZtoUVW(face, ab) + n := pointUVW{z.Vector} + if n.intersectsFace() { + // Check whether the point where the line AB exits this face is on the + // wrong side of A (by more than the acceptable error tolerance). + uv := n.exitPoint(n.exitAxis()) + exit := faceUVToXYZ(face, uv.X, uv.Y) + aTangent := ab.Normalize().Cross(a.Vector) + + // We can use the given face. + if exit.Sub(a.Vector).Dot(aTangent) >= -faceClipErrorRadians { + return face, aUV + } + } + + // Otherwise we reproject A to the nearest adjacent face. (If line AB does + // not pass through a given face, it must pass through all adjacent faces.) + var dir int + if math.Abs((aUV).X) >= math.Abs((aUV).Y) { + // U-axis + if aUV.X > 0 { + dir = 1 + } + face = uvwFace(face, 0, dir) + } else { + // V-axis + if aUV.Y > 0 { + dir = 1 + } + face = uvwFace(face, 1, dir) + } + + aUV.X, aUV.Y = validFaceXYZToUV(face, a.Vector) + aUV.X = math.Max(-1.0, math.Min(1.0, aUV.X)) + aUV.Y = math.Max(-1.0, math.Min(1.0, aUV.Y)) + + return face, aUV +} + +// nextFace returns the next face that should be visited by FaceSegments, given that +// we have just visited face and we are following the line AB (represented +// by its normal N in the (u,v,w) coordinates of that face). The other +// arguments include the point where AB exits face, the corresponding +// exit axis, and the target face containing the destination point B. +func nextFace(face int, exit r2.Point, axis axis, n pointUVW, targetFace int) int { + // this bit is to work around C++ cleverly casting bools to ints for you. + exitA := exit.X + exit1MinusA := exit.Y + + if axis == axisV { + exitA = exit.Y + exit1MinusA = exit.X + } + exitAPos := 0 + if exitA > 0 { + exitAPos = 1 + } + exit1MinusAPos := 0 + if exit1MinusA > 0 { + exit1MinusAPos = 1 + } + + // We return the face that is adjacent to the exit point along the given + // axis. If line AB exits *exactly* through a corner of the face, there are + // two possible next faces. If one is the target face containing B, then + // we guarantee that we advance to that face directly. + // + // The three conditions below check that (1) AB exits approximately through + // a corner, (2) the adjacent face along the non-exit axis is the target + // face, and (3) AB exits *exactly* through the corner. (The sumEqual + // code checks whether the dot product of (u,v,1) and n is exactly zero.) + if math.Abs(exit1MinusA) == 1 && + uvwFace(face, int(1-axis), exit1MinusAPos) == targetFace && + sumEqual(exit.X*n.X, exit.Y*n.Y, -n.Z) { + return targetFace + } + + // Otherwise return the face that is adjacent to the exit point in the + // direction of the exit axis. 
+	return uvwFace(face, int(axis), exitAPos)
+}
diff --git a/vendor/github.com/golang/geo/s2/edge_crosser.go b/vendor/github.com/golang/geo/s2/edge_crosser.go
new file mode 100644
index 000000000..69c6da6b9
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/edge_crosser.go
@@ -0,0 +1,227 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+)
+
+// EdgeCrosser allows edges to be efficiently tested for intersection with a
+// given fixed edge AB. It is especially efficient when testing for
+// intersection with an edge chain connecting vertices v0, v1, v2, ...
+//
+// Example usage:
+//
+//	func CountIntersections(a, b Point, edges []Edge) int {
+//		count := 0
+//		crosser := NewEdgeCrosser(a, b)
+//		for _, edge := range edges {
+//			if crosser.CrossingSign(edge.V0, edge.V1) != DoNotCross {
+//				count++
+//			}
+//		}
+//		return count
+//	}
+//
+type EdgeCrosser struct {
+	a   Point
+	b   Point
+	aXb Point
+
+	// To reduce the number of calls to expensiveSign, we compute an
+	// outward-facing tangent at A and B if necessary. If the plane
+	// perpendicular to one of these tangents separates AB from CD (i.e., one
+	// edge on each side) then there is no intersection.
+	aTangent Point // Outward-facing tangent at A.
+	bTangent Point // Outward-facing tangent at B.
+
+	// The fields below are updated for each vertex in the chain.
+	c   Point     // Previous vertex in the vertex chain.
+	acb Direction // The orientation of triangle ACB.
+}
+
+// NewEdgeCrosser returns an EdgeCrosser with the fixed edge AB.
+func NewEdgeCrosser(a, b Point) *EdgeCrosser {
+	norm := a.PointCross(b)
+	return &EdgeCrosser{
+		a:        a,
+		b:        b,
+		aXb:      Point{a.Cross(b.Vector)},
+		aTangent: Point{a.Cross(norm.Vector)},
+		bTangent: Point{norm.Cross(b.Vector)},
+	}
+}
+
+// CrossingSign reports whether the edge AB intersects the edge CD. If any two
+// vertices from different edges are the same, returns MaybeCross. If either edge
+// is degenerate (A == B or C == D), returns either DoNotCross or MaybeCross.
+//
+// Properties of CrossingSign:
+//
+//	(1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
+//	(2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
+//	(3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
+//	(4) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
+//
+// Note that if you want to check an edge against a chain of other edges,
+// it is slightly more efficient to use the single-argument version
+// ChainCrossingSign below.
+func (e *EdgeCrosser) CrossingSign(c, d Point) Crossing {
+	if c != e.c {
+		e.RestartAt(c)
+	}
+	return e.ChainCrossingSign(d)
+}
+
+// EdgeOrVertexCrossing reports whether CrossingSign(c, d) == Cross, or AB and
+// CD share a vertex and VertexCrossing(a, b, c, d) is true.
+//
+// This method extends the concept of a "crossing" to the case where AB
+// and CD have a vertex in common.
The two edges may or may not cross,
+// according to the rules defined in VertexCrossing above. The rules
+// are designed so that point containment tests can be implemented simply
+// by counting edge crossings. Similarly, determining whether one edge
+// chain crosses another edge chain can be implemented by counting.
+func (e *EdgeCrosser) EdgeOrVertexCrossing(c, d Point) bool {
+	if c != e.c {
+		e.RestartAt(c)
+	}
+	return e.EdgeOrVertexChainCrossing(d)
+}
+
+// NewChainEdgeCrosser is a convenience constructor that uses AB as the fixed edge,
+// and C as the first vertex of the vertex chain (equivalent to calling RestartAt(c)).
+//
+// You don't need to use this or any of the chain functions unless you're trying to
+// squeeze out every last drop of performance. Essentially all you are saving is a test
+// whether the first vertex of the current edge is the same as the second vertex of the
+// previous edge.
+func NewChainEdgeCrosser(a, b, c Point) *EdgeCrosser {
+	e := NewEdgeCrosser(a, b)
+	e.RestartAt(c)
+	return e
+}
+
+// RestartAt sets the current point of the edge crosser to be c.
+// Call this method when your chain 'jumps' to a new place.
+// The argument must point to a value that persists until the next call.
+func (e *EdgeCrosser) RestartAt(c Point) {
+	e.c = c
+	e.acb = -triageSign(e.a, e.b, e.c)
+}
+
+// ChainCrossingSign is like CrossingSign, but uses the last vertex passed to one of
+// the crossing methods (or RestartAt) as the first vertex of the current edge.
+func (e *EdgeCrosser) ChainCrossingSign(d Point) Crossing {
+	// For there to be an edge crossing, the triangles ACB, CBD, BDA, DAC must
+	// all be oriented the same way (CW or CCW). We keep the orientation of ACB
+	// as part of our state. When each new point D arrives, we compute the
+	// orientation of BDA and check whether it matches ACB. This checks whether
+	// the points C and D are on opposite sides of the great circle through AB.
+
+	// Recall that triageSign is invariant with respect to rotating its
+	// arguments, i.e. ABD has the same orientation as BDA.
+	bda := triageSign(e.a, e.b, d)
+	if e.acb == -bda && bda != Indeterminate {
+		// The most common case -- triangles have opposite orientations. Save the
+		// current vertex D as the next vertex C, and also save the orientation of
+		// the new triangle ACB (which is opposite to the current triangle BDA).
+		e.c = d
+		e.acb = -bda
+		return DoNotCross
+	}
+	return e.crossingSign(d, bda)
+}
+
+// EdgeOrVertexChainCrossing is like EdgeOrVertexCrossing, but uses the last vertex
+// passed to one of the crossing methods (or RestartAt) as the first vertex of the current edge.
+func (e *EdgeCrosser) EdgeOrVertexChainCrossing(d Point) bool {
+	// We need to copy e.c since it is clobbered by ChainCrossingSign.
+	c := e.c
+	switch e.ChainCrossingSign(d) {
+	case DoNotCross:
+		return false
+	case Cross:
+		return true
+	}
+	return VertexCrossing(e.a, e.b, c, d)
+}
+
+// crossingSign handles the slow path of CrossingSign.
+func (e *EdgeCrosser) crossingSign(d Point, bda Direction) Crossing {
+	// Compute the actual result, and then save the current vertex D as the next
+	// vertex C, and save the orientation of the next triangle ACB (which is
+	// opposite to the current triangle BDA).
+	defer func() {
+		e.c = d
+		e.acb = -bda
+	}()
+
+	// At this point, a very common situation is that A,B,C,D are four points on
+	// a line such that AB does not overlap CD.
(For example, this happens when + // a line or curve is sampled finely, or when geometry is constructed by + // computing the union of S2CellIds.) Most of the time, we can determine + // that AB and CD do not intersect using the two outward-facing + // tangents at A and B (parallel to AB) and testing whether AB and CD are on + // opposite sides of the plane perpendicular to one of these tangents. This + // is moderately expensive but still much cheaper than expensiveSign. + + // The error in RobustCrossProd is insignificant. The maximum error in + // the call to CrossProd (i.e., the maximum norm of the error vector) is + // (0.5 + 1/sqrt(3)) * dblEpsilon. The maximum error in each call to + // DotProd below is dblEpsilon. (There is also a small relative error + // term that is insignificant because we are comparing the result against a + // constant that is very close to zero.) + maxError := (1.5 + 1/math.Sqrt(3)) * dblEpsilon + if (e.c.Dot(e.aTangent.Vector) > maxError && d.Dot(e.aTangent.Vector) > maxError) || (e.c.Dot(e.bTangent.Vector) > maxError && d.Dot(e.bTangent.Vector) > maxError) { + return DoNotCross + } + + // Otherwise, eliminate the cases where two vertices from different edges are + // equal. (These cases could be handled in the code below, but we would rather + // avoid calling ExpensiveSign if possible.) + if e.a == e.c || e.a == d || e.b == e.c || e.b == d { + return MaybeCross + } + + // Eliminate the cases where an input edge is degenerate. (Note that in + // most cases, if CD is degenerate then this method is not even called + // because acb and bda have different signs.) + if e.a == e.b || e.c == d { + return DoNotCross + } + + // Otherwise it's time to break out the big guns. + if e.acb == Indeterminate { + e.acb = -expensiveSign(e.a, e.b, e.c) + } + if bda == Indeterminate { + bda = expensiveSign(e.a, e.b, d) + } + + if bda != e.acb { + return DoNotCross + } + + cbd := -RobustSign(e.c, d, e.b) + if cbd != e.acb { + return DoNotCross + } + dac := RobustSign(e.c, d, e.a) + if dac != e.acb { + return DoNotCross + } + return Cross +} diff --git a/vendor/github.com/golang/geo/s2/edge_crossings.go b/vendor/github.com/golang/geo/s2/edge_crossings.go new file mode 100644 index 000000000..a98ec76ff --- /dev/null +++ b/vendor/github.com/golang/geo/s2/edge_crossings.go @@ -0,0 +1,396 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "fmt" + "math" + + "github.com/golang/geo/r3" + "github.com/golang/geo/s1" +) + +const ( + // intersectionError can be set somewhat arbitrarily, because the algorithm + // uses more precision if necessary in order to achieve the specified error. + // The only strict requirement is that intersectionError >= dblEpsilon + // radians. However, using a larger error tolerance makes the algorithm more + // efficient because it reduces the number of cases where exact arithmetic is + // needed. 
+	intersectionError = s1.Angle(8 * dblError)
+
+	// intersectionMergeRadius is used to ensure that intersection points that
+	// are supposed to be coincident are merged back together into a single
+	// vertex. This is required in order for various polygon operations (union,
+	// intersection, etc.) to work correctly. It is twice the intersection error
+	// because two coincident intersection points might have errors in
+	// opposite directions.
+	intersectionMergeRadius = 2 * intersectionError
+)
+
+// A Crossing indicates how edges cross.
+type Crossing int
+
+const (
+	// Cross means the edges cross.
+	Cross Crossing = iota
+	// MaybeCross means two vertices from different edges are the same.
+	MaybeCross
+	// DoNotCross means the edges do not cross.
+	DoNotCross
+)
+
+func (c Crossing) String() string {
+	switch c {
+	case Cross:
+		return "Cross"
+	case MaybeCross:
+		return "MaybeCross"
+	case DoNotCross:
+		return "DoNotCross"
+	default:
+		return fmt.Sprintf("(BAD CROSSING %d)", c)
+	}
+}
+
+// CrossingSign reports whether the edge AB intersects the edge CD.
+// If AB crosses CD at a point that is interior to both edges, Cross is returned.
+// If any two vertices from different edges are the same it returns MaybeCross.
+// Otherwise it returns DoNotCross.
+// If either edge is degenerate (A == B or C == D), the return value is MaybeCross
+// if two vertices from different edges are the same and DoNotCross otherwise.
+//
+// Properties of CrossingSign:
+//
+//	(1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
+//	(2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
+//	(3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
+//	(4) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
+//
+// This method implements an exact, consistent perturbation model such
+// that no three points are ever considered to be collinear. This means
+// that even if you have 4 points A, B, C, D that lie exactly in a line
+// (say, around the equator), C and D will be treated as being slightly to
+// one side or the other of AB. This is done in a way such that the
+// results are always consistent (see RobustSign).
+func CrossingSign(a, b, c, d Point) Crossing {
+	crosser := NewChainEdgeCrosser(a, b, c)
+	return crosser.ChainCrossingSign(d)
+}
+
+// VertexCrossing reports whether two edges "cross" in such a way that point-in-polygon
+// containment tests can be implemented by counting the number of edge crossings.
+//
+// Given two edges AB and CD where at least two vertices are identical
+// (i.e. CrossingSign(a,b,c,d) == MaybeCross), the basic rule is that a "crossing"
+// occurs if AB is encountered after CD during a CCW sweep around the shared
+// vertex starting from a fixed reference point.
+//
+// Note that according to this rule, if AB crosses CD then in general CD
+// does not cross AB. However, this leads to the correct result when
+// counting polygon edge crossings. For example, suppose that A,B,C are
+// three consecutive vertices of a CCW polygon. If we now consider the edge
+// crossings of a segment BP as P sweeps around B, the crossing number
+// changes parity exactly when BP crosses BA or BC.
+//
+// Useful properties of VertexCrossing (VC):
+//
+//	(1) VC(a,a,c,d) == VC(a,b,c,c) == false
+//	(2) VC(a,b,a,b) == VC(a,b,b,a) == true
+//	(3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c)
+//	(4) If exactly one of a,b equals one of c,d, then exactly one of
+//	    VC(a,b,c,d) and VC(c,d,a,b) is true
+//
+// It is an error to call this method with 4 distinct vertices.
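+//
+// As a hypothetical sketch of the counting rule (q, r and loopEdges are
+// assumed for illustration, not part of this package):
+//
+//	crossings := 0
+//	for _, e := range loopEdges {
+//		if EdgeOrVertexCrossing(q, r, e.V0, e.V1) {
+//			crossings++
+//		}
+//	}
+//	inside := crossings%2 == 1 // crossing parity gives containment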
+func VertexCrossing(a, b, c, d Point) bool {
+	// If A == B or C == D there is no intersection. We need to check this
+	// case first in case 3 or more input points are identical.
+	if a == b || c == d {
+		return false
+	}
+
+	// If any other pair of vertices is equal, there is a crossing if and only
+	// if OrderedCCW indicates that the edge AB is further CCW around the
+	// shared vertex O (either A or B) than the edge CD, starting from an
+	// arbitrary fixed reference point.
+
+	// Optimization: if AB=CD or AB=DC, we can avoid most of the calculations.
+	switch {
+	case a == c:
+		return (b == d) || OrderedCCW(Point{a.Ortho()}, d, b, a)
+	case b == d:
+		return OrderedCCW(Point{b.Ortho()}, c, a, b)
+	case a == d:
+		return (b == c) || OrderedCCW(Point{a.Ortho()}, c, b, a)
+	case b == c:
+		return OrderedCCW(Point{b.Ortho()}, d, a, b)
+	}
+
+	return false
+}
+
+// EdgeOrVertexCrossing is a convenience function that calls CrossingSign to
+// handle cases where all four vertices are distinct, and VertexCrossing to
+// handle cases where two or more vertices are the same. This defines a crossing
+// function such that point-in-polygon containment tests can be implemented
+// by simply counting edge crossings.
+func EdgeOrVertexCrossing(a, b, c, d Point) bool {
+	switch CrossingSign(a, b, c, d) {
+	case DoNotCross:
+		return false
+	case Cross:
+		return true
+	default:
+		return VertexCrossing(a, b, c, d)
+	}
+}
+
+// Intersection returns the intersection point of two edges AB and CD that cross
+// (CrossingSign(a,b,c,d) == Cross).
+//
+// Useful properties of Intersection:
+//
+//	(1) Intersection(b,a,c,d) == Intersection(a,b,d,c) == Intersection(a,b,c,d)
+//	(2) Intersection(c,d,a,b) == Intersection(a,b,c,d)
+//
+// The returned intersection point X is guaranteed to be very close to the
+// true intersection point of AB and CD, even if the edges intersect at a
+// very small angle.
+func Intersection(a0, a1, b0, b1 Point) Point {
+	// It is difficult to compute the intersection point of two edges accurately
+	// when the angle between the edges is very small. Previously we handled
+	// this by only guaranteeing that the returned intersection point is within
+	// intersectionError of each edge. However, this means that when the edges
+	// cross at a very small angle, the computed result may be very far from the
+	// true intersection point.
+	//
+	// Instead this function now guarantees that the result is always within
+	// intersectionError of the true intersection. This requires using more
+	// sophisticated techniques and in some cases extended precision.
+	//
+	//  - intersectionStable computes the intersection point using
+	//    projection and interpolation, taking care to minimize cancellation
+	//    error.
+	//
+	//  - intersectionExact computes the intersection point using precise
+	//    arithmetic and converts the final result back to a Point.
+	pt, ok := intersectionStable(a0, a1, b0, b1)
+	if !ok {
+		pt = intersectionExact(a0, a1, b0, b1)
+	}
+
+	// Make sure the intersection point is on the correct side of the sphere.
+	// Since all vertices are unit length, and edges are less than 180 degrees,
+	// (a0 + a1) and (b0 + b1) both have positive dot product with the
+	// intersection point. We use the sum of all vertices to make sure that the
+	// result is unchanged when the edges are swapped or reversed.
+	if pt.Dot((a0.Add(a1.Vector)).Add(b0.Add(b1.Vector))) < 0 {
+		pt = Point{pt.Mul(-1)}
+	}
+
+	return pt
+}
+
+// robustNormalWithLength computes the cross product of two vectors, normalized
+// to be unit length. It also returns the length of the cross product before
+// normalization, which is useful for estimating the amount of error in the
+// result. For numerical stability, the vectors should both be approximately
+// unit length.
+func robustNormalWithLength(x, y r3.Vector) (r3.Vector, float64) {
+	var pt r3.Vector
+	// This computes 2 * (x.Cross(y)), but has much better numerical
+	// stability when x and y are unit length.
+	tmp := x.Sub(y).Cross(x.Add(y))
+	length := tmp.Norm()
+	if length != 0 {
+		pt = tmp.Mul(1 / length)
+	}
+	return pt, 0.5 * length // Since tmp == 2 * (x.Cross(y))
+}
+
+/*
+// intersectionSimple is not used by the C++ so it is skipped here.
+*/
+
+// projection returns the projection of aNorm onto X (x.Dot(aNorm)), and a bound
+// on the error in the result. aNorm is not necessarily unit length.
+//
+// The remaining parameters (the length of aNorm (aNormLen) and the edge endpoints
+// a0 and a1) allow this dot product to be computed more accurately and efficiently.
+func projection(x, aNorm r3.Vector, aNormLen float64, a0, a1 Point) (proj, bound float64) {
+	// The error in the dot product is proportional to the lengths of the input
+	// vectors, so rather than using x itself (a unit-length vector) we use
+	// the vectors from x to the closer of the two edge endpoints. This
+	// typically reduces the error by a huge factor.
+	x0 := x.Sub(a0.Vector)
+	x1 := x.Sub(a1.Vector)
+	x0Dist2 := x0.Norm2()
+	x1Dist2 := x1.Norm2()
+
+	// If both distances are the same, we need to be careful to choose one
+	// endpoint deterministically so that the result does not change if the
+	// order of the endpoints is reversed.
+	var dist float64
+	if x0Dist2 < x1Dist2 || (x0Dist2 == x1Dist2 && x0.Cmp(x1) == -1) {
+		dist = math.Sqrt(x0Dist2)
+		proj = x0.Dot(aNorm)
+	} else {
+		dist = math.Sqrt(x1Dist2)
+		proj = x1.Dot(aNorm)
+	}
+
+	// This calculation bounds the error from all sources: the computation of
+	// the normal, the subtraction of one endpoint, and the dot product itself.
+	// dblError appears because the input points are assumed to be
+	// normalized in double precision.
+	//
+	// For reference, the bounds that went into this calculation are:
+	// ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblError) * epsilon
+	// |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * epsilon
+	// ||(X-Y)'-(X-Y)|| <= ||X-Y|| * epsilon
+	bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblError)*dist + 1.5*math.Abs(proj)) * epsilon
+	return proj, bound
+}
+
+// compareEdges reports whether (a0,a1) is less than (b0,b1) with respect to a total
+// ordering on edges that is invariant under edge reversals.
+func compareEdges(a0, a1, b0, b1 Point) bool {
+	if a0.Cmp(a1.Vector) != -1 {
+		a0, a1 = a1, a0
+	}
+	if b0.Cmp(b1.Vector) != -1 {
+		b0, b1 = b1, b0
+	}
+	return a0.Cmp(b0.Vector) == -1 || (a0 == b0 && b0.Cmp(b1.Vector) == -1)
+}
+
+// intersectionStable returns the intersection point of the edges (a0,a1) and
+// (b0,b1) if it can be computed to within an error of at most intersectionError
+// by this function.
+//
+// The intersection point is not guaranteed to have the correct sign because we
+// choose to use the longer of the two edges first. The sign is corrected by
+// Intersection.
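+//
+// Callers fall back to intersectionExact when this fails, as Intersection
+// above does:
+//
+//	pt, ok := intersectionStable(a0, a1, b0, b1)
+//	if !ok {
+//		pt = intersectionExact(a0, a1, b0, b1)
+//	}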
+func intersectionStable(a0, a1, b0, b1 Point) (Point, bool) {
+	// Sort the two edges so that (a0,a1) is longer, breaking ties in a
+	// deterministic way that does not depend on the ordering of the endpoints.
+	// This is desirable for two reasons:
+	//  - So that the result doesn't change when edges are swapped or reversed.
+	//  - It reduces error, since the first edge is used to compute the edge
+	//    normal (where a longer edge means less error), and the second edge
+	//    is used for interpolation (where a shorter edge means less error).
+	aLen2 := a1.Sub(a0.Vector).Norm2()
+	bLen2 := b1.Sub(b0.Vector).Norm2()
+	if aLen2 < bLen2 || (aLen2 == bLen2 && compareEdges(a0, a1, b0, b1)) {
+		return intersectionStableSorted(b0, b1, a0, a1)
+	}
+	return intersectionStableSorted(a0, a1, b0, b1)
+}
+
+// intersectionStableSorted is a helper function for intersectionStable.
+// It expects that the edges (a0,a1) and (b0,b1) have been sorted so that
+// the first edge passed in is longer.
+func intersectionStableSorted(a0, a1, b0, b1 Point) (Point, bool) {
+	var pt Point
+
+	// Compute the normal of the plane through (a0, a1) in a stable way.
+	aNorm := a0.Sub(a1.Vector).Cross(a0.Add(a1.Vector))
+	aNormLen := aNorm.Norm()
+	bLen := b1.Sub(b0.Vector).Norm()
+
+	// Compute the projection (i.e., signed distance) of b0 and b1 onto the
+	// plane through (a0, a1). Distances are scaled by the length of aNorm.
+	b0Dist, b0Error := projection(b0.Vector, aNorm, aNormLen, a0, a1)
+	b1Dist, b1Error := projection(b1.Vector, aNorm, aNormLen, a0, a1)
+
+	// The total distance from b0 to b1 measured perpendicularly to (a0,a1) is
+	// |b0Dist - b1Dist|. Note that b0Dist and b1Dist generally have
+	// opposite signs because b0 and b1 are on opposite sides of (a0, a1). The
+	// code below finds the intersection point by interpolating along the edge
+	// (b0, b1) to a fractional distance of b0Dist / (b0Dist - b1Dist).
+	//
+	// It can be shown that the maximum error in the interpolation fraction is
+	//
+	//	(b0Dist * b1Error - b1Dist * b0Error) / (distSum * (distSum - errorSum))
+	//
+	// We save ourselves some work by scaling the result and the error bound by
+	// "distSum", since the result is normalized to be unit length anyway.
+	distSum := math.Abs(b0Dist - b1Dist)
+	errorSum := b0Error + b1Error
+	if distSum <= errorSum {
+		return pt, false // Error is unbounded in this case.
+	}
+
+	x := b1.Mul(b0Dist).Sub(b0.Mul(b1Dist))
+	err := bLen*math.Abs(b0Dist*b1Error-b1Dist*b0Error)/
+		(distSum-errorSum) + 2*distSum*epsilon
+
+	// Finally we normalize the result, compute the corresponding error, and
+	// check whether the total error is acceptable.
+	xLen := x.Norm()
+	maxError := intersectionError
+	if err > (float64(maxError)-epsilon)*xLen {
+		return pt, false
+	}
+
+	return Point{x.Mul(1 / xLen)}, true
+}
+
+// intersectionExact returns the intersection point of (a0, a1) and (b0, b1)
+// using precise arithmetic. Note that the result is not exact because it is
+// rounded down to double precision at the end. Also, the intersection point
+// is not guaranteed to have the correct sign (i.e., the return value may need
+// to be negated).
+func intersectionExact(a0, a1, b0, b1 Point) Point {
+	// Since we are using precise arithmetic, we don't need to worry about
+	// numerical stability.
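+	// Lift all four endpoints to arbitrary-precision vectors before taking
+	// the cross products below.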
+ a0P := r3.PreciseVectorFromVector(a0.Vector) + a1P := r3.PreciseVectorFromVector(a1.Vector) + b0P := r3.PreciseVectorFromVector(b0.Vector) + b1P := r3.PreciseVectorFromVector(b1.Vector) + aNormP := a0P.Cross(a1P) + bNormP := b0P.Cross(b1P) + xP := aNormP.Cross(bNormP) + + // The final Normalize() call is done in double precision, which creates a + // directional error of up to 2*dblError. (Precise conversion and Normalize() + // each contribute up to dblError of directional error.) + x := xP.Vector() + + if x == (r3.Vector{}) { + // The two edges are exactly collinear, but we still consider them to be + // "crossing" because of simulation of simplicity. Out of the four + // endpoints, exactly two lie in the interior of the other edge. Of + // those two we return the one that is lexicographically smallest. + x = r3.Vector{10, 10, 10} // Greater than any valid S2Point + + aNorm := Point{aNormP.Vector()} + bNorm := Point{bNormP.Vector()} + if OrderedCCW(b0, a0, b1, bNorm) && a0.Cmp(x) == -1 { + return a0 + } + if OrderedCCW(b0, a1, b1, bNorm) && a1.Cmp(x) == -1 { + return a1 + } + if OrderedCCW(a0, b0, a1, aNorm) && b0.Cmp(x) == -1 { + return b0 + } + if OrderedCCW(a0, b1, a1, aNorm) && b1.Cmp(x) == -1 { + return b1 + } + } + + return Point{x} +} diff --git a/vendor/github.com/golang/geo/s2/edge_distances.go b/vendor/github.com/golang/geo/s2/edge_distances.go new file mode 100644 index 000000000..ca197af1d --- /dev/null +++ b/vendor/github.com/golang/geo/s2/edge_distances.go @@ -0,0 +1,408 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +// This file defines a collection of methods for computing the distance to an edge, +// interpolating along an edge, projecting points onto edges, etc. + +import ( + "math" + + "github.com/golang/geo/s1" +) + +// DistanceFromSegment returns the distance of point X from line segment AB. +// The points are expected to be normalized. The result is very accurate for small +// distances but may have some numerical error if the distance is large +// (approximately pi/2 or greater). The case A == B is handled correctly. +func DistanceFromSegment(x, a, b Point) s1.Angle { + var minDist s1.ChordAngle + minDist, _ = updateMinDistance(x, a, b, minDist, true) + return minDist.Angle() +} + +// IsDistanceLess reports whether the distance from X to the edge AB is less +// than limit. (For less than or equal to, specify limit.Successor()). +// This method is faster than DistanceFromSegment(). If you want to +// compare against a fixed s1.Angle, you should convert it to an s1.ChordAngle +// once and save the value, since this conversion is relatively expensive. +func IsDistanceLess(x, a, b Point, limit s1.ChordAngle) bool { + _, less := UpdateMinDistance(x, a, b, limit) + return less +} + +// UpdateMinDistance checks if the distance from X to the edge AB is less +// than minDist, and if so, returns the updated value and true. +// The case A == B is handled correctly. 
+// +// Use this method when you want to compute many distances and keep track of +// the minimum. It is significantly faster than using DistanceFromSegment +// because (1) using s1.ChordAngle is much faster than s1.Angle, and (2) it +// can save a lot of work by not actually computing the distance when it is +// obviously larger than the current minimum. +func UpdateMinDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) { + return updateMinDistance(x, a, b, minDist, false) +} + +// UpdateMaxDistance checks if the distance from X to the edge AB is greater +// than maxDist, and if so, returns the updated value and true. +// Otherwise it returns false. The case A == B is handled correctly. +func UpdateMaxDistance(x, a, b Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) { + dist := maxChordAngle(ChordAngleBetweenPoints(x, a), ChordAngleBetweenPoints(x, b)) + if dist > s1.RightChordAngle { + dist, _ = updateMinDistance(Point{x.Mul(-1)}, a, b, dist, true) + dist = s1.StraightChordAngle - dist + } + if maxDist < dist { + return dist, true + } + + return maxDist, false +} + +// IsInteriorDistanceLess reports whether the minimum distance from X to the edge +// AB is attained at an interior point of AB (i.e., not an endpoint), and that +// distance is less than limit. (Specify limit.Successor() for less than or equal to). +func IsInteriorDistanceLess(x, a, b Point, limit s1.ChordAngle) bool { + _, less := UpdateMinInteriorDistance(x, a, b, limit) + return less +} + +// UpdateMinInteriorDistance reports whether the minimum distance from X to AB +// is attained at an interior point of AB (i.e., not an endpoint), and that distance +// is less than minDist. If so, the value of minDist is updated and true is returned. +// Otherwise it is unchanged and returns false. +func UpdateMinInteriorDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) { + return interiorDist(x, a, b, minDist, false) +} + +// Project returns the point along the edge AB that is closest to the point X. +// The fractional distance of this point along the edge AB can be obtained +// using DistanceFraction. +// +// This requires that all points are unit length. +func Project(x, a, b Point) Point { + aXb := a.PointCross(b) + // Find the closest point to X along the great circle through AB. + p := x.Sub(aXb.Mul(x.Dot(aXb.Vector) / aXb.Vector.Norm2())) + + // If this point is on the edge AB, then it's the closest point. + if Sign(aXb, a, Point{p}) && Sign(Point{p}, b, aXb) { + return Point{p.Normalize()} + } + + // Otherwise, the closest point is either A or B. + if x.Sub(a.Vector).Norm2() <= x.Sub(b.Vector).Norm2() { + return a + } + return b +} + +// DistanceFraction returns the distance ratio of the point X along an edge AB. +// If X is on the line segment AB, this is the fraction T such +// that X == Interpolate(T, A, B). +// +// This requires that A and B are distinct. +func DistanceFraction(x, a, b Point) float64 { + d0 := x.Angle(a.Vector) + d1 := x.Angle(b.Vector) + return float64(d0 / (d0 + d1)) +} + +// Interpolate returns the point X along the line segment AB whose distance from A +// is the given fraction "t" of the distance AB. Does NOT require that "t" be +// between 0 and 1. Note that all distances are measured on the surface of +// the sphere, so this is more complicated than just computing (1-t)*a + t*b +// and normalizing the result. 
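+//
+// For example, a hypothetical midpoint computation:
+//
+//	mid := Interpolate(0.5, a, b) // point halfway along the geodesic AB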
+func Interpolate(t float64, a, b Point) Point {
+	if t == 0 {
+		return a
+	}
+	if t == 1 {
+		return b
+	}
+	ab := a.Angle(b.Vector)
+	return InterpolateAtDistance(s1.Angle(t)*ab, a, b)
+}
+
+// InterpolateAtDistance returns the point X along the line segment AB whose
+// distance from A is the angle ax.
+func InterpolateAtDistance(ax s1.Angle, a, b Point) Point {
+	aRad := ax.Radians()
+
+	// Use PointCross to compute the tangent vector at A towards B. The
+	// result is always perpendicular to A, even if A=B or A=-B, but it is not
+	// necessarily unit length. (We effectively normalize it below.)
+	normal := a.PointCross(b)
+	tangent := normal.Vector.Cross(a.Vector)
+
+	// Now compute the appropriate linear combination of A and "tangent". With
+	// infinite precision the result would always be unit length, but we
+	// normalize it anyway to ensure that the error is within acceptable bounds.
+	// (Otherwise errors can build up when the result of one interpolation is
+	// fed into another interpolation.)
+	return Point{(a.Mul(math.Cos(aRad)).Add(tangent.Mul(math.Sin(aRad) / tangent.Norm()))).Normalize()}
+}
+
+// minUpdateDistanceMaxError returns the maximum error in the result of
+// UpdateMinDistance (and the associated functions such as
+// UpdateMinInteriorDistance, IsDistanceLess, etc), assuming that all
+// input points are normalized to within the bounds guaranteed by r3.Vector's
+// Normalize. The error can be added or subtracted from an s1.ChordAngle
+// using its Expanded method.
+func minUpdateDistanceMaxError(dist s1.ChordAngle) float64 {
+	// There are two cases for the maximum error in UpdateMinDistance(),
+	// depending on whether the closest point is interior to the edge.
+	return math.Max(minUpdateInteriorDistanceMaxError(dist), dist.MaxPointError())
+}
+
+// minUpdateInteriorDistanceMaxError returns the maximum error in the result of
+// UpdateMinInteriorDistance, assuming that all input points are normalized
+// to within the bounds guaranteed by Point's Normalize. The error can be added
+// or subtracted from an s1.ChordAngle using its Expanded method.
+//
+// Note that accuracy goes down as the distance approaches 0 degrees or 180
+// degrees (for different reasons). Near 0 degrees the error is acceptable
+// for all practical purposes (about 1.2e-15 radians ~= 8 nanometers). For
+// exactly antipodal points the maximum error is quite high (0.5 meters),
+// but this error drops rapidly as the points move away from antipodality
+// (approximately 1 millimeter for points that are 50 meters from antipodal,
+// and 1 micrometer for points that are 50km from antipodal).
+//
+// TODO(roberts): Currently the error bound does not hold for edges whose endpoints
+// are antipodal to within about 1e-15 radians (less than 1 micron). This could
+// be fixed by extending PointCross to use higher precision when necessary.
+func minUpdateInteriorDistanceMaxError(dist s1.ChordAngle) float64 {
+	// If a point is more than 90 degrees from an edge, then the minimum
+	// distance is always to one of the endpoints, not to the edge interior.
+	if dist >= s1.RightChordAngle {
+		return 0.0
+	}
+
+	// This bound includes all sources of error, assuming that the input points
+	// are normalized. a and b are components of chord length that are
+	// perpendicular and parallel to a plane containing the edge respectively.
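+	// (Ignoring the clamp below, b equals 1-cos(theta) and a equals
+	// |sin(theta)|, since b*(2-b) = (1-cos)(1+cos) = sin^2.)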
+	b := math.Min(1.0, 0.5*float64(dist))
+	a := math.Sqrt(b * (2 - b))
+	return ((2.5+2*math.Sqrt(3)+8.5*a)*a +
+		(2+2*math.Sqrt(3)/3+6.5*(1-b))*b +
+		(23+16/math.Sqrt(3))*dblEpsilon) * dblEpsilon
+}
+
+// updateMinDistance computes the distance from a point X to a line segment AB,
+// and if the distance is less than the given minDist or alwaysUpdate is true,
+// it returns the updated value and whether it was updated.
+func updateMinDistance(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
+	if d, ok := interiorDist(x, a, b, minDist, alwaysUpdate); ok {
+		// Minimum distance is attained along the edge interior.
+		return d, true
+	}
+
+	// Otherwise the minimum distance is to one of the endpoints.
+	xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
+	dist := s1.ChordAngle(math.Min(xa2, xb2))
+	if !alwaysUpdate && dist >= minDist {
+		return minDist, false
+	}
+	return dist, true
+}
+
+// interiorDist returns the shortest distance from point x to edge ab, assuming
+// that the closest point to X is interior to AB. If the closest point is not
+// interior to AB, interiorDist returns (minDist, false). If alwaysUpdate is set to
+// false, the distance is only updated when it is less than the given minDist.
+func interiorDist(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
+	// Chord distance of x to both end points a and b.
+	xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
+
+	// The closest point on AB could either be one of the two vertices (the
+	// vertex case) or in the interior (the interior case). Let C = A x B.
+	// If X is in the spherical wedge extending from A to B around the axis
+	// through C, then we are in the interior case. Otherwise we are in the
+	// vertex case.
+	//
+	// Check whether we might be in the interior case. For this to be true, XAB
+	// and XBA must both be acute angles. Checking this condition exactly is
+	// expensive, so instead we consider the planar triangle ABX (which passes
+	// through the sphere's interior). The planar angles XAB and XBA are always
+	// less than the corresponding spherical angles, so if we are in the
+	// interior case then both of these angles must be acute.
+	//
+	// We check this by computing the squared edge lengths of the planar
+	// triangle ABX, and testing whether angles XAB and XBA are both acute using
+	// the law of cosines:
+	//
+	//	| XA^2 - XB^2 | < AB^2      (*)
+	//
+	// This test must be done conservatively (taking numerical errors into
+	// account) since otherwise we might miss a situation where the true minimum
+	// distance is achieved by a point on the edge interior.
+	//
+	// There are two sources of error in the expression above (*). The first is
+	// that points are not normalized exactly; they are only guaranteed to be
+	// within 2 * dblEpsilon of unit length. Under the assumption that the two
+	// sides of (*) are nearly equal, the total error due to normalization errors
+	// can be shown to be at most
+	//
+	//	2 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 .
+	//
+	// The other source of error is rounding of results in the calculation of (*).
+	// Each of XA^2, XB^2, AB^2 has a maximum relative error of 2.5 * dblEpsilon,
+	// plus an additional relative error of 0.5 * dblEpsilon in the final
+	// subtraction which we further bound as 0.25 * dblEpsilon * (XA^2 + XB^2 +
+	// AB^2) for convenience.
This yields a final error bound of
+	//
+	//	4.75 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 .
+	ab2 := a.Sub(b.Vector).Norm2()
+	maxError := (4.75*dblEpsilon*(xa2+xb2+ab2) + 8*dblEpsilon*dblEpsilon)
+	if math.Abs(xa2-xb2) >= ab2+maxError {
+		return minDist, false
+	}
+
+	// The minimum distance might be to a point on the edge interior. Let R
+	// be the closest point to X that lies on the great circle through AB. Rather
+	// than computing the geodesic distance along the surface of the sphere,
+	// instead we compute the "chord length" through the sphere's interior.
+	//
+	// The squared chord length XR^2 can be expressed as XQ^2 + QR^2, where Q
+	// is the point X projected onto the plane through the great circle AB.
+	// The distance XQ^2 can be written as (X.C)^2 / |C|^2 where C = A x B.
+	// We ignore the QR^2 term and instead use XQ^2 as a lower bound, since it
+	// is faster and the corresponding distance on the Earth's surface is
+	// accurate to within 1% for distances up to about 1800km.
+	c := a.PointCross(b)
+	c2 := c.Norm2()
+	xDotC := x.Dot(c.Vector)
+	xDotC2 := xDotC * xDotC
+	if !alwaysUpdate && xDotC2 > c2*float64(minDist) {
+		// The closest point on the great circle AB is too far away. We need to
+		// test this using ">" rather than ">=" because the actual minimum bound
+		// on the distance is (xDotC2 / c2), which can be rounded differently
+		// than the (more efficient) multiplicative test above.
+		return minDist, false
+	}
+
+	// Otherwise we do the exact, more expensive test for the interior case.
+	// This test is very likely to succeed because of the conservative planar
+	// test we did initially.
+	//
+	// TODO(roberts): Ensure that the errors in test are accurately reflected in the
+	// minUpdateInteriorDistanceMaxError.
+	cx := c.Cross(x.Vector)
+	if a.Sub(x.Vector).Dot(cx) >= 0 || b.Sub(x.Vector).Dot(cx) <= 0 {
+		return minDist, false
+	}
+
+	// Compute the squared chord length XR^2 = XQ^2 + QR^2 (see above).
+	// This calculation has good accuracy for all chord lengths since it
+	// is based on both the dot product and cross product (rather than
+	// deriving one from the other). However, note that the chord length
+	// representation itself loses accuracy as the angle approaches π.
+	qr := 1 - math.Sqrt(cx.Norm2()/c2)
+	dist := s1.ChordAngle((xDotC2 / c2) + (qr * qr))
+
+	if !alwaysUpdate && dist >= minDist {
+		return minDist, false
+	}
+
+	return dist, true
+}
+
+// updateEdgePairMinDistance computes the minimum distance between the given
+// pair of edges. If the two edges cross, the distance is zero. The cases
+// a0 == a1 and b0 == b1 are handled correctly.
+func updateEdgePairMinDistance(a0, a1, b0, b1 Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
+	if minDist == 0 {
+		return 0, false
+	}
+	if CrossingSign(a0, a1, b0, b1) == Cross {
+		minDist = 0
+		return 0, true
+	}
+
+	// Otherwise, the minimum distance is achieved at an endpoint of at least
+	// one of the two edges. We ensure that all four possibilities are always checked.
+	//
+	// The calculation below computes each of the six vertex-vertex distances
+	// twice (this could be optimized).
+	var ok1, ok2, ok3, ok4 bool
+	minDist, ok1 = UpdateMinDistance(a0, b0, b1, minDist)
+	minDist, ok2 = UpdateMinDistance(a1, b0, b1, minDist)
+	minDist, ok3 = UpdateMinDistance(b0, a0, a1, minDist)
+	minDist, ok4 = UpdateMinDistance(b1, a0, a1, minDist)
+	return minDist, ok1 || ok2 || ok3 || ok4
+}
+
+// updateEdgePairMaxDistance reports the maximum distance between the given pair of edges.
+// If one edge crosses the antipodal reflection of the other, the distance is pi. +func updateEdgePairMaxDistance(a0, a1, b0, b1 Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) { + if maxDist == s1.StraightChordAngle { + return s1.StraightChordAngle, false + } + if CrossingSign(a0, a1, Point{b0.Mul(-1)}, Point{b1.Mul(-1)}) == Cross { + return s1.StraightChordAngle, true + } + + // Otherwise, the maximum distance is achieved at an endpoint of at least + // one of the two edges. We ensure that all four possibilities are always checked. + // + // The calculation below computes each of the six vertex-vertex distances + // twice (this could be optimized). + var ok1, ok2, ok3, ok4 bool + maxDist, ok1 = UpdateMaxDistance(a0, b0, b1, maxDist) + maxDist, ok2 = UpdateMaxDistance(a1, b0, b1, maxDist) + maxDist, ok3 = UpdateMaxDistance(b0, a0, a1, maxDist) + maxDist, ok4 = UpdateMaxDistance(b1, a0, a1, maxDist) + return maxDist, ok1 || ok2 || ok3 || ok4 +} + +// EdgePairClosestPoints returns the pair of points (a, b) that achieves the +// minimum distance between edges a0a1 and b0b1, where a is a point on a0a1 and +// b is a point on b0b1. If the two edges intersect, a and b are both equal to +// the intersection point. Handles a0 == a1 and b0 == b1 correctly. +func EdgePairClosestPoints(a0, a1, b0, b1 Point) (Point, Point) { + if CrossingSign(a0, a1, b0, b1) == Cross { + x := Intersection(a0, a1, b0, b1) + return x, x + } + // We save some work by first determining which vertex/edge pair achieves + // the minimum distance, and then computing the closest point on that edge. + var minDist s1.ChordAngle + var ok bool + + minDist, ok = updateMinDistance(a0, b0, b1, minDist, true) + closestVertex := 0 + if minDist, ok = UpdateMinDistance(a1, b0, b1, minDist); ok { + closestVertex = 1 + } + if minDist, ok = UpdateMinDistance(b0, a0, a1, minDist); ok { + closestVertex = 2 + } + if minDist, ok = UpdateMinDistance(b1, a0, a1, minDist); ok { + closestVertex = 3 + } + switch closestVertex { + case 0: + return a0, Project(a0, b0, b1) + case 1: + return a1, Project(a1, b0, b1) + case 2: + return Project(b0, a0, a1), b0 + case 3: + return Project(b1, a0, a1), b1 + default: + panic("illegal case reached") + } +} diff --git a/vendor/github.com/golang/geo/s2/edge_query.go b/vendor/github.com/golang/geo/s2/edge_query.go new file mode 100644 index 000000000..3942c2bc5 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/edge_query.go @@ -0,0 +1,512 @@ +// Copyright 2019 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "sort" + + "github.com/golang/geo/s1" +) + +// EdgeQueryOptions holds the options for controlling how EdgeQuery operates. +// +// Options can be chained together builder-style: +// +// opts = NewClosestEdgeQueryOptions(). +// MaxResults(1). +// DistanceLimit(s1.ChordAngleFromAngle(3 * s1.Degree)). 
+// MaxError(s1.ChordAngleFromAngle(0.001 * s1.Degree))
+// query = NewClosestEdgeQuery(index, opts)
+//
+// or set individually:
+//
+// opts = NewClosestEdgeQueryOptions()
+// opts.IncludeInteriors(true)
+//
+// or just inline:
+//
+// query = NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions().MaxResults(3))
+//
+// If you pass nil as the options you get the default values for the options.
+type EdgeQueryOptions struct {
+	common *queryOptions
+}
+
+// DistanceLimit specifies that only edges whose distance to the target is
+// within this distance should be returned. Edges whose distance is equal
+// are not returned. To include values that are equal, specify the limit with
+// the next largest representable distance, i.e. limit.Successor().
+func (e *EdgeQueryOptions) DistanceLimit(limit s1.ChordAngle) *EdgeQueryOptions {
+	e.common = e.common.DistanceLimit(limit)
+	return e
+}
+
+// IncludeInteriors specifies whether polygon interiors should be
+// included when measuring distances.
+func (e *EdgeQueryOptions) IncludeInteriors(x bool) *EdgeQueryOptions {
+	e.common = e.common.IncludeInteriors(x)
+	return e
+}
+
+// UseBruteForce sets or disables the use of brute force in a query.
+func (e *EdgeQueryOptions) UseBruteForce(x bool) *EdgeQueryOptions {
+	e.common = e.common.UseBruteForce(x)
+	return e
+}
+
+// MaxError specifies that edges up to dist further away than the true
+// matching edges may be substituted in the result set, as long as such
+// edges satisfy all the remaining search criteria (such as DistanceLimit).
+// This option only has an effect if MaxResults is also specified;
+// otherwise all edges closer than the distance limit will always be returned.
+func (e *EdgeQueryOptions) MaxError(dist s1.ChordAngle) *EdgeQueryOptions {
+	e.common = e.common.MaxError(dist)
+	return e
+}
+
+// MaxResults specifies that at most MaxResults edges should be returned.
+// This must be at least 1.
+func (e *EdgeQueryOptions) MaxResults(n int) *EdgeQueryOptions {
+	e.common = e.common.MaxResults(n)
+	return e
+}
+
+// NewClosestEdgeQueryOptions returns a set of edge query options suitable
+// for performing closest edge queries.
+func NewClosestEdgeQueryOptions() *EdgeQueryOptions {
+	return &EdgeQueryOptions{
+		common: newQueryOptions(minDistance(0)),
+	}
+}
+
+// NewFurthestEdgeQueryOptions returns a set of edge query options suitable
+// for performing furthest edge queries.
+func NewFurthestEdgeQueryOptions() *EdgeQueryOptions {
+	return &EdgeQueryOptions{
+		common: newQueryOptions(maxDistance(0)),
+	}
+}
+
+// EdgeQueryResult represents an edge that meets the target criteria for the
+// query. Note the following special cases:
+//
+// - ShapeID >= 0 && EdgeID < 0 represents the interior of a shape.
+// Such results may be returned when the option IncludeInteriors is true.
+//
+// - ShapeID < 0 && EdgeID < 0 is returned to indicate that no edge
+// satisfies the requested query options.
+type EdgeQueryResult struct {
+	distance distance
+	shapeID  int32
+	edgeID   int32
+}
+
+// Distance reports the distance to the edge in this shape that satisfied
+// the query's parameters.
+func (e EdgeQueryResult) Distance() s1.ChordAngle { return e.distance.chordAngle() }
+
+// ShapeID reports the ID of the Shape this result is for.
+func (e EdgeQueryResult) ShapeID() int32 { return e.shapeID }
+
+// EdgeID reports the ID of the edge in the result's Shape.
+func (e EdgeQueryResult) EdgeID() int32 { return e.edgeID }
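+
+// Editor's note: an illustrative sketch (not part of the upstream API) of
+// interpreting results returned by an EdgeQuery, assuming query and target
+// were constructed elsewhere:
+//
+//	for _, r := range query.FindEdges(target) {
+//		if r.IsInterior() {
+//			// The target intersects the interior of the shape with
+//			// ID r.ShapeID(); there is no specific edge.
+//		} else {
+//			// r.ShapeID(), r.EdgeID() and r.Distance() identify the edge.
+//		}
+//	}
+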
+// newEdgeQueryResult returns a result instance with default values.
+func newEdgeQueryResult(target distanceTarget) EdgeQueryResult {
+	return EdgeQueryResult{
+		distance: target.distance().infinity(),
+		shapeID:  -1,
+		edgeID:   -1,
+	}
+}
+
+// IsInterior reports if this result represents the interior of a Shape.
+func (e EdgeQueryResult) IsInterior() bool {
+	return e.shapeID >= 0 && e.edgeID < 0
+}
+
+// IsEmpty reports if this result has no edge that satisfies the given edge
+// query options. This result is only returned in one special case, namely
+// when FindEdge() does not find any suitable edges.
+func (e EdgeQueryResult) IsEmpty() bool {
+	return e.shapeID < 0
+}
+
+// Less reports if this result is less than the other, first by distance,
+// then by (shapeID, edgeID). This is used for sorting.
+func (e EdgeQueryResult) Less(other EdgeQueryResult) bool {
+	if e.distance.less(other.distance) {
+		return true
+	}
+	if other.distance.less(e.distance) {
+		return false
+	}
+	if e.shapeID < other.shapeID {
+		return true
+	}
+	if other.shapeID < e.shapeID {
+		return false
+	}
+	return e.edgeID < other.edgeID
+}
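+
+// Editor's note: Less is written for use with sort.Slice, as in the
+// sortAndUniqueResults helper later in this file:
+//
+//	sort.Slice(results, func(i, j int) bool { return results[i].Less(results[j]) })
+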
+// EdgeQuery is used to find the edge(s) between two geometries that match a
+// given set of options. It is flexible enough so that it can be adapted to
+// compute maximum distances and even potentially Hausdorff distances.
+//
+// By using the appropriate options, this type can answer questions such as:
+//
+// - Find the minimum distance between two geometries A and B.
+// - Find all edges of geometry A that are within a distance D of geometry B.
+// - Find the k edges of geometry A that are closest to a given point P.
+//
+// You can also specify whether polygons should include their interiors (i.e.,
+// if a point is contained by a polygon, should the distance be zero or should
+// it be measured to the polygon boundary?)
+//
+// The input geometries may consist of any number of points, polylines, and
+// polygons (collectively referred to as "shapes"). Shapes do not need to be
+// disjoint; they may overlap or intersect arbitrarily. The implementation is
+// designed to be fast for both simple and complex geometries.
+type EdgeQuery struct {
+	index  *ShapeIndex
+	opts   *queryOptions
+	target distanceTarget
+
+	// True if opts.maxError must be subtracted from ShapeIndex cell distances
+	// in order to ensure that such distances are measured conservatively. This
+	// is true only if the target takes advantage of maxError in order to
+	// return faster results, and 0 < maxError < distanceLimit.
+	useConservativeCellDistance bool
+
+	// The decision about whether to use the brute force algorithm is based on
+	// counting the total number of edges in the index. However if the index
+	// contains a large number of shapes, this in itself might take too long.
+	// So instead we only count edges up to (maxBruteForceIndexSize() + 1)
+	// for the current target type (stored as indexNumEdgesLimit).
+	indexNumEdges      int
+	indexNumEdgesLimit int
+
+	// The distance beyond which we can safely ignore further candidate edges.
+	// (Candidates that are exactly at the limit are ignored; this is more
+	// efficient for UpdateMinDistance and should not affect clients since
+	// distance measurements have a small amount of error anyway.)
+	//
+	// Initially this is the same as the maximum distance specified by the user,
+	// but it can also be updated by the algorithm (see maybeAddResult).
+	distanceLimit distance
+
+	// The current set of results of the query.
+	results []EdgeQueryResult
+
+	// This field is true when duplicates must be avoided explicitly. This
+	// is achieved by maintaining a separate set keyed by (shapeID, edgeID)
+	// only, and checking whether each edge is in that set before computing the
+	// distance to it.
+	avoidDuplicates bool
+
+	// testedEdges tracks the set of shapes and edges that have already been tested.
+	testedEdges map[ShapeEdgeID]uint32
+}
+
+// NewClosestEdgeQuery returns an EdgeQuery that is used for finding the
+// closest edge(s) to a given Point, Edge, Cell, or geometry collection.
+//
+// You can find either the k closest edges, or all edges within a given
+// radius, or both (i.e., the k closest edges up to a given maximum radius).
+// E.g. to find all the edges within 5 kilometers, set the DistanceLimit in
+// the options.
+//
+// By default *all* edges are returned, so you should always specify either
+// MaxResults or DistanceLimit options or both.
+//
+// Note that by default, distances are measured to the boundary and interior
+// of polygons. For example, if a point is inside a polygon then its distance
+// is zero. To change this behavior, set the IncludeInteriors option to false.
+//
+// If you only need to test whether the distance is above or below a given
+// threshold (e.g., 10 km), you can use the IsDistanceLess() method. This is
+// much faster than actually calculating the distance with FindEdge,
+// since the implementation can stop as soon as it can prove that the minimum
+// distance is either above or below the threshold.
+func NewClosestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery {
+	if opts == nil {
+		opts = NewClosestEdgeQueryOptions()
+	}
+	return &EdgeQuery{
+		testedEdges: make(map[ShapeEdgeID]uint32),
+		index:       index,
+		opts:        opts.common,
+	}
+}
+
+// NewFurthestEdgeQuery returns an EdgeQuery that is used for finding the
+// furthest edge(s) from a given Point, Edge, Cell, or geometry collection.
+//
+// The furthest edge is defined as the one which maximizes the
+// distance from any point on that edge to any point on the target geometry.
+//
+// Similar to the example in NewClosestEdgeQuery, the options can be used,
+// for example, to find the 5 furthest edges from a given Point.
+func NewFurthestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery {
+	if opts == nil {
+		opts = NewFurthestEdgeQueryOptions()
+	}
+	return &EdgeQuery{
+		testedEdges: make(map[ShapeEdgeID]uint32),
+		index:       index,
+		opts:        opts.common,
+	}
+}
+
+// FindEdges returns the edges for the given target that satisfy the current options.
+//
+// Note that if opts.IncludeInteriors is true, the results may include some
+// entries with edgeID == -1. This indicates that the target intersects
+// the indexed polygon with the given ShapeID.
+func (e *EdgeQuery) FindEdges(target distanceTarget) []EdgeQueryResult {
+	return e.findEdges(target, e.opts)
+}
+
+// Distance reports the distance to the target. If the index or target is empty,
+// returns the EdgeQuery's maximal sentinel.
+//
+// Use IsDistanceLess()/IsDistanceGreater() if you only want to compare the
+// distance against a threshold value, since it is often much faster.
+func (e *EdgeQuery) Distance(target distanceTarget) s1.ChordAngle {
+	return e.findEdge(target, e.opts).Distance()
+}
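+
+// Editor's note: an illustrative sketch (not part of the upstream API),
+// assuming index is a populated *ShapeIndex and target is a distance target
+// constructed elsewhere in this package:
+//
+//	query := NewClosestEdgeQuery(index, nil) // nil selects the default options
+//	if query.IsDistanceLess(target, s1.ChordAngleFromAngle(s1.Degree)) {
+//		// Some indexed edge lies within one degree of the target, proven
+//		// without computing the exact minimum distance.
+//	}
+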
+// IsDistanceLess reports if the distance to target is less than the given limit.
+//
+// This method is usually much faster than Distance(), since it is much
+// less work to determine whether the minimum distance is above or below a
+// threshold than it is to calculate the actual minimum distance.
+//
+// If you wish to check if the distance is less than or equal to the limit, use:
+//
+//	query.IsDistanceLess(target, limit.Successor())
+func (e *EdgeQuery) IsDistanceLess(target distanceTarget, limit s1.ChordAngle) bool {
+	opts := e.opts
+	opts = opts.MaxResults(1).
+		DistanceLimit(limit).
+		MaxError(s1.StraightChordAngle)
+	return !e.findEdge(target, opts).IsEmpty()
+}
+
+// IsDistanceGreater reports if the distance to target is greater than limit.
+//
+// This method is usually much faster than Distance, since it is much
+// less work to determine whether the maximum distance is above or below a
+// threshold than it is to calculate the actual maximum distance.
+// If you wish to check if the distance is greater than or equal to the limit, use:
+//
+//	query.IsDistanceGreater(target, limit.Predecessor())
+func (e *EdgeQuery) IsDistanceGreater(target distanceTarget, limit s1.ChordAngle) bool {
+	return e.IsDistanceLess(target, limit)
+}
+
+// IsConservativeDistanceLessOrEqual reports if the distance to target is less
+// or equal to the limit, where the limit has been expanded by the maximum error
+// for the distance calculation.
+//
+// For example, suppose that we want to test whether two geometries might
+// intersect each other after they are snapped together using Builder
+// (using the IdentitySnapFunction with a given "snap radius"). Since
+// Builder uses exact distance predicates (s2predicates), we need to
+// measure the distance between the two geometries conservatively. If the
+// distance is definitely greater than "snap radius", then the geometries
+// are guaranteed to not intersect after snapping.
+func (e *EdgeQuery) IsConservativeDistanceLessOrEqual(target distanceTarget, limit s1.ChordAngle) bool {
+	return e.IsDistanceLess(target, limit.Expanded(minUpdateDistanceMaxError(limit)))
+}
+
+// IsConservativeDistanceGreaterOrEqual reports if the distance to the target is greater
+// than or equal to the given limit with some small tolerance.
+func (e *EdgeQuery) IsConservativeDistanceGreaterOrEqual(target distanceTarget, limit s1.ChordAngle) bool {
+	return e.IsDistanceGreater(target, limit.Expanded(-minUpdateDistanceMaxError(limit)))
+}
+
+// findEdges returns the closest edges to the given target that satisfy the given options.
+//
+// Note that if opts.includeInteriors is true, the results may include some
+// entries with edgeID == -1. This indicates that the target intersects the
+// indexed polygon with the given shapeID.
+func (e *EdgeQuery) findEdges(target distanceTarget, opts *queryOptions) []EdgeQueryResult {
+	e.findEdgesInternal(target, opts)
+	// TODO(roberts): Revisit this if there is a heap or other sorted and
+	// uniquing datastructure we can use instead of just a slice.
+	e.results = sortAndUniqueResults(e.results)
+	if len(e.results) > e.opts.maxResults {
+		e.results = e.results[:e.opts.maxResults]
+	}
+	return e.results
+}
+
+func sortAndUniqueResults(results []EdgeQueryResult) []EdgeQueryResult {
+	if len(results) <= 1 {
+		return results
+	}
+	sort.Slice(results, func(i, j int) bool { return results[i].Less(results[j]) })
+	j := 0
+	for i := 1; i < len(results); i++ {
+		if results[j] == results[i] {
+			continue
+		}
+		j++
+		results[j] = results[i]
+	}
+	return results[:j+1]
+}
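+
+// Editor's note: sortAndUniqueResults uses the standard sorted-slice
+// compaction idiom: after sorting, index j marks the last unique element
+// kept so far, and each distinct element is copied forward. For example,
+// results with distances [3, 1, 1, 2] (identical IDs on the duplicates)
+// sort to [1, 1, 2, 3] and compact to [1, 2, 3].
+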
+// findEdge is a convenience method that returns exactly one edge, and if no
+// edges satisfy the given search criteria, then a default Result is returned.
+//
+// This is primarily to ease the usage of a number of the methods in the DistanceTargets
+// and in EdgeQuery.
+func (e *EdgeQuery) findEdge(target distanceTarget, opts *queryOptions) EdgeQueryResult {
+	opts.MaxResults(1)
+	e.findEdges(target, opts)
+	if len(e.results) > 0 {
+		return e.results[0]
+	}
+
+	return newEdgeQueryResult(target)
+}
+
+// findEdgesInternal does the actual work of finding the edges that match the given options.
+func (e *EdgeQuery) findEdgesInternal(target distanceTarget, opts *queryOptions) {
+	e.target = target
+	e.opts = opts
+
+	e.testedEdges = make(map[ShapeEdgeID]uint32)
+	e.distanceLimit = target.distance().fromChordAngle(opts.distanceLimit)
+	e.results = make([]EdgeQueryResult, 0)
+
+	if e.distanceLimit == target.distance().zero() {
+		return
+	}
+
+	if opts.includeInteriors {
+		shapeIDs := map[int32]struct{}{}
+		e.target.visitContainingShapes(e.index, func(containingShape Shape, targetPoint Point) bool {
+			shapeIDs[e.index.idForShape(containingShape)] = struct{}{}
+			return len(shapeIDs) < opts.maxResults
+		})
+		for shapeID := range shapeIDs {
+			e.addResult(EdgeQueryResult{target.distance().zero(), shapeID, -1})
+		}
+
+		if e.distanceLimit == target.distance().zero() {
+			return
+		}
+	}
+
+	// If maxError > 0 and the target takes advantage of this, then we may
+	// need to adjust the distance estimates to ShapeIndex cells to ensure
+	// that they are always a lower bound on the true distance. For example,
+	// suppose distanceLimit == 100, maxError == 30, and we compute the distance
+	// to the target from some cell C0 as d(C0) == 80. Then because the target
+	// takes advantage of maxError, the true distance could be as low as 50.
+	// In order not to miss edges contained by such cells, we need to subtract
+	// maxError from the distance estimates. This behavior is controlled by
+	// the useConservativeCellDistance flag.
+	//
+	// However there is one important case where this adjustment is not
+	// necessary, namely when distanceLimit < maxError. This is because
+	// maxError only affects the algorithm once at least maxResults edges
+	// have been found that satisfy the given distance limit. At that point,
+	// maxError is subtracted from distanceLimit in order to ensure that
+	// any further matches are closer by at least that amount. But when
+	// distanceLimit < maxError, this reduces the distance limit to 0,
+	// i.e. all remaining candidate cells and edges can safely be discarded.
+	// (This is how IsDistanceLess() and friends are implemented.)
+	targetUsesMaxError := opts.maxError != target.distance().zero().chordAngle() &&
+		e.target.setMaxError(opts.maxError)
+
+	// Note that we can't compare maxError and distanceLimit directly
+	// because one is a Delta and one is a Distance. Instead we subtract them.
+	e.useConservativeCellDistance = targetUsesMaxError &&
+		(e.distanceLimit == target.distance().infinity() ||
+			target.distance().zero().less(e.distanceLimit.sub(target.distance().fromChordAngle(opts.maxError))))
+
+	// Use the brute force algorithm if the index is small enough. To avoid
+	// spending too much time counting edges when there are many shapes, we stop
+	// counting once there are too many edges. We may need to recount the edges
+	// if we later see a target with a larger brute force edge threshold.
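+	// Editor's note: for example, if the target's maxBruteForceIndexSize()
+	// is 100, then edges are counted via NumEdgesUpTo(101), and the
+	// optimized path is considered only once the index is known to contain
+	// at least 101 edges.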
+ minOptimizedEdges := e.target.maxBruteForceIndexSize() + 1 + if minOptimizedEdges > e.indexNumEdgesLimit && e.indexNumEdges >= e.indexNumEdgesLimit { + e.indexNumEdges = e.index.NumEdgesUpTo(minOptimizedEdges) + e.indexNumEdgesLimit = minOptimizedEdges + } + + if opts.useBruteForce || e.indexNumEdges < minOptimizedEdges { + // The brute force algorithm already considers each edge exactly once. + e.avoidDuplicates = false + e.findEdgesBruteForce() + } else { + // If the target takes advantage of maxError then we need to avoid + // duplicate edges explicitly. (Otherwise it happens automatically.) + e.avoidDuplicates = targetUsesMaxError && opts.maxResults > 1 + + // TODO(roberts): Uncomment when optimized is completed. + e.findEdgesBruteForce() + //e.findEdgesOptimized() + } +} + +func (e *EdgeQuery) addResult(r EdgeQueryResult) { + e.results = append(e.results, r) + if e.opts.maxResults == 1 { + // Optimization for the common case where only the closest edge is wanted. + e.distanceLimit = r.distance.sub(e.target.distance().fromChordAngle(e.opts.maxError)) + } + // TODO(roberts): Add the other if/else cases when a different data structure + // is used for the results. +} + +func (e *EdgeQuery) maybeAddResult(shape Shape, edgeID int32) { + if _, ok := e.testedEdges[ShapeEdgeID{e.index.idForShape(shape), edgeID}]; e.avoidDuplicates && !ok { + return + } + edge := shape.Edge(int(edgeID)) + dist := e.distanceLimit + + if dist, ok := e.target.updateDistanceToEdge(edge, dist); ok { + e.addResult(EdgeQueryResult{dist, e.index.idForShape(shape), edgeID}) + } +} + +func (e *EdgeQuery) findEdgesBruteForce() { + // Range over all shapes in the index. Does order matter here? if so + // switch to for i = 0 .. n? + for _, shape := range e.index.shapes { + // TODO(roberts): can this happen if we are only ranging over current entries? + if shape == nil { + continue + } + for edgeID := int32(0); edgeID < int32(shape.NumEdges()); edgeID++ { + e.maybeAddResult(shape, edgeID) + } + } +} + +// TODO(roberts): Remaining pieces +// Add clear/reset/re-init method to empty out the state of the query. +// findEdgesOptimized and related methods. +// GetEdge +// Project diff --git a/vendor/github.com/golang/geo/s2/edge_tessellator.go b/vendor/github.com/golang/geo/s2/edge_tessellator.go new file mode 100644 index 000000000..5ad63bea2 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/edge_tessellator.go @@ -0,0 +1,167 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/r2" + "github.com/golang/geo/s1" +) + +const ( + // MinTessellationTolerance is the minimum supported tolerance (which + // corresponds to a distance less than 1 micrometer on the Earth's + // surface, but is still much larger than the expected projection and + // interpolation errors). 
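+	// Editor's note: at the Earth's mean radius of roughly 6.371e6 m, an
+	// angle of 1e-13 radians subtends about 6.4e-7 m, i.e. well under a
+	// micrometer, consistent with the comment above.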
+ MinTessellationTolerance s1.Angle = 1e-13 +) + +// EdgeTessellator converts an edge in a given projection (e.g., Mercator) into +// a chain of spherical geodesic edges such that the maximum distance between +// the original edge and the geodesic edge chain is at most the requested +// tolerance. Similarly, it can convert a spherical geodesic edge into a chain +// of edges in a given 2D projection such that the maximum distance between the +// geodesic edge and the chain of projected edges is at most the requested tolerance. +// +// Method | Input | Output +// ------------|------------------------|----------------------- +// Projected | S2 geodesics | Planar projected edges +// Unprojected | Planar projected edges | S2 geodesics +type EdgeTessellator struct { + projection Projection + tolerance s1.ChordAngle + wrapDistance r2.Point +} + +// NewEdgeTessellator creates a new edge tessellator for the given projection and tolerance. +func NewEdgeTessellator(p Projection, tolerance s1.Angle) *EdgeTessellator { + return &EdgeTessellator{ + projection: p, + tolerance: s1.ChordAngleFromAngle(maxAngle(tolerance, MinTessellationTolerance)), + wrapDistance: p.WrapDistance(), + } +} + +// AppendProjected converts the spherical geodesic edge AB to a chain of planar edges +// in the given projection and returns the corresponding vertices. +// +// If the given projection has one or more coordinate axes that wrap, then +// every vertex's coordinates will be as close as possible to the previous +// vertex's coordinates. Note that this may yield vertices whose +// coordinates are outside the usual range. For example, tessellating the +// edge (0:170, 0:-170) (in lat:lng notation) yields (0:170, 0:190). +func (e *EdgeTessellator) AppendProjected(a, b Point, vertices []r2.Point) []r2.Point { + pa := e.projection.Project(a) + if len(vertices) == 0 { + vertices = []r2.Point{pa} + } else { + pa = e.wrapDestination(vertices[len(vertices)-1], pa) + } + + pb := e.wrapDestination(pa, e.projection.Project(b)) + return e.appendProjected(pa, a, pb, b, vertices) +} + +// appendProjected splits a geodesic edge AB as necessary and returns the +// projected vertices appended to the given vertices. +// +// The maximum recursion depth is (math.Pi / MinTessellationTolerance) < 45 +func (e *EdgeTessellator) appendProjected(pa r2.Point, a Point, pb r2.Point, b Point, vertices []r2.Point) []r2.Point { + // It's impossible to robustly test whether a projected edge is close enough + // to a geodesic edge without knowing the details of the projection + // function, but the following heuristic works well for a wide range of map + // projections. The idea is simply to test whether the midpoint of the + // projected edge is close enough to the midpoint of the geodesic edge. + // + // This measures the distance between the two edges by treating them as + // parametric curves rather than geometric ones. The problem with + // measuring, say, the minimum distance from the projected midpoint to the + // geodesic edge is that this is a lower bound on the value we want, because + // the maximum separation between the two curves is generally not attained + // at the midpoint of the projected edge. The distance between the curve + // midpoints is at least an upper bound on the distance from either midpoint + // to opposite curve. It's not necessarily an upper bound on the maximum + // distance between the two curves, but it is a powerful requirement because + // it demands that the two curves stay parametrically close together. 
This + // turns out to be much more robust with respect for projections with + // singularities (e.g., the North and South poles in the rectangular and + // Mercator projections) because the curve parameterization speed changes + // rapidly near such singularities. + mid := Point{a.Add(b.Vector).Normalize()} + testMid := e.projection.Unproject(e.projection.Interpolate(0.5, pa, pb)) + + if ChordAngleBetweenPoints(mid, testMid) < e.tolerance { + return append(vertices, pb) + } + + pmid := e.wrapDestination(pa, e.projection.Project(mid)) + vertices = e.appendProjected(pa, a, pmid, mid, vertices) + return e.appendProjected(pmid, mid, pb, b, vertices) +} + +// AppendUnprojected converts the planar edge AB in the given projection to a chain of +// spherical geodesic edges and returns the vertices. +// +// Note that to construct a Loop, you must eliminate the duplicate first and last +// vertex. Note also that if the given projection involves coordinate wrapping +// (e.g. across the 180 degree meridian) then the first and last vertices may not +// be exactly the same. +func (e *EdgeTessellator) AppendUnprojected(pa, pb r2.Point, vertices []Point) []Point { + pb2 := e.wrapDestination(pa, pb) + a := e.projection.Unproject(pa) + b := e.projection.Unproject(pb) + + if len(vertices) == 0 { + vertices = []Point{a} + } + + // Note that coordinate wrapping can create a small amount of error. For + // example in the edge chain "0:-175, 0:179, 0:-177", the first edge is + // transformed into "0:-175, 0:-181" while the second is transformed into + // "0:179, 0:183". The two coordinate pairs for the middle vertex + // ("0:-181" and "0:179") may not yield exactly the same S2Point. + return e.appendUnprojected(pa, a, pb2, b, vertices) +} + +// appendUnprojected interpolates a projected edge and appends the corresponding +// points on the sphere. +func (e *EdgeTessellator) appendUnprojected(pa r2.Point, a Point, pb r2.Point, b Point, vertices []Point) []Point { + pmid := e.projection.Interpolate(0.5, pa, pb) + mid := e.projection.Unproject(pmid) + testMid := Point{a.Add(b.Vector).Normalize()} + + if ChordAngleBetweenPoints(mid, testMid) < e.tolerance { + return append(vertices, b) + } + + vertices = e.appendUnprojected(pa, a, pmid, mid, vertices) + return e.appendUnprojected(pmid, mid, pb, b, vertices) +} + +// wrapDestination returns the coordinates of the edge destination wrapped if +// necessary to obtain the shortest edge. +func (e *EdgeTessellator) wrapDestination(pa, pb r2.Point) r2.Point { + x := pb.X + y := pb.Y + // The code below ensures that pb is unmodified unless wrapping is required. + if e.wrapDistance.X > 0 && math.Abs(x-pa.X) > 0.5*e.wrapDistance.X { + x = pa.X + math.Remainder(x-pa.X, e.wrapDistance.X) + } + if e.wrapDistance.Y > 0 && math.Abs(y-pa.Y) > 0.5*e.wrapDistance.Y { + y = pa.Y + math.Remainder(y-pa.Y, e.wrapDistance.Y) + } + return r2.Point{x, y} +} diff --git a/vendor/github.com/golang/geo/s2/encode.go b/vendor/github.com/golang/geo/s2/encode.go new file mode 100644 index 000000000..49ef364af --- /dev/null +++ b/vendor/github.com/golang/geo/s2/encode.go @@ -0,0 +1,237 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "encoding/binary" + "io" +) + +const ( + // encodingVersion is the current version of the encoding + // format that is compatible with C++ and other S2 libraries. + encodingVersion = int8(1) + + // encodingCompressedVersion is the current version of the + // compressed format. + encodingCompressedVersion = int8(4) +) + +// encoder handles the specifics of encoding for S2 types. +type encoder struct { + w io.Writer // the real writer passed to Encode + err error +} + +func (e *encoder) writeUvarint(x uint64) { + if e.err != nil { + return + } + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, e.err = e.w.Write(buf[:n]) +} + +func (e *encoder) writeBool(x bool) { + if e.err != nil { + return + } + var val int8 + if x { + val = 1 + } + e.err = binary.Write(e.w, binary.LittleEndian, val) +} + +func (e *encoder) writeInt8(x int8) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeInt16(x int16) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeInt32(x int32) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeInt64(x int64) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeUint8(x uint8) { + if e.err != nil { + return + } + _, e.err = e.w.Write([]byte{x}) +} + +func (e *encoder) writeUint32(x uint32) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeUint64(x uint64) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeFloat32(x float32) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +func (e *encoder) writeFloat64(x float64) { + if e.err != nil { + return + } + e.err = binary.Write(e.w, binary.LittleEndian, x) +} + +type byteReader interface { + io.Reader + io.ByteReader +} + +// byteReaderAdapter embellishes an io.Reader with a ReadByte method, +// so that it implements the io.ByteReader interface. 
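+// Editor's note: asByteReader (below) returns its argument unchanged when the
+// reader already implements io.ByteReader (e.g. a *bytes.Reader or
+// *bufio.Reader), and only wraps it in this adapter otherwise.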
+type byteReaderAdapter struct { + io.Reader +} + +func (b byteReaderAdapter) ReadByte() (byte, error) { + buf := []byte{0} + _, err := io.ReadFull(b, buf) + return buf[0], err +} + +func asByteReader(r io.Reader) byteReader { + if br, ok := r.(byteReader); ok { + return br + } + return byteReaderAdapter{r} +} + +type decoder struct { + r byteReader // the real reader passed to Decode + err error +} + +func (d *decoder) readBool() (x bool) { + if d.err != nil { + return + } + var val int8 + d.err = binary.Read(d.r, binary.LittleEndian, &val) + return val == 1 +} + +func (d *decoder) readInt8() (x int8) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readInt16() (x int16) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readInt32() (x int32) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readInt64() (x int64) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readUint8() (x uint8) { + if d.err != nil { + return + } + x, d.err = d.r.ReadByte() + return +} + +func (d *decoder) readUint32() (x uint32) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readUint64() (x uint64) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readFloat32() (x float32) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readFloat64() (x float64) { + if d.err != nil { + return + } + d.err = binary.Read(d.r, binary.LittleEndian, &x) + return +} + +func (d *decoder) readUvarint() (x uint64) { + if d.err != nil { + return + } + x, d.err = binary.ReadUvarint(d.r) + return +} diff --git a/vendor/github.com/golang/geo/s2/interleave.go b/vendor/github.com/golang/geo/s2/interleave.go new file mode 100644 index 000000000..6ac6ef58d --- /dev/null +++ b/vendor/github.com/golang/geo/s2/interleave.go @@ -0,0 +1,143 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +/* +The lookup table below can convert a sequence of interleaved 8 bits into +non-interleaved 4 bits. The table can convert both odd and even bits at the +same time, and lut[x & 0x55] converts the even bits (bits 0, 2, 4 and 6), +while lut[x & 0xaa] converts the odd bits (bits 1, 3, 5 and 7). 
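+
+For example (editor's worked example): the interleaved byte 0x07 has even bits
+0x07 & 0x55 = 0x05 with lut[0x05] = 0x3, and odd bits 0x07 & 0xaa = 0x02 with
+lut[0x02] = 0x1, recovering x = 3 and y = 1.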
+ +The lookup table below was generated using the following python code: + + def deinterleave(bits): + if bits == 0: return 0 + if bits < 4: return 1 + return deinterleave(bits / 4) * 2 + deinterleave(bits & 3) + + for i in range(256): print "0x%x," % deinterleave(i), +*/ +var deinterleaveLookup = [256]uint32{ + 0x0, 0x1, 0x1, 0x1, 0x2, 0x3, 0x3, 0x3, + 0x2, 0x3, 0x3, 0x3, 0x2, 0x3, 0x3, 0x3, + 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7, + 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7, + 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7, + 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7, + 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7, + 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7, + + 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb, + 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + + 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb, + 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + + 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb, + 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, + 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, + 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, +} + +// deinterleaveUint32 decodes the interleaved values. +func deinterleaveUint32(code uint64) (uint32, uint32) { + x := (deinterleaveLookup[code&0x55]) | + (deinterleaveLookup[(code>>8)&0x55] << 4) | + (deinterleaveLookup[(code>>16)&0x55] << 8) | + (deinterleaveLookup[(code>>24)&0x55] << 12) | + (deinterleaveLookup[(code>>32)&0x55] << 16) | + (deinterleaveLookup[(code>>40)&0x55] << 20) | + (deinterleaveLookup[(code>>48)&0x55] << 24) | + (deinterleaveLookup[(code>>56)&0x55] << 28) + y := (deinterleaveLookup[code&0xaa]) | + (deinterleaveLookup[(code>>8)&0xaa] << 4) | + (deinterleaveLookup[(code>>16)&0xaa] << 8) | + (deinterleaveLookup[(code>>24)&0xaa] << 12) | + (deinterleaveLookup[(code>>32)&0xaa] << 16) | + (deinterleaveLookup[(code>>40)&0xaa] << 20) | + (deinterleaveLookup[(code>>48)&0xaa] << 24) | + (deinterleaveLookup[(code>>56)&0xaa] << 28) + return x, y +} + +var interleaveLookup = [256]uint64{ + 0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015, + 0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055, + 0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115, + 0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155, + 0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415, + 0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455, + 0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515, + 0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555, + + 0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015, + 0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055, + 0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115, + 0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155, + 0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415, + 0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455, + 0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 
0x1514, 0x1515, + 0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555, + + 0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015, + 0x4040, 0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055, + 0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115, + 0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155, + 0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415, + 0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455, + 0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515, + 0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555, + + 0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015, + 0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055, + 0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115, + 0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155, + 0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415, + 0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455, + 0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515, + 0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555, +} + +// interleaveUint32 interleaves the given arguments into the return value. +// +// The 0-bit in val0 will be the 0-bit in the return value. +// The 0-bit in val1 will be the 1-bit in the return value. +// The 1-bit of val0 will be the 2-bit in the return value, and so on. +func interleaveUint32(x, y uint32) uint64 { + return (interleaveLookup[x&0xff]) | + (interleaveLookup[(x>>8)&0xff] << 16) | + (interleaveLookup[(x>>16)&0xff] << 32) | + (interleaveLookup[x>>24] << 48) | + (interleaveLookup[y&0xff] << 1) | + (interleaveLookup[(y>>8)&0xff] << 17) | + (interleaveLookup[(y>>16)&0xff] << 33) | + (interleaveLookup[y>>24] << 49) +} diff --git a/vendor/github.com/golang/geo/s2/latlng.go b/vendor/github.com/golang/geo/s2/latlng.go new file mode 100644 index 000000000..a750304ab --- /dev/null +++ b/vendor/github.com/golang/geo/s2/latlng.go @@ -0,0 +1,101 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "fmt" + "math" + + "github.com/golang/geo/r3" + "github.com/golang/geo/s1" +) + +const ( + northPoleLat = s1.Angle(math.Pi/2) * s1.Radian + southPoleLat = -northPoleLat +) + +// LatLng represents a point on the unit sphere as a pair of angles. +type LatLng struct { + Lat, Lng s1.Angle +} + +// LatLngFromDegrees returns a LatLng for the coordinates given in degrees. +func LatLngFromDegrees(lat, lng float64) LatLng { + return LatLng{s1.Angle(lat) * s1.Degree, s1.Angle(lng) * s1.Degree} +} + +// IsValid returns true iff the LatLng is normalized, with Lat ∈ [-π/2,π/2] and Lng ∈ [-π,π]. +func (ll LatLng) IsValid() bool { + return math.Abs(ll.Lat.Radians()) <= math.Pi/2 && math.Abs(ll.Lng.Radians()) <= math.Pi +} + +// Normalized returns the normalized version of the LatLng, +// with Lat clamped to [-π/2,π/2] and Lng wrapped in [-π,π]. 
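+//
+// For example (editor's note), the LatLng constructed from degrees
+// (100, 190) normalizes to approximately (90, -170) in degrees.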
+func (ll LatLng) Normalized() LatLng {
+	lat := ll.Lat
+	if lat > northPoleLat {
+		lat = northPoleLat
+	} else if lat < southPoleLat {
+		lat = southPoleLat
+	}
+	lng := s1.Angle(math.Remainder(ll.Lng.Radians(), 2*math.Pi)) * s1.Radian
+	return LatLng{lat, lng}
+}
+
+func (ll LatLng) String() string { return fmt.Sprintf("[%v, %v]", ll.Lat, ll.Lng) }
+
+// Distance returns the angle between two LatLngs.
+func (ll LatLng) Distance(ll2 LatLng) s1.Angle {
+	// Haversine formula, as used in C++ S2LatLng::GetDistance.
+	lat1, lat2 := ll.Lat.Radians(), ll2.Lat.Radians()
+	lng1, lng2 := ll.Lng.Radians(), ll2.Lng.Radians()
+	dlat := math.Sin(0.5 * (lat2 - lat1))
+	dlng := math.Sin(0.5 * (lng2 - lng1))
+	x := dlat*dlat + dlng*dlng*math.Cos(lat1)*math.Cos(lat2)
+	return s1.Angle(2*math.Atan2(math.Sqrt(x), math.Sqrt(math.Max(0, 1-x)))) * s1.Radian
+}
+
+// NOTE(mikeperrow): The C++ implementation publicly exposes latitude/longitude
+// functions. Let's see if that's really necessary before exposing the same functionality.
+
+func latitude(p Point) s1.Angle {
+	return s1.Angle(math.Atan2(p.Z, math.Sqrt(p.X*p.X+p.Y*p.Y))) * s1.Radian
+}
+
+func longitude(p Point) s1.Angle {
+	return s1.Angle(math.Atan2(p.Y, p.X)) * s1.Radian
+}
+
+// PointFromLatLng returns a Point for the given LatLng.
+// The maximum error in the result is 1.5 * dblEpsilon. (This does not
+// include the error of converting degrees, E5, E6, or E7 into radians.)
+func PointFromLatLng(ll LatLng) Point {
+	phi := ll.Lat.Radians()
+	theta := ll.Lng.Radians()
+	cosphi := math.Cos(phi)
+	return Point{r3.Vector{math.Cos(theta) * cosphi, math.Sin(theta) * cosphi, math.Sin(phi)}}
+}
+
+// LatLngFromPoint returns a LatLng for a given Point.
+func LatLngFromPoint(p Point) LatLng {
+	return LatLng{latitude(p), longitude(p)}
+}
+
+// ApproxEqual reports whether the latitude and longitude of the two LatLngs
+// are the same up to a small tolerance.
+func (ll LatLng) ApproxEqual(other LatLng) bool {
+	return ll.Lat.ApproxEqual(other.Lat) && ll.Lng.ApproxEqual(other.Lng)
+}
diff --git a/vendor/github.com/golang/geo/s2/lexicon.go b/vendor/github.com/golang/geo/s2/lexicon.go
new file mode 100644
index 000000000..41cbffdc2
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/lexicon.go
@@ -0,0 +1,175 @@
+// Copyright 2020 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"encoding/binary"
+	"hash/adler32"
+	"math"
+	"sort"
+)
+
+// TODO(roberts): If any of these are worth making public, change the
+// method signatures and type names.
+
+// emptySetID represents the last ID that will ever be generated.
+// (Non-negative IDs are reserved for singleton sets.)
+var emptySetID = int32(math.MinInt32)
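+
+// Editor's note: an illustrative round trip (not part of the upstream API):
+//
+//	lexicon := newIDSetLexicon()
+//	id := lexicon.add(3, 1, 3)  // canonicalized to the set {1, 3}
+//	ids := lexicon.idSet(id)    // []int32{1, 3}
+//	single := lexicon.idSet(7)  // []int32{7}; singletons are their own IDs
+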
+// idSetLexicon compactly represents a set of non-negative
+// integers such as array indices ("ID sets"). It is especially suitable when
+// either (1) there are many duplicate sets, or (2) there are many singleton
+// or empty sets. See also sequenceLexicon.
+//
+// Each distinct ID set is mapped to a 32-bit integer. Empty and singleton
+// sets take up no additional space; the set itself is represented
+// by the unique ID assigned to the set. Duplicate sets are automatically
+// eliminated. Note also that ID sets are referred to using 32-bit integers
+// rather than pointers.
+type idSetLexicon struct {
+	idSets *sequenceLexicon
+}
+
+func newIDSetLexicon() *idSetLexicon {
+	return &idSetLexicon{
+		idSets: newSequenceLexicon(),
+	}
+}
+
+// add adds the given set of integers to the lexicon if it is not already
+// present, and returns the unique ID for this set. The values are automatically
+// sorted and duplicates are removed.
+//
+// The primary differences between this and sequenceLexicon are:
+// 1. Empty and singleton sets are represented implicitly; they use no space.
+// 2. Sets are represented rather than sequences; the ordering of values is
+// not important and duplicates are removed.
+// 3. The values must be 32-bit non-negative integers only.
+func (l *idSetLexicon) add(ids ...int32) int32 {
+	// Empty sets have a special ID chosen not to conflict with other IDs.
+	if len(ids) == 0 {
+		return emptySetID
+	}
+
+	// Singleton sets are represented by their element.
+	if len(ids) == 1 {
+		return ids[0]
+	}
+
+	// Canonicalize the set by sorting and removing duplicates.
+	//
+	// Creates a new slice in order to not alter the supplied values.
+	set := uniqueInt32s(ids)
+
+	// Non-singleton sets are represented by the bitwise complement of the ID
+	// returned by the sequenceLexicon.
+	return ^l.idSets.add(set)
+}
+
+// idSet returns the set of integers corresponding to an ID returned by add.
+func (l *idSetLexicon) idSet(setID int32) []int32 {
+	if setID >= 0 {
+		return []int32{setID}
+	}
+	if setID == emptySetID {
+		return []int32{}
+	}
+
+	return l.idSets.sequence(^setID)
+}
+
+func (l *idSetLexicon) clear() {
+	l.idSets.clear()
+}
+
+// sequenceLexicon compactly represents a sequence of values (e.g., tuples).
+// It automatically eliminates duplicate slices, and maps the remaining
+// sequences to sequentially increasing integer IDs. See also idSetLexicon.
+//
+// Each distinct sequence is mapped to a 32-bit integer.
+type sequenceLexicon struct {
+	values []int32
+	begins []uint32
+
+	// idSet is a mapping of a sequence hash to sequence index in the lexicon.
+	idSet map[uint32]int32
+}
+
+func newSequenceLexicon() *sequenceLexicon {
+	return &sequenceLexicon{
+		begins: []uint32{0},
+		idSet:  make(map[uint32]int32),
+	}
+}
+
+// clear removes all data from the lexicon.
+func (l *sequenceLexicon) clear() {
+	l.values = nil
+	l.begins = []uint32{0}
+	l.idSet = make(map[uint32]int32)
+}
+
+// add adds the given sequence of values to the lexicon if it is not already
+// present, and returns its ID. IDs are assigned sequentially starting from zero.
+func (l *sequenceLexicon) add(ids []int32) int32 {
+	if id, ok := l.idSet[hashSet(ids)]; ok {
+		return id
+	}
+	for _, v := range ids {
+		l.values = append(l.values, v)
+	}
+	l.begins = append(l.begins, uint32(len(l.values)))
+
+	id := int32(len(l.begins)) - 2
+	l.idSet[hashSet(ids)] = id
+
+	return id
+}
+
+// sequence returns the original sequence of values for the given ID.
+func (l *sequenceLexicon) sequence(id int32) []int32 {
+	return l.values[l.begins[id]:l.begins[id+1]]
+}
+
+// size reports the number of value sequences in the lexicon.
+func (l *sequenceLexicon) size() int {
+	// Subtract one because the list of begins starts out with the first element set to 0.
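+	// For example (editor's note), after adding one sequence of length two,
+	// begins is [0, 2] and size() reports 1.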
+ return len(l.begins) - 1 +} + +// hash returns a hash of this sequence of int32s. +func hashSet(s []int32) uint32 { + // TODO(roberts): We just need a way to nicely hash all the values down to + // a 32-bit value. To ensure no unnecessary dependencies we use the core + // library types available to do this. Is there a better option? + a := adler32.New() + binary.Write(a, binary.LittleEndian, s) + return a.Sum32() +} + +// uniqueInt32s returns the sorted and uniqued set of int32s from the input. +func uniqueInt32s(in []int32) []int32 { + var vals []int32 + m := make(map[int32]bool) + for _, i := range in { + if m[i] { + continue + } + m[i] = true + vals = append(vals, i) + } + sort.Slice(vals, func(i, j int) bool { return vals[i] < vals[j] }) + return vals +} diff --git a/vendor/github.com/golang/geo/s2/loop.go b/vendor/github.com/golang/geo/s2/loop.go new file mode 100644 index 000000000..882d8583c --- /dev/null +++ b/vendor/github.com/golang/geo/s2/loop.go @@ -0,0 +1,1816 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "fmt" + "io" + "math" + + "github.com/golang/geo/r1" + "github.com/golang/geo/r3" + "github.com/golang/geo/s1" +) + +// Loop represents a simple spherical polygon. It consists of a sequence +// of vertices where the first vertex is implicitly connected to the +// last. All loops are defined to have a CCW orientation, i.e. the interior of +// the loop is on the left side of the edges. This implies that a clockwise +// loop enclosing a small area is interpreted to be a CCW loop enclosing a +// very large area. +// +// Loops are not allowed to have any duplicate vertices (whether adjacent or +// not). Non-adjacent edges are not allowed to intersect, and furthermore edges +// of length 180 degrees are not allowed (i.e., adjacent vertices cannot be +// antipodal). Loops must have at least 3 vertices (except for the "empty" and +// "full" loops discussed below). +// +// There are two special loops: the "empty" loop contains no points and the +// "full" loop contains all points. These loops do not have any edges, but to +// preserve the invariant that every loop can be represented as a vertex +// chain, they are defined as having exactly one vertex each (see EmptyLoop +// and FullLoop). +type Loop struct { + vertices []Point + + // originInside keeps a precomputed value whether this loop contains the origin + // versus computing from the set of vertices every time. + originInside bool + + // depth is the nesting depth of this Loop if it is contained by a Polygon + // or other shape and is used to determine if this loop represents a hole + // or a filled in portion. + depth int + + // bound is a conservative bound on all points contained by this loop. + // If l.ContainsPoint(P), then l.bound.ContainsPoint(P). + bound Rect + + // Since bound is not exact, it is possible that a loop A contains + // another loop B whose bounds are slightly larger. 
subregionBound + // has been expanded sufficiently to account for this error, i.e. + // if A.Contains(B), then A.subregionBound.Contains(B.bound). + subregionBound Rect + + // index is the spatial index for this Loop. + index *ShapeIndex +} + +// LoopFromPoints constructs a loop from the given points. +func LoopFromPoints(pts []Point) *Loop { + l := &Loop{ + vertices: pts, + } + + l.initOriginAndBound() + return l +} + +// LoopFromCell constructs a loop corresponding to the given cell. +// +// Note that the loop and cell *do not* contain exactly the same set of +// points, because Loop and Cell have slightly different definitions of +// point containment. For example, a Cell vertex is contained by all +// four neighboring Cells, but it is contained by exactly one of four +// Loops constructed from those cells. As another example, the cell +// coverings of cell and LoopFromCell(cell) will be different, because the +// loop contains points on its boundary that actually belong to other cells +// (i.e., the covering will include a layer of neighboring cells). +func LoopFromCell(c Cell) *Loop { + l := &Loop{ + vertices: []Point{ + c.Vertex(0), + c.Vertex(1), + c.Vertex(2), + c.Vertex(3), + }, + } + + l.initOriginAndBound() + return l +} + +// These two points are used for the special Empty and Full loops. +var ( + emptyLoopPoint = Point{r3.Vector{X: 0, Y: 0, Z: 1}} + fullLoopPoint = Point{r3.Vector{X: 0, Y: 0, Z: -1}} +) + +// EmptyLoop returns a special "empty" loop. +func EmptyLoop() *Loop { + return LoopFromPoints([]Point{emptyLoopPoint}) +} + +// FullLoop returns a special "full" loop. +func FullLoop() *Loop { + return LoopFromPoints([]Point{fullLoopPoint}) +} + +// initOriginAndBound sets the origin containment for the given point and then calls +// the initialization for the bounds objects and the internal index. +func (l *Loop) initOriginAndBound() { + if len(l.vertices) < 3 { + // Check for the special "empty" and "full" loops (which have one vertex). + if !l.isEmptyOrFull() { + l.originInside = false + return + } + + // This is the special empty or full loop, so the origin depends on if + // the vertex is in the southern hemisphere or not. + l.originInside = l.vertices[0].Z < 0 + } else { + // Point containment testing is done by counting edge crossings starting + // at a fixed point on the sphere (OriginPoint). We need to know whether + // the reference point (OriginPoint) is inside or outside the loop before + // we can construct the ShapeIndex. We do this by first guessing that + // it is outside, and then seeing whether we get the correct containment + // result for vertex 1. If the result is incorrect, the origin must be + // inside the loop. + // + // A loop with consecutive vertices A,B,C contains vertex B if and only if + // the fixed vector R = B.Ortho is contained by the wedge ABC. The + // wedge is closed at A and open at C, i.e. the point B is inside the loop + // if A = R but not if C = R. This convention is required for compatibility + // with VertexCrossing. (Note that we can't use OriginPoint + // as the fixed vector because of the possibility that B == OriginPoint.) + l.originInside = false + v1Inside := OrderedCCW(Point{l.vertices[1].Ortho()}, l.vertices[0], l.vertices[2], l.vertices[1]) + if v1Inside != l.ContainsPoint(l.vertices[1]) { + l.originInside = true + } + } + + // We *must* call initBound before initializing the index, because + // initBound calls ContainsPoint which does a bounds check before using + // the index. 
+ l.initBound() + + // Create a new index and add us to it. + l.index = NewShapeIndex() + l.index.Add(l) +} + +// initBound sets up the approximate bounding Rects for this loop. +func (l *Loop) initBound() { + // Check for the special "empty" and "full" loops. + if l.isEmptyOrFull() { + if l.IsEmpty() { + l.bound = EmptyRect() + } else { + l.bound = FullRect() + } + l.subregionBound = l.bound + return + } + + // The bounding rectangle of a loop is not necessarily the same as the + // bounding rectangle of its vertices. First, the maximal latitude may be + // attained along the interior of an edge. Second, the loop may wrap + // entirely around the sphere (e.g. a loop that defines two revolutions of a + // candy-cane stripe). Third, the loop may include one or both poles. + // Note that a small clockwise loop near the equator contains both poles. + bounder := NewRectBounder() + for i := 0; i <= len(l.vertices); i++ { // add vertex 0 twice + bounder.AddPoint(l.Vertex(i)) + } + b := bounder.RectBound() + + if l.ContainsPoint(Point{r3.Vector{0, 0, 1}}) { + b = Rect{r1.Interval{b.Lat.Lo, math.Pi / 2}, s1.FullInterval()} + } + // If a loop contains the south pole, then either it wraps entirely + // around the sphere (full longitude range), or it also contains the + // north pole in which case b.Lng.IsFull() due to the test above. + // Either way, we only need to do the south pole containment test if + // b.Lng.IsFull(). + if b.Lng.IsFull() && l.ContainsPoint(Point{r3.Vector{0, 0, -1}}) { + b.Lat.Lo = -math.Pi / 2 + } + l.bound = b + l.subregionBound = ExpandForSubregions(l.bound) +} + +// Validate checks whether this is a valid loop. +func (l *Loop) Validate() error { + if err := l.findValidationErrorNoIndex(); err != nil { + return err + } + + // Check for intersections between non-adjacent edges (including at vertices) + // TODO(roberts): Once shapeutil gets findAnyCrossing uncomment this. + // return findAnyCrossing(l.index) + + return nil +} + +// findValidationErrorNoIndex reports whether this is not a valid loop, but +// skips checks that would require a ShapeIndex to be built for the loop. This +// is primarily used by Polygon to do validation so it doesn't trigger the +// creation of unneeded ShapeIndices. +func (l *Loop) findValidationErrorNoIndex() error { + // All vertices must be unit length. + for i, v := range l.vertices { + if !v.IsUnit() { + return fmt.Errorf("vertex %d is not unit length", i) + } + } + + // Loops must have at least 3 vertices (except for empty and full). + if len(l.vertices) < 3 { + if l.isEmptyOrFull() { + return nil // Skip remaining tests. + } + return fmt.Errorf("non-empty, non-full loops must have at least 3 vertices") + } + + // Loops are not allowed to have any duplicate vertices or edge crossings. + // We split this check into two parts. First we check that no edge is + // degenerate (identical endpoints). Then we check that there are no + // intersections between non-adjacent edges (including at vertices). The + // second check needs the ShapeIndex, so it does not fall within the scope + // of this method. + for i, v := range l.vertices { + if v == l.Vertex(i+1) { + return fmt.Errorf("edge %d is degenerate (duplicate vertex)", i) + } + + // Antipodal vertices are not allowed. 
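+		// (Such an edge would have length 180 degrees, and the geodesic
+		// connecting two antipodal points is not uniquely defined.)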
+ if other := (Point{l.Vertex(i + 1).Mul(-1)}); v == other { + return fmt.Errorf("vertices %d and %d are antipodal", i, + (i+1)%len(l.vertices)) + } + } + + return nil +} + +// Contains reports whether the region contained by this loop is a superset of the +// region contained by the given other loop. +func (l *Loop) Contains(o *Loop) bool { + // For a loop A to contain the loop B, all of the following must + // be true: + // + // (1) There are no edge crossings between A and B except at vertices. + // + // (2) At every vertex that is shared between A and B, the local edge + // ordering implies that A contains B. + // + // (3) If there are no shared vertices, then A must contain a vertex of B + // and B must not contain a vertex of A. (An arbitrary vertex may be + // chosen in each case.) + // + // The second part of (3) is necessary to detect the case of two loops whose + // union is the entire sphere, i.e. two loops that contains each other's + // boundaries but not each other's interiors. + if !l.subregionBound.Contains(o.bound) { + return false + } + + // Special cases to handle either loop being empty or full. + if l.isEmptyOrFull() || o.isEmptyOrFull() { + return l.IsFull() || o.IsEmpty() + } + + // Check whether there are any edge crossings, and also check the loop + // relationship at any shared vertices. + relation := &containsRelation{} + if hasCrossingRelation(l, o, relation) { + return false + } + + // There are no crossings, and if there are any shared vertices then A + // contains B locally at each shared vertex. + if relation.foundSharedVertex { + return true + } + + // Since there are no edge intersections or shared vertices, we just need to + // test condition (3) above. We can skip this test if we discovered that A + // contains at least one point of B while checking for edge crossings. + if !l.ContainsPoint(o.Vertex(0)) { + return false + } + + // We still need to check whether (A union B) is the entire sphere. + // Normally this check is very cheap due to the bounding box precondition. + if (o.subregionBound.Contains(l.bound) || o.bound.Union(l.bound).IsFull()) && + o.ContainsPoint(l.Vertex(0)) { + return false + } + return true +} + +// Intersects reports whether the region contained by this loop intersects the region +// contained by the other loop. +func (l *Loop) Intersects(o *Loop) bool { + // Given two loops, A and B, A.Intersects(B) if and only if !A.Complement().Contains(B). + // + // This code is similar to Contains, but is optimized for the case + // where both loops enclose less than half of the sphere. + if !l.bound.Intersects(o.bound) { + return false + } + + // Check whether there are any edge crossings, and also check the loop + // relationship at any shared vertices. + relation := &intersectsRelation{} + if hasCrossingRelation(l, o, relation) { + return true + } + if relation.foundSharedVertex { + return false + } + + // Since there are no edge intersections or shared vertices, the loops + // intersect only if A contains B, B contains A, or the two loops contain + // each other's boundaries. These checks are usually cheap because of the + // bounding box preconditions. Note that neither loop is empty (because of + // the bounding box check above), so it is safe to access vertex(0). + + // Check whether A contains B, or A and B contain each other's boundaries. + // (Note that A contains all the vertices of B in either case.) 
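+	// (The Union(...).IsFull() test below handles loops that wrap around the
+	// sphere, where neither subregion bound can contain the other's bound.)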
+ if l.subregionBound.Contains(o.bound) || l.bound.Union(o.bound).IsFull() { + if l.ContainsPoint(o.Vertex(0)) { + return true + } + } + // Check whether B contains A. + if o.subregionBound.Contains(l.bound) { + if o.ContainsPoint(l.Vertex(0)) { + return true + } + } + return false +} + +// Equal reports whether two loops have the same vertices in the same linear order +// (i.e., cyclic rotations are not allowed). +func (l *Loop) Equal(other *Loop) bool { + if len(l.vertices) != len(other.vertices) { + return false + } + + for i, v := range l.vertices { + if v != other.Vertex(i) { + return false + } + } + return true +} + +// BoundaryEqual reports whether the two loops have the same boundary. This is +// true if and only if the loops have the same vertices in the same cyclic order +// (i.e., the vertices may be cyclically rotated). The empty and full loops are +// considered to have different boundaries. +func (l *Loop) BoundaryEqual(o *Loop) bool { + if len(l.vertices) != len(o.vertices) { + return false + } + + // Special case to handle empty or full loops. Since they have the same + // number of vertices, if one loop is empty/full then so is the other. + if l.isEmptyOrFull() { + return l.IsEmpty() == o.IsEmpty() + } + + // Loop through the vertices to find the first of ours that matches the + // starting vertex of the other loop. Use that offset to then 'align' the + // vertices for comparison. + for offset, vertex := range l.vertices { + if vertex == o.Vertex(0) { + // There is at most one starting offset since loop vertices are unique. + for i := 0; i < len(l.vertices); i++ { + if l.Vertex(i+offset) != o.Vertex(i) { + return false + } + } + return true + } + } + return false +} + +// compareBoundary returns +1 if this loop contains the boundary of the other loop, +// -1 if it excludes the boundary of the other, and 0 if the boundaries of the two +// loops cross. Shared edges are handled as follows: +// +// If XY is a shared edge, define Reversed(XY) to be true if XY +// appears in opposite directions in both loops. +// Then this loop contains XY if and only if Reversed(XY) == the other loop is a hole. +// (Intuitively, this checks whether this loop contains a vanishingly small region +// extending from the boundary of the other toward the interior of the polygon to +// which the other belongs.) +// +// This function is used for testing containment and intersection of +// multi-loop polygons. Note that this method is not symmetric, since the +// result depends on the direction of this loop but not on the direction of +// the other loop (in the absence of shared edges). +// +// This requires that neither loop is empty, and if other loop IsFull, then it must not +// be a hole. +func (l *Loop) compareBoundary(o *Loop) int { + // The bounds must intersect for containment or crossing. + if !l.bound.Intersects(o.bound) { + return -1 + } + + // Full loops are handled as though the loop surrounded the entire sphere. + if l.IsFull() { + return 1 + } + if o.IsFull() { + return -1 + } + + // Check whether there are any edge crossings, and also check the loop + // relationship at any shared vertices. + relation := newCompareBoundaryRelation(o.IsHole()) + if hasCrossingRelation(l, o, relation) { + return 0 + } + if relation.foundSharedVertex { + if relation.containsEdge { + return 1 + } + return -1 + } + + // There are no edge intersections or shared vertices, so we can check + // whether A contains an arbitrary vertex of B. 
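+	// (With no crossings or shared vertices, B's boundary lies entirely
+	// inside or entirely outside of A, so testing one vertex suffices.)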
+ if l.ContainsPoint(o.Vertex(0)) { + return 1 + } + return -1 +} + +// ContainsOrigin reports true if this loop contains s2.OriginPoint(). +func (l *Loop) ContainsOrigin() bool { + return l.originInside +} + +// ReferencePoint returns the reference point for this loop. +func (l *Loop) ReferencePoint() ReferencePoint { + return OriginReferencePoint(l.originInside) +} + +// NumEdges returns the number of edges in this shape. +func (l *Loop) NumEdges() int { + if l.isEmptyOrFull() { + return 0 + } + return len(l.vertices) +} + +// Edge returns the endpoints for the given edge index. +func (l *Loop) Edge(i int) Edge { + return Edge{l.Vertex(i), l.Vertex(i + 1)} +} + +// NumChains reports the number of contiguous edge chains in the Loop. +func (l *Loop) NumChains() int { + if l.IsEmpty() { + return 0 + } + return 1 +} + +// Chain returns the i-th edge chain in the Shape. +func (l *Loop) Chain(chainID int) Chain { + return Chain{0, l.NumEdges()} +} + +// ChainEdge returns the j-th edge of the i-th edge chain. +func (l *Loop) ChainEdge(chainID, offset int) Edge { + return Edge{l.Vertex(offset), l.Vertex(offset + 1)} +} + +// ChainPosition returns a ChainPosition pair (i, j) such that edgeID is the +// j-th edge of the Loop. +func (l *Loop) ChainPosition(edgeID int) ChainPosition { + return ChainPosition{0, edgeID} +} + +// Dimension returns the dimension of the geometry represented by this Loop. +func (l *Loop) Dimension() int { return 2 } + +func (l *Loop) typeTag() typeTag { return typeTagNone } + +func (l *Loop) privateInterface() {} + +// IsEmpty reports true if this is the special empty loop that contains no points. +func (l *Loop) IsEmpty() bool { + return l.isEmptyOrFull() && !l.ContainsOrigin() +} + +// IsFull reports true if this is the special full loop that contains all points. +func (l *Loop) IsFull() bool { + return l.isEmptyOrFull() && l.ContainsOrigin() +} + +// isEmptyOrFull reports true if this loop is either the "empty" or "full" special loops. +func (l *Loop) isEmptyOrFull() bool { + return len(l.vertices) == 1 +} + +// Vertices returns the vertices in the loop. +func (l *Loop) Vertices() []Point { + return l.vertices +} + +// RectBound returns a tight bounding rectangle. If the loop contains the point, +// the bound also contains it. +func (l *Loop) RectBound() Rect { + return l.bound +} + +// CapBound returns a bounding cap that may have more padding than the corresponding +// RectBound. The bound is conservative such that if the loop contains a point P, +// the bound also contains it. +func (l *Loop) CapBound() Cap { + return l.bound.CapBound() +} + +// Vertex returns the vertex for the given index. For convenience, the vertex indices +// wrap automatically for methods that do index math such as Edge. +// i.e., Vertex(NumEdges() + n) is the same as Vertex(n). +func (l *Loop) Vertex(i int) Point { + return l.vertices[i%len(l.vertices)] +} + +// OrientedVertex returns the vertex in reverse order if the loop represents a polygon +// hole. For example, arguments 0, 1, 2 are mapped to vertices n-1, n-2, n-3, where +// n == len(vertices). This ensures that the interior of the polygon is always to +// the left of the vertex chain. +// +// This requires: 0 <= i < 2 * len(vertices) +func (l *Loop) OrientedVertex(i int) Point { + j := i - len(l.vertices) + if j < 0 { + j = i + } + if l.IsHole() { + j = len(l.vertices) - 1 - j + } + return l.Vertex(j) +} + +// NumVertices returns the number of vertices in this loop. 
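+// (The special "empty" and "full" loops report one vertex even though they
+// have no edges.)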
+func (l *Loop) NumVertices() int { + return len(l.vertices) +} + +// bruteForceContainsPoint reports if the given point is contained by this loop. +// This method does not use the ShapeIndex, so it is only preferable below a certain +// size of loop. +func (l *Loop) bruteForceContainsPoint(p Point) bool { + origin := OriginPoint() + inside := l.originInside + crosser := NewChainEdgeCrosser(origin, p, l.Vertex(0)) + for i := 1; i <= len(l.vertices); i++ { // add vertex 0 twice + inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(i)) + } + return inside +} + +// ContainsPoint returns true if the loop contains the point. +func (l *Loop) ContainsPoint(p Point) bool { + // Empty and full loops don't need a special case, but invalid loops with + // zero vertices do, so we might as well handle them all at once. + if len(l.vertices) < 3 { + return l.originInside + } + + // For small loops, and during initial construction, it is faster to just + // check all the crossing. + const maxBruteForceVertices = 32 + if len(l.vertices) < maxBruteForceVertices || l.index == nil { + return l.bruteForceContainsPoint(p) + } + + // Otherwise, look up the point in the index. + it := l.index.Iterator() + if !it.LocatePoint(p) { + return false + } + return l.iteratorContainsPoint(it, p) +} + +// ContainsCell reports whether the given Cell is contained by this Loop. +func (l *Loop) ContainsCell(target Cell) bool { + it := l.index.Iterator() + relation := it.LocateCellID(target.ID()) + + // If "target" is disjoint from all index cells, it is not contained. + // Similarly, if "target" is subdivided into one or more index cells then it + // is not contained, since index cells are subdivided only if they (nearly) + // intersect a sufficient number of edges. (But note that if "target" itself + // is an index cell then it may be contained, since it could be a cell with + // no edges in the loop interior.) + if relation != Indexed { + return false + } + + // Otherwise check if any edges intersect "target". + if l.boundaryApproxIntersects(it, target) { + return false + } + + // Otherwise check if the loop contains the center of "target". + return l.iteratorContainsPoint(it, target.Center()) +} + +// IntersectsCell reports whether this Loop intersects the given cell. +func (l *Loop) IntersectsCell(target Cell) bool { + it := l.index.Iterator() + relation := it.LocateCellID(target.ID()) + + // If target does not overlap any index cell, there is no intersection. + if relation == Disjoint { + return false + } + // If target is subdivided into one or more index cells, there is an + // intersection to within the ShapeIndex error bound (see Contains). + if relation == Subdivided { + return true + } + // If target is an index cell, there is an intersection because index cells + // are created only if they have at least one edge or they are entirely + // contained by the loop. + if it.CellID() == target.id { + return true + } + // Otherwise check if any edges intersect target. + if l.boundaryApproxIntersects(it, target) { + return true + } + // Otherwise check if the loop contains the center of target. + return l.iteratorContainsPoint(it, target.Center()) +} + +// CellUnionBound computes a covering of the Loop. +func (l *Loop) CellUnionBound() []CellID { + return l.CapBound().CellUnionBound() +} + +// boundaryApproxIntersects reports if the loop's boundary intersects target. +// It may also return true when the loop boundary does not intersect target but +// some edge comes within the worst-case error tolerance. 
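+// (The tolerance is faceClipErrorUVCoord + intersectsRectErrorUVDist, the
+// same margin by which the cell's UV bound is expanded below.)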
+// +// This requires that it.Locate(target) returned Indexed. +func (l *Loop) boundaryApproxIntersects(it *ShapeIndexIterator, target Cell) bool { + aClipped := it.IndexCell().findByShapeID(0) + + // If there are no edges, there is no intersection. + if len(aClipped.edges) == 0 { + return false + } + + // We can save some work if target is the index cell itself. + if it.CellID() == target.ID() { + return true + } + + // Otherwise check whether any of the edges intersect target. + maxError := (faceClipErrorUVCoord + intersectsRectErrorUVDist) + bound := target.BoundUV().ExpandedByMargin(maxError) + for _, ai := range aClipped.edges { + v0, v1, ok := ClipToPaddedFace(l.Vertex(ai), l.Vertex(ai+1), target.Face(), maxError) + if ok && edgeIntersectsRect(v0, v1, bound) { + return true + } + } + return false +} + +// iteratorContainsPoint reports if the iterator that is positioned at the ShapeIndexCell +// that may contain p, contains the point p. +func (l *Loop) iteratorContainsPoint(it *ShapeIndexIterator, p Point) bool { + // Test containment by drawing a line segment from the cell center to the + // given point and counting edge crossings. + aClipped := it.IndexCell().findByShapeID(0) + inside := aClipped.containsCenter + if len(aClipped.edges) > 0 { + center := it.Center() + crosser := NewEdgeCrosser(center, p) + aiPrev := -2 + for _, ai := range aClipped.edges { + if ai != aiPrev+1 { + crosser.RestartAt(l.Vertex(ai)) + } + aiPrev = ai + inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(ai+1)) + } + } + return inside +} + +// RegularLoop creates a loop with the given number of vertices, all +// located on a circle of the specified radius around the given center. +func RegularLoop(center Point, radius s1.Angle, numVertices int) *Loop { + return RegularLoopForFrame(getFrame(center), radius, numVertices) +} + +// RegularLoopForFrame creates a loop centered around the z-axis of the given +// coordinate frame, with the first vertex in the direction of the positive x-axis. +func RegularLoopForFrame(frame matrix3x3, radius s1.Angle, numVertices int) *Loop { + return LoopFromPoints(regularPointsForFrame(frame, radius, numVertices)) +} + +// CanonicalFirstVertex returns a first index and a direction (either +1 or -1) +// such that the vertex sequence (first, first+dir, ..., first+(n-1)*dir) does +// not change when the loop vertex order is rotated or inverted. This allows the +// loop vertices to be traversed in a canonical order. The return values are +// chosen such that (first, ..., first+n*dir) are in the range [0, 2*n-1] as +// expected by the Vertex method. +func (l *Loop) CanonicalFirstVertex() (firstIdx, direction int) { + firstIdx = 0 + n := len(l.vertices) + for i := 1; i < n; i++ { + if l.Vertex(i).Cmp(l.Vertex(firstIdx).Vector) == -1 { + firstIdx = i + } + } + + // 0 <= firstIdx <= n-1, so (firstIdx+n*dir) <= 2*n-1. + if l.Vertex(firstIdx+1).Cmp(l.Vertex(firstIdx+n-1).Vector) == -1 { + return firstIdx, 1 + } + + // n <= firstIdx <= 2*n-1, so (firstIdx+n*dir) >= 0. + firstIdx += n + return firstIdx, -1 +} + +// TurningAngle returns the sum of the turning angles at each vertex. The return +// value is positive if the loop is counter-clockwise, negative if the loop is +// clockwise, and zero if the loop is a great circle. Degenerate and +// nearly-degenerate loops are handled consistently with Sign. So for example, +// if a loop has zero area (i.e., it is a very small CCW loop) then the turning +// angle will always be negative. 
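+// (By the Gauss-Bonnet theorem, the area enclosed by the loop equals 2*pi
+// minus this value; for example, a great-circle loop has turning angle 0
+// and encloses a hemisphere of area 2*pi.)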
+// +// This quantity is also called the "geodesic curvature" of the loop. +func (l *Loop) TurningAngle() float64 { + // For empty and full loops, we return the limit value as the loop area + // approaches 0 or 4*Pi respectively. + if l.isEmptyOrFull() { + if l.ContainsOrigin() { + return -2 * math.Pi + } + return 2 * math.Pi + } + + // Don't crash even if the loop is not well-defined. + if len(l.vertices) < 3 { + return 0 + } + + // To ensure that we get the same result when the vertex order is rotated, + // and that the result is negated when the vertex order is reversed, we need + // to add up the individual turn angles in a consistent order. (In general, + // adding up a set of numbers in a different order can change the sum due to + // rounding errors.) + // + // Furthermore, if we just accumulate an ordinary sum then the worst-case + // error is quadratic in the number of vertices. (This can happen with + // spiral shapes, where the partial sum of the turning angles can be linear + // in the number of vertices.) To avoid this we use the Kahan summation + // algorithm (http://en.wikipedia.org/wiki/Kahan_summation_algorithm). + n := len(l.vertices) + i, dir := l.CanonicalFirstVertex() + sum := TurnAngle(l.Vertex((i+n-dir)%n), l.Vertex(i), l.Vertex((i+dir)%n)) + + compensation := s1.Angle(0) + for n-1 > 0 { + i += dir + angle := TurnAngle(l.Vertex(i-dir), l.Vertex(i), l.Vertex(i+dir)) + oldSum := sum + angle += compensation + sum += angle + compensation = (oldSum - sum) + angle + n-- + } + return float64(dir) * float64(sum+compensation) +} + +// turningAngleMaxError return the maximum error in TurningAngle. The value is not +// constant; it depends on the loop. +func (l *Loop) turningAngleMaxError() float64 { + // The maximum error can be bounded as follows: + // 2.24 * dblEpsilon for RobustCrossProd(b, a) + // 2.24 * dblEpsilon for RobustCrossProd(c, b) + // 3.25 * dblEpsilon for Angle() + // 2.00 * dblEpsilon for each addition in the Kahan summation + // ------------------ + // 9.73 * dblEpsilon + maxErrorPerVertex := 9.73 * dblEpsilon + return maxErrorPerVertex * float64(len(l.vertices)) +} + +// IsHole reports whether this loop represents a hole in its containing polygon. +func (l *Loop) IsHole() bool { return l.depth&1 != 0 } + +// Sign returns -1 if this Loop represents a hole in its containing polygon, and +1 otherwise. +func (l *Loop) Sign() int { + if l.IsHole() { + return -1 + } + return 1 +} + +// IsNormalized reports whether the loop area is at most 2*pi. Degenerate loops are +// handled consistently with Sign, i.e., if a loop can be +// expressed as the union of degenerate or nearly-degenerate CCW triangles, +// then it will always be considered normalized. +func (l *Loop) IsNormalized() bool { + // Optimization: if the longitude span is less than 180 degrees, then the + // loop covers less than half the sphere and is therefore normalized. + if l.bound.Lng.Length() < math.Pi { + return true + } + + // We allow some error so that hemispheres are always considered normalized. + // TODO(roberts): This is no longer required by the Polygon implementation, + // so alternatively we could create the invariant that a loop is normalized + // if and only if its complement is not normalized. + return l.TurningAngle() >= -l.turningAngleMaxError() +} + +// Normalize inverts the loop if necessary so that the area enclosed by the loop +// is at most 2*pi. 
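+// For example, a loop whose vertices were listed in clockwise order encloses
+// "everything except" the intended small region; Normalize inverts it so the
+// small region is enclosed instead. A minimal sketch (a, b, c hypothetical):
+//
+//	l := LoopFromPoints([]Point{a, b, c}) // vertices in clockwise order
+//	l.Normalize()                         // l now encloses the small region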
+func (l *Loop) Normalize() { + if !l.IsNormalized() { + l.Invert() + } +} + +// Invert reverses the order of the loop vertices, effectively complementing the +// region represented by the loop. For example, the loop ABCD (with edges +// AB, BC, CD, DA) becomes the loop DCBA (with edges DC, CB, BA, AD). +// Notice that the last edge is the same in both cases except that its +// direction has been reversed. +func (l *Loop) Invert() { + l.index.Reset() + if l.isEmptyOrFull() { + if l.IsFull() { + l.vertices[0] = emptyLoopPoint + } else { + l.vertices[0] = fullLoopPoint + } + } else { + // For non-special loops, reverse the slice of vertices. + for i := len(l.vertices)/2 - 1; i >= 0; i-- { + opp := len(l.vertices) - 1 - i + l.vertices[i], l.vertices[opp] = l.vertices[opp], l.vertices[i] + } + } + + // originInside must be set correctly before building the ShapeIndex. + l.originInside = !l.originInside + if l.bound.Lat.Lo > -math.Pi/2 && l.bound.Lat.Hi < math.Pi/2 { + // The complement of this loop contains both poles. + l.bound = FullRect() + l.subregionBound = l.bound + } else { + l.initBound() + } + l.index.Add(l) +} + +// findVertex returns the index of the vertex at the given Point in the range +// 1..numVertices, and a boolean indicating if a vertex was found. +func (l *Loop) findVertex(p Point) (index int, ok bool) { + const notFound = 0 + if len(l.vertices) < 10 { + // Exhaustive search for loops below a small threshold. + for i := 1; i <= len(l.vertices); i++ { + if l.Vertex(i) == p { + return i, true + } + } + return notFound, false + } + + it := l.index.Iterator() + if !it.LocatePoint(p) { + return notFound, false + } + + aClipped := it.IndexCell().findByShapeID(0) + for i := aClipped.numEdges() - 1; i >= 0; i-- { + ai := aClipped.edges[i] + if l.Vertex(ai) == p { + if ai == 0 { + return len(l.vertices), true + } + return ai, true + } + + if l.Vertex(ai+1) == p { + return ai + 1, true + } + } + return notFound, false +} + +// ContainsNested reports whether the given loops is contained within this loop. +// This function does not test for edge intersections. The two loops must meet +// all of the Polygon requirements; for example this implies that their +// boundaries may not cross or have any shared edges (although they may have +// shared vertices). +func (l *Loop) ContainsNested(other *Loop) bool { + if !l.subregionBound.Contains(other.bound) { + return false + } + + // Special cases to handle either loop being empty or full. Also bail out + // when B has no vertices to avoid heap overflow on the vertex(1) call + // below. (This method is called during polygon initialization before the + // client has an opportunity to call IsValid().) + if l.isEmptyOrFull() || other.NumVertices() < 2 { + return l.IsFull() || other.IsEmpty() + } + + // We are given that A and B do not share any edges, and that either one + // loop contains the other or they do not intersect. + m, ok := l.findVertex(other.Vertex(1)) + if !ok { + // Since other.vertex(1) is not shared, we can check whether A contains it. + return l.ContainsPoint(other.Vertex(1)) + } + + // Check whether the edge order around other.Vertex(1) is compatible with + // A containing B. + return WedgeContains(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1), other.Vertex(0), other.Vertex(2)) +} + +// surfaceIntegralFloat64 computes the oriented surface integral of some quantity f(x) +// over the loop interior, given a function f(A,B,C) that returns the +// corresponding integral over the spherical triangle ABC. 
Here "oriented +// surface integral" means: +// +// (1) f(A,B,C) must be the integral of f if ABC is counterclockwise, +// and the integral of -f if ABC is clockwise. +// +// (2) The result of this function is *either* the integral of f over the +// loop interior, or the integral of (-f) over the loop exterior. +// +// Note that there are at least two common situations where it easy to work +// around property (2) above: +// +// - If the integral of f over the entire sphere is zero, then it doesn't +// matter which case is returned because they are always equal. +// +// - If f is non-negative, then it is easy to detect when the integral over +// the loop exterior has been returned, and the integral over the loop +// interior can be obtained by adding the integral of f over the entire +// unit sphere (a constant) to the result. +// +// Any changes to this method may need corresponding changes to surfaceIntegralPoint as well. +func (l *Loop) surfaceIntegralFloat64(f func(a, b, c Point) float64) float64 { + // We sum f over a collection T of oriented triangles, possibly + // overlapping. Let the sign of a triangle be +1 if it is CCW and -1 + // otherwise, and let the sign of a point x be the sum of the signs of the + // triangles containing x. Then the collection of triangles T is chosen + // such that either: + // + // (1) Each point in the loop interior has sign +1, and sign 0 otherwise; or + // (2) Each point in the loop exterior has sign -1, and sign 0 otherwise. + // + // The triangles basically consist of a fan from vertex 0 to every loop + // edge that does not include vertex 0. These triangles will always satisfy + // either (1) or (2). However, what makes this a bit tricky is that + // spherical edges become numerically unstable as their length approaches + // 180 degrees. Of course there is not much we can do if the loop itself + // contains such edges, but we would like to make sure that all the triangle + // edges under our control (i.e., the non-loop edges) are stable. For + // example, consider a loop around the equator consisting of four equally + // spaced points. This is a well-defined loop, but we cannot just split it + // into two triangles by connecting vertex 0 to vertex 2. + // + // We handle this type of situation by moving the origin of the triangle fan + // whenever we are about to create an unstable edge. We choose a new + // location for the origin such that all relevant edges are stable. We also + // create extra triangles with the appropriate orientation so that the sum + // of the triangle signs is still correct at every point. + + // The maximum length of an edge for it to be considered numerically stable. + // The exact value is fairly arbitrary since it depends on the stability of + // the function f. The value below is quite conservative but could be + // reduced further if desired. + const maxLength = math.Pi - 1e-5 + + var sum float64 + origin := l.Vertex(0) + for i := 1; i+1 < len(l.vertices); i++ { + // Let V_i be vertex(i), let O be the current origin, and let length(A,B) + // be the length of edge (A,B). At the start of each loop iteration, the + // "leading edge" of the triangle fan is (O,V_i), and we want to extend + // the triangle fan so that the leading edge is (O,V_i+1). + // + // Invariants: + // 1. length(O,V_i) < maxLength for all (i > 1). + // 2. Either O == V_0, or O is approximately perpendicular to V_0. + // 3. "sum" is the oriented integral of f over the area defined by + // (O, V_0, V_1, ..., V_i). 
+ if l.Vertex(i+1).Angle(origin.Vector) > maxLength { + // We are about to create an unstable edge, so choose a new origin O' + // for the triangle fan. + oldOrigin := origin + if origin == l.Vertex(0) { + // The following point is well-separated from V_i and V_0 (and + // therefore V_i+1 as well). + origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()} + } else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength { + // All edges of the triangle (O, V_0, V_i) are stable, so we can + // revert to using V_0 as the origin. + origin = l.Vertex(0) + } else { + // (O, V_i+1) and (V_0, V_i) are antipodal pairs, and O and V_0 are + // perpendicular. Therefore V_0.CrossProd(O) is approximately + // perpendicular to all of {O, V_0, V_i, V_i+1}, and we can choose + // this point O' as the new origin. + origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)} + + // Advance the edge (V_0,O) to (V_0,O'). + sum += f(l.Vertex(0), oldOrigin, origin) + } + // Advance the edge (O,V_i) to (O',V_i). + sum += f(oldOrigin, l.Vertex(i), origin) + } + // Advance the edge (O,V_i) to (O,V_i+1). + sum += f(origin, l.Vertex(i), l.Vertex(i+1)) + } + // If the origin is not V_0, we need to sum one more triangle. + if origin != l.Vertex(0) { + // Advance the edge (O,V_n-1) to (O,V_0). + sum += f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0)) + } + return sum +} + +// surfaceIntegralPoint mirrors the surfaceIntegralFloat64 method but over Points; +// see that method for commentary. The C++ version uses a templated method. +// Any changes to this method may need corresponding changes to surfaceIntegralFloat64 as well. +func (l *Loop) surfaceIntegralPoint(f func(a, b, c Point) Point) Point { + const maxLength = math.Pi - 1e-5 + var sum r3.Vector + + origin := l.Vertex(0) + for i := 1; i+1 < len(l.vertices); i++ { + if l.Vertex(i+1).Angle(origin.Vector) > maxLength { + oldOrigin := origin + if origin == l.Vertex(0) { + origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()} + } else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength { + origin = l.Vertex(0) + } else { + origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)} + sum = sum.Add(f(l.Vertex(0), oldOrigin, origin).Vector) + } + sum = sum.Add(f(oldOrigin, l.Vertex(i), origin).Vector) + } + sum = sum.Add(f(origin, l.Vertex(i), l.Vertex(i+1)).Vector) + } + if origin != l.Vertex(0) { + sum = sum.Add(f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0)).Vector) + } + return Point{sum} +} + +// Area returns the area of the loop interior, i.e. the region on the left side of +// the loop. The return value is between 0 and 4*pi. (Note that the return +// value is not affected by whether this loop is a "hole" or a "shell".) +func (l *Loop) Area() float64 { + // It is surprisingly difficult to compute the area of a loop robustly. The + // main issues are (1) whether degenerate loops are considered to be CCW or + // not (i.e., whether their area is close to 0 or 4*pi), and (2) computing + // the areas of small loops with good relative accuracy. + // + // With respect to degeneracies, we would like Area to be consistent + // with ContainsPoint in that loops that contain many points + // should have large areas, and loops that contain few points should have + // small areas. For example, if a degenerate triangle is considered CCW + // according to s2predicates Sign, then it will contain very few points and + // its area should be approximately zero. 
On the other hand if it is + // considered clockwise, then it will contain virtually all points and so + // its area should be approximately 4*pi. + // + // More precisely, let U be the set of Points for which IsUnitLength + // is true, let P(U) be the projection of those points onto the mathematical + // unit sphere, and let V(P(U)) be the Voronoi diagram of the projected + // points. Then for every loop x, we would like Area to approximately + // equal the sum of the areas of the Voronoi regions of the points p for + // which x.ContainsPoint(p) is true. + // + // The second issue is that we want to compute the area of small loops + // accurately. This requires having good relative precision rather than + // good absolute precision. For example, if the area of a loop is 1e-12 and + // the error is 1e-15, then the area only has 3 digits of accuracy. (For + // reference, 1e-12 is about 40 square meters on the surface of the earth.) + // We would like to have good relative accuracy even for small loops. + // + // To achieve these goals, we combine two different methods of computing the + // area. This first method is based on the Gauss-Bonnet theorem, which says + // that the area enclosed by the loop equals 2*pi minus the total geodesic + // curvature of the loop (i.e., the sum of the "turning angles" at all the + // loop vertices). The big advantage of this method is that as long as we + // use Sign to compute the turning angle at each vertex, then + // degeneracies are always handled correctly. In other words, if a + // degenerate loop is CCW according to the symbolic perturbations used by + // Sign, then its turning angle will be approximately 2*pi. + // + // The disadvantage of the Gauss-Bonnet method is that its absolute error is + // about 2e-15 times the number of vertices (see turningAngleMaxError). + // So, it cannot compute the area of small loops accurately. + // + // The second method is based on splitting the loop into triangles and + // summing the area of each triangle. To avoid the difficulty and expense + // of decomposing the loop into a union of non-overlapping triangles, + // instead we compute a signed sum over triangles that may overlap (see the + // comments for surfaceIntegral). The advantage of this method + // is that the area of each triangle can be computed with much better + // relative accuracy (using l'Huilier's theorem). The disadvantage is that + // the result is a signed area: CCW loops may yield a small positive value, + // while CW loops may yield a small negative value (which is converted to a + // positive area by adding 4*pi). This means that small errors in computing + // the signed area may translate into a very large error in the result (if + // the sign of the sum is incorrect). + // + // So, our strategy is to combine these two methods as follows. First we + // compute the area using the "signed sum over triangles" approach (since it + // is generally more accurate). We also estimate the maximum error in this + // result. If the signed area is too close to zero (i.e., zero is within + // the error bounds), then we double-check the sign of the result using the + // Gauss-Bonnet method. (In fact we just call IsNormalized, which is + // based on this method.) If the two methods disagree, we return either 0 + // or 4*pi based on the result of IsNormalized. Otherwise we return the + // area that we computed originally. 
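+	// Special cases first: the empty loop encloses no area and the full loop
+	// encloses the entire sphere.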
+ if l.isEmptyOrFull() { + if l.ContainsOrigin() { + return 4 * math.Pi + } + return 0 + } + area := l.surfaceIntegralFloat64(SignedArea) + + // TODO(roberts): This error estimate is very approximate. There are two + // issues: (1) SignedArea needs some improvements to ensure that its error + // is actually never higher than GirardArea, and (2) although the number of + // triangles in the sum is typically N-2, in theory it could be as high as + // 2*N for pathological inputs. But in other respects this error bound is + // very conservative since it assumes that the maximum error is achieved on + // every triangle. + maxError := l.turningAngleMaxError() + + // The signed area should be between approximately -4*pi and 4*pi. + if area < 0 { + // We have computed the negative of the area of the loop exterior. + area += 4 * math.Pi + } + + if area > 4*math.Pi { + area = 4 * math.Pi + } + if area < 0 { + area = 0 + } + + // If the area is close enough to zero or 4*pi so that the loop orientation + // is ambiguous, then we compute the loop orientation explicitly. + if area < maxError && !l.IsNormalized() { + return 4 * math.Pi + } else if area > (4*math.Pi-maxError) && l.IsNormalized() { + return 0 + } + + return area +} + +// Centroid returns the true centroid of the loop multiplied by the area of the +// loop. The result is not unit length, so you may want to normalize it. Also +// note that in general, the centroid may not be contained by the loop. +// +// We prescale by the loop area for two reasons: (1) it is cheaper to +// compute this way, and (2) it makes it easier to compute the centroid of +// more complicated shapes (by splitting them into disjoint regions and +// adding their centroids). +// +// Note that the return value is not affected by whether this loop is a +// "hole" or a "shell". +func (l *Loop) Centroid() Point { + // surfaceIntegralPoint() returns either the integral of position over loop + // interior, or the negative of the integral of position over the loop + // exterior. But these two values are the same (!), because the integral of + // position over the entire sphere is (0, 0, 0). + return l.surfaceIntegralPoint(TrueCentroid) +} + +// Encode encodes the Loop. +func (l Loop) Encode(w io.Writer) error { + e := &encoder{w: w} + l.encode(e) + return e.err +} + +func (l Loop) encode(e *encoder) { + e.writeInt8(encodingVersion) + e.writeUint32(uint32(len(l.vertices))) + for _, v := range l.vertices { + e.writeFloat64(v.X) + e.writeFloat64(v.Y) + e.writeFloat64(v.Z) + } + + e.writeBool(l.originInside) + e.writeInt32(int32(l.depth)) + + // Encode the bound. + l.bound.encode(e) +} + +// Decode decodes a loop. +func (l *Loop) Decode(r io.Reader) error { + *l = Loop{} + d := &decoder{r: asByteReader(r)} + l.decode(d) + return d.err +} + +func (l *Loop) decode(d *decoder) { + version := int8(d.readUint8()) + if d.err != nil { + return + } + if version != encodingVersion { + d.err = fmt.Errorf("cannot decode version %d", version) + return + } + + // Empty loops are explicitly allowed here: a newly created loop has zero vertices + // and such loops encode and decode properly. 
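+	// The vertex-count check below guards against allocating huge slices from
+	// corrupt or malicious input.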
+ nvertices := d.readUint32() + if nvertices > maxEncodedVertices { + if d.err == nil { + d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices) + + } + return + } + l.vertices = make([]Point, nvertices) + for i := range l.vertices { + l.vertices[i].X = d.readFloat64() + l.vertices[i].Y = d.readFloat64() + l.vertices[i].Z = d.readFloat64() + } + l.originInside = d.readBool() + l.depth = int(d.readUint32()) + l.bound.decode(d) + l.subregionBound = ExpandForSubregions(l.bound) + + l.index = NewShapeIndex() + l.index.Add(l) +} + +// Bitmasks to read from properties. +const ( + originInside = 1 << iota + boundEncoded +) + +func (l *Loop) xyzFaceSiTiVertices() []xyzFaceSiTi { + ret := make([]xyzFaceSiTi, len(l.vertices)) + for i, v := range l.vertices { + ret[i].xyz = v + ret[i].face, ret[i].si, ret[i].ti, ret[i].level = xyzToFaceSiTi(v) + } + return ret +} + +func (l *Loop) encodeCompressed(e *encoder, snapLevel int, vertices []xyzFaceSiTi) { + if len(l.vertices) != len(vertices) { + panic("encodeCompressed: vertices must be the same length as l.vertices") + } + if len(vertices) > maxEncodedVertices { + if e.err == nil { + e.err = fmt.Errorf("too many vertices (%d; max is %d)", len(vertices), maxEncodedVertices) + } + return + } + e.writeUvarint(uint64(len(vertices))) + encodePointsCompressed(e, vertices, snapLevel) + + props := l.compressedEncodingProperties() + e.writeUvarint(props) + e.writeUvarint(uint64(l.depth)) + if props&boundEncoded != 0 { + l.bound.encode(e) + } +} + +func (l *Loop) compressedEncodingProperties() uint64 { + var properties uint64 + if l.originInside { + properties |= originInside + } + + // Write whether there is a bound so we can change the threshold later. + // Recomputing the bound multiplies the decode time taken per vertex + // by a factor of about 3.5. Without recomputing the bound, decode + // takes approximately 125 ns / vertex. A loop with 63 vertices + // encoded without the bound will take ~30us to decode, which is + // acceptable. At ~3.5 bytes / vertex without the bound, adding + // the bound will increase the size by <15%, which is also acceptable. + const minVerticesForBound = 64 + if len(l.vertices) >= minVerticesForBound { + properties |= boundEncoded + } + + return properties +} + +func (l *Loop) decodeCompressed(d *decoder, snapLevel int) { + nvertices := d.readUvarint() + if d.err != nil { + return + } + if nvertices > maxEncodedVertices { + d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices) + return + } + l.vertices = make([]Point, nvertices) + decodePointsCompressed(d, snapLevel, l.vertices) + properties := d.readUvarint() + + // Make sure values are valid before using. + if d.err != nil { + return + } + + l.originInside = (properties & originInside) != 0 + + l.depth = int(d.readUvarint()) + + if (properties & boundEncoded) != 0 { + l.bound.decode(d) + if d.err != nil { + return + } + l.subregionBound = ExpandForSubregions(l.bound) + } else { + l.initBound() + } + + l.index = NewShapeIndex() + l.index.Add(l) +} + +// crossingTarget is an enum representing the possible crossing target cases for relations. +type crossingTarget int + +const ( + crossingTargetDontCare crossingTarget = iota + crossingTargetDontCross + crossingTargetCross +) + +// loopRelation defines the interface for checking a type of relationship between two loops. +// Some examples of relations are Contains, Intersects, or CompareBoundary. 
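+// Implementations may maintain internal state across calls to wedgesCross
+// (see containsRelation and compareBoundaryRelation below).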
+type loopRelation interface { + // Optionally, aCrossingTarget and bCrossingTarget can specify an early-exit + // condition for the loop relation. If any point P is found such that + // + // A.ContainsPoint(P) == aCrossingTarget() && + // B.ContainsPoint(P) == bCrossingTarget() + // + // then the loop relation is assumed to be the same as if a pair of crossing + // edges were found. For example, the ContainsPoint relation has + // + // aCrossingTarget() == crossingTargetDontCross + // bCrossingTarget() == crossingTargetCross + // + // because if A.ContainsPoint(P) == false and B.ContainsPoint(P) == true + // for any point P, then it is equivalent to finding an edge crossing (i.e., + // since Contains returns false in both cases). + // + // Loop relations that do not have an early-exit condition of this form + // should return crossingTargetDontCare for both crossing targets. + + // aCrossingTarget reports whether loop A crosses the target point with + // the given relation type. + aCrossingTarget() crossingTarget + // bCrossingTarget reports whether loop B crosses the target point with + // the given relation type. + bCrossingTarget() crossingTarget + + // wedgesCross reports if a shared vertex ab1 and the two associated wedges + // (a0, ab1, b2) and (b0, ab1, b2) are equivalent to an edge crossing. + // The loop relation is also allowed to maintain its own internal state, and + // can return true if it observes any sequence of wedges that are equivalent + // to an edge crossing. + wedgesCross(a0, ab1, a2, b0, b2 Point) bool +} + +// loopCrosser is a helper type for determining whether two loops cross. +// It is instantiated twice for each pair of loops to be tested, once for the +// pair (A,B) and once for the pair (B,A), in order to be able to process +// edges in either loop nesting order. +type loopCrosser struct { + a, b *Loop + relation loopRelation + swapped bool + aCrossingTarget crossingTarget + bCrossingTarget crossingTarget + + // state maintained by startEdge and edgeCrossesCell. + crosser *EdgeCrosser + aj, bjPrev int + + // temporary data declared here to avoid repeated memory allocations. + bQuery *CrossingEdgeQuery + bCells []*ShapeIndexCell +} + +// newLoopCrosser creates a loopCrosser from the given values. If swapped is true, +// the loops A and B have been swapped. This affects how arguments are passed to +// the given loop relation, since for example A.Contains(B) is not the same as +// B.Contains(A). +func newLoopCrosser(a, b *Loop, relation loopRelation, swapped bool) *loopCrosser { + l := &loopCrosser{ + a: a, + b: b, + relation: relation, + swapped: swapped, + aCrossingTarget: relation.aCrossingTarget(), + bCrossingTarget: relation.bCrossingTarget(), + bQuery: NewCrossingEdgeQuery(b.index), + } + if swapped { + l.aCrossingTarget, l.bCrossingTarget = l.bCrossingTarget, l.aCrossingTarget + } + + return l +} + +// startEdge sets the crossers state for checking the given edge of loop A. +func (l *loopCrosser) startEdge(aj int) { + l.crosser = NewEdgeCrosser(l.a.Vertex(aj), l.a.Vertex(aj+1)) + l.aj = aj + l.bjPrev = -2 +} + +// edgeCrossesCell reports whether the current edge of loop A has any crossings with +// edges of the index cell of loop B. 
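+// startEdge must be called first to position the crosser on an edge of A.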
+func (l *loopCrosser) edgeCrossesCell(bClipped *clippedShape) bool { + // Test the current edge of A against all edges of bClipped + bNumEdges := bClipped.numEdges() + for j := 0; j < bNumEdges; j++ { + bj := bClipped.edges[j] + if bj != l.bjPrev+1 { + l.crosser.RestartAt(l.b.Vertex(bj)) + } + l.bjPrev = bj + if crossing := l.crosser.ChainCrossingSign(l.b.Vertex(bj + 1)); crossing == DoNotCross { + continue + } else if crossing == Cross { + return true + } + + // We only need to check each shared vertex once, so we only + // consider the case where l.aVertex(l.aj+1) == l.b.Vertex(bj+1). + if l.a.Vertex(l.aj+1) == l.b.Vertex(bj+1) { + if l.swapped { + if l.relation.wedgesCross(l.b.Vertex(bj), l.b.Vertex(bj+1), l.b.Vertex(bj+2), l.a.Vertex(l.aj), l.a.Vertex(l.aj+2)) { + return true + } + } else { + if l.relation.wedgesCross(l.a.Vertex(l.aj), l.a.Vertex(l.aj+1), l.a.Vertex(l.aj+2), l.b.Vertex(bj), l.b.Vertex(bj+2)) { + return true + } + } + } + } + + return false +} + +// cellCrossesCell reports whether there are any edge crossings or wedge crossings +// within the two given cells. +func (l *loopCrosser) cellCrossesCell(aClipped, bClipped *clippedShape) bool { + // Test all edges of aClipped against all edges of bClipped. + for _, edge := range aClipped.edges { + l.startEdge(edge) + if l.edgeCrossesCell(bClipped) { + return true + } + } + + return false +} + +// cellCrossesAnySubcell reports whether given an index cell of A, if there are any +// edge or wedge crossings with any index cell of B contained within bID. +func (l *loopCrosser) cellCrossesAnySubcell(aClipped *clippedShape, bID CellID) bool { + // Test all edges of aClipped against all edges of B. The relevant B + // edges are guaranteed to be children of bID, which lets us find the + // correct index cells more efficiently. + bRoot := PaddedCellFromCellID(bID, 0) + for _, aj := range aClipped.edges { + // Use an CrossingEdgeQuery starting at bRoot to find the index cells + // of B that might contain crossing edges. + l.bCells = l.bQuery.getCells(l.a.Vertex(aj), l.a.Vertex(aj+1), bRoot) + if len(l.bCells) == 0 { + continue + } + l.startEdge(aj) + for c := 0; c < len(l.bCells); c++ { + if l.edgeCrossesCell(l.bCells[c].shapes[0]) { + return true + } + } + } + + return false +} + +// hasCrossing reports whether given two iterators positioned such that +// ai.cellID().ContainsCellID(bi.cellID()), there is an edge or wedge crossing +// anywhere within ai.cellID(). This function advances bi only past ai.cellID(). +func (l *loopCrosser) hasCrossing(ai, bi *rangeIterator) bool { + // If ai.CellID() intersects many edges of B, then it is faster to use + // CrossingEdgeQuery to narrow down the candidates. But if it intersects + // only a few edges, it is faster to check all the crossings directly. + // We handle this by advancing bi and keeping track of how many edges we + // would need to test. + const edgeQueryMinEdges = 20 // Tuned from benchmarks. + var totalEdges int + l.bCells = nil + + for { + if n := bi.it.IndexCell().shapes[0].numEdges(); n > 0 { + totalEdges += n + if totalEdges >= edgeQueryMinEdges { + // There are too many edges to test them directly, so use CrossingEdgeQuery. + if l.cellCrossesAnySubcell(ai.it.IndexCell().shapes[0], ai.cellID()) { + return true + } + bi.seekBeyond(ai) + return false + } + l.bCells = append(l.bCells, bi.indexCell()) + } + bi.next() + if bi.cellID() > ai.rangeMax { + break + } + } + + // Test all the edge crossings directly. 
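+	// (l.bCells now holds every index cell of B that was collected while
+	// advancing bi past ai.cellID().)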
+ for _, c := range l.bCells { + if l.cellCrossesCell(ai.it.IndexCell().shapes[0], c.shapes[0]) { + return true + } + } + + return false +} + +// containsCenterMatches reports if the clippedShapes containsCenter boolean corresponds +// to the crossing target type given. (This is to work around C++ allowing false == 0, +// true == 1 type implicit conversions and comparisons) +func containsCenterMatches(a *clippedShape, target crossingTarget) bool { + return (!a.containsCenter && target == crossingTargetDontCross) || + (a.containsCenter && target == crossingTargetCross) +} + +// hasCrossingRelation reports whether given two iterators positioned such that +// ai.cellID().ContainsCellID(bi.cellID()), there is a crossing relationship +// anywhere within ai.cellID(). Specifically, this method returns true if there +// is an edge crossing, a wedge crossing, or a point P that matches both relations +// crossing targets. This function advances both iterators past ai.cellID. +func (l *loopCrosser) hasCrossingRelation(ai, bi *rangeIterator) bool { + aClipped := ai.it.IndexCell().shapes[0] + if aClipped.numEdges() != 0 { + // The current cell of A has at least one edge, so check for crossings. + if l.hasCrossing(ai, bi) { + return true + } + ai.next() + return false + } + + if containsCenterMatches(aClipped, l.aCrossingTarget) { + // The crossing target for A is not satisfied, so we skip over these cells of B. + bi.seekBeyond(ai) + ai.next() + return false + } + + // All points within ai.cellID() satisfy the crossing target for A, so it's + // worth iterating through the cells of B to see whether any cell + // centers also satisfy the crossing target for B. + for bi.cellID() <= ai.rangeMax { + bClipped := bi.it.IndexCell().shapes[0] + if containsCenterMatches(bClipped, l.bCrossingTarget) { + return true + } + bi.next() + } + ai.next() + return false +} + +// hasCrossingRelation checks all edges of loop A for intersection against all edges +// of loop B and reports if there are any that satisfy the given relation. If there +// is any shared vertex, the wedges centered at this vertex are sent to the given +// relation to be tested. +// +// If the two loop boundaries cross, this method is guaranteed to return +// true. It also returns true in certain cases if the loop relationship is +// equivalent to crossing. For example, if the relation is Contains and a +// point P is found such that B contains P but A does not contain P, this +// method will return true to indicate that the result is the same as though +// a pair of crossing edges were found (since Contains returns false in +// both cases). +// +// See Contains, Intersects and CompareBoundary for the three uses of this function. +func hasCrossingRelation(a, b *Loop, relation loopRelation) bool { + // We look for CellID ranges where the indexes of A and B overlap, and + // then test those edges for crossings. + ai := newRangeIterator(a.index) + bi := newRangeIterator(b.index) + + ab := newLoopCrosser(a, b, relation, false) // Tests edges of A against B + ba := newLoopCrosser(b, a, relation, true) // Tests edges of B against A + + for !ai.done() || !bi.done() { + if ai.rangeMax < bi.rangeMin { + // The A and B cells don't overlap, and A precedes B. + ai.seekTo(bi) + } else if bi.rangeMax < ai.rangeMin { + // The A and B cells don't overlap, and B precedes A. + bi.seekTo(ai) + } else { + // One cell contains the other. Determine which cell is larger. 
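+			// (A CellID's lowest set bit grows with cell size, so the cell
+			// with the larger lsb is the larger cell.)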
+ abRelation := int64(ai.it.CellID().lsb() - bi.it.CellID().lsb()) + if abRelation > 0 { + // A's index cell is larger. + if ab.hasCrossingRelation(ai, bi) { + return true + } + } else if abRelation < 0 { + // B's index cell is larger. + if ba.hasCrossingRelation(bi, ai) { + return true + } + } else { + // The A and B cells are the same. Since the two cells + // have the same center point P, check whether P satisfies + // the crossing targets. + aClipped := ai.it.IndexCell().shapes[0] + bClipped := bi.it.IndexCell().shapes[0] + if containsCenterMatches(aClipped, ab.aCrossingTarget) && + containsCenterMatches(bClipped, ab.bCrossingTarget) { + return true + } + // Otherwise test all the edge crossings directly. + if aClipped.numEdges() > 0 && bClipped.numEdges() > 0 && ab.cellCrossesCell(aClipped, bClipped) { + return true + } + ai.next() + bi.next() + } + } + } + return false +} + +// containsRelation implements loopRelation for a contains operation. If +// A.ContainsPoint(P) == false && B.ContainsPoint(P) == true, it is equivalent +// to having an edge crossing (i.e., Contains returns false). +type containsRelation struct { + foundSharedVertex bool +} + +func (c *containsRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCross } +func (c *containsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross } +func (c *containsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool { + c.foundSharedVertex = true + return !WedgeContains(a0, ab1, a2, b0, b2) +} + +// intersectsRelation implements loopRelation for an intersects operation. Given +// two loops, A and B, if A.ContainsPoint(P) == true && B.ContainsPoint(P) == true, +// it is equivalent to having an edge crossing (i.e., Intersects returns true). +type intersectsRelation struct { + foundSharedVertex bool +} + +func (i *intersectsRelation) aCrossingTarget() crossingTarget { return crossingTargetCross } +func (i *intersectsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross } +func (i *intersectsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool { + i.foundSharedVertex = true + return WedgeIntersects(a0, ab1, a2, b0, b2) +} + +// compareBoundaryRelation implements loopRelation for comparing boundaries. +// +// The compare boundary relation does not have a useful early-exit condition, +// so we return crossingTargetDontCare for both crossing targets. +// +// Aside: A possible early exit condition could be based on the following. +// If A contains a point of both B and ~B, then A intersects Boundary(B). +// If ~A contains a point of both B and ~B, then ~A intersects Boundary(B). +// So if the intersections of {A, ~A} with {B, ~B} are all non-empty, +// the return value is 0, i.e., Boundary(A) intersects Boundary(B). +// Unfortunately it isn't worth detecting this situation because by the +// time we have seen a point in all four intersection regions, we are also +// guaranteed to have seen at least one pair of crossing edges. +type compareBoundaryRelation struct { + reverse bool // True if the other loop should be reversed. + foundSharedVertex bool // True if any wedge was processed. + containsEdge bool // True if any edge of the other loop is contained by this loop. + excludesEdge bool // True if any edge of the other loop is excluded by this loop. 
+}
+
+func newCompareBoundaryRelation(reverse bool) *compareBoundaryRelation {
+	return &compareBoundaryRelation{reverse: reverse}
+}
+
+func (c *compareBoundaryRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCare }
+func (c *compareBoundaryRelation) bCrossingTarget() crossingTarget { return crossingTargetDontCare }
+func (c *compareBoundaryRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
+	// Because we don't care about the interior of the other loop, only its boundary,
+	// it is sufficient to check whether this one contains the semiwedge (ab1, b2).
+	c.foundSharedVertex = true
+	if wedgeContainsSemiwedge(a0, ab1, a2, b2, c.reverse) {
+		c.containsEdge = true
+	} else {
+		c.excludesEdge = true
+	}
+	return c.containsEdge && c.excludesEdge
+}
+
+// wedgeContainsSemiwedge reports whether the wedge (a0, ab1, a2) contains the
+// "semiwedge" defined as any non-empty open set of rays immediately CCW from
+// the edge (ab1, b2). If reverse is true, then substitute clockwise for CCW;
+// this simulates what would happen if the direction of the other loop was reversed.
+func wedgeContainsSemiwedge(a0, ab1, a2, b2 Point, reverse bool) bool {
+	if b2 == a0 || b2 == a2 {
+		// We have a shared or reversed edge.
+		return (b2 == a0) == reverse
+	}
+	return OrderedCCW(a0, a2, b2, ab1)
+}
+
+// containsNonCrossingBoundary reports whether, given two loops whose boundaries
+// do not cross (see compareBoundary), this loop contains the boundary of the
+// other loop. If reverseOther is true, the boundary of the other loop is reversed
+// first (which only affects the result when there are shared edges). This method
+// is cheaper than compareBoundary because it does not test for edge intersections.
+//
+// This function requires that neither loop is empty, and that if the other is full,
+// then reverseOther == false.
+func (l *Loop) containsNonCrossingBoundary(other *Loop, reverseOther bool) bool {
+	// The bounds must intersect for containment.
+	if !l.bound.Intersects(other.bound) {
+		return false
+	}
+
+	// Full loops are handled as though the loop surrounded the entire sphere.
+	if l.IsFull() {
+		return true
+	}
+	if other.IsFull() {
+		return false
+	}
+
+	m, ok := l.findVertex(other.Vertex(0))
+	if !ok {
+		// Since the other loop's vertex 0 is not shared, we can check whether this loop contains it.
+		return l.ContainsPoint(other.Vertex(0))
+	}
+	// Otherwise check whether the edge (b0, b1) is contained by this loop.
+	return wedgeContainsSemiwedge(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1),
+		other.Vertex(1), reverseOther)
+}
+
+// TODO(roberts): Differences from the C++ version:
+// DistanceToPoint
+// DistanceToBoundary
+// Project
+// ProjectToBoundary
+// BoundaryApproxEqual
+// BoundaryNear
diff --git a/vendor/github.com/golang/geo/s2/matrix3x3.go b/vendor/github.com/golang/geo/s2/matrix3x3.go
new file mode 100644
index 000000000..01696fe83
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/matrix3x3.go
@@ -0,0 +1,127 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "fmt" + + "github.com/golang/geo/r3" +) + +// matrix3x3 represents a traditional 3x3 matrix of floating point values. +// This is not a full fledged matrix. It only contains the pieces needed +// to satisfy the computations done within the s2 package. +type matrix3x3 [3][3]float64 + +// col returns the given column as a Point. +func (m *matrix3x3) col(col int) Point { + return Point{r3.Vector{m[0][col], m[1][col], m[2][col]}} +} + +// row returns the given row as a Point. +func (m *matrix3x3) row(row int) Point { + return Point{r3.Vector{m[row][0], m[row][1], m[row][2]}} +} + +// setCol sets the specified column to the value in the given Point. +func (m *matrix3x3) setCol(col int, p Point) *matrix3x3 { + m[0][col] = p.X + m[1][col] = p.Y + m[2][col] = p.Z + + return m +} + +// setRow sets the specified row to the value in the given Point. +func (m *matrix3x3) setRow(row int, p Point) *matrix3x3 { + m[row][0] = p.X + m[row][1] = p.Y + m[row][2] = p.Z + + return m +} + +// scale multiplies the matrix by the given value. +func (m *matrix3x3) scale(f float64) *matrix3x3 { + return &matrix3x3{ + [3]float64{f * m[0][0], f * m[0][1], f * m[0][2]}, + [3]float64{f * m[1][0], f * m[1][1], f * m[1][2]}, + [3]float64{f * m[2][0], f * m[2][1], f * m[2][2]}, + } +} + +// mul returns the multiplication of m by the Point p and converts the +// resulting 1x3 matrix into a Point. +func (m *matrix3x3) mul(p Point) Point { + return Point{r3.Vector{ + m[0][0]*p.X + m[0][1]*p.Y + m[0][2]*p.Z, + m[1][0]*p.X + m[1][1]*p.Y + m[1][2]*p.Z, + m[2][0]*p.X + m[2][1]*p.Y + m[2][2]*p.Z, + }} +} + +// det returns the determinant of this matrix. +func (m *matrix3x3) det() float64 { + // | a b c | + // det | d e f | = aei + bfg + cdh - ceg - bdi - afh + // | g h i | + return m[0][0]*m[1][1]*m[2][2] + m[0][1]*m[1][2]*m[2][0] + m[0][2]*m[1][0]*m[2][1] - + m[0][2]*m[1][1]*m[2][0] - m[0][1]*m[1][0]*m[2][2] - m[0][0]*m[1][2]*m[2][1] +} + +// transpose reflects the matrix along its diagonal and returns the result. +func (m *matrix3x3) transpose() *matrix3x3 { + m[0][1], m[1][0] = m[1][0], m[0][1] + m[0][2], m[2][0] = m[2][0], m[0][2] + m[1][2], m[2][1] = m[2][1], m[1][2] + + return m +} + +// String formats the matrix into an easier to read layout. +func (m *matrix3x3) String() string { + return fmt.Sprintf("[ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ]", + m[0][0], m[0][1], m[0][2], + m[1][0], m[1][1], m[1][2], + m[2][0], m[2][1], m[2][2], + ) +} + +// getFrame returns the orthonormal frame for the given point on the unit sphere. +func getFrame(p Point) matrix3x3 { + // Given the point p on the unit sphere, extend this into a right-handed + // coordinate frame of unit-length column vectors m = (x,y,z). Note that + // the vectors (x,y) are an orthonormal frame for the tangent space at point p, + // while p itself is an orthonormal frame for the normal space at p. + m := matrix3x3{} + m.setCol(2, p) + m.setCol(1, Point{p.Ortho()}) + m.setCol(0, Point{m.col(1).Cross(p.Vector)}) + return m +} + +// toFrame returns the coordinates of the given point with respect to its orthonormal basis m. +// The resulting point q satisfies the identity (m * q == p). +func toFrame(m matrix3x3, p Point) Point { + // The inverse of an orthonormal matrix is its transpose. 
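+	// (The columns of m are orthonormal, so m * m^T == I and the transpose
+	// doubles as the inverse; no general matrix inversion is needed.)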
+ return m.transpose().mul(p) +} + +// fromFrame returns the coordinates of the given point in standard axis-aligned basis +// from its orthonormal basis m. +// The resulting point p satisfies the identity (p == m * q). +func fromFrame(m matrix3x3, q Point) Point { + return m.mul(q) +} diff --git a/vendor/github.com/golang/geo/s2/max_distance_targets.go b/vendor/github.com/golang/geo/s2/max_distance_targets.go new file mode 100644 index 000000000..589231890 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/max_distance_targets.go @@ -0,0 +1,306 @@ +// Copyright 2019 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/s1" +) + +// maxDistance implements distance as the supplementary distance (Pi - x) to find +// results that are the furthest using the distance related algorithms. +type maxDistance s1.ChordAngle + +func (m maxDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) } +func (m maxDistance) zero() distance { return maxDistance(s1.StraightChordAngle) } +func (m maxDistance) negative() distance { return maxDistance(s1.InfChordAngle()) } +func (m maxDistance) infinity() distance { return maxDistance(s1.NegativeChordAngle) } +func (m maxDistance) less(other distance) bool { return m.chordAngle() > other.chordAngle() } +func (m maxDistance) sub(other distance) distance { + return maxDistance(m.chordAngle() + other.chordAngle()) +} +func (m maxDistance) chordAngleBound() s1.ChordAngle { + return s1.StraightChordAngle - m.chordAngle() +} +func (m maxDistance) updateDistance(dist distance) (distance, bool) { + if dist.less(m) { + m = maxDistance(dist.chordAngle()) + return m, true + } + return m, false +} + +func (m maxDistance) fromChordAngle(o s1.ChordAngle) distance { + return maxDistance(o) +} + +// MaxDistanceToPointTarget is used for computing the maximum distance to a Point. +type MaxDistanceToPointTarget struct { + point Point + dist distance +} + +// NewMaxDistanceToPointTarget returns a new target for the given Point. 
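+//
+// A sketch of typical use, assuming an EdgeQuery built with
+// NewFurthestEdgeQuery over a ShapeIndex index (pt is any Point):
+//
+//	q := NewFurthestEdgeQuery(index, NewFurthestEdgeQueryOptions())
+//	d := q.Distance(NewMaxDistanceToPointTarget(pt))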
+func NewMaxDistanceToPointTarget(point Point) *MaxDistanceToPointTarget { + m := maxDistance(0) + return &MaxDistanceToPointTarget{point: point, dist: &m} +} + +func (m *MaxDistanceToPointTarget) capBound() Cap { + return CapFromCenterChordAngle(Point{m.point.Mul(-1)}, (s1.ChordAngle(0))) +} + +func (m *MaxDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(ChordAngleBetweenPoints(p, m.point))) +} + +func (m *MaxDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + if d, ok := UpdateMaxDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok { + dist, _ = dist.updateDistance(maxDistance(d)) + return dist, true + } + return dist, false +} + +func (m *MaxDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(cell.MaxDistance(m.point))) +} + +func (m *MaxDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // For furthest points, we visit the polygons whose interior contains + // the antipode of the target point. These are the polygons whose + // distance to the target is maxDistance.zero() + q := NewContainsPointQuery(index, VertexModelSemiOpen) + return q.visitContainingShapes(Point{m.point.Mul(-1)}, func(shape Shape) bool { + return v(shape, m.point) + }) +} + +func (m *MaxDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } +func (m *MaxDistanceToPointTarget) maxBruteForceIndexSize() int { return 300 } +func (m *MaxDistanceToPointTarget) distance() distance { return m.dist } + +// MaxDistanceToEdgeTarget is used for computing the maximum distance to an Edge. +type MaxDistanceToEdgeTarget struct { + e Edge + dist distance +} + +// NewMaxDistanceToEdgeTarget returns a new target for the given Edge. +func NewMaxDistanceToEdgeTarget(e Edge) *MaxDistanceToEdgeTarget { + m := maxDistance(0) + return &MaxDistanceToEdgeTarget{e: e, dist: m} +} + +// capBound returns a Cap that bounds the antipode of the target. (This +// is the set of points whose maxDistance to the target is maxDistance.zero) +func (m *MaxDistanceToEdgeTarget) capBound() Cap { + // The following computes a radius equal to half the edge length in an + // efficient and numerically stable way. + d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1)) + r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2)) + return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Mul(-1).Normalize()}, s1.ChordAngleFromSquaredLength(r2)) +} + +func (m *MaxDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + if d, ok := UpdateMaxDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok { + dist, _ = dist.updateDistance(maxDistance(d)) + return dist, true + } + return dist, false +} + +func (m *MaxDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + if d, ok := updateEdgePairMaxDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok { + dist, _ = dist.updateDistance(maxDistance(d)) + return dist, true + } + return dist, false +} + +func (m *MaxDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(cell.MaxDistanceToEdge(m.e.V0, m.e.V1))) +} + +func (m *MaxDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // We only need to test one edge point. 
That is because the method *must* + // visit a polygon if it fully contains the target, and *is allowed* to + // visit a polygon if it intersects the target. If the tested vertex is not + // contained, we know the full edge is not contained; if the tested vertex is + // contained, then the edge either is fully contained (must be visited) or it + // intersects (is allowed to be visited). We visit the center of the edge so + // that edge AB gives identical results to BA. + target := NewMaxDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}) + return target.visitContainingShapes(index, v) +} + +func (m *MaxDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } +func (m *MaxDistanceToEdgeTarget) maxBruteForceIndexSize() int { return 110 } +func (m *MaxDistanceToEdgeTarget) distance() distance { return m.dist } + +// MaxDistanceToCellTarget is used for computing the maximum distance to a Cell. +type MaxDistanceToCellTarget struct { + cell Cell + dist distance +} + +// NewMaxDistanceToCellTarget returns a new target for the given Cell. +func NewMaxDistanceToCellTarget(cell Cell) *MaxDistanceToCellTarget { + m := maxDistance(0) + return &MaxDistanceToCellTarget{cell: cell, dist: m} +} + +func (m *MaxDistanceToCellTarget) capBound() Cap { + c := m.cell.CapBound() + return CapFromCenterAngle(Point{c.Center().Mul(-1)}, c.Radius()) +} + +func (m *MaxDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(m.cell.MaxDistance(p))) +} + +func (m *MaxDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(m.cell.MaxDistanceToEdge(edge.V0, edge.V1))) +} + +func (m *MaxDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + return dist.updateDistance(maxDistance(m.cell.MaxDistanceToCell(cell))) +} + +func (m *MaxDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // We only need to check one point here - cell center is simplest. + // See comment at MaxDistanceToEdgeTarget's visitContainingShapes. + target := NewMaxDistanceToPointTarget(m.cell.Center()) + return target.visitContainingShapes(index, v) +} + +func (m *MaxDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } +func (m *MaxDistanceToCellTarget) maxBruteForceIndexSize() int { return 100 } +func (m *MaxDistanceToCellTarget) distance() distance { return m.dist } + +// MaxDistanceToShapeIndexTarget is used for computing the maximum distance to a ShapeIndex. +type MaxDistanceToShapeIndexTarget struct { + index *ShapeIndex + query *EdgeQuery + dist distance +} + +// NewMaxDistanceToShapeIndexTarget returns a new target for the given ShapeIndex. +func NewMaxDistanceToShapeIndexTarget(index *ShapeIndex) *MaxDistanceToShapeIndexTarget { + m := maxDistance(0) + return &MaxDistanceToShapeIndexTarget{ + index: index, + dist: m, + query: NewFurthestEdgeQuery(index, NewFurthestEdgeQueryOptions()), + } +} + +// capBound returns a Cap that bounds the antipode of the target. 
This +// is the set of points whose maxDistance to the target is maxDistance.zero() +func (m *MaxDistanceToShapeIndexTarget) capBound() Cap { + // TODO(roberts): Depends on ShapeIndexRegion + // c := makeShapeIndexRegion(m.index).CapBound() + // return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius()) + panic("not implemented yet") +} + +func (m *MaxDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + m.query.opts.distanceLimit = dist.chordAngle() + target := NewMaxDistanceToPointTarget(p) + r := m.query.findEdge(target, m.query.opts) + if r.shapeID < 0 { + return dist, false + } + return r.distance, true +} + +func (m *MaxDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + m.query.opts.distanceLimit = dist.chordAngle() + target := NewMaxDistanceToEdgeTarget(edge) + r := m.query.findEdge(target, m.query.opts) + if r.shapeID < 0 { + return dist, false + } + return r.distance, true +} + +func (m *MaxDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + m.query.opts.distanceLimit = dist.chordAngle() + target := NewMaxDistanceToCellTarget(cell) + r := m.query.findEdge(target, m.query.opts) + if r.shapeID < 0 { + return dist, false + } + return r.distance, true +} + +// visitContainingShapes returns the polygons containing the antipodal +// reflection of *any* connected component for target types consisting of +// multiple connected components. It is sufficient to test containment of +// one vertex per connected component, since this allows us to also return +// any polygon whose boundary has distance.zero() to the target. +func (m *MaxDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // It is sufficient to find the set of chain starts in the target index + // (i.e., one vertex per connected component of edges) that are contained by + // the query index, except for one special case to handle full polygons. + // + // TODO(roberts): Do this by merge-joining the two ShapeIndexes and share + // the code with BooleanOperation. + for _, shape := range m.index.shapes { + numChains := shape.NumChains() + // Shapes that don't have any edges require a special case (below). + testedPoint := false + for c := 0; c < numChains; c++ { + chain := shape.Chain(c) + if chain.Length == 0 { + continue + } + testedPoint = true + target := NewMaxDistanceToPointTarget(shape.ChainEdge(c, 0).V0) + if !target.visitContainingShapes(index, v) { + return false + } + } + if !testedPoint { + // Special case to handle full polygons. 
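+			// (A full polygon has no edges; its containment information is
+			// carried entirely by its reference point, tested below.)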
+ ref := shape.ReferencePoint() + if !ref.Contained { + continue + } + target := NewMaxDistanceToPointTarget(ref.Point) + if !target.visitContainingShapes(index, v) { + return false + } + } + } + return true +} + +func (m *MaxDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool { + m.query.opts.maxError = maxErr + return true +} +func (m *MaxDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 70 } +func (m *MaxDistanceToShapeIndexTarget) distance() distance { return m.dist } +func (m *MaxDistanceToShapeIndexTarget) setIncludeInteriors(b bool) { + m.query.opts.includeInteriors = b +} +func (m *MaxDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.useBruteForce = b } + +// TODO(roberts): Remaining methods +// +// func (m *MaxDistanceToShapeIndexTarget) capBound() Cap { +// CellUnionTarget diff --git a/vendor/github.com/golang/geo/s2/metric.go b/vendor/github.com/golang/geo/s2/metric.go new file mode 100644 index 000000000..53db3d317 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/metric.go @@ -0,0 +1,164 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +// This file implements functions for various S2 measurements. + +import "math" + +// A Metric is a measure for cells. It is used to describe the shape and size +// of cells. They are useful for deciding which cell level to use in order to +// satisfy a given condition (e.g. that cell vertices must be no further than +// "x" apart). You can use the Value(level) method to compute the corresponding +// length or area on the unit sphere for cells at a given level. The minimum +// and maximum bounds are valid for cells at all levels, but they may be +// somewhat conservative for very large cells (e.g. face cells). +type Metric struct { + // Dim is either 1 or 2, for a 1D or 2D metric respectively. + Dim int + // Deriv is the scaling factor for the metric. + Deriv float64 +} + +// Defined metrics. +// Of the projection methods defined in C++, Go only supports the quadratic projection. + +// Each cell is bounded by four planes passing through its four edges and +// the center of the sphere. These metrics relate to the angle between each +// pair of opposite bounding planes, or equivalently, between the planes +// corresponding to two different s-values or two different t-values. +var ( + MinAngleSpanMetric = Metric{1, 4.0 / 3} + AvgAngleSpanMetric = Metric{1, math.Pi / 2} + MaxAngleSpanMetric = Metric{1, 1.704897179199218452} +) + +// The width of geometric figure is defined as the distance between two +// parallel bounding lines in a given direction. For cells, the minimum +// width is always attained between two opposite edges, and the maximum +// width is attained between two opposite vertices. However, for our +// purposes we redefine the width of a cell as the perpendicular distance +// between a pair of opposite edges. A cell therefore has two widths, one +// in each direction. 
The minimum width according to this definition agrees +// with the classic geometric one, but the maximum width is different. (The +// maximum geometric width corresponds to MaxDiag defined below.) +// +// The average width in both directions for all cells at level k is approximately +// AvgWidthMetric.Value(k). +// +// The width is useful for bounding the minimum or maximum distance from a +// point on one edge of a cell to the closest point on the opposite edge. +// For example, this is useful when growing regions by a fixed distance. +var ( + MinWidthMetric = Metric{1, 2 * math.Sqrt2 / 3} + AvgWidthMetric = Metric{1, 1.434523672886099389} + MaxWidthMetric = Metric{1, MaxAngleSpanMetric.Deriv} +) + +// The edge length metrics can be used to bound the minimum, maximum, +// or average distance from the center of one cell to the center of one of +// its edge neighbors. In particular, it can be used to bound the distance +// between adjacent cell centers along the space-filling Hilbert curve for +// cells at any given level. +var ( + MinEdgeMetric = Metric{1, 2 * math.Sqrt2 / 3} + AvgEdgeMetric = Metric{1, 1.459213746386106062} + MaxEdgeMetric = Metric{1, MaxAngleSpanMetric.Deriv} + + // MaxEdgeAspect is the maximum edge aspect ratio over all cells at any level, + // where the edge aspect ratio of a cell is defined as the ratio of its longest + // edge length to its shortest edge length. + MaxEdgeAspect = 1.442615274452682920 + + MinAreaMetric = Metric{2, 8 * math.Sqrt2 / 9} + AvgAreaMetric = Metric{2, 4 * math.Pi / 6} + MaxAreaMetric = Metric{2, 2.635799256963161491} +) + +// The maximum diagonal is also the maximum diameter of any cell, +// and also the maximum geometric width (see the comment for widths). For +// example, the distance from an arbitrary point to the closest cell center +// at a given level is at most half the maximum diagonal length. +var ( + MinDiagMetric = Metric{1, 8 * math.Sqrt2 / 9} + AvgDiagMetric = Metric{1, 2.060422738998471683} + MaxDiagMetric = Metric{1, 2.438654594434021032} + + // MaxDiagAspect is the maximum diagonal aspect ratio over all cells at any + // level, where the diagonal aspect ratio of a cell is defined as the ratio + // of its longest diagonal length to its shortest diagonal length. + MaxDiagAspect = math.Sqrt(3) +) + +// Value returns the value of the metric at the given level. +func (m Metric) Value(level int) float64 { + return math.Ldexp(m.Deriv, -m.Dim*level) +} + +// MinLevel returns the minimum level such that the metric is at most +// the given value, or maxLevel (30) if there is no such level. +// +// For example, MinLevel(0.1) returns the minimum level such that all cell diagonal +// lengths are 0.1 or smaller. The returned value is always a valid level. +// +// In C++, this is called GetLevelForMaxValue. +func (m Metric) MinLevel(val float64) int { + if val < 0 { + return maxLevel + } + + level := -(math.Ilogb(val/m.Deriv) >> uint(m.Dim-1)) + if level > maxLevel { + level = maxLevel + } + if level < 0 { + level = 0 + } + return level +} + +// MaxLevel returns the maximum level such that the metric is at least +// the given value, or zero if there is no such level. +// +// For example, MaxLevel(0.1) returns the maximum level such that all cells have a +// minimum width of 0.1 or larger. The returned value is always a valid level. +// +// In C++, this is called GetLevelForMinValue. 
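+//
+// For instance, to pick the finest level at which every cell is still at
+// least r across (a sketch; r is an s1.Angle):
+//
+//	level := MinWidthMetric.MaxLevel(r.Radians())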
+func (m Metric) MaxLevel(val float64) int { + if val <= 0 { + return maxLevel + } + + level := math.Ilogb(m.Deriv/val) >> uint(m.Dim-1) + if level > maxLevel { + level = maxLevel + } + if level < 0 { + level = 0 + } + return level +} + +// ClosestLevel returns the level at which the metric has approximately the given +// value. The return value is always a valid level. For example, +// AvgEdgeMetric.ClosestLevel(0.1) returns the level at which the average cell edge +// length is approximately 0.1. +func (m Metric) ClosestLevel(val float64) int { + x := math.Sqrt2 + if m.Dim == 2 { + x = 2 + } + return m.MinLevel(x * val) +} diff --git a/vendor/github.com/golang/geo/s2/min_distance_targets.go b/vendor/github.com/golang/geo/s2/min_distance_targets.go new file mode 100644 index 000000000..b1948b203 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/min_distance_targets.go @@ -0,0 +1,362 @@ +// Copyright 2019 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/s1" +) + +// minDistance implements distance interface to find closest distance types. +type minDistance s1.ChordAngle + +func (m minDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) } +func (m minDistance) zero() distance { return minDistance(0) } +func (m minDistance) negative() distance { return minDistance(s1.NegativeChordAngle) } +func (m minDistance) infinity() distance { return minDistance(s1.InfChordAngle()) } +func (m minDistance) less(other distance) bool { return m.chordAngle() < other.chordAngle() } +func (m minDistance) sub(other distance) distance { + return minDistance(m.chordAngle() - other.chordAngle()) +} +func (m minDistance) chordAngleBound() s1.ChordAngle { + return m.chordAngle().Expanded(m.chordAngle().MaxAngleError()) +} + +// updateDistance updates its own value if the other value is less() than it is, +// and reports if it updated. +func (m minDistance) updateDistance(dist distance) (distance, bool) { + if dist.less(m) { + m = minDistance(dist.chordAngle()) + return m, true + } + return m, false +} + +func (m minDistance) fromChordAngle(o s1.ChordAngle) distance { + return minDistance(o) +} + +// MinDistanceToPointTarget is a type for computing the minimum distance to a Point. +type MinDistanceToPointTarget struct { + point Point + dist distance +} + +// NewMinDistanceToPointTarget returns a new target for the given Point. 
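+//
+// A sketch of typical use, assuming an EdgeQuery built with
+// NewClosestEdgeQuery over a ShapeIndex index (pt is any Point):
+//
+//	q := NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions())
+//	d := q.Distance(NewMinDistanceToPointTarget(pt))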
+func NewMinDistanceToPointTarget(point Point) *MinDistanceToPointTarget {
+	m := minDistance(0)
+	return &MinDistanceToPointTarget{point: point, dist: &m}
+}
+
+func (m *MinDistanceToPointTarget) capBound() Cap {
+	return CapFromCenterChordAngle(m.point, s1.ChordAngle(0))
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	var ok bool
+	dist, ok = dist.updateDistance(minDistance(ChordAngleBetweenPoints(p, m.point)))
+	return dist, ok
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	if d, ok := UpdateMinDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok {
+		dist, _ = dist.updateDistance(minDistance(d))
+		return dist, true
+	}
+	return dist, false
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	var ok bool
+	dist, ok = dist.updateDistance(minDistance(cell.Distance(m.point)))
+	return dist, ok
+}
+
+func (m *MinDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// For closest points, we visit the polygons that contain the target
+	// point itself. These are the polygons whose distance to the target
+	// is minDistance.zero()
+	q := NewContainsPointQuery(index, VertexModelSemiOpen)
+	return q.visitContainingShapes(m.point, func(shape Shape) bool {
+		return v(shape, m.point)
+	})
+}
+
+func (m *MinDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MinDistanceToPointTarget) maxBruteForceIndexSize() int           { return 120 }
+func (m *MinDistanceToPointTarget) distance() distance                    { return m.dist }
+
+// ----------------------------------------------------------
+
+// MinDistanceToEdgeTarget is a type for computing the minimum distance to an Edge.
+type MinDistanceToEdgeTarget struct {
+	e    Edge
+	dist distance
+}
+
+// NewMinDistanceToEdgeTarget returns a new target for the given Edge.
+func NewMinDistanceToEdgeTarget(e Edge) *MinDistanceToEdgeTarget {
+	m := minDistance(0)
+	return &MinDistanceToEdgeTarget{e: e, dist: m}
+}
+
+// capBound returns a Cap that bounds the target edge; it is centered at the
+// edge midpoint with a radius of half the edge length.
+func (m *MinDistanceToEdgeTarget) capBound() Cap {
+	// The following computes a radius equal to half the edge length in an
+	// efficient and numerically stable way.
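+	// With d2 = 4*sin^2(theta/2) being the squared chord length of the edge
+	// angle theta, sqrt(1-0.25*d2) = cos(theta/2), and the half-angle identity
+	// gives the squared chord length of theta/2 as
+	// r2 = 2*(1-cos(theta/2)) = (0.5*d2) / (1 + sqrt(1-0.25*d2)).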
+ d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1)) + r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2)) + return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}, s1.ChordAngleFromSquaredLength(r2)) +} + +func (m *MinDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + if d, ok := UpdateMinDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok { + dist, _ = dist.updateDistance(minDistance(d)) + return dist, true + } + return dist, false +} + +func (m *MinDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + if d, ok := updateEdgePairMinDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok { + dist, _ = dist.updateDistance(minDistance(d)) + return dist, true + } + return dist, false +} + +func (m *MinDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + return dist.updateDistance(minDistance(cell.DistanceToEdge(m.e.V0, m.e.V1))) +} + +func (m *MinDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // We test the center of the edge in order to ensure that edge targets AB + // and BA yield identical results (which is not guaranteed by the API but + // users might expect). Other options would be to test both endpoints, or + // return different results for AB and BA in some cases. + target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}) + return target.visitContainingShapes(index, v) +} + +func (m *MinDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } +func (m *MinDistanceToEdgeTarget) maxBruteForceIndexSize() int { return 60 } +func (m *MinDistanceToEdgeTarget) distance() distance { return m.dist } + +// ---------------------------------------------------------- + +// MinDistanceToCellTarget is a type for computing the minimum distance to a Cell. +type MinDistanceToCellTarget struct { + cell Cell + dist distance +} + +// NewMinDistanceToCellTarget returns a new target for the given Cell. +func NewMinDistanceToCellTarget(cell Cell) *MinDistanceToCellTarget { + m := minDistance(0) + return &MinDistanceToCellTarget{cell: cell, dist: m} +} + +func (m *MinDistanceToCellTarget) capBound() Cap { + return m.cell.CapBound() +} + +func (m *MinDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { + return dist.updateDistance(minDistance(m.cell.Distance(p))) +} + +func (m *MinDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { + return dist.updateDistance(minDistance(m.cell.DistanceToEdge(edge.V0, edge.V1))) +} + +func (m *MinDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + return dist.updateDistance(minDistance(m.cell.DistanceToCell(cell))) +} + +func (m *MinDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // The simplest approach is simply to return the polygons that contain the + // cell center. Alternatively, if the index cell is smaller than the target + // cell then we could return all polygons that are present in the + // shapeIndexCell, but since the index is built conservatively this may + // include some polygons that don't quite intersect the cell. So we would + // either need to recheck for intersection more accurately, or weaken the + // VisitContainingShapes contract so that it only guarantees approximate + // intersection, neither of which seems like a good tradeoff. 
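+	// Delegating to a point target for the cell center reuses the point
+	// containment logic above.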
+ target := NewMinDistanceToPointTarget(m.cell.Center()) + return target.visitContainingShapes(index, v) +} +func (m *MinDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } +func (m *MinDistanceToCellTarget) maxBruteForceIndexSize() int { return 30 } +func (m *MinDistanceToCellTarget) distance() distance { return m.dist } + +// ---------------------------------------------------------- + +/* +// MinDistanceToCellUnionTarget is a type for computing the minimum distance to a CellUnion. +type MinDistanceToCellUnionTarget struct { + cu CellUnion + query *ClosestCellQuery + dist distance +} + +// NewMinDistanceToCellUnionTarget returns a new target for the given CellUnion. +func NewMinDistanceToCellUnionTarget(cu CellUnion) *MinDistanceToCellUnionTarget { + m := minDistance(0) + return &MinDistanceToCellUnionTarget{cu: cu, dist: m} +} + +func (m *MinDistanceToCellUnionTarget) capBound() Cap { + return m.cu.CapBound() +} + +func (m *MinDistanceToCellUnionTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { + m.query.opts.DistanceLimit = dist.chordAngle() + target := NewMinDistanceToPointTarget(p) + r := m.query.findEdge(target) + if r.ShapeID < 0 { + return dist, false + } + return minDistance(r.Distance), true +} + +func (m *MinDistanceToCellUnionTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { + // We test the center of the edge in order to ensure that edge targets AB + // and BA yield identical results (which is not guaranteed by the API but + // users might expect). Other options would be to test both endpoints, or + // return different results for AB and BA in some cases. + target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}) + return target.visitContainingShapes(index, v) +} +func (m *MinDistanceToCellUnionTarget) setMaxError(maxErr s1.ChordAngle) bool { + m.query.opts.MaxError = maxErr + return true +} +func (m *MinDistanceToCellUnionTarget) maxBruteForceIndexSize() int { return 30 } +func (m *MinDistanceToCellUnionTarget) distance() distance { return m.dist } +*/ + +// ---------------------------------------------------------- + +// MinDistanceToShapeIndexTarget is a type for computing the minimum distance to a ShapeIndex. +type MinDistanceToShapeIndexTarget struct { + index *ShapeIndex + query *EdgeQuery + dist distance +} + +// NewMinDistanceToShapeIndexTarget returns a new target for the given ShapeIndex. +func NewMinDistanceToShapeIndexTarget(index *ShapeIndex) *MinDistanceToShapeIndexTarget { + m := minDistance(0) + return &MinDistanceToShapeIndexTarget{ + index: index, + dist: m, + query: NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions()), + } +} + +func (m *MinDistanceToShapeIndexTarget) capBound() Cap { + // TODO(roberts): Depends on ShapeIndexRegion existing. 
+	// c := makeS2ShapeIndexRegion(m.index).CapBound()
+	// return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius())
+	panic("not implemented yet")
+}
+
+func (m *MinDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+	m.query.opts.distanceLimit = dist.chordAngle()
+	target := NewMinDistanceToPointTarget(p)
+	r := m.query.findEdge(target, m.query.opts)
+	if r.shapeID < 0 {
+		return dist, false
+	}
+	return r.distance, true
+}
+
+func (m *MinDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+	m.query.opts.distanceLimit = dist.chordAngle()
+	target := NewMinDistanceToEdgeTarget(edge)
+	r := m.query.findEdge(target, m.query.opts)
+	if r.shapeID < 0 {
+		return dist, false
+	}
+	return r.distance, true
+}
+
+func (m *MinDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+	m.query.opts.distanceLimit = dist.chordAngle()
+	target := NewMinDistanceToCellTarget(cell)
+	r := m.query.findEdge(target, m.query.opts)
+	if r.shapeID < 0 {
+		return dist, false
+	}
+	return r.distance, true
+}
+
+// For target types consisting of multiple connected components (such as this one),
+// this method should return the polygons containing *any* connected component.
+// (It is sufficient to test containment of one vertex per connected component,
+// since this allows us to also return any polygon whose boundary has
+// distance.zero() to the target.)
+func (m *MinDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+	// It is sufficient to find the set of chain starts in the target index
+	// (i.e., one vertex per connected component of edges) that are contained by
+	// the query index, except for one special case to handle full polygons.
+	//
+	// TODO(roberts): Do this by merge-joining the two ShapeIndexes.
+	for _, shape := range m.index.shapes {
+		numChains := shape.NumChains()
+		// Shapes that don't have any edges require a special case (below).
+		testedPoint := false
+		for c := 0; c < numChains; c++ {
+			chain := shape.Chain(c)
+			if chain.Length == 0 {
+				continue
+			}
+			testedPoint = true
+			target := NewMinDistanceToPointTarget(shape.ChainEdge(c, 0).V0)
+			if !target.visitContainingShapes(index, v) {
+				return false
+			}
+		}
+		if !testedPoint {
+			// Special case to handle full polygons.
+			ref := shape.ReferencePoint()
+			if !ref.Contained {
+				continue
+			}
+			target := NewMinDistanceToPointTarget(ref.Point)
+			if !target.visitContainingShapes(index, v) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func (m *MinDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool {
+	m.query.opts.maxError = maxErr
+	return true
+}
+func (m *MinDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 25 }
+func (m *MinDistanceToShapeIndexTarget) distance() distance         { return m.dist }
+func (m *MinDistanceToShapeIndexTarget) setIncludeInteriors(b bool) {
+	m.query.opts.includeInteriors = b
+}
+func (m *MinDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.useBruteForce = b }
+
+// TODO(roberts): Remaining methods
+//
+// func (m *MinDistanceToShapeIndexTarget) capBound() Cap {
+// CellUnionTarget
diff --git a/vendor/github.com/golang/geo/s2/nthderivative.go b/vendor/github.com/golang/geo/s2/nthderivative.go
new file mode 100644
index 000000000..73445d6c9
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/nthderivative.go
@@ -0,0 +1,88 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +// nthDerivativeCoder provides Nth Derivative Coding. +// (In signal processing disciplines, this is known as N-th Delta Coding.) +// +// Good for varint coding integer sequences with polynomial trends. +// +// Instead of coding a sequence of values directly, code its nth-order discrete +// derivative. Overflow in integer addition and subtraction makes this a +// lossless transform. +// +// constant linear quadratic +// trend trend trend +// / \ / \ / \_ +// input |0 0 0 0 1 2 3 4 9 16 25 36 +// 0th derivative(identity) |0 0 0 0 1 2 3 4 9 16 25 36 +// 1st derivative(delta coding) | 0 0 0 1 1 1 1 5 7 9 11 +// 2nd derivative(linear prediction) | 0 0 1 0 0 0 4 2 2 2 +// ------------------------------------- +// 0 1 2 3 4 5 6 7 8 9 10 11 +// n in sequence +// +// Higher-order codings can break even or be detrimental on other sequences. +// +// random oscillating +// / \ / \_ +// input |5 9 6 1 8 8 2 -2 4 -4 6 -6 +// 0th derivative(identity) |5 9 6 1 8 8 2 -2 4 -4 6 -6 +// 1st derivative(delta coding) | 4 -3 -5 7 0 -6 -4 6 -8 10 -12 +// 2nd derivative(linear prediction) | -7 -2 12 -7 -6 2 10 -14 18 -22 +// --------------------------------------- +// 0 1 2 3 4 5 6 7 8 9 10 11 +// n in sequence +// +// Note that the nth derivative isn't available until sequence item n. Earlier +// values are coded at lower order. For the above table, read 5 4 -7 -2 12 ... +type nthDerivativeCoder struct { + n, m int + memory [10]int32 +} + +// newNthDerivativeCoder returns a new coder, where n is the derivative order of the encoder (the N in NthDerivative). +// n must be within [0,10]. +func newNthDerivativeCoder(n int) *nthDerivativeCoder { + c := &nthDerivativeCoder{n: n} + if n < 0 || n > len(c.memory) { + panic("unsupported n. Must be within [0,10].") + } + return c +} + +func (c *nthDerivativeCoder) encode(k int32) int32 { + for i := 0; i < c.m; i++ { + delta := k - c.memory[i] + c.memory[i] = k + k = delta + } + if c.m < c.n { + c.memory[c.m] = k + c.m++ + } + return k +} + +func (c *nthDerivativeCoder) decode(k int32) int32 { + if c.m < c.n { + c.m++ + } + for i := c.m - 1; i >= 0; i-- { + c.memory[i] += k + k = c.memory[i] + } + return k +} diff --git a/vendor/github.com/golang/geo/s2/paddedcell.go b/vendor/github.com/golang/geo/s2/paddedcell.go new file mode 100644 index 000000000..ac304a6cc --- /dev/null +++ b/vendor/github.com/golang/geo/s2/paddedcell.go @@ -0,0 +1,252 @@ +// Copyright 2016 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "github.com/golang/geo/r1" + "github.com/golang/geo/r2" +) + +// PaddedCell represents a Cell whose (u,v)-range has been expanded on +// all sides by a given amount of "padding". Unlike Cell, its methods and +// representation are optimized for clipping edges against Cell boundaries +// to determine which cells are intersected by a given set of edges. +type PaddedCell struct { + id CellID + padding float64 + bound r2.Rect + middle r2.Rect // A rect in (u, v)-space that belongs to all four children. + iLo, jLo int // Minimum (i,j)-coordinates of this cell before padding + orientation int // Hilbert curve orientation of this cell. + level int +} + +// PaddedCellFromCellID constructs a padded cell with the given padding. +func PaddedCellFromCellID(id CellID, padding float64) *PaddedCell { + p := &PaddedCell{ + id: id, + padding: padding, + middle: r2.EmptyRect(), + } + + // Fast path for constructing a top-level face (the most common case). + if id.isFace() { + limit := padding + 1 + p.bound = r2.Rect{r1.Interval{-limit, limit}, r1.Interval{-limit, limit}} + p.middle = r2.Rect{r1.Interval{-padding, padding}, r1.Interval{-padding, padding}} + p.orientation = id.Face() & 1 + return p + } + + _, p.iLo, p.jLo, p.orientation = id.faceIJOrientation() + p.level = id.Level() + p.bound = ijLevelToBoundUV(p.iLo, p.jLo, p.level).ExpandedByMargin(padding) + ijSize := sizeIJ(p.level) + p.iLo &= -ijSize + p.jLo &= -ijSize + + return p +} + +// PaddedCellFromParentIJ constructs the child of parent with the given (i,j) index. +// The four child cells have indices of (0,0), (0,1), (1,0), (1,1), where the i and j +// indices correspond to increasing u- and v-values respectively. +func PaddedCellFromParentIJ(parent *PaddedCell, i, j int) *PaddedCell { + // Compute the position and orientation of the child incrementally from the + // orientation of the parent. + pos := ijToPos[parent.orientation][2*i+j] + + p := &PaddedCell{ + id: parent.id.Children()[pos], + padding: parent.padding, + bound: parent.bound, + orientation: parent.orientation ^ posToOrientation[pos], + level: parent.level + 1, + middle: r2.EmptyRect(), + } + + ijSize := sizeIJ(p.level) + p.iLo = parent.iLo + i*ijSize + p.jLo = parent.jLo + j*ijSize + + // For each child, one corner of the bound is taken directly from the parent + // while the diagonally opposite corner is taken from middle(). + middle := parent.Middle() + if i == 1 { + p.bound.X.Lo = middle.X.Lo + } else { + p.bound.X.Hi = middle.X.Hi + } + if j == 1 { + p.bound.Y.Lo = middle.Y.Lo + } else { + p.bound.Y.Hi = middle.Y.Hi + } + + return p +} + +// CellID returns the CellID this padded cell represents. +func (p PaddedCell) CellID() CellID { + return p.id +} + +// Padding returns the amount of padding on this cell. +func (p PaddedCell) Padding() float64 { + return p.padding +} + +// Level returns the level this cell is at. +func (p PaddedCell) Level() int { + return p.level +} + +// Center returns the center of this cell. +func (p PaddedCell) Center() Point { + ijSize := sizeIJ(p.level) + si := uint32(2*p.iLo + ijSize) + ti := uint32(2*p.jLo + ijSize) + return Point{faceSiTiToXYZ(p.id.Face(), si, ti).Normalize()} +} + +// Middle returns the rectangle in the middle of this cell that belongs to +// all four of its children in (u,v)-space. 
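+// The rect spans 2*padding in each direction, centered on the cell's (u,v)
+// center, and is computed lazily on first use.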
+func (p *PaddedCell) Middle() r2.Rect { + // We compute this field lazily because it is not needed the majority of the + // time (i.e., for cells where the recursion terminates). + if p.middle.IsEmpty() { + ijSize := sizeIJ(p.level) + u := stToUV(siTiToST(uint32(2*p.iLo + ijSize))) + v := stToUV(siTiToST(uint32(2*p.jLo + ijSize))) + p.middle = r2.Rect{ + r1.Interval{u - p.padding, u + p.padding}, + r1.Interval{v - p.padding, v + p.padding}, + } + } + return p.middle +} + +// Bound returns the bounds for this cell in (u,v)-space including padding. +func (p PaddedCell) Bound() r2.Rect { + return p.bound +} + +// ChildIJ returns the (i,j) coordinates for the child cell at the given traversal +// position. The traversal position corresponds to the order in which child +// cells are visited by the Hilbert curve. +func (p PaddedCell) ChildIJ(pos int) (i, j int) { + ij := posToIJ[p.orientation][pos] + return ij >> 1, ij & 1 +} + +// EntryVertex return the vertex where the space-filling curve enters this cell. +func (p PaddedCell) EntryVertex() Point { + // The curve enters at the (0,0) vertex unless the axis directions are + // reversed, in which case it enters at the (1,1) vertex. + i := p.iLo + j := p.jLo + if p.orientation&invertMask != 0 { + ijSize := sizeIJ(p.level) + i += ijSize + j += ijSize + } + return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()} +} + +// ExitVertex returns the vertex where the space-filling curve exits this cell. +func (p PaddedCell) ExitVertex() Point { + // The curve exits at the (1,0) vertex unless the axes are swapped or + // inverted but not both, in which case it exits at the (0,1) vertex. + i := p.iLo + j := p.jLo + ijSize := sizeIJ(p.level) + if p.orientation == 0 || p.orientation == swapMask+invertMask { + i += ijSize + } else { + j += ijSize + } + return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()} +} + +// ShrinkToFit returns the smallest CellID that contains all descendants of this +// padded cell whose bounds intersect the given rect. For algorithms that use +// recursive subdivision to find the cells that intersect a particular object, this +// method can be used to skip all of the initial subdivision steps where only +// one child needs to be expanded. +// +// Note that this method is not the same as returning the smallest cell that contains +// the intersection of this cell with rect. Because of the padding, even if one child +// completely contains rect it is still possible that a neighboring child may also +// intersect the given rect. +// +// The provided Rect must intersect the bounds of this cell. +func (p *PaddedCell) ShrinkToFit(rect r2.Rect) CellID { + // Quick rejection test: if rect contains the center of this cell along + // either axis, then no further shrinking is possible. + if p.level == 0 { + // Fast path (most calls to this function start with a face cell). + if rect.X.Contains(0) || rect.Y.Contains(0) { + return p.id + } + } + + ijSize := sizeIJ(p.level) + if rect.X.Contains(stToUV(siTiToST(uint32(2*p.iLo+ijSize)))) || + rect.Y.Contains(stToUV(siTiToST(uint32(2*p.jLo+ijSize)))) { + return p.id + } + + // Otherwise we expand rect by the given padding on all sides and find + // the range of coordinates that it spans along the i- and j-axes. We then + // compute the highest bit position at which the min and max coordinates + // differ. This corresponds to the first cell level at which at least two + // children intersect rect. 
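+	// For example, if the padded query spans i-coordinates 9..11 (binary
+	// 1001..1011), then iMin ^ iMax = 0010: the highest differing bit is bit 1,
+	// so the smallest common ancestor cell is two levels above the leaves.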
+ + // Increase the padding to compensate for the error in uvToST. + // (The constant below is a provable upper bound on the additional error.) + padded := rect.ExpandedByMargin(p.padding + 1.5*dblEpsilon) + iMin, jMin := p.iLo, p.jLo // Min i- or j- coordinate spanned by padded + var iXor, jXor int // XOR of the min and max i- or j-coordinates + + if iMin < stToIJ(uvToST(padded.X.Lo)) { + iMin = stToIJ(uvToST(padded.X.Lo)) + } + if a, b := p.iLo+ijSize-1, stToIJ(uvToST(padded.X.Hi)); a <= b { + iXor = iMin ^ a + } else { + iXor = iMin ^ b + } + + if jMin < stToIJ(uvToST(padded.Y.Lo)) { + jMin = stToIJ(uvToST(padded.Y.Lo)) + } + if a, b := p.jLo+ijSize-1, stToIJ(uvToST(padded.Y.Hi)); a <= b { + jXor = jMin ^ a + } else { + jXor = jMin ^ b + } + + // Compute the highest bit position where the two i- or j-endpoints differ, + // and then choose the cell level that includes both of these endpoints. So + // if both pairs of endpoints are equal we choose maxLevel; if they differ + // only at bit 0, we choose (maxLevel - 1), and so on. + levelMSB := uint64(((iXor | jXor) << 1) + 1) + level := maxLevel - findMSBSetNonZero64(levelMSB) + if level <= p.level { + return p.id + } + + return cellIDFromFaceIJ(p.id.Face(), iMin, jMin).Parent(level) +} diff --git a/vendor/github.com/golang/geo/s2/point.go b/vendor/github.com/golang/geo/s2/point.go new file mode 100644 index 000000000..89e7ae0ed --- /dev/null +++ b/vendor/github.com/golang/geo/s2/point.go @@ -0,0 +1,258 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "fmt" + "io" + "math" + "sort" + + "github.com/golang/geo/r3" + "github.com/golang/geo/s1" +) + +// Point represents a point on the unit sphere as a normalized 3D vector. +// Fields should be treated as read-only. Use one of the factory methods for creation. +type Point struct { + r3.Vector +} + +// sortPoints sorts the slice of Points in place. +func sortPoints(e []Point) { + sort.Sort(points(e)) +} + +// points implements the Sort interface for slices of Point. +type points []Point + +func (p points) Len() int { return len(p) } +func (p points) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p points) Less(i, j int) bool { return p[i].Cmp(p[j].Vector) == -1 } + +// PointFromCoords creates a new normalized point from coordinates. +// +// This always returns a valid point. If the given coordinates can not be normalized +// the origin point will be returned. +// +// This behavior is different from the C++ construction of a S2Point from coordinates +// (i.e. S2Point(x, y, z)) in that in C++ they do not Normalize. +func PointFromCoords(x, y, z float64) Point { + if x == 0 && y == 0 && z == 0 { + return OriginPoint() + } + return Point{r3.Vector{x, y, z}.Normalize()} +} + +// OriginPoint returns a unique "origin" on the sphere for operations that need a fixed +// reference point. In particular, this is the "point at infinity" used for +// point-in-polygon testing (by counting the number of edge crossings). 
+//
+// It should *not* be a point that is commonly used in edge tests in order
+// to avoid triggering code to handle degenerate cases (this rules out the
+// north and south poles). It should also not be on the boundary of any
+// low-level S2Cell for the same reason.
+func OriginPoint() Point {
+	return Point{r3.Vector{-0.0099994664350250197, 0.0025924542609324121, 0.99994664350250195}}
+}
+
+// PointCross returns a Point that is orthogonal to both p and op. This is similar to
+// p.Cross(op) (the true cross product) except that it does a better job of
+// ensuring orthogonality when p is nearly parallel to op, and it returns a
+// non-zero result even when p == op or p == -op; the result is always a Point.
+//
+// It satisfies the following properties (f == PointCross):
+//
+// (1) f(p, op) != 0 for all p, op
+// (2) f(op,p) == -f(p,op) unless p == op or p == -op
+// (3) f(-p,op) == -f(p,op) unless p == op or p == -op
+// (4) f(p,-op) == -f(p,op) unless p == op or p == -op
+func (p Point) PointCross(op Point) Point {
+	// NOTE(dnadasi): In the C++ API the equivalent method here was known as "RobustCrossProd",
+	// but PointCross more accurately describes how this method is used.
+	x := p.Add(op.Vector).Cross(op.Sub(p.Vector))
+
+	// Compare exactly to the 0 vector.
+	if x == (r3.Vector{}) {
+		// The only result that makes sense mathematically is to return zero, but
+		// we find it more convenient to return an arbitrary orthogonal vector.
+		return Point{p.Ortho()}
+	}
+
+	return Point{x}
+}
+
+// OrderedCCW returns true if the edges OA, OB, and OC are encountered in that
+// order while sweeping CCW around the point O.
+//
+// You can think of this as testing whether A <= B <= C with respect to the
+// CCW ordering around O that starts at A, or equivalently, whether B is
+// contained in the range of angles (inclusive) that starts at A and extends
+// CCW to C. Properties:
+//
+// (1) If OrderedCCW(a,b,c,o) && OrderedCCW(b,a,c,o), then a == b
+// (2) If OrderedCCW(a,b,c,o) && OrderedCCW(a,c,b,o), then b == c
+// (3) If OrderedCCW(a,b,c,o) && OrderedCCW(c,b,a,o), then a == b == c
+// (4) If a == b or b == c, then OrderedCCW(a,b,c,o) is true
+// (5) Otherwise if a == c, then OrderedCCW(a,b,c,o) is false
+func OrderedCCW(a, b, c, o Point) bool {
+	sum := 0
+	if RobustSign(b, o, a) != Clockwise {
+		sum++
+	}
+	if RobustSign(c, o, b) != Clockwise {
+		sum++
+	}
+	if RobustSign(a, o, c) == CounterClockwise {
+		sum++
+	}
+	return sum >= 2
+}
+
+// Distance returns the angle between two points.
+func (p Point) Distance(b Point) s1.Angle {
+	return p.Vector.Angle(b.Vector)
+}
+
+// ApproxEqual reports whether the two points are similar enough to be equal.
+func (p Point) ApproxEqual(other Point) bool {
+	return p.approxEqual(other, s1.Angle(epsilon))
+}
+
+// approxEqual reports whether the two points are within the given epsilon.
+func (p Point) approxEqual(other Point, eps s1.Angle) bool {
+	return p.Vector.Angle(other.Vector) <= eps
+}
+
+// ChordAngleBetweenPoints constructs a ChordAngle corresponding to the distance
+// between the two given points. The points must be unit length.
+func ChordAngleBetweenPoints(x, y Point) s1.ChordAngle {
+	return s1.ChordAngle(math.Min(4.0, x.Sub(y.Vector).Norm2()))
+}
+
+// regularPoints generates a slice of points shaped as a regular polygon with
+// numVertices vertices, all located on a circle of the specified angular radius
+// around the center. The radius is the actual distance from center to each vertex.
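+//
+// For example, an approximate hexagon of angular radius one degree around
+// center could be built with (a sketch):
+//
+//	verts := regularPoints(center, s1.Degree, 6)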
+func regularPoints(center Point, radius s1.Angle, numVertices int) []Point { + return regularPointsForFrame(getFrame(center), radius, numVertices) +} + +// regularPointsForFrame generates a slice of points shaped as a regular polygon +// with numVertices vertices, all on a circle of the specified angular radius around +// the center. The radius is the actual distance from the center to each vertex. +func regularPointsForFrame(frame matrix3x3, radius s1.Angle, numVertices int) []Point { + // We construct the loop in the given frame coordinates, with the center at + // (0, 0, 1). For a loop of radius r, the loop vertices have the form + // (x, y, z) where x^2 + y^2 = sin(r) and z = cos(r). The distance on the + // sphere (arc length) from each vertex to the center is acos(cos(r)) = r. + z := math.Cos(radius.Radians()) + r := math.Sin(radius.Radians()) + radianStep := 2 * math.Pi / float64(numVertices) + var vertices []Point + + for i := 0; i < numVertices; i++ { + angle := float64(i) * radianStep + p := Point{r3.Vector{r * math.Cos(angle), r * math.Sin(angle), z}} + vertices = append(vertices, Point{fromFrame(frame, p).Normalize()}) + } + + return vertices +} + +// CapBound returns a bounding cap for this point. +func (p Point) CapBound() Cap { + return CapFromPoint(p) +} + +// RectBound returns a bounding latitude-longitude rectangle from this point. +func (p Point) RectBound() Rect { + return RectFromLatLng(LatLngFromPoint(p)) +} + +// ContainsCell returns false as Points do not contain any other S2 types. +func (p Point) ContainsCell(c Cell) bool { return false } + +// IntersectsCell reports whether this Point intersects the given cell. +func (p Point) IntersectsCell(c Cell) bool { + return c.ContainsPoint(p) +} + +// ContainsPoint reports if this Point contains the other Point. +// (This method is named to satisfy the Region interface.) +func (p Point) ContainsPoint(other Point) bool { + return p.Contains(other) +} + +// CellUnionBound computes a covering of the Point. +func (p Point) CellUnionBound() []CellID { + return p.CapBound().CellUnionBound() +} + +// Contains reports if this Point contains the other Point. +// (This method matches all other s2 types where the reflexive Contains +// method does not contain the type's name.) +func (p Point) Contains(other Point) bool { return p == other } + +// Encode encodes the Point. +func (p Point) Encode(w io.Writer) error { + e := &encoder{w: w} + p.encode(e) + return e.err +} + +func (p Point) encode(e *encoder) { + e.writeInt8(encodingVersion) + e.writeFloat64(p.X) + e.writeFloat64(p.Y) + e.writeFloat64(p.Z) +} + +// Decode decodes the Point. +func (p *Point) Decode(r io.Reader) error { + d := &decoder{r: asByteReader(r)} + p.decode(d) + return d.err +} + +func (p *Point) decode(d *decoder) { + version := d.readInt8() + if d.err != nil { + return + } + if version != encodingVersion { + d.err = fmt.Errorf("only version %d is supported", encodingVersion) + return + } + p.X = d.readFloat64() + p.Y = d.readFloat64() + p.Z = d.readFloat64() +} + +// Rotate the given point about the given axis by the given angle. p and +// axis must be unit length; angle has no restrictions (e.g., it can be +// positive, negative, greater than 360 degrees, etc). +func Rotate(p, axis Point, angle s1.Angle) Point { + // Let M be the plane through P that is perpendicular to axis, and let + // center be the point where M intersects axis. 
We construct a + // right-handed orthogonal frame (dx, dy, center) such that dx is the + // vector from center to P, and dy has the same length as dx. The + // result can then be expressed as (cos(angle)*dx + sin(angle)*dy + center). + center := axis.Mul(p.Dot(axis.Vector)) + dx := p.Sub(center) + dy := axis.Cross(p.Vector) + // Mathematically the result is unit length, but normalization is necessary + // to ensure that numerical errors don't accumulate. + return Point{dx.Mul(math.Cos(angle.Radians())).Add(dy.Mul(math.Sin(angle.Radians()))).Add(center).Normalize()} +} diff --git a/vendor/github.com/golang/geo/s2/point_measures.go b/vendor/github.com/golang/geo/s2/point_measures.go new file mode 100644 index 000000000..6fa9b7ae4 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/point_measures.go @@ -0,0 +1,149 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/s1" +) + +// PointArea returns the area of triangle ABC. This method combines two different +// algorithms to get accurate results for both large and small triangles. +// The maximum error is about 5e-15 (about 0.25 square meters on the Earth's +// surface), the same as GirardArea below, but unlike that method it is +// also accurate for small triangles. Example: when the true area is 100 +// square meters, PointArea yields an error about 1 trillion times smaller than +// GirardArea. +// +// All points should be unit length, and no two points should be antipodal. +// The area is always positive. +func PointArea(a, b, c Point) float64 { + // This method is based on l'Huilier's theorem, + // + // tan(E/4) = sqrt(tan(s/2) tan((s-a)/2) tan((s-b)/2) tan((s-c)/2)) + // + // where E is the spherical excess of the triangle (i.e. its area), + // a, b, c are the side lengths, and + // s is the semiperimeter (a + b + c) / 2. + // + // The only significant source of error using l'Huilier's method is the + // cancellation error of the terms (s-a), (s-b), (s-c). This leads to a + // *relative* error of about 1e-16 * s / min(s-a, s-b, s-c). This compares + // to a relative error of about 1e-15 / E using Girard's formula, where E is + // the true area of the triangle. Girard's formula can be even worse than + // this for very small triangles, e.g. a triangle with a true area of 1e-30 + // might evaluate to 1e-5. + // + // So, we prefer l'Huilier's formula unless dmin < s * (0.1 * E), where + // dmin = min(s-a, s-b, s-c). This basically includes all triangles + // except for extremely long and skinny ones. + // + // Since we don't know E, we would like a conservative upper bound on + // the triangle area in terms of s and dmin. It's possible to show that + // E <= k1 * s * sqrt(s * dmin), where k1 = 2*sqrt(3)/Pi (about 1). + // Using this, it's easy to show that we should always use l'Huilier's + // method if dmin >= k2 * s^5, where k2 is about 1e-2. 
Furthermore, + // if dmin < k2 * s^5, the triangle area is at most k3 * s^4, where + // k3 is about 0.1. Since the best case error using Girard's formula + // is about 1e-15, this means that we shouldn't even consider it unless + // s >= 3e-4 or so. + sa := float64(b.Angle(c.Vector)) + sb := float64(c.Angle(a.Vector)) + sc := float64(a.Angle(b.Vector)) + s := 0.5 * (sa + sb + sc) + if s >= 3e-4 { + // Consider whether Girard's formula might be more accurate. + dmin := s - math.Max(sa, math.Max(sb, sc)) + if dmin < 1e-2*s*s*s*s*s { + // This triangle is skinny enough to use Girard's formula. + area := GirardArea(a, b, c) + if dmin < s*0.1*area { + return area + } + } + } + + // Use l'Huilier's formula. + return 4 * math.Atan(math.Sqrt(math.Max(0.0, math.Tan(0.5*s)*math.Tan(0.5*(s-sa))* + math.Tan(0.5*(s-sb))*math.Tan(0.5*(s-sc))))) +} + +// GirardArea returns the area of the triangle computed using Girard's formula. +// All points should be unit length, and no two points should be antipodal. +// +// This method is about twice as fast as PointArea() but has poor relative +// accuracy for small triangles. The maximum error is about 5e-15 (about +// 0.25 square meters on the Earth's surface) and the average error is about +// 1e-15. These bounds apply to triangles of any size, even as the maximum +// edge length of the triangle approaches 180 degrees. But note that for +// such triangles, tiny perturbations of the input points can change the +// true mathematical area dramatically. +func GirardArea(a, b, c Point) float64 { + // This is equivalent to the usual Girard's formula but is slightly more + // accurate, faster to compute, and handles a == b == c without a special + // case. PointCross is necessary to get good accuracy when two of + // the input points are very close together. + ab := a.PointCross(b) + bc := b.PointCross(c) + ac := a.PointCross(c) + + area := float64(ab.Angle(ac.Vector) - ab.Angle(bc.Vector) + bc.Angle(ac.Vector)) + if area < 0 { + area = 0 + } + return area +} + +// SignedArea returns a positive value for counterclockwise triangles and a negative +// value otherwise (similar to PointArea). +func SignedArea(a, b, c Point) float64 { + return float64(RobustSign(a, b, c)) * PointArea(a, b, c) +} + +// Angle returns the interior angle at the vertex B in the triangle ABC. The +// return value is always in the range [0, pi]. All points should be +// normalized. Ensures that Angle(a,b,c) == Angle(c,b,a) for all a,b,c. +// +// The angle is undefined if A or C is diametrically opposite from B, and +// becomes numerically unstable as the length of edge AB or BC approaches +// 180 degrees. +func Angle(a, b, c Point) s1.Angle { + // PointCross is necessary to get good accuracy when two of the input + // points are very close together. + return a.PointCross(b).Angle(c.PointCross(b).Vector) +} + +// TurnAngle returns the exterior angle at vertex B in the triangle ABC. The +// return value is positive if ABC is counterclockwise and negative otherwise. +// If you imagine an ant walking from A to B to C, this is the angle that the +// ant turns at vertex B (positive = left = CCW, negative = right = CW). +// This quantity is also known as the "geodesic curvature" at B. +// +// Ensures that TurnAngle(a,b,c) == -TurnAngle(c,b,a) for all distinct +// a,b,c. The result is undefined if (a == b || b == c), but is either +// -Pi or Pi if (a == c). All points should be normalized. 
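A short sketch of the triangle measures above (PointArea, GirardArea, Angle, TurnAngle; the TurnAngle body follows below). The triangle coordinates are arbitrary:

    package main

    import (
        "fmt"

        "github.com/golang/geo/s2"
    )

    func main() {
        // A small triangle near the equator (arbitrary example coordinates).
        a := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 0))
        b := s2.PointFromLatLng(s2.LatLngFromDegrees(0, 1))
        c := s2.PointFromLatLng(s2.LatLngFromDegrees(1, 1))

        // PointArea stays accurate for small triangles like this one, where
        // GirardArea's relative accuracy is poor.
        fmt.Println(s2.PointArea(a, b, c), s2.GirardArea(a, b, c))

        // Interior angle at b, and the signed exterior (turning) angle at b.
        fmt.Println(s2.Angle(a, b, c).Degrees(), s2.TurnAngle(a, b, c).Degrees())
    }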
+func TurnAngle(a, b, c Point) s1.Angle { + // We use PointCross to get good accuracy when two points are very + // close together, and RobustSign to ensure that the sign is correct for + // turns that are close to 180 degrees. + angle := a.PointCross(b).Angle(b.PointCross(c).Vector) + + // Don't return RobustSign * angle because it is legal to have (a == c). + if RobustSign(a, b, c) == CounterClockwise { + return angle + } + return -angle +} diff --git a/vendor/github.com/golang/geo/s2/point_vector.go b/vendor/github.com/golang/geo/s2/point_vector.go new file mode 100644 index 000000000..f8e6f65b5 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/point_vector.go @@ -0,0 +1,42 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +// Shape interface enforcement +var ( + _ Shape = (*PointVector)(nil) +) + +// PointVector is a Shape representing a set of Points. Each point +// is represented as a degenerate edge with the same starting and ending +// vertices. +// +// This type is useful for adding a collection of points to an ShapeIndex. +// +// Its methods are on *PointVector due to implementation details of ShapeIndex. +type PointVector []Point + +func (p *PointVector) NumEdges() int { return len(*p) } +func (p *PointVector) Edge(i int) Edge { return Edge{(*p)[i], (*p)[i]} } +func (p *PointVector) ReferencePoint() ReferencePoint { return OriginReferencePoint(false) } +func (p *PointVector) NumChains() int { return len(*p) } +func (p *PointVector) Chain(i int) Chain { return Chain{i, 1} } +func (p *PointVector) ChainEdge(i, j int) Edge { return Edge{(*p)[i], (*p)[j]} } +func (p *PointVector) ChainPosition(e int) ChainPosition { return ChainPosition{e, 0} } +func (p *PointVector) Dimension() int { return 0 } +func (p *PointVector) IsEmpty() bool { return defaultShapeIsEmpty(p) } +func (p *PointVector) IsFull() bool { return defaultShapeIsFull(p) } +func (p *PointVector) typeTag() typeTag { return typeTagPointVector } +func (p *PointVector) privateInterface() {} diff --git a/vendor/github.com/golang/geo/s2/pointcompression.go b/vendor/github.com/golang/geo/s2/pointcompression.go new file mode 100644 index 000000000..018381799 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/pointcompression.go @@ -0,0 +1,319 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package s2
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/golang/geo/r3"
+)
+
+// maxEncodedVertices is the maximum number of vertices, in a row, to be encoded or decoded.
+// On decode, this defends against malicious encodings that try to make us exceed RAM.
+const maxEncodedVertices = 50000000
+
+// xyzFaceSiTi represents the XYZ and face, si, ti coordinates of a Point
+// and, if this point is equal to the center of a Cell, the level of this cell
+// (-1 otherwise). This is used for Loops and Polygons to store data in a more
+// compressed format.
+type xyzFaceSiTi struct {
+	xyz    Point
+	face   int
+	si, ti uint32
+	level  int
+}
+
+const derivativeEncodingOrder = 2
+
+func appendFace(faces []faceRun, face int) []faceRun {
+	if len(faces) == 0 || faces[len(faces)-1].face != face {
+		return append(faces, faceRun{face, 1})
+	}
+	faces[len(faces)-1].count++
+	return faces
+}
+
+// encodePointsCompressed uses an optimized compressed format to encode the given values.
+func encodePointsCompressed(e *encoder, vertices []xyzFaceSiTi, level int) {
+	var faces []faceRun
+	for _, v := range vertices {
+		faces = appendFace(faces, v.face)
+	}
+	encodeFaces(e, faces)
+
+	type piQi struct {
+		pi, qi uint32
+	}
+	verticesPiQi := make([]piQi, len(vertices))
+	for i, v := range vertices {
+		verticesPiQi[i] = piQi{siTitoPiQi(v.si, level), siTitoPiQi(v.ti, level)}
+	}
+	piCoder, qiCoder := newNthDerivativeCoder(derivativeEncodingOrder), newNthDerivativeCoder(derivativeEncodingOrder)
+	for i, v := range verticesPiQi {
+		f := encodePointCompressed
+		if i == 0 {
+			// The first point will be just the (pi, qi) coordinates
+			// of the Point. NthDerivativeCoder will not save anything
+			// in that case, so we encode in fixed format rather than varint
+			// to avoid the varint overhead.
+			f = encodeFirstPointFixedLength
+		}
+		f(e, v.pi, v.qi, level, piCoder, qiCoder)
+	}
+
+	var offCenter []int
+	for i, v := range vertices {
+		if v.level != level {
+			offCenter = append(offCenter, i)
+		}
+	}
+	e.writeUvarint(uint64(len(offCenter)))
+	for _, idx := range offCenter {
+		e.writeUvarint(uint64(idx))
+		e.writeFloat64(vertices[idx].xyz.X)
+		e.writeFloat64(vertices[idx].xyz.Y)
+		e.writeFloat64(vertices[idx].xyz.Z)
+	}
+}
+
+func encodeFirstPointFixedLength(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) {
+	// Do not ZigZagEncode the first point, since it cannot be negative.
+	codedPi, codedQi := piCoder.encode(int32(pi)), qiCoder.encode(int32(qi))
+	// Interleave to reduce overhead from two partial bytes to one.
+	interleaved := interleaveUint32(uint32(codedPi), uint32(codedQi))
+
+	// Write as little endian.
+	bytesRequired := (level + 7) / 8 * 2
+	for i := 0; i < bytesRequired; i++ {
+		e.writeUint8(uint8(interleaved))
+		interleaved >>= 8
+	}
+}
+
+// encodePointCompressed encodes points into e.
+// Given a sequence of Points assumed to be the center of level-k cells,
+// compresses it into a stream using the following method:
+// - decompose the points into (face, si, ti) tuples.
+// - run-length encode the faces, combining face number and count into a
+//    varint32. See the faceRun struct.
+// - right shift the (si, ti) to remove the part that's constant for all cells
+//    of level-k. The result is called the (pi, qi) space.
+// - 2nd derivative encode the pi and qi sequences (linear prediction) +// - zig-zag encode all derivative values but the first, which cannot be +// negative +// - interleave the zig-zag encoded values +// - encode the first interleaved value in a fixed length encoding +// (varint would make this value larger) +// - encode the remaining interleaved values as varint64s, as the +// derivative encoding should make the values small. +// In addition, provides a lossless method to compress a sequence of points even +// if some points are not the center of level-k cells. These points are stored +// exactly, using 3 double precision values, after the above encoded string, +// together with their index in the sequence (this leads to some redundancy - it +// is expected that only a small fraction of the points are not cell centers). +// +// To encode leaf cells, this requires 8 bytes for the first vertex plus +// an average of 3.8 bytes for each additional vertex, when computed on +// Google's geographic repository. +func encodePointCompressed(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) { + // ZigZagEncode, as varint requires the maximum number of bytes for + // negative numbers. + zzPi := zigzagEncode(piCoder.encode(int32(pi))) + zzQi := zigzagEncode(qiCoder.encode(int32(qi))) + // Interleave to reduce overhead from two partial bytes to one. + interleaved := interleaveUint32(zzPi, zzQi) + e.writeUvarint(interleaved) +} + +type faceRun struct { + face, count int +} + +func decodeFaceRun(d *decoder) faceRun { + faceAndCount := d.readUvarint() + ret := faceRun{ + face: int(faceAndCount % numFaces), + count: int(faceAndCount / numFaces), + } + if ret.count <= 0 && d.err == nil { + d.err = errors.New("non-positive count for face run") + } + return ret +} + +func decodeFaces(numVertices int, d *decoder) []faceRun { + var frs []faceRun + for nparsed := 0; nparsed < numVertices; { + fr := decodeFaceRun(d) + if d.err != nil { + return nil + } + frs = append(frs, fr) + nparsed += fr.count + } + return frs +} + +// encodeFaceRun encodes each faceRun as a varint64 with value numFaces * count + face. +func encodeFaceRun(e *encoder, fr faceRun) { + // It isn't necessary to encode the number of faces left for the last run, + // but since this would only help if there were more than 21 faces, it will + // be a small overall savings, much smaller than the bound encoding. + coded := numFaces*uint64(fr.count) + uint64(fr.face) + e.writeUvarint(coded) +} + +func encodeFaces(e *encoder, frs []faceRun) { + for _, fr := range frs { + encodeFaceRun(e, fr) + } +} + +type facesIterator struct { + faces []faceRun + // How often have we yet shown the current face? + numCurrentFaceShown int + curFace int +} + +func (fi *facesIterator) next() (ok bool) { + if len(fi.faces) == 0 { + return false + } + fi.curFace = fi.faces[0].face + fi.numCurrentFaceShown++ + + // Advance fs if needed. 
+	if fi.faces[0].count <= fi.numCurrentFaceShown {
+		fi.faces = fi.faces[1:]
+		fi.numCurrentFaceShown = 0
+	}
+
+	return true
+}
+
+func decodePointsCompressed(d *decoder, level int, target []Point) {
+	faces := decodeFaces(len(target), d)
+
+	piCoder := newNthDerivativeCoder(derivativeEncodingOrder)
+	qiCoder := newNthDerivativeCoder(derivativeEncodingOrder)
+
+	iter := facesIterator{faces: faces}
+	for i := range target {
+		decodeFn := decodePointCompressed
+		if i == 0 {
+			decodeFn = decodeFirstPointFixedLength
+		}
+		pi, qi := decodeFn(d, level, piCoder, qiCoder)
+		if ok := iter.next(); !ok && d.err == nil {
+			d.err = fmt.Errorf("ran out of faces at target %d", i)
+			return
+		}
+		target[i] = Point{facePiQitoXYZ(iter.curFace, pi, qi, level)}
+	}
+
+	numOffCenter := int(d.readUvarint())
+	if d.err != nil {
+		return
+	}
+	if numOffCenter > len(target) {
+		d.err = fmt.Errorf("numOffCenter = %d, should be at most len(target) = %d", numOffCenter, len(target))
+		return
+	}
+	for i := 0; i < numOffCenter; i++ {
+		idx := int(d.readUvarint())
+		if d.err != nil {
+			return
+		}
+		if idx >= len(target) {
+			d.err = fmt.Errorf("off center index = %d, should be < len(target) = %d", idx, len(target))
+			return
+		}
+		target[idx].X = d.readFloat64()
+		target[idx].Y = d.readFloat64()
+		target[idx].Z = d.readFloat64()
+	}
+}
+
+func decodeFirstPointFixedLength(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) {
+	bytesToRead := (level + 7) / 8 * 2
+	var interleaved uint64
+	for i := 0; i < bytesToRead; i++ {
+		rr := d.readUint8()
+		interleaved |= (uint64(rr) << uint(i*8))
+	}
+
+	piCoded, qiCoded := deinterleaveUint32(interleaved)
+
+	return uint32(piCoder.decode(int32(piCoded))), uint32(qiCoder.decode(int32(qiCoded)))
+}
+
+func zigzagEncode(x int32) uint32 {
+	return (uint32(x) << 1) ^ uint32(x>>31)
+}
+
+func zigzagDecode(x uint32) int32 {
+	return int32((x >> 1) ^ uint32((int32(x&1)<<31)>>31))
+}
+
+func decodePointCompressed(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) {
+	interleavedZigZagEncodedDerivPiQi := d.readUvarint()
+	piZigzag, qiZigzag := deinterleaveUint32(interleavedZigZagEncodedDerivPiQi)
+	return uint32(piCoder.decode(zigzagDecode(piZigzag))), uint32(qiCoder.decode(zigzagDecode(qiZigzag)))
+}
+
+// We introduce a new coordinate system (pi, qi), which is (si, ti)
+// with the bits that are constant for cells of that level shifted
+// off to the right.
+// si = round(s * 2^31)
+// pi = si >> (31 - level)
+//    = floor(s * 2^level)
+// If the point has been snapped to the level, the bits that are
+// shifted off will be a 1 in the msb, then 0s after that, so the
+// fractional part discarded by the cast is (close to) 0.5.
+
+// stToPiQi returns the value transformed to the PiQi coordinate space.
+func stToPiQi(s float64, level uint) uint32 {
+	return uint32(s * float64(int(1)<<level))
+}
+
+// siTitoPiQi returns the value transformed into the PiQi coordinate space.
+// encodeFirstPointFixedLength encodes the return value, so it must carry at
+// most level bits of precision.
+func siTitoPiQi(siTi uint32, level int) uint32 {
+	s := uint(siTi)
+	const max = maxSiTi
+	if s > max {
+		s = max
+	}
+
+	return uint32(s >> (maxLevel + 1 - uint(level)))
+}
+
+// piQiToST returns the value transformed to ST space.
+func piQiToST(pi uint32, level int) float64 {
+	// We want to recover the position at the center of the cell. If the point
+	// was snapped to the center of the cell, then math.Modf(s * 2^level) == 0.5.
+	// Inverting stToPiQi gives:
+	// s = (pi + 0.5) / 2^level.
+	return (float64(pi) + 0.5) / float64(int(1)<<level)
+}
+
+// facePiQitoXYZ converts the given face and PiQi coordinates back to a
+// normalized XYZ vector.
+func facePiQitoXYZ(face int, pi, qi uint32, level int) r3.Vector {
+	return faceUVToXYZ(face, stToUV(piQiToST(pi, level)), stToUV(piQiToST(qi, level))).Normalize()
+}
diff --git a/vendor/github.com/golang/geo/s2/polygon.go b/vendor/github.com/golang/geo/s2/polygon.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/polygon.go
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"fmt"
+	"io"
+	"math"
+)
+
+// Polygon represents a sequence of zero or more loops; recall that the
+// interior of a loop is defined to be its left-hand side (see Loop).
+//
+// When the polygon is initialized, the given loops are automatically converted
+// into a canonical form consisting of "shells" and "holes". Shells and holes
+// are both oriented CCW, and are nested hierarchically. The loops are
+// reordered to correspond to a pre-order traversal of the nesting hierarchy.
+type Polygon struct {
+	loops []*Loop
+
+	// index is a spatial index of all the polygon loops.
+	index *ShapeIndex
+
+	// hasHoles tracks if this polygon has at least one hole.
+	hasHoles bool
+
+	// numVertices keeps the running total of all of the vertices of the contained loops.
+	numVertices int
+
+	// numEdges tracks the total number of edges in all the loops in this polygon.
+	numEdges int
+
+	// bound is a conservative bound on all points contained by this polygon:
+	// if A.ContainsPoint(P), then A.bound.ContainsPoint(P).
+	bound Rect
+
+	// Since bound is not exact, it is possible that a polygon A contains
+	// another polygon B whose bounds are slightly larger. subregionBound
+	// has been expanded sufficiently to account for this error: if
+	// A.Contains(B), then A.subregionBound.Contains(B.bound).
+	subregionBound Rect
+
+	// A slice where element i is the cumulative number of edges in the
+	// preceding loops in the polygon. This field is used for polygons that
+	// have a large number of loops, and may be empty for polygons with few loops.
+	cumulativeEdges []int
+}
+
+// PolygonFromLoops constructs a polygon from the given set of loops. The
+// polygon interior consists of the points contained by an odd number of loops.
+// (Recall that a loop contains the set of points on its left-hand side.)
+//
+// This method determines the loop nesting hierarchy and assigns every loop a
+// depth. Shells have even depths, and holes have odd depths.
+func PolygonFromLoops(loops []*Loop) *Polygon {
+	p := &Polygon{
+		loops: loops,
+	}
+	p.initNested()
+	return p
+}
+
+// PolygonFromOrientedLoops returns a Polygon from the given set of loops,
+// like PolygonFromLoops. It expects loops to be oriented such that the polygon
+// interior is on the left-hand side of all loops. This implies that shells
+// and holes should have opposite orientations in the input to this method.
+// (During initialization, loops representing holes will automatically be
+// inverted.)
+func PolygonFromOrientedLoops(loops []*Loop) *Polygon {
+	// Remember which loops contain the origin, then orient every loop by its
+	// turning angle so that each one bounds the smaller of its two regions.
+	containedOrigin := make(map[*Loop]bool)
+	for _, l := range loops {
+		containedOrigin[l] = l.ContainsOrigin()
+	}
+
+	for _, l := range loops {
+		angle := l.TurningAngle()
+		if math.Abs(angle) > l.turningAngleMaxError() {
+			// Normalize the loop.
+			if angle < 0 {
+				l.Invert()
+			}
+		} else {
+			// Ensure that the loop does not contain the origin.
+ if l.ContainsOrigin() { + l.Invert() + } + } + } + + p := PolygonFromLoops(loops) + + if p.NumLoops() > 0 { + originLoop := p.Loop(0) + polygonContainsOrigin := false + for _, l := range p.Loops() { + if l.ContainsOrigin() { + polygonContainsOrigin = !polygonContainsOrigin + + originLoop = l + } + } + if containedOrigin[originLoop] != polygonContainsOrigin { + p.Invert() + } + } + + return p +} + +// Invert inverts the polygon (replaces it by its complement). +func (p *Polygon) Invert() { + // Inverting any one loop will invert the polygon. The best loop to invert + // is the one whose area is largest, since this yields the smallest area + // after inversion. The loop with the largest area is always at depth 0. + // The descendents of this loop all have their depth reduced by 1, while the + // former siblings of this loop all have their depth increased by 1. + + // The empty and full polygons are handled specially. + if p.IsEmpty() { + *p = *FullPolygon() + return + } + if p.IsFull() { + *p = Polygon{} + return + } + + // Find the loop whose area is largest (i.e., whose turning angle is + // smallest), minimizing calls to TurningAngle(). In particular, for + // polygons with a single shell at level 0 there is no need to call + // TurningAngle() at all. (This method is relatively expensive.) + best := 0 + const none = 10.0 // Flag that means "not computed yet" + bestAngle := none + for i := 1; i < p.NumLoops(); i++ { + if p.Loop(i).depth != 0 { + continue + } + // We defer computing the turning angle of loop 0 until we discover + // that the polygon has another top-level shell. + if bestAngle == none { + bestAngle = p.Loop(best).TurningAngle() + } + angle := p.Loop(i).TurningAngle() + // We break ties deterministically in order to avoid having the output + // depend on the input order of the loops. + if angle < bestAngle || (angle == bestAngle && compareLoops(p.Loop(i), p.Loop(best)) < 0) { + best = i + bestAngle = angle + } + } + // Build the new loops vector, starting with the inverted loop. + p.Loop(best).Invert() + newLoops := make([]*Loop, 0, p.NumLoops()) + // Add the former siblings of this loop as descendants. + lastBest := p.LastDescendant(best) + newLoops = append(newLoops, p.Loop(best)) + for i, l := range p.Loops() { + if i < best || i > lastBest { + l.depth++ + newLoops = append(newLoops, l) + } + } + // Add the former children of this loop as siblings. + for i, l := range p.Loops() { + if i > best && i <= lastBest { + l.depth-- + newLoops = append(newLoops, l) + } + } + p.loops = newLoops + p.initLoopProperties() +} + +// Defines a total ordering on Loops that does not depend on the cyclic +// order of loop vertices. This function is used to choose which loop to +// invert in the case where several loops have exactly the same area. +func compareLoops(a, b *Loop) int { + if na, nb := a.NumVertices(), b.NumVertices(); na != nb { + return na - nb + } + ai, aDir := a.CanonicalFirstVertex() + bi, bDir := b.CanonicalFirstVertex() + if aDir != bDir { + return aDir - bDir + } + for n := a.NumVertices() - 1; n >= 0; n, ai, bi = n-1, ai+aDir, bi+bDir { + if cmp := a.Vertex(ai).Cmp(b.Vertex(bi).Vector); cmp != 0 { + return cmp + } + } + return 0 +} + +// PolygonFromCell returns a Polygon from a single loop created from the given Cell. 
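A minimal sketch of the constructors above, using only the exported s2 API; the rectLoop helper and all coordinates are invented for illustration, and the PolygonFromCell body follows below:

    package main

    import (
        "fmt"

        "github.com/golang/geo/s2"
    )

    // rectLoop builds a CCW loop (interior on the left) from (lat, lng)
    // corner pairs given in degrees.
    func rectLoop(coords [][2]float64) *s2.Loop {
        pts := make([]s2.Point, len(coords))
        for i, c := range coords {
            pts[i] = s2.PointFromLatLng(s2.LatLngFromDegrees(c[0], c[1]))
        }
        return s2.LoopFromPoints(pts)
    }

    func main() {
        // One shell: the polygon interior lies to the left of the vertex order.
        shell := rectLoop([][2]float64{{0, 0}, {0, 10}, {10, 10}, {10, 0}})
        poly := s2.PolygonFromLoops([]*s2.Loop{shell})
        fmt.Println(poly.NumLoops(), poly.NumEdges()) // 1 4

        // A polygon covering a single leaf cell.
        cellPoly := s2.PolygonFromCell(s2.CellFromLatLng(s2.LatLngFromDegrees(5, 5)))
        fmt.Println(cellPoly.NumLoops()) // 1
    }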
+func PolygonFromCell(cell Cell) *Polygon {
+	return PolygonFromLoops([]*Loop{LoopFromCell(cell)})
+}
+
+// initNested takes the set of loops in this polygon and performs the nesting
+// computations to set the proper nesting and parent/child relationships.
+func (p *Polygon) initNested() {
+	if len(p.loops) == 1 {
+		p.initOneLoop()
+		return
+	}
+
+	lm := make(loopMap)
+
+	for _, l := range p.loops {
+		lm.insertLoop(l, nil)
+	}
+	// The loops have all been added to the loopMap for ordering. Clear the
+	// loops slice because we add all the loops in-order in initLoops.
+	p.loops = nil
+
+	// Reorder the loops in depth-first traversal order.
+	p.initLoops(lm)
+	p.initLoopProperties()
+}
+
+// loopMap is a map of a loop to its immediate children with respect to nesting.
+// It is used to determine which loops are shells and which are holes.
+type loopMap map[*Loop][]*Loop
+
+// insertLoop adds the given loop to the loop map under the specified parent.
+// All children of the new entry are checked to see if they need to move up to
+// a different level.
+func (lm loopMap) insertLoop(newLoop, parent *Loop) {
+	var children []*Loop
+	for done := false; !done; {
+		children = lm[parent]
+		done = true
+		for _, child := range children {
+			if child.ContainsNested(newLoop) {
+				parent = child
+				done = false
+				break
+			}
+		}
+	}
+
+	// Now that we have found a parent for this loop, some of the children of
+	// that parent may actually be children of the new loop.
+	newChildren := lm[newLoop]
+	for i := 0; i < len(children); {
+		child := children[i]
+		if newLoop.ContainsNested(child) {
+			newChildren = append(newChildren, child)
+			children = append(children[0:i], children[i+1:]...)
+		} else {
+			i++
+		}
+	}
+
+	lm[newLoop] = newChildren
+	lm[parent] = append(children, newLoop)
+}
+
+// loopStack simplifies access to the loops while being initialized.
+type loopStack []*Loop
+
+func (s *loopStack) push(v *Loop) {
+	*s = append(*s, v)
+}
+func (s *loopStack) pop() *Loop {
+	l := len(*s)
+	r := (*s)[l-1]
+	*s = (*s)[:l-1]
+	return r
+}
+
+// initLoops walks the mapping of loops to all of their children, and adds them in
+// order into the polygon's set of loops.
+func (p *Polygon) initLoops(lm loopMap) {
+	var stack loopStack
+	stack.push(nil)
+	depth := -1
+
+	for len(stack) > 0 {
+		loop := stack.pop()
+		if loop != nil {
+			depth = loop.depth
+			p.loops = append(p.loops, loop)
+		}
+		children := lm[loop]
+		for i := len(children) - 1; i >= 0; i-- {
+			child := children[i]
+			child.depth = depth + 1
+			stack.push(child)
+		}
+	}
+}
+
+// initOneLoop sets the properties for a polygon made of a single loop.
+// TODO(roberts): Can this be merged with initLoopProperties?
+func (p *Polygon) initOneLoop() {
+	p.hasHoles = false
+	p.numVertices = len(p.loops[0].vertices)
+	p.bound = p.loops[0].RectBound()
+	p.subregionBound = ExpandForSubregions(p.bound)
+	// Ensure the loop's depth is set correctly.
+	p.loops[0].depth = 0
+
+	p.initEdgesAndIndex()
+}
+
+// initLoopProperties sets the properties for polygons with multiple loops.
+func (p *Polygon) initLoopProperties() {
+	// The loops' depths are set by initNested/initOriented prior to this.
+ p.bound = EmptyRect() + p.hasHoles = false + for _, l := range p.loops { + if l.IsHole() { + p.hasHoles = true + } else { + p.bound = p.bound.Union(l.RectBound()) + } + p.numVertices += l.NumVertices() + } + p.subregionBound = ExpandForSubregions(p.bound) + + p.initEdgesAndIndex() +} + +// initEdgesAndIndex performs the shape related initializations and adds the final +// polygon to the index. +func (p *Polygon) initEdgesAndIndex() { + if p.IsFull() { + return + } + const maxLinearSearchLoops = 12 // Based on benchmarks. + if len(p.loops) > maxLinearSearchLoops { + p.cumulativeEdges = make([]int, 0, len(p.loops)) + } + + for _, l := range p.loops { + if p.cumulativeEdges != nil { + p.cumulativeEdges = append(p.cumulativeEdges, p.numEdges) + } + p.numEdges += len(l.vertices) + } + + p.index = NewShapeIndex() + p.index.Add(p) +} + +// FullPolygon returns a special "full" polygon. +func FullPolygon() *Polygon { + ret := &Polygon{ + loops: []*Loop{ + FullLoop(), + }, + numVertices: len(FullLoop().Vertices()), + bound: FullRect(), + subregionBound: FullRect(), + } + ret.initEdgesAndIndex() + return ret +} + +// Validate checks whether this is a valid polygon, +// including checking whether all the loops are themselves valid. +func (p *Polygon) Validate() error { + for i, l := range p.loops { + // Check for loop errors that don't require building a ShapeIndex. + if err := l.findValidationErrorNoIndex(); err != nil { + return fmt.Errorf("loop %d: %v", i, err) + } + // Check that no loop is empty, and that the full loop only appears in the + // full polygon. + if l.IsEmpty() { + return fmt.Errorf("loop %d: empty loops are not allowed", i) + } + if l.IsFull() && len(p.loops) > 1 { + return fmt.Errorf("loop %d: full loop appears in non-full polygon", i) + } + } + + // TODO(roberts): Uncomment the remaining checks when they are completed. + + // Check for loop self-intersections and loop pairs that cross + // (including duplicate edges and vertices). + // if findSelfIntersection(p.index) { + // return fmt.Errorf("polygon has loop pairs that cross") + // } + + // Check whether initOriented detected inconsistent loop orientations. + // if p.hasInconsistentLoopOrientations { + // return fmt.Errorf("inconsistent loop orientations detected") + // } + + // Finally, verify the loop nesting hierarchy. + return p.findLoopNestingError() +} + +// findLoopNestingError reports if there is an error in the loop nesting hierarchy. +func (p *Polygon) findLoopNestingError() error { + // First check that the loop depths make sense. + lastDepth := -1 + for i, l := range p.loops { + depth := l.depth + if depth < 0 || depth > lastDepth+1 { + return fmt.Errorf("loop %d: invalid loop depth (%d)", i, depth) + } + lastDepth = depth + } + // Then check that they correspond to the actual loop nesting. This test + // is quadratic in the number of loops but the cost per iteration is small. + for i, l := range p.loops { + last := p.LastDescendant(i) + for j, l2 := range p.loops { + if i == j { + continue + } + nested := (j >= i+1) && (j <= last) + const reverseB = false + + if l.containsNonCrossingBoundary(l2, reverseB) != nested { + nestedStr := "" + if !nested { + nestedStr = "not " + } + return fmt.Errorf("invalid nesting: loop %d should %scontain loop %d", i, nestedStr, j) + } + } + } + return nil +} + +// IsEmpty reports whether this is the special "empty" polygon (consisting of no loops). 
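The pre-order loop hierarchy maintained by the initializers above can be inspected through the exported accessors (NumLoops, Loop, and LastDescendant, which is defined further below); a sketch, where describeLoops is a hypothetical helper:

    package main

    import (
        "fmt"

        "github.com/golang/geo/s2"
    )

    // describeLoops prints the pre-order loop hierarchy of p: the children of
    // loop k are exactly the loops k+1..LastDescendant(k) that sit one depth
    // level down.
    func describeLoops(p *s2.Polygon) {
        for i := 0; i < p.NumLoops(); i++ {
            l := p.Loop(i)
            fmt.Printf("loop %d: hole=%v vertices=%d lastDescendant=%d\n",
                i, l.IsHole(), l.NumVertices(), p.LastDescendant(i))
        }
    }

    func main() {
        // The full polygon has a single shell loop with one vertex.
        describeLoops(s2.FullPolygon())
    }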
+func (p *Polygon) IsEmpty() bool { + return len(p.loops) == 0 +} + +// IsFull reports whether this is the special "full" polygon (consisting of a +// single loop that encompasses the entire sphere). +func (p *Polygon) IsFull() bool { + return len(p.loops) == 1 && p.loops[0].IsFull() +} + +// NumLoops returns the number of loops in this polygon. +func (p *Polygon) NumLoops() int { + return len(p.loops) +} + +// Loops returns the loops in this polygon. +func (p *Polygon) Loops() []*Loop { + return p.loops +} + +// Loop returns the loop at the given index. Note that during initialization, +// the given loops are reordered according to a pre-order traversal of the loop +// nesting hierarchy. This implies that every loop is immediately followed by +// its descendants. This hierarchy can be traversed using the methods Parent, +// LastDescendant, and Loop.depth. +func (p *Polygon) Loop(k int) *Loop { + return p.loops[k] +} + +// Parent returns the index of the parent of loop k. +// If the loop does not have a parent, ok=false is returned. +func (p *Polygon) Parent(k int) (index int, ok bool) { + // See where we are on the depth hierarchy. + depth := p.loops[k].depth + if depth == 0 { + return -1, false + } + + // There may be several loops at the same nesting level as us that share a + // parent loop with us. (Imagine a slice of swiss cheese, of which we are one loop. + // we don't know how many may be next to us before we get back to our parent loop.) + // Move up one position from us, and then begin traversing back through the set of loops + // until we find the one that is our parent or we get to the top of the polygon. + for k--; k >= 0 && p.loops[k].depth <= depth; k-- { + } + return k, true +} + +// LastDescendant returns the index of the last loop that is contained within loop k. +// If k is negative, it returns the last loop in the polygon. +// Note that loops are indexed according to a pre-order traversal of the nesting +// hierarchy, so the immediate children of loop k can be found by iterating over +// the loops (k+1)..LastDescendant(k) and selecting those whose depth is equal +// to Loop(k).depth+1. +func (p *Polygon) LastDescendant(k int) int { + if k < 0 { + return len(p.loops) - 1 + } + + depth := p.loops[k].depth + + // Find the next loop immediately past us in the set of loops, and then start + // moving down the list until we either get to the end or find the next loop + // that is higher up the hierarchy than we are. + for k++; k < len(p.loops) && p.loops[k].depth > depth; k++ { + } + return k - 1 +} + +// CapBound returns a bounding spherical cap. +func (p *Polygon) CapBound() Cap { return p.bound.CapBound() } + +// RectBound returns a bounding latitude-longitude rectangle. +func (p *Polygon) RectBound() Rect { return p.bound } + +// ContainsPoint reports whether the polygon contains the point. +func (p *Polygon) ContainsPoint(point Point) bool { + // NOTE: A bounds check slows down this function by about 50%. It is + // worthwhile only when it might allow us to delay building the index. + if !p.index.IsFresh() && !p.bound.ContainsPoint(point) { + return false + } + + // For small polygons, and during initial construction, it is faster to just + // check all the crossing. + const maxBruteForceVertices = 32 + if p.numVertices < maxBruteForceVertices || p.index == nil { + inside := false + for _, l := range p.loops { + // use loops bruteforce to avoid building the index on each loop. 
+ inside = inside != l.bruteForceContainsPoint(point) + } + return inside + } + + // Otherwise, look up the ShapeIndex cell containing this point. + it := p.index.Iterator() + if !it.LocatePoint(point) { + return false + } + + return p.iteratorContainsPoint(it, point) +} + +// ContainsCell reports whether the polygon contains the given cell. +func (p *Polygon) ContainsCell(cell Cell) bool { + it := p.index.Iterator() + relation := it.LocateCellID(cell.ID()) + + // If "cell" is disjoint from all index cells, it is not contained. + // Similarly, if "cell" is subdivided into one or more index cells then it + // is not contained, since index cells are subdivided only if they (nearly) + // intersect a sufficient number of edges. (But note that if "cell" itself + // is an index cell then it may be contained, since it could be a cell with + // no edges in the loop interior.) + if relation != Indexed { + return false + } + + // Otherwise check if any edges intersect "cell". + if p.boundaryApproxIntersects(it, cell) { + return false + } + + // Otherwise check if the loop contains the center of "cell". + return p.iteratorContainsPoint(it, cell.Center()) +} + +// IntersectsCell reports whether the polygon intersects the given cell. +func (p *Polygon) IntersectsCell(cell Cell) bool { + it := p.index.Iterator() + relation := it.LocateCellID(cell.ID()) + + // If cell does not overlap any index cell, there is no intersection. + if relation == Disjoint { + return false + } + // If cell is subdivided into one or more index cells, there is an + // intersection to within the S2ShapeIndex error bound (see Contains). + if relation == Subdivided { + return true + } + // If cell is an index cell, there is an intersection because index cells + // are created only if they have at least one edge or they are entirely + // contained by the loop. + if it.CellID() == cell.id { + return true + } + // Otherwise check if any edges intersect cell. + if p.boundaryApproxIntersects(it, cell) { + return true + } + // Otherwise check if the loop contains the center of cell. + return p.iteratorContainsPoint(it, cell.Center()) +} + +// CellUnionBound computes a covering of the Polygon. +func (p *Polygon) CellUnionBound() []CellID { + // TODO(roberts): Use ShapeIndexRegion when it's available. + return p.CapBound().CellUnionBound() +} + +// boundaryApproxIntersects reports whether the loop's boundary intersects cell. +// It may also return true when the loop boundary does not intersect cell but +// some edge comes within the worst-case error tolerance. +// +// This requires that it.Locate(cell) returned Indexed. +func (p *Polygon) boundaryApproxIntersects(it *ShapeIndexIterator, cell Cell) bool { + aClipped := it.IndexCell().findByShapeID(0) + + // If there are no edges, there is no intersection. + if len(aClipped.edges) == 0 { + return false + } + + // We can save some work if cell is the index cell itself. + if it.CellID() == cell.ID() { + return true + } + + // Otherwise check whether any of the edges intersect cell. + maxError := (faceClipErrorUVCoord + intersectsRectErrorUVDist) + bound := cell.BoundUV().ExpandedByMargin(maxError) + for _, e := range aClipped.edges { + edge := p.index.Shape(0).Edge(e) + v0, v1, ok := ClipToPaddedFace(edge.V0, edge.V1, cell.Face(), maxError) + if ok && edgeIntersectsRect(v0, v1, bound) { + return true + } + } + + return false +} + +// iteratorContainsPoint reports whether the iterator that is positioned at the +// ShapeIndexCell that may contain p, contains the point p. 
+func (p *Polygon) iteratorContainsPoint(it *ShapeIndexIterator, point Point) bool { + // Test containment by drawing a line segment from the cell center to the + // given point and counting edge crossings. + aClipped := it.IndexCell().findByShapeID(0) + inside := aClipped.containsCenter + + if len(aClipped.edges) == 0 { + return inside + } + + // This block requires ShapeIndex. + crosser := NewEdgeCrosser(it.Center(), point) + shape := p.index.Shape(0) + for _, e := range aClipped.edges { + edge := shape.Edge(e) + inside = inside != crosser.EdgeOrVertexCrossing(edge.V0, edge.V1) + } + + return inside +} + +// Shape Interface + +// NumEdges returns the number of edges in this shape. +func (p *Polygon) NumEdges() int { + return p.numEdges +} + +// Edge returns endpoints for the given edge index. +func (p *Polygon) Edge(e int) Edge { + var i int + + if len(p.cumulativeEdges) > 0 { + for i = range p.cumulativeEdges { + if i+1 >= len(p.cumulativeEdges) || e < p.cumulativeEdges[i+1] { + e -= p.cumulativeEdges[i] + break + } + } + } else { + // When the number of loops is small, use linear search. Most often + // there is exactly one loop and the code below executes zero times. + for i = 0; e >= len(p.Loop(i).vertices); i++ { + e -= len(p.Loop(i).vertices) + } + } + + return Edge{p.Loop(i).OrientedVertex(e), p.Loop(i).OrientedVertex(e + 1)} +} + +// ReferencePoint returns the reference point for this polygon. +func (p *Polygon) ReferencePoint() ReferencePoint { + containsOrigin := false + for _, l := range p.loops { + containsOrigin = containsOrigin != l.ContainsOrigin() + } + return OriginReferencePoint(containsOrigin) +} + +// NumChains reports the number of contiguous edge chains in the Polygon. +func (p *Polygon) NumChains() int { + return p.NumLoops() +} + +// Chain returns the i-th edge Chain (loop) in the Shape. +func (p *Polygon) Chain(chainID int) Chain { + if p.cumulativeEdges != nil { + return Chain{p.cumulativeEdges[chainID], len(p.Loop(chainID).vertices)} + } + e := 0 + for j := 0; j < chainID; j++ { + e += len(p.Loop(j).vertices) + } + + // Polygon represents a full loop as a loop with one vertex, while + // Shape represents a full loop as a chain with no vertices. + if numVertices := p.Loop(chainID).NumVertices(); numVertices != 1 { + return Chain{e, numVertices} + } + return Chain{e, 0} +} + +// ChainEdge returns the j-th edge of the i-th edge Chain (loop). +func (p *Polygon) ChainEdge(i, j int) Edge { + return Edge{p.Loop(i).OrientedVertex(j), p.Loop(i).OrientedVertex(j + 1)} +} + +// ChainPosition returns a pair (i, j) such that edgeID is the j-th edge +// of the i-th edge Chain. +func (p *Polygon) ChainPosition(edgeID int) ChainPosition { + var i int + + if len(p.cumulativeEdges) > 0 { + for i = range p.cumulativeEdges { + if i+1 >= len(p.cumulativeEdges) || edgeID < p.cumulativeEdges[i+1] { + edgeID -= p.cumulativeEdges[i] + break + } + } + } else { + // When the number of loops is small, use linear search. Most often + // there is exactly one loop and the code below executes zero times. + for i = 0; edgeID >= len(p.Loop(i).vertices); i++ { + edgeID -= len(p.Loop(i).vertices) + } + } + // TODO(roberts): unify this and Edge since they are mostly identical. + return ChainPosition{i, edgeID} +} + +// Dimension returns the dimension of the geometry represented by this Polygon. 
+func (p *Polygon) Dimension() int { return 2 } + +func (p *Polygon) typeTag() typeTag { return typeTagPolygon } + +func (p *Polygon) privateInterface() {} + +// Contains reports whether this polygon contains the other polygon. +// Specifically, it reports whether all the points in the other polygon +// are also in this polygon. +func (p *Polygon) Contains(o *Polygon) bool { + // If both polygons have one loop, use the more efficient Loop method. + // Note that Loop's Contains does its own bounding rectangle check. + if len(p.loops) == 1 && len(o.loops) == 1 { + return p.loops[0].Contains(o.loops[0]) + } + + // Otherwise if neither polygon has holes, we can still use the more + // efficient Loop's Contains method (rather than compareBoundary), + // but it's worthwhile to do our own bounds check first. + if !p.subregionBound.Contains(o.bound) { + // Even though Bound(A) does not contain Bound(B), it is still possible + // that A contains B. This can only happen when union of the two bounds + // spans all longitudes. For example, suppose that B consists of two + // shells with a longitude gap between them, while A consists of one shell + // that surrounds both shells of B but goes the other way around the + // sphere (so that it does not intersect the longitude gap). + if !p.bound.Lng.Union(o.bound.Lng).IsFull() { + return false + } + } + + if !p.hasHoles && !o.hasHoles { + for _, l := range o.loops { + if !p.anyLoopContains(l) { + return false + } + } + return true + } + + // Polygon A contains B iff B does not intersect the complement of A. From + // the intersection algorithm below, this means that the complement of A + // must exclude the entire boundary of B, and B must exclude all shell + // boundaries of the complement of A. (It can be shown that B must then + // exclude the entire boundary of the complement of A.) The first call + // below returns false if the boundaries cross, therefore the second call + // does not need to check for any crossing edges (which makes it cheaper). + return p.containsBoundary(o) && o.excludesNonCrossingComplementShells(p) +} + +// Intersects reports whether this polygon intersects the other polygon, i.e. +// if there is a point that is contained by both polygons. +func (p *Polygon) Intersects(o *Polygon) bool { + // If both polygons have one loop, use the more efficient Loop method. + // Note that Loop Intersects does its own bounding rectangle check. + if len(p.loops) == 1 && len(o.loops) == 1 { + return p.loops[0].Intersects(o.loops[0]) + } + + // Otherwise if neither polygon has holes, we can still use the more + // efficient Loop.Intersects method. The polygons intersect if and + // only if some pair of loop regions intersect. + if !p.bound.Intersects(o.bound) { + return false + } + + if !p.hasHoles && !o.hasHoles { + for _, l := range o.loops { + if p.anyLoopIntersects(l) { + return true + } + } + return false + } + + // Polygon A is disjoint from B if A excludes the entire boundary of B and B + // excludes all shell boundaries of A. (It can be shown that B must then + // exclude the entire boundary of A.) The first call below returns false if + // the boundaries cross, therefore the second call does not need to check + // for crossing edges. + return !p.excludesBoundary(o) || !o.excludesNonCrossingShells(p) +} + +// compareBoundary returns +1 if this polygon contains the boundary of B, -1 if A +// excludes the boundary of B, and 0 if the boundaries of A and B cross. 
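A runnable sketch of the Contains/Intersects relations above; the boundary-comparison helpers they delegate to begin with compareBoundary below. The loopFromDegrees helper and all coordinates are invented:

    package main

    import (
        "fmt"

        "github.com/golang/geo/s2"
    )

    func loopFromDegrees(coords [][2]float64) *s2.Loop {
        pts := make([]s2.Point, len(coords))
        for i, c := range coords {
            pts[i] = s2.PointFromLatLng(s2.LatLngFromDegrees(c[0], c[1]))
        }
        return s2.LoopFromPoints(pts)
    }

    func main() {
        // outer strictly contains inner (both CCW, interior on the left).
        outer := s2.PolygonFromLoops([]*s2.Loop{
            loopFromDegrees([][2]float64{{0, 0}, {0, 10}, {10, 10}, {10, 0}}),
        })
        inner := s2.PolygonFromLoops([]*s2.Loop{
            loopFromDegrees([][2]float64{{2, 2}, {2, 8}, {8, 8}, {8, 2}}),
        })

        fmt.Println(outer.Contains(inner))   // true
        fmt.Println(outer.Intersects(inner)) // true
        fmt.Println(inner.Contains(outer))   // false
    }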
+func (p *Polygon) compareBoundary(o *Loop) int {
+	result := -1
+	for i := 0; i < len(p.loops) && result != 0; i++ {
+		// If B crosses any loop of A, the result is 0. Otherwise the result
+		// changes sign each time B is contained by a loop of A.
+		result *= -p.loops[i].compareBoundary(o)
+	}
+	return result
+}
+
+// containsBoundary reports whether this polygon contains the entire boundary of B.
+func (p *Polygon) containsBoundary(o *Polygon) bool {
+	for _, l := range o.loops {
+		if p.compareBoundary(l) <= 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// excludesBoundary reports whether this polygon excludes the entire boundary of B.
+func (p *Polygon) excludesBoundary(o *Polygon) bool {
+	for _, l := range o.loops {
+		if p.compareBoundary(l) >= 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// containsNonCrossingBoundary reports whether polygon A contains the boundary of
+// loop B. Shared edges are handled according to the rule described in Loop's
+// containsNonCrossingBoundary.
+func (p *Polygon) containsNonCrossingBoundary(o *Loop, reverse bool) bool {
+	var inside bool
+	for _, l := range p.loops {
+		x := l.containsNonCrossingBoundary(o, reverse)
+		inside = (inside != x)
+	}
+	return inside
+}
+
+// excludesNonCrossingShells reports whether, given two polygons A and B such that
+// the boundary of A does not cross any loop of B, A excludes all shell boundaries of B.
+func (p *Polygon) excludesNonCrossingShells(o *Polygon) bool {
+	for _, l := range o.loops {
+		if l.IsHole() {
+			continue
+		}
+		if p.containsNonCrossingBoundary(l, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// excludesNonCrossingComplementShells reports whether, given two polygons A and B
+// such that the boundary of A does not cross any loop of B, A excludes all
+// shell boundaries of the complement of B.
+func (p *Polygon) excludesNonCrossingComplementShells(o *Polygon) bool {
+	// Special case to handle the complement of the empty or full polygons.
+	if o.IsEmpty() {
+		return !p.IsFull()
+	}
+	if o.IsFull() {
+		return true
+	}
+
+	// Otherwise the complement of B may be obtained by inverting loop(0) and
+	// then swapping the shell/hole status of all other loops. This implies
+	// that the shells of the complement consist of loop 0 plus all the holes of
+	// the original polygon.
+	for j, l := range o.loops {
+		if j > 0 && !l.IsHole() {
+			continue
+		}
+
+		// The interior of the complement is to the right of loop 0, and to the
+		// left of the loops that were originally holes.
+		if p.containsNonCrossingBoundary(l, j == 0) {
+			return false
+		}
+	}
+	return true
+}
+
+// anyLoopContains reports whether any loop in this polygon contains the given loop.
+func (p *Polygon) anyLoopContains(o *Loop) bool {
+	for _, l := range p.loops {
+		if l.Contains(o) {
+			return true
+		}
+	}
+	return false
+}
+
+// anyLoopIntersects reports whether any loop in this polygon intersects the given loop.
+func (p *Polygon) anyLoopIntersects(o *Loop) bool {
+	for _, l := range p.loops {
+		if l.Intersects(o) {
+			return true
+		}
+	}
+	return false
+}
+
+// Area returns the area of the polygon interior, i.e. the region on the left side
+// of an odd number of loops. The return value is between 0 and 4*Pi.
+func (p *Polygon) Area() float64 {
+	var area float64
+	for _, loop := range p.loops {
+		area += float64(loop.Sign()) * loop.Area()
+	}
+	return area
+}
+
+// Encode encodes the Polygon.
+func (p *Polygon) Encode(w io.Writer) error {
+	e := &encoder{w: w}
+	p.encode(e)
+	return e.err
+}
+
+// encode writes the polygon in either lossless or compressed format,
+// whichever is estimated to produce the smaller encoding.
+func (p *Polygon) encode(e *encoder) {
+	if p.numVertices == 0 {
+		p.encodeCompressed(e, maxLevel, nil)
+		return
+	}
+
+	// Convert all the polygon vertices to XYZFaceSiTi format.
+	vs := make([]xyzFaceSiTi, 0, p.numVertices)
+	for _, l := range p.loops {
+		vs = append(vs, l.xyzFaceSiTiVertices()...)
+	}
+
+	// Compute a histogram of the cell levels at which the vertices are snapped.
+	// (histogram[0] is the number of unsnapped vertices, histogram[i] the number
+	// of vertices snapped at level i-1).
+	histogram := make([]int, maxLevel+2)
+	for _, v := range vs {
+		histogram[v.level+1]++
+	}
+
+	// Compute the level at which most of the vertices are snapped.
+	// If multiple levels have the same maximum number of vertices
+	// snapped to it, the first one (lowest level number / largest
+	// area / smallest encoding length) will be chosen, so this
+	// is desired.
+	var snapLevel, numSnapped int
+	for level, h := range histogram[1:] {
+		if h > numSnapped {
+			snapLevel, numSnapped = level, h
+		}
+	}
+
+	// Choose an encoding format based on the number of unsnapped vertices and a
+	// rough estimate of the encoded sizes.
+	numUnsnapped := p.numVertices - numSnapped // Number of vertices that won't be snapped at snapLevel.
+	const pointSize = 3 * 8                    // s2.Point is an r3.Vector, which is 3 float64s. That's 3*8 = 24 bytes.
+	compressedSize := 4*p.numVertices + (pointSize+2)*numUnsnapped
+	losslessSize := pointSize * p.numVertices
+	if compressedSize < losslessSize {
+		p.encodeCompressed(e, snapLevel, vs)
+	} else {
+		p.encodeLossless(e)
+	}
+}
+
+// encodeLossless encodes the polygon's Points as float64s.
+func (p *Polygon) encodeLossless(e *encoder) {
+	e.writeInt8(encodingVersion)
+	e.writeBool(true) // a legacy c++ value. must be true.
+	e.writeBool(p.hasHoles)
+	e.writeUint32(uint32(len(p.loops)))
+
+	if e.err != nil {
+		return
+	}
+	if len(p.loops) > maxEncodedLoops {
+		e.err = fmt.Errorf("too many loops (%d; max is %d)", len(p.loops), maxEncodedLoops)
+		return
+	}
+	for _, l := range p.loops {
+		l.encode(e)
+	}
+
+	// Encode the bound.
+	p.bound.encode(e)
+}
+
+func (p *Polygon) encodeCompressed(e *encoder, snapLevel int, vertices []xyzFaceSiTi) {
+	e.writeUint8(uint8(encodingCompressedVersion))
+	e.writeUint8(uint8(snapLevel))
+	e.writeUvarint(uint64(len(p.loops)))
+
+	if e.err != nil {
+		return
+	}
+	if l := len(p.loops); l > maxEncodedLoops {
+		e.err = fmt.Errorf("too many loops to encode: %d; max is %d", l, maxEncodedLoops)
+		return
+	}
+
+	for _, l := range p.loops {
+		l.encodeCompressed(e, snapLevel, vertices[:len(l.vertices)])
+		vertices = vertices[len(l.vertices):]
+	}
+	// Do not write the bound, num_vertices, or has_holes_ as they can be
+	// cheaply recomputed by decodeCompressed. Microbenchmarks show the
+	// speed difference is inconsequential.
+}
+
+// Decode decodes the Polygon.
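Before the Decode implementation below, a hedged round-trip sketch of the Encode/Decode pair; bytes.Buffer serves as both the io.Writer and io.Reader:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/golang/geo/s2"
    )

    func main() {
        // Round-trip a polygon through the wire format chosen by encode
        // (lossless or compressed, whichever is estimated smaller).
        orig := s2.PolygonFromCell(s2.CellFromLatLng(s2.LatLngFromDegrees(1, 2)))

        var buf bytes.Buffer
        if err := orig.Encode(&buf); err != nil {
            panic(err)
        }

        var decoded s2.Polygon
        if err := decoded.Decode(&buf); err != nil {
            panic(err)
        }
        fmt.Println(decoded.NumLoops(), decoded.NumEdges()) // matches orig
    }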
+func (p *Polygon) Decode(r io.Reader) error { + d := &decoder{r: asByteReader(r)} + version := int8(d.readUint8()) + var dec func(*decoder) + switch version { + case encodingVersion: + dec = p.decode + case encodingCompressedVersion: + dec = p.decodeCompressed + default: + return fmt.Errorf("unsupported version %d", version) + } + dec(d) + return d.err +} + +// maxEncodedLoops is the biggest supported number of loops in a polygon during encoding. +// Setting a maximum guards an allocation: it prevents an attacker from easily pushing us OOM. +const maxEncodedLoops = 10000000 + +func (p *Polygon) decode(d *decoder) { + *p = Polygon{} + d.readUint8() // Ignore irrelevant serialized owns_loops_ value. + + p.hasHoles = d.readBool() + + // Polygons with no loops are explicitly allowed here: a newly created + // polygon has zero loops and such polygons encode and decode properly. + nloops := d.readUint32() + if d.err != nil { + return + } + if nloops > maxEncodedLoops { + d.err = fmt.Errorf("too many loops (%d; max is %d)", nloops, maxEncodedLoops) + return + } + p.loops = make([]*Loop, nloops) + for i := range p.loops { + p.loops[i] = new(Loop) + p.loops[i].decode(d) + p.numVertices += len(p.loops[i].vertices) + } + + p.bound.decode(d) + if d.err != nil { + return + } + p.subregionBound = ExpandForSubregions(p.bound) + p.initEdgesAndIndex() +} + +func (p *Polygon) decodeCompressed(d *decoder) { + snapLevel := int(d.readUint8()) + + if snapLevel > maxLevel { + d.err = fmt.Errorf("snaplevel too big: %d", snapLevel) + return + } + // Polygons with no loops are explicitly allowed here: a newly created + // polygon has zero loops and such polygons encode and decode properly. + nloops := int(d.readUvarint()) + if nloops > maxEncodedLoops { + d.err = fmt.Errorf("too many loops (%d; max is %d)", nloops, maxEncodedLoops) + } + p.loops = make([]*Loop, nloops) + for i := range p.loops { + p.loops[i] = new(Loop) + p.loops[i].decodeCompressed(d, snapLevel) + } + p.initLoopProperties() +} + +// TODO(roberts): Differences from C++ +// Centroid +// SnapLevel +// DistanceToPoint +// DistanceToBoundary +// Project +// ProjectToBoundary +// ApproxContains/ApproxDisjoint for Polygons +// InitTo{Intersection/ApproxIntersection/Union/ApproxUnion/Diff/ApproxDiff} +// InitToSimplified +// InitToSnapped +// IntersectWithPolyline +// ApproxIntersectWithPolyline +// SubtractFromPolyline +// ApproxSubtractFromPolyline +// DestructiveUnion +// DestructiveApproxUnion +// InitToCellUnionBorder +// IsNormalized +// Equal/BoundaryEqual/BoundaryApproxEqual/BoundaryNear Polygons +// BreakEdgesAndAddToBuilder +// +// clearLoops +// findLoopNestingError +// initToSimplifiedInternal +// internalClipPolyline +// clipBoundary diff --git a/vendor/github.com/golang/geo/s2/polyline.go b/vendor/github.com/golang/geo/s2/polyline.go new file mode 100644 index 000000000..517968342 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/polyline.go @@ -0,0 +1,589 @@ +// Copyright 2016 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/golang/geo/s1"
+)
+
+// Polyline represents a sequence of zero or more vertices connected by
+// straight edges (geodesics). Edges of length 0 and 180 degrees are not
+// allowed, i.e. adjacent vertices should not be identical or antipodal.
+type Polyline []Point
+
+// PolylineFromLatLngs creates a new Polyline from the given LatLngs.
+func PolylineFromLatLngs(points []LatLng) *Polyline {
+	p := make(Polyline, len(points))
+	for k, v := range points {
+		p[k] = PointFromLatLng(v)
+	}
+	return &p
+}
+
+// Reverse reverses the order of the Polyline vertices.
+func (p *Polyline) Reverse() {
+	for i := 0; i < len(*p)/2; i++ {
+		(*p)[i], (*p)[len(*p)-i-1] = (*p)[len(*p)-i-1], (*p)[i]
+	}
+}
+
+// Length returns the length of this Polyline.
+func (p *Polyline) Length() s1.Angle {
+	var length s1.Angle
+
+	for i := 1; i < len(*p); i++ {
+		length += (*p)[i-1].Distance((*p)[i])
+	}
+	return length
+}
+
+// Centroid returns the true centroid of the polyline multiplied by the length of the
+// polyline. The result is not unit length, so you may wish to normalize it.
+//
+// Scaling by the Polyline length makes it easy to compute the centroid
+// of several Polylines (by simply adding up their centroids).
+func (p *Polyline) Centroid() Point {
+	var centroid Point
+	for i := 1; i < len(*p); i++ {
+		// The centroid (multiplied by length) is a vector toward the midpoint
+		// of the edge, whose length is twice the sin of half the angle between
+		// the two vertices. Defining theta to be this angle, we have:
+		vSum := (*p)[i-1].Add((*p)[i].Vector)  // Length == 2*cos(theta)
+		vDiff := (*p)[i-1].Sub((*p)[i].Vector) // Length == 2*sin(theta)
+
+		// Length == 2*sin(theta)
+		centroid = Point{centroid.Add(vSum.Mul(math.Sqrt(vDiff.Norm2() / vSum.Norm2())))}
+	}
+	return centroid
+}
+
+// Equal reports whether the given Polyline is exactly the same as this one.
+func (p *Polyline) Equal(b *Polyline) bool {
+	if len(*p) != len(*b) {
+		return false
+	}
+	for i, v := range *p {
+		if v != (*b)[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+// ApproxEqual reports whether two polylines have the same number of vertices,
+// and corresponding vertex pairs are separated by no more than the standard margin.
+func (p *Polyline) ApproxEqual(o *Polyline) bool {
+	return p.approxEqual(o, s1.Angle(epsilon))
+}
+
+// approxEqual reports whether two polylines are equal within the given margin.
+func (p *Polyline) approxEqual(o *Polyline, maxError s1.Angle) bool {
+	if len(*p) != len(*o) {
+		return false
+	}
+	for offset, val := range *p {
+		if !val.approxEqual((*o)[offset], maxError) {
+			return false
+		}
+	}
+	return true
+}
+
+// CapBound returns the bounding Cap for this Polyline.
+func (p *Polyline) CapBound() Cap {
+	return p.RectBound().CapBound()
+}
+
+// RectBound returns the bounding Rect for this Polyline.
+func (p *Polyline) RectBound() Rect {
+	rb := NewRectBounder()
+	for _, v := range *p {
+		rb.AddPoint(v)
+	}
+	return rb.RectBound()
+}
+
+// ContainsCell reports whether this Polyline contains the given Cell. Always returns false
+// because "containment" is not numerically well-defined except at the Polyline vertices.
+func (p *Polyline) ContainsCell(cell Cell) bool {
+	return false
+}
+
+// IntersectsCell reports whether this Polyline intersects the given Cell.
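+//
+// For example, a hedged sketch using constructors defined elsewhere in this
+// package (the coordinates are arbitrary):
+//
+//	line := PolylineFromLatLngs([]LatLng{
+//		LatLngFromDegrees(0, 0),
+//		LatLngFromDegrees(0, 10),
+//	})
+//	cell := CellFromCellID(CellIDFromLatLng(LatLngFromDegrees(0, 5)))
+//	hit := line.IntersectsCell(cell) // expected true: the edge runs through the cell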
+func (p *Polyline) IntersectsCell(cell Cell) bool {
+	if len(*p) == 0 {
+		return false
+	}
+
+	// We only need to check whether the cell contains vertex 0 for correctness,
+	// but these tests are cheap compared to edge crossings so we might as well
+	// check all the vertices.
+	for _, v := range *p {
+		if cell.ContainsPoint(v) {
+			return true
+		}
+	}
+
+	cellVertices := []Point{
+		cell.Vertex(0),
+		cell.Vertex(1),
+		cell.Vertex(2),
+		cell.Vertex(3),
+	}
+
+	for j := 0; j < 4; j++ {
+		crosser := NewChainEdgeCrosser(cellVertices[j], cellVertices[(j+1)&3], (*p)[0])
+		for i := 1; i < len(*p); i++ {
+			if crosser.ChainCrossingSign((*p)[i]) != DoNotCross {
+				// There is a proper crossing, or two vertices were the same.
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// ContainsPoint returns false since Polylines are not closed.
+func (p *Polyline) ContainsPoint(point Point) bool {
+	return false
+}
+
+// CellUnionBound computes a covering of the Polyline.
+func (p *Polyline) CellUnionBound() []CellID {
+	return p.CapBound().CellUnionBound()
+}
+
+// NumEdges returns the number of edges in this shape.
+func (p *Polyline) NumEdges() int {
+	if len(*p) == 0 {
+		return 0
+	}
+	return len(*p) - 1
+}
+
+// Edge returns endpoints for the given edge index.
+func (p *Polyline) Edge(i int) Edge {
+	return Edge{(*p)[i], (*p)[i+1]}
+}
+
+// ReferencePoint returns the default reference point with negative containment because Polylines are not closed.
+func (p *Polyline) ReferencePoint() ReferencePoint {
+	return OriginReferencePoint(false)
+}
+
+// NumChains reports the number of contiguous edge chains in this Polyline.
+func (p *Polyline) NumChains() int {
+	return minInt(1, p.NumEdges())
+}
+
+// Chain returns the i-th edge Chain in the Shape.
+func (p *Polyline) Chain(chainID int) Chain {
+	return Chain{0, p.NumEdges()}
+}
+
+// ChainEdge returns the j-th edge of the i-th edge Chain.
+func (p *Polyline) ChainEdge(chainID, offset int) Edge {
+	return Edge{(*p)[offset], (*p)[offset+1]}
+}
+
+// ChainPosition returns a pair (i, j) such that edgeID is the j-th edge
+// of the i-th edge Chain.
+func (p *Polyline) ChainPosition(edgeID int) ChainPosition {
+	return ChainPosition{0, edgeID}
+}
+
+// Dimension returns the dimension of the geometry represented by this Polyline.
+func (p *Polyline) Dimension() int { return 1 }
+
+// IsEmpty reports whether this shape contains no points.
+func (p *Polyline) IsEmpty() bool { return defaultShapeIsEmpty(p) }
+
+// IsFull reports whether this shape contains all points on the sphere.
+func (p *Polyline) IsFull() bool { return defaultShapeIsFull(p) }
+
+func (p *Polyline) typeTag() typeTag { return typeTagPolyline }
+
+func (p *Polyline) privateInterface() {}
+
+// findEndVertex reports the maximal end index such that the line segment
+// between the vertex at the start index and the one at that end index passes
+// within the given tolerance of all interior vertices, in order.
+func findEndVertex(p Polyline, tolerance s1.Angle, index int) int {
+	// The basic idea is to keep track of the "pie wedge" of angles
+	// from the starting vertex such that a ray from the starting
+	// vertex at that angle will pass through the discs of radius
+	// tolerance centered around all vertices processed so far.
+	//
+	// First we define a coordinate frame for the tangent and normal
+	// spaces at the starting vertex. Essentially this means picking
+	// three orthonormal vectors X,Y,Z such that X and Y span the
+	// tangent plane at the starting vertex, and Z is up.
We use + // the coordinate frame to define a mapping from 3D direction + // vectors to a one-dimensional ray angle in the range (-π, + // π]. The angle of a direction vector is computed by + // transforming it into the X,Y,Z basis, and then calculating + // atan2(y,x). This mapping allows us to represent a wedge of + // angles as a 1D interval. Since the interval wraps around, we + // represent it as an Interval, i.e. an interval on the unit + // circle. + origin := p[index] + frame := getFrame(origin) + + // As we go along, we keep track of the current wedge of angles + // and the distance to the last vertex (which must be + // non-decreasing). + currentWedge := s1.FullInterval() + var lastDistance s1.Angle + + for index++; index < len(p); index++ { + candidate := p[index] + distance := origin.Distance(candidate) + + // We don't allow simplification to create edges longer than + // 90 degrees, to avoid numeric instability as lengths + // approach 180 degrees. We do need to allow for original + // edges longer than 90 degrees, though. + if distance > math.Pi/2 && lastDistance > 0 { + break + } + + // Vertices must be in increasing order along the ray, except + // for the initial disc around the origin. + if distance < lastDistance && lastDistance > tolerance { + break + } + + lastDistance = distance + + // Points that are within the tolerance distance of the origin + // do not constrain the ray direction, so we can ignore them. + if distance <= tolerance { + continue + } + + // If the current wedge of angles does not contain the angle + // to this vertex, then stop right now. Note that the wedge + // of possible ray angles is not necessarily empty yet, but we + // can't continue unless we are willing to backtrack to the + // last vertex that was contained within the wedge (since we + // don't create new vertices). This would be more complicated + // and also make the worst-case running time more than linear. + direction := toFrame(frame, candidate) + center := math.Atan2(direction.Y, direction.X) + if !currentWedge.Contains(center) { + break + } + + // To determine how this vertex constrains the possible ray + // angles, consider the triangle ABC where A is the origin, B + // is the candidate vertex, and C is one of the two tangent + // points between A and the spherical cap of radius + // tolerance centered at B. Then from the spherical law of + // sines, sin(a)/sin(A) = sin(c)/sin(C), where a and c are + // the lengths of the edges opposite A and C. In our case C + // is a 90 degree angle, therefore A = asin(sin(a) / sin(c)). + // Angle A is the half-angle of the allowable wedge. + halfAngle := math.Asin(math.Sin(tolerance.Radians()) / math.Sin(distance.Radians())) + target := s1.IntervalFromPointPair(center, center).Expanded(halfAngle) + currentWedge = currentWedge.Intersection(target) + } + + // We break out of the loop when we reach a vertex index that + // can't be included in the line segment, so back up by one + // vertex. + return index - 1 +} + +// SubsampleVertices returns a subsequence of vertex indices such that the +// polyline connecting these vertices is never further than the given tolerance from +// the original polyline. Provided the first and last vertices are distinct, +// they are always preserved; if they are not, the subsequence may contain +// only a single index. +// +// Some useful properties of the algorithm: +// +// - It runs in linear time. +// +// - The output always represents a valid polyline. 
In particular, adjacent +// output vertices are never identical or antipodal. +// +// - The method is not optimal, but it tends to produce 2-3% fewer +// vertices than the Douglas-Peucker algorithm with the same tolerance. +// +// - The output is parametrically equivalent to the original polyline to +// within the given tolerance. For example, if a polyline backtracks on +// itself and then proceeds onwards, the backtracking will be preserved +// (to within the given tolerance). This is different than the +// Douglas-Peucker algorithm which only guarantees geometric equivalence. +func (p *Polyline) SubsampleVertices(tolerance s1.Angle) []int { + var result []int + + if len(*p) < 1 { + return result + } + + result = append(result, 0) + clampedTolerance := s1.Angle(math.Max(tolerance.Radians(), 0)) + + for index := 0; index+1 < len(*p); { + nextIndex := findEndVertex(*p, clampedTolerance, index) + // Don't create duplicate adjacent vertices. + if (*p)[nextIndex] != (*p)[index] { + result = append(result, nextIndex) + } + index = nextIndex + } + + return result +} + +// Encode encodes the Polyline. +func (p Polyline) Encode(w io.Writer) error { + e := &encoder{w: w} + p.encode(e) + return e.err +} + +func (p Polyline) encode(e *encoder) { + e.writeInt8(encodingVersion) + e.writeUint32(uint32(len(p))) + for _, v := range p { + e.writeFloat64(v.X) + e.writeFloat64(v.Y) + e.writeFloat64(v.Z) + } +} + +// Decode decodes the polyline. +func (p *Polyline) Decode(r io.Reader) error { + d := decoder{r: asByteReader(r)} + p.decode(d) + return d.err +} + +func (p *Polyline) decode(d decoder) { + version := d.readInt8() + if d.err != nil { + return + } + if int(version) != int(encodingVersion) { + d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion) + return + } + nvertices := d.readUint32() + if d.err != nil { + return + } + if nvertices > maxEncodedVertices { + d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices) + return + } + *p = make([]Point, nvertices) + for i := range *p { + (*p)[i].X = d.readFloat64() + (*p)[i].Y = d.readFloat64() + (*p)[i].Z = d.readFloat64() + } +} + +// Project returns a point on the polyline that is closest to the given point, +// and the index of the next vertex after the projected point. The +// value of that index is always in the range [1, len(polyline)]. +// The polyline must not be empty. +func (p *Polyline) Project(point Point) (Point, int) { + if len(*p) == 1 { + // If there is only one vertex, it is always closest to any given point. + return (*p)[0], 1 + } + + // Initial value larger than any possible distance on the unit sphere. + minDist := 10 * s1.Radian + minIndex := -1 + + // Find the line segment in the polyline that is closest to the point given. + for i := 1; i < len(*p); i++ { + if dist := DistanceFromSegment(point, (*p)[i-1], (*p)[i]); dist < minDist { + minDist = dist + minIndex = i + } + } + + // Compute the point on the segment found that is closest to the point given. + closest := Project(point, (*p)[minIndex-1], (*p)[minIndex]) + if closest == (*p)[minIndex] { + minIndex++ + } + + return closest, minIndex +} + +// IsOnRight reports whether the point given is on the right hand side of the +// polyline, using a naive definition of "right-hand-sideness" where the point +// is on the RHS of the polyline iff the point is on the RHS of the line segment +// in the polyline which it is closest to. +// The polyline must have at least 2 vertices. 
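+//
+// For example, a sketch with arbitrary coordinates: travelling east along the
+// equator, a point south of the path should be reported as on the right:
+//
+//	line := PolylineFromLatLngs([]LatLng{
+//		LatLngFromDegrees(0, 0),
+//		LatLngFromDegrees(0, 10),
+//	})
+//	onRight := line.IsOnRight(PointFromLatLng(LatLngFromDegrees(-1, 5))) // expected true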
+func (p *Polyline) IsOnRight(point Point) bool { + // If the closest point C is an interior vertex of the polyline, let B and D + // be the previous and next vertices. The given point P is on the right of + // the polyline (locally) if B, P, D are ordered CCW around vertex C. + closest, next := p.Project(point) + if closest == (*p)[next-1] && next > 1 && next < len(*p) { + if point == (*p)[next-1] { + // Polyline vertices are not on the RHS. + return false + } + return OrderedCCW((*p)[next-2], point, (*p)[next], (*p)[next-1]) + } + // Otherwise, the closest point C is incident to exactly one polyline edge. + // We test the point P against that edge. + if next == len(*p) { + next-- + } + return Sign(point, (*p)[next], (*p)[next-1]) +} + +// Validate checks whether this is a valid polyline or not. +func (p *Polyline) Validate() error { + // All vertices must be unit length. + for i, pt := range *p { + if !pt.IsUnit() { + return fmt.Errorf("vertex %d is not unit length", i) + } + } + + // Adjacent vertices must not be identical or antipodal. + for i := 1; i < len(*p); i++ { + prev, cur := (*p)[i-1], (*p)[i] + if prev == cur { + return fmt.Errorf("vertices %d and %d are identical", i-1, i) + } + if prev == (Point{cur.Mul(-1)}) { + return fmt.Errorf("vertices %d and %d are antipodal", i-1, i) + } + } + + return nil +} + +// Intersects reports whether this polyline intersects the given polyline. If +// the polylines share a vertex they are considered to be intersecting. When a +// polyline endpoint is the only intersection with the other polyline, the +// function may return true or false arbitrarily. +// +// The running time is quadratic in the number of vertices. +func (p *Polyline) Intersects(o *Polyline) bool { + if len(*p) == 0 || len(*o) == 0 { + return false + } + + if !p.RectBound().Intersects(o.RectBound()) { + return false + } + + // TODO(roberts): Use ShapeIndex here. + for i := 1; i < len(*p); i++ { + crosser := NewChainEdgeCrosser((*p)[i-1], (*p)[i], (*o)[0]) + for j := 1; j < len(*o); j++ { + if crosser.ChainCrossingSign((*o)[j]) != DoNotCross { + return true + } + } + } + return false +} + +// Interpolate returns the point whose distance from vertex 0 along the polyline is +// the given fraction of the polyline's total length, and the index of +// the next vertex after the interpolated point P. Fractions less than zero +// or greater than one are clamped. The return value is unit length. The cost of +// this function is currently linear in the number of vertices. +// +// This method allows the caller to easily construct a given suffix of the +// polyline by concatenating P with the polyline vertices starting at that next +// vertex. Note that P is guaranteed to be different than the point at the next +// vertex, so this will never result in a duplicate vertex. +// +// The polyline must not be empty. Note that if fraction >= 1.0, then the next +// vertex will be set to len(p) (indicating that no vertices from the polyline +// need to be appended). The value of the next vertex is always between 1 and +// len(p). +// +// This method can also be used to construct a prefix of the polyline, by +// taking the polyline vertices up to next vertex-1 and appending the +// returned point P if it is different from the last vertex (since in this +// case there is no guarantee of distinctness). 
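+//
+// For example, a sketch with arbitrary coordinates:
+//
+//	line := PolylineFromLatLngs([]LatLng{
+//		LatLngFromDegrees(0, 0),
+//		LatLngFromDegrees(0, 10),
+//	})
+//	mid, next := line.Interpolate(0.5)
+//	// mid is approximately the point at 0°N 5°E, and next == 1, so the
+//	// suffix from mid onward is mid followed by the vertices from index 1.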
+func (p *Polyline) Interpolate(fraction float64) (Point, int) {
+	// We intentionally let the (fraction >= 1) case fall through, since
+	// we need to handle it in the loop below in any case because of
+	// possible roundoff errors.
+	if fraction <= 0 {
+		return (*p)[0], 1
+	}
+	target := s1.Angle(fraction) * p.Length()
+
+	for i := 1; i < len(*p); i++ {
+		length := (*p)[i-1].Distance((*p)[i])
+		if target < length {
+			// This interpolates with respect to arc length rather than
+			// straight-line distance, and produces a unit-length result.
+			result := InterpolateAtDistance(target, (*p)[i-1], (*p)[i])
+
+			// It is possible that (result == vertex(i)) due to rounding errors.
+			if result == (*p)[i] {
+				return result, i + 1
+			}
+			return result, i
+		}
+		target -= length
+	}
+
+	return (*p)[len(*p)-1], len(*p)
+}
+
+// Uninterpolate is the inverse operation of Interpolate. Given a point on the
+// polyline, it returns the ratio of the distance to the point from the
+// beginning of the polyline over the length of the polyline. The return
+// value is always between 0 and 1 inclusive.
+//
+// The polyline should not be empty. If it has fewer than 2 vertices, the
+// return value is zero.
+func (p *Polyline) Uninterpolate(point Point, nextVertex int) float64 {
+	if len(*p) < 2 {
+		return 0
+	}
+
+	var sum s1.Angle
+	for i := 1; i < nextVertex; i++ {
+		sum += (*p)[i-1].Distance((*p)[i])
+	}
+	lengthToPoint := sum + (*p)[nextVertex-1].Distance(point)
+	for i := nextVertex; i < len(*p); i++ {
+		sum += (*p)[i-1].Distance((*p)[i])
+	}
+	// The ratio can be greater than 1.0 due to rounding errors or because the
+	// point is not exactly on the polyline.
+	return minFloat64(1.0, float64(lengthToPoint/sum))
+}
+
+// TODO(roberts): Differences from C++.
+// NearlyCoversPolyline
+// InitToSnapped
+// InitToSimplified
+// SnapLevel
+// encode/decode compressed
diff --git a/vendor/github.com/golang/geo/s2/polyline_measures.go b/vendor/github.com/golang/geo/s2/polyline_measures.go
new file mode 100644
index 000000000..38ce991b5
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/polyline_measures.go
@@ -0,0 +1,53 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file defines various measures for polylines on the sphere. These are
+// low-level methods that work directly with arrays of Points. They are used to
+// implement the methods in various other measures files.
+
+import (
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+// polylineLength returns the length of the given Polyline.
+// It returns 0 for polylines with fewer than two vertices.
+func polylineLength(p []Point) s1.Angle {
+	var length s1.Angle
+
+	for i := 1; i < len(p); i++ {
+		length += p[i-1].Distance(p[i])
+	}
+	return length
+}
+
+// polylineCentroid returns the true centroid of the polyline multiplied by the
+// length of the polyline. The result is not unit length, so you may wish to
+// normalize it.
+//
+// Scaling by the Polyline length makes it easy to compute the centroid
+// of several Polylines (by simply adding up their centroids).
+//
+// Note that for degenerate Polylines (e.g., AA) this returns Point(0, 0, 0).
+// (This answer is correct; the result of this function is a line integral over
+// the polyline, whose value is always zero if the polyline is degenerate.)
+func polylineCentroid(p []Point) Point {
+	var centroid r3.Vector
+	for i := 1; i < len(p); i++ {
+		centroid = centroid.Add(EdgeTrueCentroid(p[i-1], p[i]).Vector)
+	}
+	return Point{centroid}
+}
diff --git a/vendor/github.com/golang/geo/s2/predicates.go b/vendor/github.com/golang/geo/s2/predicates.go
new file mode 100644
index 000000000..9fc5e1751
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/predicates.go
@@ -0,0 +1,701 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file contains various predicates that are guaranteed to produce
+// correct, consistent results. They are also relatively efficient. This is
+// achieved by computing conservative error bounds and falling back to high
+// precision or even exact arithmetic when the result is uncertain. Such
+// predicates are useful in implementing robust algorithms.
+//
+// See also EdgeCrosser, which implements various exact
+// edge-crossing predicates more efficiently than can be done here.
+
+import (
+	"math"
+	"math/big"
+
+	"github.com/golang/geo/r3"
+	"github.com/golang/geo/s1"
+)
+
+const (
+	// If any other machine architectures need to be supported, these next three
+	// values will need to be updated.
+
+	// epsilon is a small number that represents a reasonable level of noise between two
+	// values that can be considered to be equal.
+	epsilon = 1e-15
+	// dblEpsilon is a smaller number for values that require more precision.
+	// This is the C++ DBL_EPSILON equivalent.
+	dblEpsilon = 2.220446049250313e-16
+	// dblError is the C++ value for S2 rounding_epsilon().
+	dblError = 1.110223024625156e-16
+
+	// maxDeterminantError is the maximum error in computing (AxB).C where all vectors
+	// are unit length. Using standard inequalities, it can be shown that
+	//
+	//	fl(AxB) = AxB + D where |D| <= (|AxB| + (2/sqrt(3))*|A|*|B|) * e
+	//
+	// where "fl()" denotes a calculation done in floating-point arithmetic,
+	// |x| denotes either absolute value or the L2-norm as appropriate, and
+	// e is a reasonably small value near the noise level of floating point
+	// number accuracy. Similarly,
+	//
+	//	fl(B.C) = B.C + d where |d| <= (|B.C| + 2*|B|*|C|) * e .
+	//
+	// Applying these bounds to the unit-length vectors A,B,C and neglecting
+	// relative error (which does not affect the sign of the result), we get
+	//
+	//	fl((AxB).C) = (AxB).C + d where |d| <= (3 + 2/sqrt(3)) * e
+	maxDeterminantError = 1.8274 * dblEpsilon
+
+	// detErrorMultiplier is the factor to scale the magnitudes by when checking
+	// for the sign of a set of points with certainty.
Using a similar technique to + // the one used for maxDeterminantError, the error is at most: + // + // |d| <= (3 + 6/sqrt(3)) * |A-C| * |B-C| * e + // + // If the determinant magnitude is larger than this value then we know + // its sign with certainty. + detErrorMultiplier = 3.2321 * dblEpsilon +) + +// Direction is an indication of the ordering of a set of points. +type Direction int + +// These are the three options for the direction of a set of points. +const ( + Clockwise Direction = -1 + Indeterminate Direction = 0 + CounterClockwise Direction = 1 +) + +// newBigFloat constructs a new big.Float with maximum precision. +func newBigFloat() *big.Float { return new(big.Float).SetPrec(big.MaxPrec) } + +// Sign returns true if the points A, B, C are strictly counterclockwise, +// and returns false if the points are clockwise or collinear (i.e. if they are all +// contained on some great circle). +// +// Due to numerical errors, situations may arise that are mathematically +// impossible, e.g. ABC may be considered strictly CCW while BCA is not. +// However, the implementation guarantees the following: +// +// If Sign(a,b,c), then !Sign(c,b,a) for all a,b,c. +func Sign(a, b, c Point) bool { + // NOTE(dnadasi): In the C++ API the equivalent method here was known as "SimpleSign". + + // We compute the signed volume of the parallelepiped ABC. The usual + // formula for this is (A ⨯ B) · C, but we compute it here using (C ⨯ A) · B + // in order to ensure that ABC and CBA are not both CCW. This follows + // from the following identities (which are true numerically, not just + // mathematically): + // + // (1) x ⨯ y == -(y ⨯ x) + // (2) -x · y == -(x · y) + return c.Cross(a.Vector).Dot(b.Vector) > 0 +} + +// RobustSign returns a Direction representing the ordering of the points. +// CounterClockwise is returned if the points are in counter-clockwise order, +// Clockwise for clockwise, and Indeterminate if any two points are the same (collinear), +// or the sign could not completely be determined. +// +// This function has additional logic to make sure that the above properties hold even +// when the three points are coplanar, and to deal with the limitations of +// floating-point arithmetic. +// +// RobustSign satisfies the following conditions: +// +// (1) RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a +// (2) RobustSign(b,c,a) == RobustSign(a,b,c) for all a,b,c +// (3) RobustSign(c,b,a) == -RobustSign(a,b,c) for all a,b,c +// +// In other words: +// +// (1) The result is Indeterminate if and only if two points are the same. +// (2) Rotating the order of the arguments does not affect the result. +// (3) Exchanging any two arguments inverts the result. +// +// On the other hand, note that it is not true in general that +// RobustSign(-a,b,c) == -RobustSign(a,b,c), or any similar identities +// involving antipodal points. +func RobustSign(a, b, c Point) Direction { + sign := triageSign(a, b, c) + if sign == Indeterminate { + sign = expensiveSign(a, b, c) + } + return sign +} + +// stableSign reports the direction sign of the points in a numerically stable way. +// Unlike triageSign, this method can usually compute the correct determinant sign +// even when all three points are as collinear as possible. For example if three +// points are spaced 1km apart along a random line on the Earth's surface using +// the nearest representable points, there is only a 0.4% chance that this method +// will not be able to find the determinant sign. 
The probability of failure +// decreases as the points get closer together; if the collinear points are 1 meter +// apart, the failure rate drops to 0.0004%. +// +// This method could be extended to also handle nearly-antipodal points, but antipodal +// points are rare in practice so it seems better to simply fall back to +// exact arithmetic in that case. +func stableSign(a, b, c Point) Direction { + ab := b.Sub(a.Vector) + ab2 := ab.Norm2() + bc := c.Sub(b.Vector) + bc2 := bc.Norm2() + ca := a.Sub(c.Vector) + ca2 := ca.Norm2() + + // Now compute the determinant ((A-C)x(B-C)).C, where the vertices have been + // cyclically permuted if necessary so that AB is the longest edge. (This + // minimizes the magnitude of cross product.) At the same time we also + // compute the maximum error in the determinant. + + // The two shortest edges, pointing away from their common point. + var e1, e2, op r3.Vector + if ab2 >= bc2 && ab2 >= ca2 { + // AB is the longest edge. + e1, e2, op = ca, bc, c.Vector + } else if bc2 >= ca2 { + // BC is the longest edge. + e1, e2, op = ab, ca, a.Vector + } else { + // CA is the longest edge. + e1, e2, op = bc, ab, b.Vector + } + + det := -e1.Cross(e2).Dot(op) + maxErr := detErrorMultiplier * math.Sqrt(e1.Norm2()*e2.Norm2()) + + // If the determinant isn't zero, within maxErr, we know definitively the point ordering. + if det > maxErr { + return CounterClockwise + } + if det < -maxErr { + return Clockwise + } + return Indeterminate +} + +// triageSign returns the direction sign of the points. It returns Indeterminate if two +// points are identical or the result is uncertain. Uncertain cases can be resolved, if +// desired, by calling expensiveSign. +// +// The purpose of this method is to allow additional cheap tests to be done without +// calling expensiveSign. +func triageSign(a, b, c Point) Direction { + det := a.Cross(b.Vector).Dot(c.Vector) + if det > maxDeterminantError { + return CounterClockwise + } + if det < -maxDeterminantError { + return Clockwise + } + return Indeterminate +} + +// expensiveSign reports the direction sign of the points. It returns Indeterminate +// if two of the input points are the same. It uses multiple-precision arithmetic +// to ensure that its results are always self-consistent. +func expensiveSign(a, b, c Point) Direction { + // Return Indeterminate if and only if two points are the same. + // This ensures RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a. + // ie. Property 1 of RobustSign. + if a == b || b == c || c == a { + return Indeterminate + } + + // Next we try recomputing the determinant still using floating-point + // arithmetic but in a more precise way. This is more expensive than the + // simple calculation done by triageSign, but it is still *much* cheaper + // than using arbitrary-precision arithmetic. This optimization is able to + // compute the correct determinant sign in virtually all cases except when + // the three points are truly collinear (e.g., three points on the equator). + detSign := stableSign(a, b, c) + if detSign != Indeterminate { + return detSign + } + + // Otherwise fall back to exact arithmetic and symbolic permutations. + return exactSign(a, b, c, true) +} + +// exactSign reports the direction sign of the points computed using high-precision +// arithmetic and/or symbolic perturbations. +func exactSign(a, b, c Point, perturb bool) Direction { + // Sort the three points in lexicographic order, keeping track of the sign + // of the permutation. 
(Each exchange inverts the sign of the determinant.) + permSign := CounterClockwise + pa := &a + pb := &b + pc := &c + if pa.Cmp(pb.Vector) > 0 { + pa, pb = pb, pa + permSign = -permSign + } + if pb.Cmp(pc.Vector) > 0 { + pb, pc = pc, pb + permSign = -permSign + } + if pa.Cmp(pb.Vector) > 0 { + pa, pb = pb, pa + permSign = -permSign + } + + // Construct multiple-precision versions of the sorted points and compute + // their precise 3x3 determinant. + xa := r3.PreciseVectorFromVector(pa.Vector) + xb := r3.PreciseVectorFromVector(pb.Vector) + xc := r3.PreciseVectorFromVector(pc.Vector) + xbCrossXc := xb.Cross(xc) + det := xa.Dot(xbCrossXc) + + // The precision of big.Float is high enough that the result should always + // be exact enough (no rounding was performed). + + // If the exact determinant is non-zero, we're done. + detSign := Direction(det.Sign()) + if detSign == Indeterminate && perturb { + // Otherwise, we need to resort to symbolic perturbations to resolve the + // sign of the determinant. + detSign = symbolicallyPerturbedSign(xa, xb, xc, xbCrossXc) + } + return permSign * detSign +} + +// symbolicallyPerturbedSign reports the sign of the determinant of three points +// A, B, C under a model where every possible Point is slightly perturbed by +// a unique infinitesmal amount such that no three perturbed points are +// collinear and no four points are coplanar. The perturbations are so small +// that they do not change the sign of any determinant that was non-zero +// before the perturbations, and therefore can be safely ignored unless the +// determinant of three points is exactly zero (using multiple-precision +// arithmetic). This returns CounterClockwise or Clockwise according to the +// sign of the determinant after the symbolic perturbations are taken into account. +// +// Since the symbolic perturbation of a given point is fixed (i.e., the +// perturbation is the same for all calls to this method and does not depend +// on the other two arguments), the results of this method are always +// self-consistent. It will never return results that would correspond to an +// impossible configuration of non-degenerate points. +// +// This requires that the 3x3 determinant of A, B, C must be exactly zero. +// And the points must be distinct, with A < B < C in lexicographic order. +// +// Reference: +// "Simulation of Simplicity" (Edelsbrunner and Muecke, ACM Transactions on +// Graphics, 1990). +// +func symbolicallyPerturbedSign(a, b, c, bCrossC r3.PreciseVector) Direction { + // This method requires that the points are sorted in lexicographically + // increasing order. This is because every possible Point has its own + // symbolic perturbation such that if A < B then the symbolic perturbation + // for A is much larger than the perturbation for B. + // + // Alternatively, we could sort the points in this method and keep track of + // the sign of the permutation, but it is more efficient to do this before + // converting the inputs to the multi-precision representation, and this + // also lets us re-use the result of the cross product B x C. + // + // Every input coordinate x[i] is assigned a symbolic perturbation dx[i]. + // We then compute the sign of the determinant of the perturbed points, + // i.e. 
+ // | a.X+da.X a.Y+da.Y a.Z+da.Z | + // | b.X+db.X b.Y+db.Y b.Z+db.Z | + // | c.X+dc.X c.Y+dc.Y c.Z+dc.Z | + // + // The perturbations are chosen such that + // + // da.Z > da.Y > da.X > db.Z > db.Y > db.X > dc.Z > dc.Y > dc.X + // + // where each perturbation is so much smaller than the previous one that we + // don't even need to consider it unless the coefficients of all previous + // perturbations are zero. In fact, it is so small that we don't need to + // consider it unless the coefficient of all products of the previous + // perturbations are zero. For example, we don't need to consider the + // coefficient of db.Y unless the coefficient of db.Z *da.X is zero. + // + // The follow code simply enumerates the coefficients of the perturbations + // (and products of perturbations) that appear in the determinant above, in + // order of decreasing perturbation magnitude. The first non-zero + // coefficient determines the sign of the result. The easiest way to + // enumerate the coefficients in the correct order is to pretend that each + // perturbation is some tiny value "eps" raised to a power of two: + // + // eps** 1 2 4 8 16 32 64 128 256 + // da.Z da.Y da.X db.Z db.Y db.X dc.Z dc.Y dc.X + // + // Essentially we can then just count in binary and test the corresponding + // subset of perturbations at each step. So for example, we must test the + // coefficient of db.Z*da.X before db.Y because eps**12 > eps**16. + // + // Of course, not all products of these perturbations appear in the + // determinant above, since the determinant only contains the products of + // elements in distinct rows and columns. Thus we don't need to consider + // da.Z*da.Y, db.Y *da.Y, etc. Furthermore, sometimes different pairs of + // perturbations have the same coefficient in the determinant; for example, + // da.Y*db.X and db.Y*da.X have the same coefficient (c.Z). Therefore + // we only need to test this coefficient the first time we encounter it in + // the binary order above (which will be db.Y*da.X). + // + // The sequence of tests below also appears in Table 4-ii of the paper + // referenced above, if you just want to look it up, with the following + // translations: [a,b,c] -> [i,j,k] and [0,1,2] -> [1,2,3]. Also note that + // some of the signs are different because the opposite cross product is + // used (e.g., B x C rather than C x B). + + detSign := bCrossC.Z.Sign() // da.Z + if detSign != 0 { + return Direction(detSign) + } + detSign = bCrossC.Y.Sign() // da.Y + if detSign != 0 { + return Direction(detSign) + } + detSign = bCrossC.X.Sign() // da.X + if detSign != 0 { + return Direction(detSign) + } + + detSign = newBigFloat().Sub(newBigFloat().Mul(c.X, a.Y), newBigFloat().Mul(c.Y, a.X)).Sign() // db.Z + if detSign != 0 { + return Direction(detSign) + } + detSign = c.X.Sign() // db.Z * da.Y + if detSign != 0 { + return Direction(detSign) + } + detSign = -(c.Y.Sign()) // db.Z * da.X + if detSign != 0 { + return Direction(detSign) + } + + detSign = newBigFloat().Sub(newBigFloat().Mul(c.Z, a.X), newBigFloat().Mul(c.X, a.Z)).Sign() // db.Y + if detSign != 0 { + return Direction(detSign) + } + detSign = c.Z.Sign() // db.Y * da.X + if detSign != 0 { + return Direction(detSign) + } + + // The following test is listed in the paper, but it is redundant because + // the previous tests guarantee that C == (0, 0, 0). 
+ // (c.Y*a.Z - c.Z*a.Y).Sign() // db.X + + detSign = newBigFloat().Sub(newBigFloat().Mul(a.X, b.Y), newBigFloat().Mul(a.Y, b.X)).Sign() // dc.Z + if detSign != 0 { + return Direction(detSign) + } + detSign = -(b.X.Sign()) // dc.Z * da.Y + if detSign != 0 { + return Direction(detSign) + } + detSign = b.Y.Sign() // dc.Z * da.X + if detSign != 0 { + return Direction(detSign) + } + detSign = a.X.Sign() // dc.Z * db.Y + if detSign != 0 { + return Direction(detSign) + } + return CounterClockwise // dc.Z * db.Y * da.X +} + +// CompareDistances returns -1, 0, or +1 according to whether AX < BX, A == B, +// or AX > BX respectively. Distances are measured with respect to the positions +// of X, A, and B as though they were reprojected to lie exactly on the surface of +// the unit sphere. Furthermore, this method uses symbolic perturbations to +// ensure that the result is non-zero whenever A != B, even when AX == BX +// exactly, or even when A and B project to the same point on the sphere. +// Such results are guaranteed to be self-consistent, i.e. if AB < BC and +// BC < AC, then AB < AC. +func CompareDistances(x, a, b Point) int { + // We start by comparing distances using dot products (i.e., cosine of the + // angle), because (1) this is the cheapest technique, and (2) it is valid + // over the entire range of possible angles. (We can only use the sin^2 + // technique if both angles are less than 90 degrees or both angles are + // greater than 90 degrees.) + sign := triageCompareCosDistances(x, a, b) + if sign != 0 { + return sign + } + + // Optimization for (a == b) to avoid falling back to exact arithmetic. + if a == b { + return 0 + } + + // It is much better numerically to compare distances using cos(angle) if + // the distances are near 90 degrees and sin^2(angle) if the distances are + // near 0 or 180 degrees. We only need to check one of the two angles when + // making this decision because the fact that the test above failed means + // that angles "a" and "b" are very close together. + cosAX := a.Dot(x.Vector) + if cosAX > 1/math.Sqrt2 { + // Angles < 45 degrees. + sign = triageCompareSin2Distances(x, a, b) + } else if cosAX < -1/math.Sqrt2 { + // Angles > 135 degrees. sin^2(angle) is decreasing in this range. + sign = -triageCompareSin2Distances(x, a, b) + } + // C++ adds an additional check here using 80-bit floats. + // This is skipped in Go because we only have 32 and 64 bit floats. + + if sign != 0 { + return sign + } + + sign = exactCompareDistances(r3.PreciseVectorFromVector(x.Vector), r3.PreciseVectorFromVector(a.Vector), r3.PreciseVectorFromVector(b.Vector)) + if sign != 0 { + return sign + } + return symbolicCompareDistances(x, a, b) +} + +// cosDistance returns cos(XY) where XY is the angle between X and Y, and the +// maximum error amount in the result. This requires X and Y be normalized. +func cosDistance(x, y Point) (cos, err float64) { + cos = x.Dot(y.Vector) + return cos, 9.5*dblError*math.Abs(cos) + 1.5*dblError +} + +// sin2Distance returns sin**2(XY), where XY is the angle between X and Y, +// and the maximum error amount in the result. This requires X and Y be normalized. +func sin2Distance(x, y Point) (sin2, err float64) { + // The (x-y).Cross(x+y) trick eliminates almost all of error due to x + // and y being not quite unit length. This method is extremely accurate + // for small distances; the *relative* error in the result is O(dblError) for + // distances as small as dblError. 
+ n := x.Sub(y.Vector).Cross(x.Add(y.Vector)) + sin2 = 0.25 * n.Norm2() + err = ((21+4*math.Sqrt(3))*dblError*sin2 + + 32*math.Sqrt(3)*dblError*dblError*math.Sqrt(sin2) + + 768*dblError*dblError*dblError*dblError) + return sin2, err +} + +// triageCompareCosDistances returns -1, 0, or +1 according to whether AX < BX, +// A == B, or AX > BX by comparing the distances between them using cosDistance. +func triageCompareCosDistances(x, a, b Point) int { + cosAX, cosAXerror := cosDistance(a, x) + cosBX, cosBXerror := cosDistance(b, x) + diff := cosAX - cosBX + err := cosAXerror + cosBXerror + if diff > err { + return -1 + } + if diff < -err { + return 1 + } + return 0 +} + +// triageCompareSin2Distances returns -1, 0, or +1 according to whether AX < BX, +// A == B, or AX > BX by comparing the distances between them using sin2Distance. +func triageCompareSin2Distances(x, a, b Point) int { + sin2AX, sin2AXerror := sin2Distance(a, x) + sin2BX, sin2BXerror := sin2Distance(b, x) + diff := sin2AX - sin2BX + err := sin2AXerror + sin2BXerror + if diff > err { + return 1 + } + if diff < -err { + return -1 + } + return 0 +} + +// exactCompareDistances returns -1, 0, or 1 after comparing using the values as +// PreciseVectors. +func exactCompareDistances(x, a, b r3.PreciseVector) int { + // This code produces the same result as though all points were reprojected + // to lie exactly on the surface of the unit sphere. It is based on testing + // whether x.Dot(a.Normalize()) < x.Dot(b.Normalize()), reformulated + // so that it can be evaluated using exact arithmetic. + cosAX := x.Dot(a) + cosBX := x.Dot(b) + + // If the two values have different signs, we need to handle that case now + // before squaring them below. + aSign := cosAX.Sign() + bSign := cosBX.Sign() + if aSign != bSign { + // If cos(AX) > cos(BX), then AX < BX. + if aSign > bSign { + return -1 + } + return 1 + } + cosAX2 := newBigFloat().Mul(cosAX, cosAX) + cosBX2 := newBigFloat().Mul(cosBX, cosBX) + cmp := newBigFloat().Sub(cosBX2.Mul(cosBX2, a.Norm2()), cosAX2.Mul(cosAX2, b.Norm2())) + return aSign * cmp.Sign() +} + +// symbolicCompareDistances returns -1, 0, or +1 given three points such that AX == BX +// (exactly) according to whether AX < BX, AX == BX, or AX > BX after symbolic +// perturbations are taken into account. +func symbolicCompareDistances(x, a, b Point) int { + // Our symbolic perturbation strategy is based on the following model. + // Similar to "simulation of simplicity", we assign a perturbation to every + // point such that if A < B, then the symbolic perturbation for A is much, + // much larger than the symbolic perturbation for B. We imagine that + // rather than projecting every point to lie exactly on the unit sphere, + // instead each point is positioned on its own tiny pedestal that raises it + // just off the surface of the unit sphere. This means that the distance AX + // is actually the true distance AX plus the (symbolic) heights of the + // pedestals for A and X. The pedestals are infinitesmally thin, so they do + // not affect distance measurements except at the two endpoints. If several + // points project to exactly the same point on the unit sphere, we imagine + // that they are placed on separate pedestals placed close together, where + // the distance between pedestals is much, much less than the height of any + // pedestal. (There are a finite number of Points, and therefore a finite + // number of pedestals, so this is possible.) 
+	//
+	// If A < B, then A is on a higher pedestal than B, and therefore AX > BX.
+	switch a.Cmp(b.Vector) {
+	case -1:
+		return 1
+	case 1:
+		return -1
+	default:
+		return 0
+	}
+}
+
+var (
+	// ca45Degrees is a predefined ChordAngle representing (approximately) 45 degrees.
+	ca45Degrees = s1.ChordAngleFromSquaredLength(2 - math.Sqrt2)
+)
+
+// CompareDistance returns -1, 0, or +1 according to whether the distance XY is
+// respectively less than, equal to, or greater than the provided chord angle.
+// Distances are measured with respect to the positions of all points as though
+// they are projected to lie exactly on the surface of the unit sphere.
+func CompareDistance(x, y Point, r s1.ChordAngle) int {
+	// As with CompareDistances, we start by comparing dot products because
+	// the sin^2 method is only valid when the distance XY and the limit "r" are
+	// both less than 90 degrees.
+	sign := triageCompareCosDistance(x, y, float64(r))
+	if sign != 0 {
+		return sign
+	}
+
+	// Unlike with CompareDistances, it's not worth using the sin^2 method
+	// when the distance limit is near 180 degrees because the ChordAngle
+	// representation itself has a rounding error of up to 2e-8 radians for
+	// distances near 180 degrees.
+	if r < ca45Degrees {
+		sign = triageCompareSin2Distance(x, y, float64(r))
+		if sign != 0 {
+			return sign
+		}
+	}
+	return exactCompareDistance(r3.PreciseVectorFromVector(x.Vector), r3.PreciseVectorFromVector(y.Vector), big.NewFloat(float64(r)).SetPrec(big.MaxPrec))
+}
+
+// triageCompareCosDistance returns -1, 0, or +1 according to whether the distance XY is
+// less than, equal to, or greater than r2 respectively using cos distance.
+func triageCompareCosDistance(x, y Point, r2 float64) int {
+	cosXY, cosXYError := cosDistance(x, y)
+	cosR := 1.0 - 0.5*r2
+	cosRError := 2.0 * dblError * cosR
+	diff := cosXY - cosR
+	err := cosXYError + cosRError
+	if diff > err {
+		return -1
+	}
+	if diff < -err {
+		return 1
+	}
+	return 0
+}
+
+// triageCompareSin2Distance returns -1, 0, or +1 according to whether the distance XY is
+// less than, equal to, or greater than r2 respectively using sin^2 distance.
+func triageCompareSin2Distance(x, y Point, r2 float64) int {
+	// Only valid for distance limits < 90 degrees.
+	sin2XY, sin2XYError := sin2Distance(x, y)
+	sin2R := r2 * (1.0 - 0.25*r2)
+	sin2RError := 3.0 * dblError * sin2R
+	diff := sin2XY - sin2R
+	err := sin2XYError + sin2RError
+	if diff > err {
+		return 1
+	}
+	if diff < -err {
+		return -1
+	}
+	return 0
+}
+
+var (
+	bigOne  = big.NewFloat(1.0).SetPrec(big.MaxPrec)
+	bigHalf = big.NewFloat(0.5).SetPrec(big.MaxPrec)
+)
+
+// exactCompareDistance returns -1, 0, or +1 after comparing using PreciseVectors.
+func exactCompareDistance(x, y r3.PreciseVector, r2 *big.Float) int {
+	// This code produces the same result as though all points were reprojected
+	// to lie exactly on the surface of the unit sphere. It is based on
+	// comparing the cosine of the angle XY (when both points are projected to
+	// lie exactly on the sphere) to the given threshold.
+	cosXY := x.Dot(y)
+	cosR := newBigFloat().Sub(bigOne, newBigFloat().Mul(bigHalf, r2))
+
+	// If the two values have different signs, we need to handle that case now
+	// before squaring them below.
+	xySign := cosXY.Sign()
+	rSign := cosR.Sign()
+	if xySign != rSign {
+		if xySign > rSign {
+			return -1
+		}
+		return 1 // If cos(XY) > cos(r), then XY < r.
+ } + cmp := newBigFloat().Sub( + newBigFloat().Mul( + newBigFloat().Mul(cosR, cosR), newBigFloat().Mul(x.Norm2(), y.Norm2())), + newBigFloat().Mul(cosXY, cosXY)) + return xySign * cmp.Sign() +} + +// TODO(roberts): Differences from C++ +// CompareEdgeDistance +// CompareEdgeDirections +// EdgeCircumcenterSign +// GetVoronoiSiteExclusion +// GetClosestVertex +// TriageCompareLineSin2Distance +// TriageCompareLineCos2Distance +// TriageCompareLineDistance +// TriageCompareEdgeDistance +// ExactCompareLineDistance +// ExactCompareEdgeDistance +// TriageCompareEdgeDirections +// ExactCompareEdgeDirections +// ArePointsAntipodal +// ArePointsLinearlyDependent +// GetCircumcenter +// TriageEdgeCircumcenterSign +// ExactEdgeCircumcenterSign +// UnperturbedSign +// SymbolicEdgeCircumcenterSign +// ExactVoronoiSiteExclusion diff --git a/vendor/github.com/golang/geo/s2/projections.go b/vendor/github.com/golang/geo/s2/projections.go new file mode 100644 index 000000000..07b8e62d2 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/projections.go @@ -0,0 +1,203 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/r2" + "github.com/golang/geo/s1" +) + +// Projection defines an interface for different ways of mapping between s2 and r2 Points. +// It can also define the coordinate wrapping behavior along each axis. +type Projection interface { + // Project converts a point on the sphere to a projected 2D point. + Project(p Point) r2.Point + + // Unproject converts a projected 2D point to a point on the sphere. + // + // If wrapping is defined for a given axis (see below), then this method + // should accept any real number for the corresponding coordinate. + Unproject(p r2.Point) Point + + // FromLatLng is a convenience function equivalent to Project(LatLngToPoint(ll)), + // but the implementation is more efficient. + FromLatLng(ll LatLng) r2.Point + + // ToLatLng is a convenience function equivalent to LatLngFromPoint(Unproject(p)), + // but the implementation is more efficient. + ToLatLng(p r2.Point) LatLng + + // Interpolate returns the point obtained by interpolating the given + // fraction of the distance along the line from A to B. + // Fractions < 0 or > 1 result in extrapolation instead. + Interpolate(f float64, a, b r2.Point) r2.Point + + // WrapDistance reports the coordinate wrapping distance along each axis. + // If this value is non-zero for a given axis, the coordinates are assumed + // to "wrap" with the given period. For example, if WrapDistance.Y == 360 + // then (x, y) and (x, y + 360) should map to the same Point. + // + // This information is used to ensure that edges takes the shortest path + // between two given points. For example, if coordinates represent + // (latitude, longitude) pairs in degrees and WrapDistance().Y == 360, + // then the edge (5:179, 5:-179) would be interpreted as spanning 2 degrees + // of longitude rather than 358 degrees. 
+ // + // If a given axis does not wrap, its WrapDistance should be set to zero. + WrapDistance() r2.Point +} + +// PlateCarreeProjection defines the "plate carree" (square plate) projection, +// which converts points on the sphere to (longitude, latitude) pairs. +// Coordinates can be scaled so that they represent radians, degrees, etc, but +// the projection is always centered around (latitude=0, longitude=0). +// +// Note that (x, y) coordinates are backwards compared to the usual (latitude, +// longitude) ordering, in order to match the usual convention for graphs in +// which "x" is horizontal and "y" is vertical. +type PlateCarreeProjection struct { + xWrap float64 + toRadians float64 // Multiplier to convert coordinates to radians. + fromRadians float64 // Multiplier to convert coordinates from radians. +} + +// NewPlateCarreeProjection constructs a plate carree projection where the +// x-coordinates (lng) span [-xScale, xScale] and the y coordinates (lat) +// span [-xScale/2, xScale/2]. For example if xScale==180 then the x +// range is [-180, 180] and the y range is [-90, 90]. +// +// By default coordinates are expressed in radians, i.e. the x range is +// [-Pi, Pi] and the y range is [-Pi/2, Pi/2]. +func NewPlateCarreeProjection(xScale float64) Projection { + return &PlateCarreeProjection{ + xWrap: 2 * xScale, + toRadians: math.Pi / xScale, + fromRadians: xScale / math.Pi, + } +} + +// Project converts a point on the sphere to a projected 2D point. +func (p *PlateCarreeProjection) Project(pt Point) r2.Point { + return p.FromLatLng(LatLngFromPoint(pt)) +} + +// Unproject converts a projected 2D point to a point on the sphere. +func (p *PlateCarreeProjection) Unproject(pt r2.Point) Point { + return PointFromLatLng(p.ToLatLng(pt)) +} + +// FromLatLng returns the LatLng projected into an R2 Point. +func (p *PlateCarreeProjection) FromLatLng(ll LatLng) r2.Point { + return r2.Point{ + X: p.fromRadians * ll.Lng.Radians(), + Y: p.fromRadians * ll.Lat.Radians(), + } +} + +// ToLatLng returns the LatLng projected from the given R2 Point. +func (p *PlateCarreeProjection) ToLatLng(pt r2.Point) LatLng { + return LatLng{ + Lat: s1.Angle(p.toRadians * pt.Y), + Lng: s1.Angle(p.toRadians * math.Remainder(pt.X, p.xWrap)), + } +} + +// Interpolate returns the point obtained by interpolating the given +// fraction of the distance along the line from A to B. +func (p *PlateCarreeProjection) Interpolate(f float64, a, b r2.Point) r2.Point { + return a.Mul(1 - f).Add(b.Mul(f)) +} + +// WrapDistance reports the coordinate wrapping distance along each axis. +func (p *PlateCarreeProjection) WrapDistance() r2.Point { + return r2.Point{p.xWrap, 0} +} + +// MercatorProjection defines the spherical Mercator projection. Google Maps +// uses this projection together with WGS84 coordinates, in which case it is +// known as the "Web Mercator" projection (see Wikipedia). This class makes +// no assumptions regarding the coordinate system of its input points, but +// simply applies the spherical Mercator projection to them. +// +// The Mercator projection is finite in width (x) but infinite in height (y). +// "x" corresponds to longitude, and spans a finite range such as [-180, 180] +// (with coordinate wrapping), while "y" is a function of latitude and spans +// an infinite range. (As "y" coordinates get larger, points get closer to +// the north pole but never quite reach it.) The north and south poles have +// infinite "y" values. 
(Note that this will cause problems if you tessellate +// a Mercator edge where one endpoint is a pole. If you need to do this, clip +// the edge first so that the "y" coordinate is no more than about 5 * maxX.) +type MercatorProjection struct { + xWrap float64 + toRadians float64 // Multiplier to convert coordinates to radians. + fromRadians float64 // Multiplier to convert coordinates from radians. +} + +// NewMercatorProjection constructs a Mercator projection with the given maximum +// longitude axis value corresponding to a range of [-maxLng, maxLng]. +// The horizontal and vertical axes are scaled equally. +func NewMercatorProjection(maxLng float64) Projection { + return &MercatorProjection{ + xWrap: 2 * maxLng, + toRadians: math.Pi / maxLng, + fromRadians: maxLng / math.Pi, + } +} + +// Project converts a point on the sphere to a projected 2D point. +func (p *MercatorProjection) Project(pt Point) r2.Point { + return p.FromLatLng(LatLngFromPoint(pt)) +} + +// Unproject converts a projected 2D point to a point on the sphere. +func (p *MercatorProjection) Unproject(pt r2.Point) Point { + return PointFromLatLng(p.ToLatLng(pt)) +} + +// FromLatLng returns the LatLng projected into an R2 Point. +func (p *MercatorProjection) FromLatLng(ll LatLng) r2.Point { + // This formula is more accurate near zero than the log(tan()) version. + // Note that latitudes of +/- 90 degrees yield "y" values of +/- infinity. + sinPhi := math.Sin(float64(ll.Lat)) + y := 0.5 * math.Log((1+sinPhi)/(1-sinPhi)) + return r2.Point{p.fromRadians * float64(ll.Lng), p.fromRadians * y} +} + +// ToLatLng returns the LatLng projected from the given R2 Point. +func (p *MercatorProjection) ToLatLng(pt r2.Point) LatLng { + // This formula is more accurate near zero than the atan(exp()) version. + x := p.toRadians * math.Remainder(pt.X, p.xWrap) + k := math.Exp(2 * p.toRadians * pt.Y) + var y float64 + if math.IsInf(k, 0) { + y = math.Pi / 2 + } else { + y = math.Asin((k - 1) / (k + 1)) + } + return LatLng{s1.Angle(y), s1.Angle(x)} +} + +// Interpolate returns the point obtained by interpolating the given +// fraction of the distance along the line from A to B. +func (p *MercatorProjection) Interpolate(f float64, a, b r2.Point) r2.Point { + return a.Mul(1 - f).Add(b.Mul(f)) +} + +// WrapDistance reports the coordinate wrapping distance along each axis. +func (p *MercatorProjection) WrapDistance() r2.Point { + return r2.Point{p.xWrap, 0} +} diff --git a/vendor/github.com/golang/geo/s2/query_options.go b/vendor/github.com/golang/geo/s2/query_options.go new file mode 100644 index 000000000..9b7e38d62 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/query_options.go @@ -0,0 +1,196 @@ +// Copyright 2019 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/s1" +) + +const maxQueryResults = math.MaxInt32 + +// queryOptions represents the set of all configurable parameters used by all of +// the Query types. 
Most of these fields have non-zero defaults, so initialization
+// is handled within each Query type. All of the exported methods accept
+// user-supplied sets of options to set or adjust as necessary.
+//
+// Several of the defaults depend on the distance interface type being used
+// (e.g. minDistance, maxDistance, etc.)
+//
+// If a user sets an option value that a given query type doesn't use, it is ignored.
+type queryOptions struct {
+	// maxResults specifies that at most MaxResults edges should be returned.
+	// This must be at least 1.
+	//
+	// The default value is to return all results.
+	maxResults int
+
+	// distanceLimit specifies that only edges whose distance to the target is
+	// within this distance should be returned.
+	//
+	// Note that edges whose distance is exactly equal to this are
+	// not returned. In most cases this doesn't matter (since distances are
+	// not computed exactly in the first place), but if such edges are needed
+	// then you can retrieve them by specifying the distance as the next
+	// largest representable distance, i.e. distanceLimit.Successor().
+	//
+	// The default value is the infinity value, such that all results will be
+	// returned.
+	distanceLimit s1.ChordAngle
+
+	// maxError specifies that edges up to MaxError further away than the true
+	// closest edges may be substituted in the result set, as long as such
+	// edges satisfy all the remaining search criteria (such as DistanceLimit).
+	// This option only has an effect if MaxResults is also specified;
+	// otherwise all edges closer than MaxDistance will always be returned.
+	//
+	// Note that this does not affect how the distance between edges is
+	// computed; it simply gives the algorithm permission to stop the search
+	// early, as soon as the best possible improvement drops below MaxError.
+	//
+	// This can be used to implement distance predicates efficiently. For
+	// example, to determine whether the minimum distance is less than D, set
+	// MaxResults == 1 and MaxDistance == MaxError == D. This causes
+	// the algorithm to terminate as soon as it finds any edge whose distance
+	// is less than D, rather than continuing to search for an edge that is
+	// even closer.
+	//
+	// The default value is zero.
+	maxError s1.ChordAngle
+
+	// includeInteriors specifies that polygon interiors should be included
+	// when measuring distances. In other words, polygons that contain the target
+	// should have a distance of zero. (For targets consisting of multiple connected
+	// components, the distance is zero if any component is contained.) This
+	// is indicated in the results by returning a (ShapeID, EdgeID) pair
+	// with EdgeID == -1, i.e. this value denotes the polygon's interior.
+	//
+	// Note that for efficiency, any polygon that intersects the target may or
+	// may not have an EdgeID == -1 result. Such results are optional
+	// because in that case the distance to the polygon is already zero.
+	//
+	// The default value is true.
+	includeInteriors bool
+
+	// useBruteForce specifies that distances should be computed by examining
+	// every edge rather than using the ShapeIndex.
+	//
+	// TODO(roberts): When optimized is implemented, update the default to false.
+	// The default value is true.
+	useBruteForce bool
+
+	// region specifies that results must intersect the given Region.
+	//
+	// Note that if you want to set the region to a disc around a target
+	// point, it is faster to use a PointTarget with distanceLimit set
+	// instead.
+	// You can also set a distance limit and require that results
+	// lie within a given rectangle.
+	//
+	// The default is nil (no region limits).
+	region Region
+}
+
+// UseBruteForce enables or disables the use of brute force in a query.
+func (q *queryOptions) UseBruteForce(x bool) *queryOptions {
+	q.useBruteForce = x
+	return q
+}
+
+// IncludeInteriors specifies whether polygon interiors should be
+// included when measuring distances.
+func (q *queryOptions) IncludeInteriors(x bool) *queryOptions {
+	q.includeInteriors = x
+	return q
+}
+
+// MaxError specifies that edges up to x further away than the true
+// closest edges may be substituted in the result set, as long as such
+// edges satisfy all the remaining search criteria (such as DistanceLimit).
+// This option only has an effect if MaxResults is also specified;
+// otherwise all edges closer than MaxDistance will always be returned.
+func (q *queryOptions) MaxError(x s1.ChordAngle) *queryOptions {
+	q.maxError = x
+	return q
+}
+
+// MaxResults specifies that at most MaxResults edges should be returned.
+// This must be at least 1.
+func (q *queryOptions) MaxResults(x int) *queryOptions {
+	// TODO(roberts): What should be done if the value is <= 0?
+	q.maxResults = x
+	return q
+}
+
+// DistanceLimit specifies that only edges whose distance to the target is
+// within this distance should be returned. Edges whose distance is exactly
+// equal to the limit are not returned.
+//
+// To include values that are equal, specify the limit with the next largest
+// representable distance such as limit.Successor(), or set the option with
+// Furthest/ClosestInclusiveDistanceLimit.
+func (q *queryOptions) DistanceLimit(x s1.ChordAngle) *queryOptions {
+	q.distanceLimit = x
+	return q
+}
+
+// ClosestInclusiveDistanceLimit sets the distance limit such that results whose
+// distance is exactly equal to the limit are also returned.
+func (q *queryOptions) ClosestInclusiveDistanceLimit(limit s1.ChordAngle) *queryOptions {
+	q.distanceLimit = limit.Successor()
+	return q
+}
+
+// FurthestInclusiveDistanceLimit sets the distance limit such that results whose
+// distance is exactly equal to the limit are also returned.
+func (q *queryOptions) FurthestInclusiveDistanceLimit(limit s1.ChordAngle) *queryOptions {
+	q.distanceLimit = limit.Predecessor()
+	return q
+}
+
+// ClosestConservativeDistanceLimit sets a distance limit that also
+// incorporates the error in distance calculations. This ensures that all
+// edges whose true distance is less than or equal to limit will be returned
+// (along with some edges whose true distance is slightly greater).
+//
+// Algorithms that need to do exact distance comparisons can use this
+// option to find a set of candidate edges that can then be filtered
+// further (e.g., using CompareDistance).
+func (q *queryOptions) ClosestConservativeDistanceLimit(limit s1.ChordAngle) *queryOptions {
+	q.distanceLimit = limit.Expanded(minUpdateDistanceMaxError(limit))
+	return q
+}
+
+// FurthestConservativeDistanceLimit sets a distance limit that also
+// incorporates the error in distance calculations. This ensures that all
+// edges whose true distance is greater than or equal to limit will be returned
+// (along with some edges whose true distance is slightly less).
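The setters above chain builder-style, since each returns the options value. A sketch of how a caller might combine them, placed here before the final setter's implementation; it assumes the NewClosestEdgeQueryOptions constructor from the package's edge_query.go (also vendored by this patch, not shown in this hunk):

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s1"
	"github.com/golang/geo/s2"
)

func main() {
	// At most 3 results, counting polygon interiors, keeping results
	// at exactly 1 degree as well (inclusive limit).
	opts := s2.NewClosestEdgeQueryOptions().
		MaxResults(3).
		IncludeInteriors(true).
		ClosestInclusiveDistanceLimit(s1.ChordAngleFromAngle(s1.Degree))
	fmt.Printf("%T\n", opts)
}
```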
+func (q *queryOptions) FurthestConservativeDistanceLimit(limit s1.ChordAngle) *queryOptions { + q.distanceLimit = limit.Expanded(-minUpdateDistanceMaxError(limit)) + return q +} + +// newQueryOptions returns a set of options using the given distance type +// with the proper default values. +func newQueryOptions(d distance) *queryOptions { + return &queryOptions{ + maxResults: maxQueryResults, + distanceLimit: d.infinity().chordAngle(), + maxError: 0, + includeInteriors: true, + useBruteForce: false, + region: nil, + } +} diff --git a/vendor/github.com/golang/geo/s2/rect.go b/vendor/github.com/golang/geo/s2/rect.go new file mode 100644 index 000000000..cb4c93180 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/rect.go @@ -0,0 +1,710 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "fmt" + "io" + "math" + + "github.com/golang/geo/r1" + "github.com/golang/geo/r3" + "github.com/golang/geo/s1" +) + +// Rect represents a closed latitude-longitude rectangle. +type Rect struct { + Lat r1.Interval + Lng s1.Interval +} + +var ( + validRectLatRange = r1.Interval{-math.Pi / 2, math.Pi / 2} + validRectLngRange = s1.FullInterval() +) + +// EmptyRect returns the empty rectangle. +func EmptyRect() Rect { return Rect{r1.EmptyInterval(), s1.EmptyInterval()} } + +// FullRect returns the full rectangle. +func FullRect() Rect { return Rect{validRectLatRange, validRectLngRange} } + +// RectFromLatLng constructs a rectangle containing a single point p. +func RectFromLatLng(p LatLng) Rect { + return Rect{ + Lat: r1.Interval{p.Lat.Radians(), p.Lat.Radians()}, + Lng: s1.Interval{p.Lng.Radians(), p.Lng.Radians()}, + } +} + +// RectFromCenterSize constructs a rectangle with the given size and center. +// center needs to be normalized, but size does not. The latitude +// interval of the result is clamped to [-90,90] degrees, and the longitude +// interval of the result is FullRect() if and only if the longitude size is +// 360 degrees or more. +// +// Examples of clamping (in degrees): +// center=(80,170), size=(40,60) -> lat=[60,90], lng=[140,-160] +// center=(10,40), size=(210,400) -> lat=[-90,90], lng=[-180,180] +// center=(-90,180), size=(20,50) -> lat=[-90,-80], lng=[155,-155] +func RectFromCenterSize(center, size LatLng) Rect { + half := LatLng{size.Lat / 2, size.Lng / 2} + return RectFromLatLng(center).expanded(half) +} + +// IsValid returns true iff the rectangle is valid. +// This requires Lat ⊆ [-π/2,π/2] and Lng ⊆ [-π,π], and Lat = ∅ iff Lng = ∅ +func (r Rect) IsValid() bool { + return math.Abs(r.Lat.Lo) <= math.Pi/2 && + math.Abs(r.Lat.Hi) <= math.Pi/2 && + r.Lng.IsValid() && + r.Lat.IsEmpty() == r.Lng.IsEmpty() +} + +// IsEmpty reports whether the rectangle is empty. +func (r Rect) IsEmpty() bool { return r.Lat.IsEmpty() } + +// IsFull reports whether the rectangle is full. 
+func (r Rect) IsFull() bool { return r.Lat.Equal(validRectLatRange) && r.Lng.IsFull() } + +// IsPoint reports whether the rectangle is a single point. +func (r Rect) IsPoint() bool { return r.Lat.Lo == r.Lat.Hi && r.Lng.Lo == r.Lng.Hi } + +// Vertex returns the i-th vertex of the rectangle (i = 0,1,2,3) in CCW order +// (lower left, lower right, upper right, upper left). +func (r Rect) Vertex(i int) LatLng { + var lat, lng float64 + + switch i { + case 0: + lat = r.Lat.Lo + lng = r.Lng.Lo + case 1: + lat = r.Lat.Lo + lng = r.Lng.Hi + case 2: + lat = r.Lat.Hi + lng = r.Lng.Hi + case 3: + lat = r.Lat.Hi + lng = r.Lng.Lo + } + return LatLng{s1.Angle(lat) * s1.Radian, s1.Angle(lng) * s1.Radian} +} + +// Lo returns one corner of the rectangle. +func (r Rect) Lo() LatLng { + return LatLng{s1.Angle(r.Lat.Lo) * s1.Radian, s1.Angle(r.Lng.Lo) * s1.Radian} +} + +// Hi returns the other corner of the rectangle. +func (r Rect) Hi() LatLng { + return LatLng{s1.Angle(r.Lat.Hi) * s1.Radian, s1.Angle(r.Lng.Hi) * s1.Radian} +} + +// Center returns the center of the rectangle. +func (r Rect) Center() LatLng { + return LatLng{s1.Angle(r.Lat.Center()) * s1.Radian, s1.Angle(r.Lng.Center()) * s1.Radian} +} + +// Size returns the size of the Rect. +func (r Rect) Size() LatLng { + return LatLng{s1.Angle(r.Lat.Length()) * s1.Radian, s1.Angle(r.Lng.Length()) * s1.Radian} +} + +// Area returns the surface area of the Rect. +func (r Rect) Area() float64 { + if r.IsEmpty() { + return 0 + } + capDiff := math.Abs(math.Sin(r.Lat.Hi) - math.Sin(r.Lat.Lo)) + return r.Lng.Length() * capDiff +} + +// AddPoint increases the size of the rectangle to include the given point. +func (r Rect) AddPoint(ll LatLng) Rect { + if !ll.IsValid() { + return r + } + return Rect{ + Lat: r.Lat.AddPoint(ll.Lat.Radians()), + Lng: r.Lng.AddPoint(ll.Lng.Radians()), + } +} + +// expanded returns a rectangle that has been expanded by margin.Lat on each side +// in the latitude direction, and by margin.Lng on each side in the longitude +// direction. If either margin is negative, then it shrinks the rectangle on +// the corresponding sides instead. The resulting rectangle may be empty. +// +// The latitude-longitude space has the topology of a cylinder. Longitudes +// "wrap around" at +/-180 degrees, while latitudes are clamped to range [-90, 90]. +// This means that any expansion (positive or negative) of the full longitude range +// remains full (since the "rectangle" is actually a continuous band around the +// cylinder), while expansion of the full latitude range remains full only if the +// margin is positive. +// +// If either the latitude or longitude interval becomes empty after +// expansion by a negative margin, the result is empty. +// +// Note that if an expanded rectangle contains a pole, it may not contain +// all possible lat/lng representations of that pole, e.g., both points [π/2,0] +// and [π/2,1] represent the same pole, but they might not be contained by the +// same Rect. +// +// If you are trying to grow a rectangle by a certain distance on the +// sphere (e.g. 5km), refer to the ExpandedByDistance() C++ method implementation +// instead. 
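Before the implementation of expanded below, a sketch (not part of the vendored file) exercising the constructors above, including the clamping behavior documented for RectFromCenterSize:

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

func main() {
	// Grow a rectangle from a single point; AddPoint never shrinks it.
	r := s2.RectFromLatLng(s2.LatLngFromDegrees(10, 20))
	r = r.AddPoint(s2.LatLngFromDegrees(30, 40))
	fmt.Println(r.Center(), r.Size())

	// Clamping example from the doc comment: the latitude interval is
	// clipped at 90 and the longitude interval wraps the antimeridian.
	c := s2.RectFromCenterSize(s2.LatLngFromDegrees(80, 170), s2.LatLngFromDegrees(40, 60))
	fmt.Println(c) // lat = [60, 90], lng = [140, -160] (in degrees)
}
```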
+func (r Rect) expanded(margin LatLng) Rect { + lat := r.Lat.Expanded(margin.Lat.Radians()) + lng := r.Lng.Expanded(margin.Lng.Radians()) + + if lat.IsEmpty() || lng.IsEmpty() { + return EmptyRect() + } + + return Rect{ + Lat: lat.Intersection(validRectLatRange), + Lng: lng, + } +} + +func (r Rect) String() string { return fmt.Sprintf("[Lo%v, Hi%v]", r.Lo(), r.Hi()) } + +// PolarClosure returns the rectangle unmodified if it does not include either pole. +// If it includes either pole, PolarClosure returns an expansion of the rectangle along +// the longitudinal range to include all possible representations of the contained poles. +func (r Rect) PolarClosure() Rect { + if r.Lat.Lo == -math.Pi/2 || r.Lat.Hi == math.Pi/2 { + return Rect{r.Lat, s1.FullInterval()} + } + return r +} + +// Union returns the smallest Rect containing the union of this rectangle and the given rectangle. +func (r Rect) Union(other Rect) Rect { + return Rect{ + Lat: r.Lat.Union(other.Lat), + Lng: r.Lng.Union(other.Lng), + } +} + +// Intersection returns the smallest rectangle containing the intersection of +// this rectangle and the given rectangle. Note that the region of intersection +// may consist of two disjoint rectangles, in which case a single rectangle +// spanning both of them is returned. +func (r Rect) Intersection(other Rect) Rect { + lat := r.Lat.Intersection(other.Lat) + lng := r.Lng.Intersection(other.Lng) + + if lat.IsEmpty() || lng.IsEmpty() { + return EmptyRect() + } + return Rect{lat, lng} +} + +// Intersects reports whether this rectangle and the other have any points in common. +func (r Rect) Intersects(other Rect) bool { + return r.Lat.Intersects(other.Lat) && r.Lng.Intersects(other.Lng) +} + +// CapBound returns a cap that contains Rect. +func (r Rect) CapBound() Cap { + // We consider two possible bounding caps, one whose axis passes + // through the center of the lat-long rectangle and one whose axis + // is the north or south pole. We return the smaller of the two caps. + + if r.IsEmpty() { + return EmptyCap() + } + + var poleZ, poleAngle float64 + if r.Lat.Hi+r.Lat.Lo < 0 { + // South pole axis yields smaller cap. + poleZ = -1 + poleAngle = math.Pi/2 + r.Lat.Hi + } else { + poleZ = 1 + poleAngle = math.Pi/2 - r.Lat.Lo + } + poleCap := CapFromCenterAngle(Point{r3.Vector{0, 0, poleZ}}, s1.Angle(poleAngle)*s1.Radian) + + // For bounding rectangles that span 180 degrees or less in longitude, the + // maximum cap size is achieved at one of the rectangle vertices. For + // rectangles that are larger than 180 degrees, we punt and always return a + // bounding cap centered at one of the two poles. + if math.Remainder(r.Lng.Hi-r.Lng.Lo, 2*math.Pi) >= 0 && r.Lng.Hi-r.Lng.Lo < 2*math.Pi { + midCap := CapFromPoint(PointFromLatLng(r.Center())).AddPoint(PointFromLatLng(r.Lo())).AddPoint(PointFromLatLng(r.Hi())) + if midCap.Height() < poleCap.Height() { + return midCap + } + } + return poleCap +} + +// RectBound returns itself. +func (r Rect) RectBound() Rect { + return r +} + +// Contains reports whether this Rect contains the other Rect. +func (r Rect) Contains(other Rect) bool { + return r.Lat.ContainsInterval(other.Lat) && r.Lng.ContainsInterval(other.Lng) +} + +// ContainsCell reports whether the given Cell is contained by this Rect. +func (r Rect) ContainsCell(c Cell) bool { + // A latitude-longitude rectangle contains a cell if and only if it contains + // the cell's bounding rectangle. 
This test is exact from a mathematical
+	// point of view, assuming that the bounds returned by Cell.RectBound()
+	// are tight. However, note that there can be a loss of precision when
+	// converting between representations -- for example, if an s2.Cell is
+	// converted to a polygon, the polygon's bounding rectangle may not contain
+	// the cell's bounding rectangle. This has some slightly unexpected side
+	// effects; for instance, if one creates an s2.Polygon from an s2.Cell, the
+	// polygon will contain the cell, but the polygon's bounding box will not.
+	return r.Contains(c.RectBound())
+}
+
+// ContainsLatLng reports whether the given LatLng is within the Rect.
+func (r Rect) ContainsLatLng(ll LatLng) bool {
+	if !ll.IsValid() {
+		return false
+	}
+	return r.Lat.Contains(ll.Lat.Radians()) && r.Lng.Contains(ll.Lng.Radians())
+}
+
+// ContainsPoint reports whether the given Point is within the Rect.
+func (r Rect) ContainsPoint(p Point) bool {
+	return r.ContainsLatLng(LatLngFromPoint(p))
+}
+
+// CellUnionBound computes a covering of the Rect.
+func (r Rect) CellUnionBound() []CellID {
+	return r.CapBound().CellUnionBound()
+}
+
+// intersectsLatEdge reports whether the edge AB intersects the given edge of constant
+// latitude. Requires the points to have unit length.
+func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
+	// Unfortunately, lines of constant latitude are curves on
+	// the sphere. They can intersect a straight edge in 0, 1, or 2 points.
+
+	// First, compute the normal to the plane AB that points vaguely north.
+	z := Point{a.PointCross(b).Normalize()}
+	if z.Z < 0 {
+		z = Point{z.Mul(-1)}
+	}
+
+	// Extend this to an orthonormal frame (x,y,z) where x is the direction
+	// where the great circle through AB achieves its maximum latitude.
+	y := Point{z.PointCross(PointFromCoords(0, 0, 1)).Normalize()}
+	x := y.Cross(z.Vector)
+
+	// Compute the angle "theta" from the x-axis (in the x-y plane defined
+	// above) where the great circle intersects the given line of latitude.
+	sinLat := math.Sin(float64(lat))
+	if math.Abs(sinLat) >= x.Z {
+		// The great circle does not reach the given latitude.
+		return false
+	}
+
+	cosTheta := sinLat / x.Z
+	sinTheta := math.Sqrt(1 - cosTheta*cosTheta)
+	theta := math.Atan2(sinTheta, cosTheta)
+
+	// The candidate intersection points are located +/- theta in the x-y
+	// plane. For an intersection to be valid, we need to check that the
+	// intersection point is contained in the interior of the edge AB and
+	// also that it is contained within the given longitude interval "lng".
+
+	// Compute the range of theta values spanned by the edge AB.
+	abTheta := s1.IntervalFromPointPair(
+		math.Atan2(a.Dot(y.Vector), a.Dot(x)),
+		math.Atan2(b.Dot(y.Vector), b.Dot(x)))
+
+	if abTheta.Contains(theta) {
+		// Check if the intersection point is also in the given lng interval.
+		isect := x.Mul(cosTheta).Add(y.Mul(sinTheta))
+		if lng.Contains(math.Atan2(isect.Y, isect.X)) {
+			return true
+		}
+	}
+
+	if abTheta.Contains(-theta) {
+		// Check if the other intersection point is also in the given lng interval.
+		isect := x.Mul(cosTheta).Sub(y.Mul(sinTheta))
+		if lng.Contains(math.Atan2(isect.Y, isect.X)) {
+			return true
+		}
+	}
+	return false
+}
+
+// intersectsLngEdge reports whether the edge AB intersects the given edge of constant
+// longitude. Requires the points to have unit length.
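Before the longitude-edge helper below, a small sketch (hypothetical values, not part of the vendored file) of the containment predicates just defined:

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

func main() {
	r := s2.RectFromLatLng(s2.LatLngFromDegrees(0, 0)).
		AddPoint(s2.LatLngFromDegrees(10, 10))

	inside := s2.LatLngFromDegrees(5, 5)
	fmt.Println(r.ContainsLatLng(inside))                      // true
	fmt.Println(r.ContainsPoint(s2.PointFromLatLng(inside)))   // true
	fmt.Println(r.ContainsLatLng(s2.LatLngFromDegrees(20, 5))) // false: lat 20 > 10
}
```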
+func intersectsLngEdge(a, b Point, lat r1.Interval, lng s1.Angle) bool { + // The nice thing about edges of constant longitude is that + // they are straight lines on the sphere (geodesics). + return CrossingSign(a, b, PointFromLatLng(LatLng{s1.Angle(lat.Lo), lng}), + PointFromLatLng(LatLng{s1.Angle(lat.Hi), lng})) == Cross +} + +// IntersectsCell reports whether this rectangle intersects the given cell. This is an +// exact test and may be fairly expensive. +func (r Rect) IntersectsCell(c Cell) bool { + // First we eliminate the cases where one region completely contains the + // other. Once these are disposed of, then the regions will intersect + // if and only if their boundaries intersect. + if r.IsEmpty() { + return false + } + if r.ContainsPoint(Point{c.id.rawPoint()}) { + return true + } + if c.ContainsPoint(PointFromLatLng(r.Center())) { + return true + } + + // Quick rejection test (not required for correctness). + if !r.Intersects(c.RectBound()) { + return false + } + + // Precompute the cell vertices as points and latitude-longitudes. We also + // check whether the Cell contains any corner of the rectangle, or + // vice-versa, since the edge-crossing tests only check the edge interiors. + vertices := [4]Point{} + latlngs := [4]LatLng{} + + for i := range vertices { + vertices[i] = c.Vertex(i) + latlngs[i] = LatLngFromPoint(vertices[i]) + if r.ContainsLatLng(latlngs[i]) { + return true + } + if c.ContainsPoint(PointFromLatLng(r.Vertex(i))) { + return true + } + } + + // Now check whether the boundaries intersect. Unfortunately, a + // latitude-longitude rectangle does not have straight edges: two edges + // are curved, and at least one of them is concave. + for i := range vertices { + edgeLng := s1.IntervalFromEndpoints(latlngs[i].Lng.Radians(), latlngs[(i+1)&3].Lng.Radians()) + if !r.Lng.Intersects(edgeLng) { + continue + } + + a := vertices[i] + b := vertices[(i+1)&3] + if edgeLng.Contains(r.Lng.Lo) && intersectsLngEdge(a, b, r.Lat, s1.Angle(r.Lng.Lo)) { + return true + } + if edgeLng.Contains(r.Lng.Hi) && intersectsLngEdge(a, b, r.Lat, s1.Angle(r.Lng.Hi)) { + return true + } + if intersectsLatEdge(a, b, s1.Angle(r.Lat.Lo), r.Lng) { + return true + } + if intersectsLatEdge(a, b, s1.Angle(r.Lat.Hi), r.Lng) { + return true + } + } + return false +} + +// Encode encodes the Rect. +func (r Rect) Encode(w io.Writer) error { + e := &encoder{w: w} + r.encode(e) + return e.err +} + +func (r Rect) encode(e *encoder) { + e.writeInt8(encodingVersion) + e.writeFloat64(r.Lat.Lo) + e.writeFloat64(r.Lat.Hi) + e.writeFloat64(r.Lng.Lo) + e.writeFloat64(r.Lng.Hi) +} + +// Decode decodes a rectangle. +func (r *Rect) Decode(rd io.Reader) error { + d := &decoder{r: asByteReader(rd)} + r.decode(d) + return d.err +} + +func (r *Rect) decode(d *decoder) { + if version := d.readUint8(); int(version) != int(encodingVersion) && d.err == nil { + d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion) + return + } + r.Lat.Lo = d.readFloat64() + r.Lat.Hi = d.readFloat64() + r.Lng.Lo = d.readFloat64() + r.Lng.Hi = d.readFloat64() + return +} + +// DistanceToLatLng returns the minimum distance (measured along the surface of the sphere) +// from a given point to the rectangle (both its boundary and its interior). +// If r is empty, the result is meaningless. +// The latlng must be valid. 
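A usage sketch for DistanceToLatLng, placed here ahead of its implementation (coordinates are arbitrary, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

func main() {
	r := s2.RectFromLatLng(s2.LatLngFromDegrees(0, 0)).
		AddPoint(s2.LatLngFromDegrees(10, 10))

	// Inside the rectangle: distance 0.
	fmt.Println(r.DistanceToLatLng(s2.LatLngFromDegrees(5, 5)))
	// Due north of the rectangle: roughly 10 degrees of latitude away.
	fmt.Println(r.DistanceToLatLng(s2.LatLngFromDegrees(20, 5)))
}
```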
+func (r Rect) DistanceToLatLng(ll LatLng) s1.Angle { + if r.Lng.Contains(float64(ll.Lng)) { + return maxAngle(0, ll.Lat-s1.Angle(r.Lat.Hi), s1.Angle(r.Lat.Lo)-ll.Lat) + } + + i := s1.IntervalFromEndpoints(r.Lng.Hi, r.Lng.ComplementCenter()) + rectLng := r.Lng.Lo + if i.Contains(float64(ll.Lng)) { + rectLng = r.Lng.Hi + } + + lo := LatLng{s1.Angle(r.Lat.Lo) * s1.Radian, s1.Angle(rectLng) * s1.Radian} + hi := LatLng{s1.Angle(r.Lat.Hi) * s1.Radian, s1.Angle(rectLng) * s1.Radian} + return DistanceFromSegment(PointFromLatLng(ll), PointFromLatLng(lo), PointFromLatLng(hi)) +} + +// DirectedHausdorffDistance returns the directed Hausdorff distance (measured along the +// surface of the sphere) to the given Rect. The directed Hausdorff +// distance from rectangle A to rectangle B is given by +// h(A, B) = max_{p in A} min_{q in B} d(p, q). +func (r Rect) DirectedHausdorffDistance(other Rect) s1.Angle { + if r.IsEmpty() { + return 0 * s1.Radian + } + if other.IsEmpty() { + return math.Pi * s1.Radian + } + + lng := r.Lng.DirectedHausdorffDistance(other.Lng) + return directedHausdorffDistance(lng, r.Lat, other.Lat) +} + +// HausdorffDistance returns the undirected Hausdorff distance (measured along the +// surface of the sphere) to the given Rect. +// The Hausdorff distance between rectangle A and rectangle B is given by +// H(A, B) = max{h(A, B), h(B, A)}. +func (r Rect) HausdorffDistance(other Rect) s1.Angle { + return maxAngle(r.DirectedHausdorffDistance(other), + other.DirectedHausdorffDistance(r)) +} + +// ApproxEqual reports whether the latitude and longitude intervals of the two rectangles +// are the same up to a small tolerance. +func (r Rect) ApproxEqual(other Rect) bool { + return r.Lat.ApproxEqual(other.Lat) && r.Lng.ApproxEqual(other.Lng) +} + +// directedHausdorffDistance returns the directed Hausdorff distance +// from one longitudinal edge spanning latitude range 'a' to the other +// longitudinal edge spanning latitude range 'b', with their longitudinal +// difference given by 'lngDiff'. +func directedHausdorffDistance(lngDiff s1.Angle, a, b r1.Interval) s1.Angle { + // By symmetry, we can assume a's longitude is 0 and b's longitude is + // lngDiff. Call b's two endpoints bLo and bHi. Let H be the hemisphere + // containing a and delimited by the longitude line of b. The Voronoi diagram + // of b on H has three edges (portions of great circles) all orthogonal to b + // and meeting at bLo cross bHi. + // E1: (bLo, bLo cross bHi) + // E2: (bHi, bLo cross bHi) + // E3: (-bMid, bLo cross bHi), where bMid is the midpoint of b + // + // They subdivide H into three Voronoi regions. Depending on how longitude 0 + // (which contains edge a) intersects these regions, we distinguish two cases: + // Case 1: it intersects three regions. This occurs when lngDiff <= π/2. + // Case 2: it intersects only two regions. This occurs when lngDiff > π/2. + // + // In the first case, the directed Hausdorff distance to edge b can only be + // realized by the following points on a: + // A1: two endpoints of a. + // A2: intersection of a with the equator, if b also intersects the equator. + // + // In the second case, the directed Hausdorff distance to edge b can only be + // realized by the following points on a: + // B1: two endpoints of a. + // B2: intersection of a with E3 + // B3: farthest point from bLo to the interior of D, and farthest point from + // bHi to the interior of U, if any, where D (resp. U) is the portion + // of edge a below (resp. above) the intersection point from B2. 
+
+	if lngDiff < 0 {
+		panic("impossible: negative lngDiff")
+	}
+	if lngDiff > math.Pi {
+		panic("impossible: lngDiff > Pi")
+	}
+
+	if lngDiff == 0 {
+		return s1.Angle(a.DirectedHausdorffDistance(b))
+	}
+
+	// Assumed longitude of b.
+	bLng := lngDiff
+	// Two endpoints of b.
+	bLo := PointFromLatLng(LatLng{s1.Angle(b.Lo), bLng})
+	bHi := PointFromLatLng(LatLng{s1.Angle(b.Hi), bLng})
+
+	// Cases A1 and B1.
+	aLo := PointFromLatLng(LatLng{s1.Angle(a.Lo), 0})
+	aHi := PointFromLatLng(LatLng{s1.Angle(a.Hi), 0})
+	maxDistance := maxAngle(
+		DistanceFromSegment(aLo, bLo, bHi),
+		DistanceFromSegment(aHi, bLo, bHi))
+
+	if lngDiff <= math.Pi/2 {
+		// Case A2.
+		if a.Contains(0) && b.Contains(0) {
+			maxDistance = maxAngle(maxDistance, lngDiff)
+		}
+		return maxDistance
+	}
+
+	// Case B2.
+	p := bisectorIntersection(b, bLng)
+	pLat := LatLngFromPoint(p).Lat
+	if a.Contains(float64(pLat)) {
+		maxDistance = maxAngle(maxDistance, p.Angle(bLo.Vector))
+	}
+
+	// Case B3.
+	if pLat > s1.Angle(a.Lo) {
+		intDist, ok := interiorMaxDistance(r1.Interval{a.Lo, math.Min(float64(pLat), a.Hi)}, bLo)
+		if ok {
+			maxDistance = maxAngle(maxDistance, intDist)
+		}
+	}
+	if pLat < s1.Angle(a.Hi) {
+		intDist, ok := interiorMaxDistance(r1.Interval{math.Max(float64(pLat), a.Lo), a.Hi}, bHi)
+		if ok {
+			maxDistance = maxAngle(maxDistance, intDist)
+		}
+	}
+
+	return maxDistance
+}
+
+// interiorMaxDistance returns the max distance from a point b to the segment spanning latitude range
+// aLat on longitude 0 if the max occurs in the interior of aLat. Otherwise, returns (0, false).
+func interiorMaxDistance(aLat r1.Interval, b Point) (a s1.Angle, ok bool) {
+	// Longitude 0 is in the y=0 plane. b.X >= 0 implies that the maximum
+	// does not occur in the interior of aLat.
+	if aLat.IsEmpty() || b.X >= 0 {
+		return 0, false
+	}
+
+	// Project b to the y=0 plane. The antipodal of the normalized projection is
+	// the point at which the maximum distance from b occurs, if it is contained
+	// in aLat.
+	intersectionPoint := PointFromCoords(-b.X, 0, -b.Z)
+	if !aLat.InteriorContains(float64(LatLngFromPoint(intersectionPoint).Lat)) {
+		return 0, false
+	}
+	return b.Angle(intersectionPoint.Vector), true
+}
+
+// bisectorIntersection returns the intersection of longitude 0 with the bisector of an edge
+// on longitude 'lng' and spanning latitude range 'lat'.
+func bisectorIntersection(lat r1.Interval, lng s1.Angle) Point {
+	lng = s1.Angle(math.Abs(float64(lng)))
+	latCenter := s1.Angle(lat.Center())
+
+	// A vector orthogonal to the bisector of the given longitudinal edge.
+	orthoBisector := LatLng{latCenter - math.Pi/2, lng}
+	if latCenter < 0 {
+		orthoBisector = LatLng{-latCenter - math.Pi/2, lng - math.Pi}
+	}
+
+	// A vector orthogonal to longitude 0.
+	orthoLng := Point{r3.Vector{0, -1, 0}}
+
+	return orthoLng.PointCross(PointFromLatLng(orthoBisector))
+}
+
+// Centroid returns the true centroid of the given Rect multiplied by its
+// surface area. The result is not unit length, so you may want to normalize it.
+// Note that in general the centroid is *not* at the center of the rectangle, and
+// in fact it may not even be contained by the rectangle. (It is the "center of
+// mass" of the rectangle viewed as a subset of the unit sphere, i.e. it is the
+// point in space about which this curved shape would rotate.)
+//
+// The reason for multiplying the result by the rectangle area is to make it
+// easier to compute the centroid of more complicated shapes.
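The additivity property stated in the continuation of this comment can be checked directly. A sketch (hypothetical coordinates, not part of the vendored file) splitting one rectangle into two disjoint halves:

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

func main() {
	// One rectangle split into two disjoint halves along longitude 0.
	whole := s2.RectFromLatLng(s2.LatLngFromDegrees(-10, -20)).
		AddPoint(s2.LatLngFromDegrees(10, 20))
	west := s2.RectFromLatLng(s2.LatLngFromDegrees(-10, -20)).
		AddPoint(s2.LatLngFromDegrees(10, 0))
	east := s2.RectFromLatLng(s2.LatLngFromDegrees(-10, 0)).
		AddPoint(s2.LatLngFromDegrees(10, 20))

	// Area-scaled centroids add over disjoint pieces (up to float error).
	sum := west.Centroid().Add(east.Centroid().Vector)
	fmt.Println(sum)
	fmt.Println(whole.Centroid().Vector)
}
```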
The centroid +// of a union of disjoint regions can be computed simply by adding their +// Centroid results. +func (r Rect) Centroid() Point { + // When a sphere is divided into slices of constant thickness by a set + // of parallel planes, all slices have the same surface area. This + // implies that the z-component of the centroid is simply the midpoint + // of the z-interval spanned by the Rect. + // + // Similarly, it is easy to see that the (x,y) of the centroid lies in + // the plane through the midpoint of the rectangle's longitude interval. + // We only need to determine the distance "d" of this point from the + // z-axis. + // + // Let's restrict our attention to a particular z-value. In this + // z-plane, the Rect is a circular arc. The centroid of this arc + // lies on a radial line through the midpoint of the arc, and at a + // distance from the z-axis of + // + // r * (sin(alpha) / alpha) + // + // where r = sqrt(1-z^2) is the radius of the arc, and "alpha" is half + // of the arc length (i.e., the arc covers longitudes [-alpha, alpha]). + // + // To find the centroid distance from the z-axis for the entire + // rectangle, we just need to integrate over the z-interval. This gives + // + // d = Integrate[sqrt(1-z^2)*sin(alpha)/alpha, z1..z2] / (z2 - z1) + // + // where [z1, z2] is the range of z-values covered by the rectangle. + // This simplifies to + // + // d = sin(alpha)/(2*alpha*(z2-z1))*(z2*r2 - z1*r1 + theta2 - theta1) + // + // where [theta1, theta2] is the latitude interval, z1=sin(theta1), + // z2=sin(theta2), r1=cos(theta1), and r2=cos(theta2). + // + // Finally, we want to return not the centroid itself, but the centroid + // scaled by the area of the rectangle. The area of the rectangle is + // + // A = 2 * alpha * (z2 - z1) + // + // which fortunately appears in the denominator of "d". + + if r.IsEmpty() { + return Point{} + } + + z1 := math.Sin(r.Lat.Lo) + z2 := math.Sin(r.Lat.Hi) + r1 := math.Cos(r.Lat.Lo) + r2 := math.Cos(r.Lat.Hi) + + alpha := 0.5 * r.Lng.Length() + r0 := math.Sin(alpha) * (r2*z2 - r1*z1 + r.Lat.Length()) + lng := r.Lng.Center() + z := alpha * (z2 + z1) * (z2 - z1) // scaled by the area + + return Point{r3.Vector{r0 * math.Cos(lng), r0 * math.Sin(lng), z}} +} + +// BUG: The major differences from the C++ version are: +// - Get*Distance, Vertex, InteriorContains(LatLng|Rect|Point) diff --git a/vendor/github.com/golang/geo/s2/rect_bounder.go b/vendor/github.com/golang/geo/s2/rect_bounder.go new file mode 100644 index 000000000..419dea0c1 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/rect_bounder.go @@ -0,0 +1,352 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/r1" + "github.com/golang/geo/r3" + "github.com/golang/geo/s1" +) + +// RectBounder is used to compute a bounding rectangle that contains all edges +// defined by a vertex chain (v0, v1, v2, ...). All vertices must be unit length. 
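A usage sketch (hypothetical coordinates, not part of the vendored file) illustrating the pole-crossing caveat noted in the next sentence of this comment:

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s2"
)

func main() {
	// The edge from (89, 0) to (89, 180) crosses the North Pole, so the
	// bound must reach latitude 90 even though both endpoints sit at 89.
	rb := s2.NewRectBounder()
	rb.AddPoint(s2.PointFromLatLng(s2.LatLngFromDegrees(89, 0)))
	rb.AddPoint(s2.PointFromLatLng(s2.LatLngFromDegrees(89, 180)))
	fmt.Println(rb.RectBound()) // lat.Hi reaches 90; a bound on the endpoints alone would stop at 89
}
```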
+// Note that the bounding rectangle of an edge can be larger than the bounding +// rectangle of its endpoints, e.g. consider an edge that passes through the North Pole. +// +// The bounds are calculated conservatively to account for numerical errors +// when points are converted to LatLngs. More precisely, this function +// guarantees the following: +// Let L be a closed edge chain (Loop) such that the interior of the loop does +// not contain either pole. Now if P is any point such that L.ContainsPoint(P), +// then RectBound(L).ContainsPoint(LatLngFromPoint(P)). +type RectBounder struct { + // The previous vertex in the chain. + a Point + // The previous vertex latitude longitude. + aLL LatLng + bound Rect +} + +// NewRectBounder returns a new instance of a RectBounder. +func NewRectBounder() *RectBounder { + return &RectBounder{ + bound: EmptyRect(), + } +} + +// maxErrorForTests returns the maximum error in RectBound provided that the +// result does not include either pole. It is only used for testing purposes +func (r *RectBounder) maxErrorForTests() LatLng { + // The maximum error in the latitude calculation is + // 3.84 * dblEpsilon for the PointCross calculation + // 0.96 * dblEpsilon for the Latitude calculation + // 5 * dblEpsilon added by AddPoint/RectBound to compensate for error + // ----------------- + // 9.80 * dblEpsilon maximum error in result + // + // The maximum error in the longitude calculation is dblEpsilon. RectBound + // does not do any expansion because this isn't necessary in order to + // bound the *rounded* longitudes of contained points. + return LatLng{10 * dblEpsilon * s1.Radian, 1 * dblEpsilon * s1.Radian} +} + +// AddPoint adds the given point to the chain. The Point must be unit length. +func (r *RectBounder) AddPoint(b Point) { + bLL := LatLngFromPoint(b) + + if r.bound.IsEmpty() { + r.a = b + r.aLL = bLL + r.bound = r.bound.AddPoint(bLL) + return + } + + // First compute the cross product N = A x B robustly. This is the normal + // to the great circle through A and B. We don't use RobustSign + // since that method returns an arbitrary vector orthogonal to A if the two + // vectors are proportional, and we want the zero vector in that case. + n := r.a.Sub(b.Vector).Cross(r.a.Add(b.Vector)) // N = 2 * (A x B) + + // The relative error in N gets large as its norm gets very small (i.e., + // when the two points are nearly identical or antipodal). We handle this + // by choosing a maximum allowable error, and if the error is greater than + // this we fall back to a different technique. Since it turns out that + // the other sources of error in converting the normal to a maximum + // latitude add up to at most 1.16 * dblEpsilon, and it is desirable to + // have the total error be a multiple of dblEpsilon, we have chosen to + // limit the maximum error in the normal to be 3.84 * dblEpsilon. + // It is possible to show that the error is less than this when + // + // n.Norm() >= 8 * sqrt(3) / (3.84 - 0.5 - sqrt(3)) * dblEpsilon + // = 1.91346e-15 (about 8.618 * dblEpsilon) + nNorm := n.Norm() + if nNorm < 1.91346e-15 { + // A and B are either nearly identical or nearly antipodal (to within + // 4.309 * dblEpsilon, or about 6 nanometers on the earth's surface). + if r.a.Dot(b.Vector) < 0 { + // The two points are nearly antipodal. The easiest solution is to + // assume that the edge between A and B could go in any direction + // around the sphere. + r.bound = FullRect() + } else { + // The two points are nearly identical (to within 4.309 * dblEpsilon). 
+			// In this case we can just use the bounding rectangle of the points,
+			// since after the expansion done by GetBound this Rect is
+			// guaranteed to include the (lat,lng) values of all points along AB.
+			r.bound = r.bound.Union(RectFromLatLng(r.aLL).AddPoint(bLL))
+		}
+		r.a = b
+		r.aLL = bLL
+		return
+	}
+
+	// Compute the longitude range spanned by AB.
+	lngAB := s1.EmptyInterval().AddPoint(r.aLL.Lng.Radians()).AddPoint(bLL.Lng.Radians())
+	if lngAB.Length() >= math.Pi-2*dblEpsilon {
+		// The points lie on nearly opposite lines of longitude to within the
+		// maximum error of the calculation. The easiest solution is to assume
+		// that AB could go on either side of the pole.
+		lngAB = s1.FullInterval()
+	}
+
+	// Next we compute the latitude range spanned by the edge AB. We start
+	// with the range spanning the two endpoints of the edge:
+	latAB := r1.IntervalFromPoint(r.aLL.Lat.Radians()).AddPoint(bLL.Lat.Radians())
+
+	// This is the desired range unless the edge AB crosses the plane
+	// through N and the Z-axis (which is where the great circle through A
+	// and B attains its minimum and maximum latitudes). To test whether AB
+	// crosses this plane, we compute a vector M perpendicular to this
+	// plane and then project A and B onto it.
+	m := n.Cross(r3.Vector{0, 0, 1})
+	mA := m.Dot(r.a.Vector)
+	mB := m.Dot(b.Vector)
+
+	// We want to test the signs of "mA" and "mB", so we need to bound
+	// the error in these calculations. It is possible to show that the
+	// total error is bounded by
+	//
+	//   (1 + sqrt(3)) * dblEpsilon * nNorm + 8 * sqrt(3) * (dblEpsilon**2)
+	//     = 6.06638e-16 * nNorm + 6.83174e-31
+
+	mError := 6.06638e-16*nNorm + 6.83174e-31
+	if mA*mB < 0 || math.Abs(mA) <= mError || math.Abs(mB) <= mError {
+		// Minimum/maximum latitude *may* occur in the edge interior.
+		//
+		// The maximum latitude is 90 degrees minus the latitude of N. We
+		// compute this directly using atan2 in order to get maximum accuracy
+		// near the poles.
+		//
+		// Our goal is to compute a bound that contains the computed latitudes of
+		// all S2Points P that pass the point-in-polygon containment test.
+		// There are three sources of error we need to consider:
+		//  - the directional error in N (at most 3.84 * dblEpsilon)
+		//  - converting N to a maximum latitude
+		//  - computing the latitude of the test point P
+		// The latter two sources of error are at most 0.955 * dblEpsilon
+		// individually, but it is possible to show by a more complex analysis
+		// that together they can add up to at most 1.16 * dblEpsilon, for a
+		// total error of 5 * dblEpsilon.
+		//
+		// We add 3 * dblEpsilon to the bound here, and GetBound() will pad
+		// the bound by another 2 * dblEpsilon.
+		maxLat := math.Min(
+			math.Atan2(math.Sqrt(n.X*n.X+n.Y*n.Y), math.Abs(n.Z))+3*dblEpsilon,
+			math.Pi/2)
+
+		// In order to get tight bounds when the two points are close together,
+		// we also bound the min/max latitude relative to the latitudes of the
+		// endpoints A and B. First we compute the distance between A and B,
+		// and then we compute the maximum change in latitude between any two
+		// points along the great circle that are separated by this distance.
+		// This gives us a latitude change "budget". Some of this budget must
+		// be spent getting from A to B; the remainder bounds the round-trip
+		// distance (in latitude) from A or B to the min or max latitude
+		// attained along the edge AB.
+		latBudget := 2 * math.Asin(0.5*(r.a.Sub(b.Vector)).Norm()*math.Sin(maxLat))
+		maxDelta := 0.5*(latBudget-latAB.Length()) + dblEpsilon
+
+		// Test whether AB passes through the point of maximum latitude or
+		// minimum latitude. If the dot product(s) are small enough then the
+		// result may be ambiguous.
+		if mA <= mError && mB >= -mError {
+			latAB.Hi = math.Min(maxLat, latAB.Hi+maxDelta)
+		}
+		if mB <= mError && mA >= -mError {
+			latAB.Lo = math.Max(-maxLat, latAB.Lo-maxDelta)
+		}
+	}
+	r.a = b
+	r.aLL = bLL
+	r.bound = r.bound.Union(Rect{latAB, lngAB})
+}
+
+// RectBound returns the bounding rectangle of the edge chain that connects the
+// vertices defined so far. This bound satisfies the guarantee made
+// above, i.e. if the edge chain defines a Loop, then the bound contains
+// the LatLng coordinates of all Points contained by the loop.
+func (r *RectBounder) RectBound() Rect {
+	return r.bound.expanded(LatLng{s1.Angle(2 * dblEpsilon), 0}).PolarClosure()
+}
+
+// ExpandForSubregions expands a bounding Rect so that it is guaranteed to
+// contain the bounds of any subregion whose bounds are computed using
+// ComputeRectBound. For example, consider a loop L that defines a square.
+// GetBound ensures that if a point P is contained by this square, then
+// LatLngFromPoint(P) is contained by the bound. But now consider a diamond
+// shaped loop S contained by L. It is possible that GetBound returns a
+// *larger* bound for S than it does for L, due to rounding errors. This
+// method expands the bound for L so that it is guaranteed to contain the
+// bounds of any subregion S.
+//
+// More precisely, if L is a loop that does not contain either pole, and S
+// is a loop such that L.Contains(S), then
+//
+//   ExpandForSubregions(L.RectBound).Contains(S.RectBound).
+//
+func ExpandForSubregions(bound Rect) Rect {
+	// Empty bounds don't need expansion.
+	if bound.IsEmpty() {
+		return bound
+	}
+
+	// First we need to check whether the bound B contains any nearly-antipodal
+	// points (to within 4.309 * dblEpsilon). If so then we need to return
+	// FullRect, since the subregion might have an edge between two
+	// such points, and AddPoint returns Full for such edges. Note that
+	// this can happen even if B is not Full; for example, consider a loop
+	// that defines a 10km strip straddling the equator, extending from
+	// longitudes -100 to +100 degrees.
+	//
+	// It is easy to check whether B contains any antipodal points, but checking
+	// for nearly-antipodal points is trickier. Essentially we consider the
+	// original bound B and its reflection through the origin B', and then test
+	// whether the minimum distance between B and B' is less than 4.309 * dblEpsilon.
+
+	// lngGap is a lower bound on the longitudinal distance between B and its
+	// reflection B'. (2.5 * dblEpsilon is the maximum combined error of the
+	// endpoint longitude calculations and the Length call.)
+	lngGap := math.Max(0, math.Pi-bound.Lng.Length()-2.5*dblEpsilon)
+
+	// minAbsLat is the minimum distance from B to the equator (if zero or
+	// negative, then B straddles the equator).
+	minAbsLat := math.Max(bound.Lat.Lo, -bound.Lat.Hi)
+
+	// latGapSouth and latGapNorth measure the minimum distance from B to the
+	// south and north poles respectively.
+	latGapSouth := math.Pi/2 + bound.Lat.Lo
+	latGapNorth := math.Pi/2 - bound.Lat.Hi
+
+	if minAbsLat >= 0 {
+		// The bound B does not straddle the equator.
In this case the minimum + // distance is between one endpoint of the latitude edge in B closest to + // the equator and the other endpoint of that edge in B'. The latitude + // distance between these two points is 2*minAbsLat, and the longitude + // distance is lngGap. We could compute the distance exactly using the + // Haversine formula, but then we would need to bound the errors in that + // calculation. Since we only need accuracy when the distance is very + // small (close to 4.309 * dblEpsilon), we substitute the Euclidean + // distance instead. This gives us a right triangle XYZ with two edges of + // length x = 2*minAbsLat and y ~= lngGap. The desired distance is the + // length of the third edge z, and we have + // + // z ~= sqrt(x^2 + y^2) >= (x + y) / sqrt(2) + // + // Therefore the region may contain nearly antipodal points only if + // + // 2*minAbsLat + lngGap < sqrt(2) * 4.309 * dblEpsilon + // ~= 1.354e-15 + // + // Note that because the given bound B is conservative, minAbsLat and + // lngGap are both lower bounds on their true values so we do not need + // to make any adjustments for their errors. + if 2*minAbsLat+lngGap < 1.354e-15 { + return FullRect() + } + } else if lngGap >= math.Pi/2 { + // B spans at most Pi/2 in longitude. The minimum distance is always + // between one corner of B and the diagonally opposite corner of B'. We + // use the same distance approximation that we used above; in this case + // we have an obtuse triangle XYZ with two edges of length x = latGapSouth + // and y = latGapNorth, and angle Z >= Pi/2 between them. We then have + // + // z >= sqrt(x^2 + y^2) >= (x + y) / sqrt(2) + // + // Unlike the case above, latGapSouth and latGapNorth are not lower bounds + // (because of the extra addition operation, and because math.Pi/2 is not + // exactly equal to Pi/2); they can exceed their true values by up to + // 0.75 * dblEpsilon. Putting this all together, the region may contain + // nearly antipodal points only if + // + // latGapSouth + latGapNorth < (sqrt(2) * 4.309 + 1.5) * dblEpsilon + // ~= 1.687e-15 + if latGapSouth+latGapNorth < 1.687e-15 { + return FullRect() + } + } else { + // Otherwise we know that (1) the bound straddles the equator and (2) its + // width in longitude is at least Pi/2. In this case the minimum + // distance can occur either between a corner of B and the diagonally + // opposite corner of B' (as in the case above), or between a corner of B + // and the opposite longitudinal edge reflected in B'. It is sufficient + // to only consider the corner-edge case, since this distance is also a + // lower bound on the corner-corner distance when that case applies. + + // Consider the spherical triangle XYZ where X is a corner of B with + // minimum absolute latitude, Y is the closest pole to X, and Z is the + // point closest to X on the opposite longitudinal edge of B'. This is a + // right triangle (Z = Pi/2), and from the spherical law of sines we have + // + // sin(z) / sin(Z) = sin(y) / sin(Y) + // sin(maxLatGap) / 1 = sin(dMin) / sin(lngGap) + // sin(dMin) = sin(maxLatGap) * sin(lngGap) + // + // where "maxLatGap" = max(latGapSouth, latGapNorth) and "dMin" is the + // desired minimum distance. 
Now using the facts that sin(t) >= (2/Pi)*t
+		// for 0 <= t <= Pi/2, that we only need an accurate approximation when
+		// at least one of "maxLatGap" or lngGap is extremely small (in which
+		// case sin(t) ~= t), and recalling that "maxLatGap" has an error of up
+		// to 0.75 * dblEpsilon, we want to test whether
+		//
+		//   maxLatGap * lngGap < (4.309 + 0.75) * (Pi/2) * dblEpsilon
+		//                      ~= 1.765e-15
+		if math.Max(latGapSouth, latGapNorth)*lngGap < 1.765e-15 {
+			return FullRect()
+		}
+	}
+	// Next we need to check whether the subregion might contain any edges that
+	// span (math.Pi - 2 * dblEpsilon) radians or more in longitude, since AddPoint
+	// sets the longitude bound to Full in that case. This corresponds to
+	// testing whether (lngGap <= 0) in lngExpansion below.
+
+	// Otherwise, the maximum latitude error in AddPoint is 4.8 * dblEpsilon.
+	// In the worst case, the errors when computing the latitude bound for a
+	// subregion could go in the opposite direction as the errors when computing
+	// the bound for the original region, so we need to double this value.
+	// (More analysis shows that it's okay to round down to a multiple of
+	// dblEpsilon.)
+	//
+	// For longitude, we rely on the fact that atan2 is correctly rounded and
+	// therefore no additional bounds expansion is necessary.
+
+	latExpansion := 9 * dblEpsilon
+	lngExpansion := 0.0
+	if lngGap <= 0 {
+		lngExpansion = math.Pi
+	}
+	return bound.expanded(LatLng{s1.Angle(latExpansion), s1.Angle(lngExpansion)}).PolarClosure()
+}
diff --git a/vendor/github.com/golang/geo/s2/region.go b/vendor/github.com/golang/geo/s2/region.go
new file mode 100644
index 000000000..9ea3de1ca
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/region.go
@@ -0,0 +1,71 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// A Region represents a two-dimensional region on the unit sphere.
+//
+// The purpose of this interface is to allow complex regions to be
+// approximated as simpler regions. The interface is restricted to methods
+// that are useful for computing approximations.
+type Region interface {
+	// CapBound returns a bounding spherical cap. This is not guaranteed to be exact.
+	CapBound() Cap
+
+	// RectBound returns a bounding latitude-longitude rectangle that contains
+	// the region. The bounds are not guaranteed to be tight.
+	RectBound() Rect
+
+	// ContainsCell reports whether the region completely contains the given cell.
+	// It returns false if containment could not be determined.
+	ContainsCell(c Cell) bool
+
+	// IntersectsCell reports whether the region intersects the given cell or
+	// if intersection could not be determined. It returns false if the region
+	// does not intersect the cell.
+	IntersectsCell(c Cell) bool
+
+	// ContainsPoint reports whether the region contains the given point or not.
+	// The point should be unit length, although some implementations may relax
+	// this restriction.
+ ContainsPoint(p Point) bool + + // CellUnionBound returns a small collection of CellIDs whose union covers + // the region. The cells are not sorted, may have redundancies (such as cells + // that contain other cells), and may cover much more area than necessary. + // + // This method is not intended for direct use by client code. Clients + // should typically use Covering, which has options to control the size and + // accuracy of the covering. Alternatively, if you want a fast covering and + // don't care about accuracy, consider calling FastCovering (which returns a + // cleaned-up version of the covering computed by this method). + // + // CellUnionBound implementations should attempt to return a small + // covering (ideally 4 cells or fewer) that covers the region and can be + // computed quickly. The result is used by RegionCoverer as a starting + // point for further refinement. + CellUnionBound() []CellID +} + +// Enforce Region interface satisfaction. +var ( + _ Region = Cap{} + _ Region = Cell{} + _ Region = (*CellUnion)(nil) + _ Region = (*Loop)(nil) + _ Region = Point{} + _ Region = (*Polygon)(nil) + _ Region = (*Polyline)(nil) + _ Region = Rect{} +) diff --git a/vendor/github.com/golang/geo/s2/regioncoverer.go b/vendor/github.com/golang/geo/s2/regioncoverer.go new file mode 100644 index 000000000..476e58559 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/regioncoverer.go @@ -0,0 +1,477 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "container/heap" +) + +// RegionCoverer allows arbitrary regions to be approximated as unions of cells (CellUnion). +// This is useful for implementing various sorts of search and precomputation operations. +// +// Typical usage: +// +// rc := &s2.RegionCoverer{MaxLevel: 30, MaxCells: 5} +// r := s2.Region(CapFromCenterArea(center, area)) +// covering := rc.Covering(r) +// +// This yields a CellUnion of at most 5 cells that is guaranteed to cover the +// given region (a disc-shaped region on the sphere). +// +// For covering, only cells where (level - MinLevel) is a multiple of LevelMod will be used. +// This effectively allows the branching factor of the S2 CellID hierarchy to be increased. +// Currently the only parameter values allowed are 1, 2, or 3, corresponding to +// branching factors of 4, 16, and 64 respectively. +// +// Note the following: +// +// - MinLevel takes priority over MaxCells, i.e. cells below the given level will +// never be used even if this causes a large number of cells to be returned. +// +// - For any setting of MaxCells, up to 6 cells may be returned if that +// is the minimum number of cells required (e.g. if the region intersects +// all six face cells). Up to 3 cells may be returned even for very tiny +// convex regions if they happen to be located at the intersection of +// three cube faces. 
+// +// - For any setting of MaxCells, an arbitrary number of cells may be +// returned if MinLevel is too high for the region being approximated. +// +// - If MaxCells is less than 4, the area of the covering may be +// arbitrarily large compared to the area of the original region even if +// the region is convex (e.g. a Cap or Rect). +// +// The approximation algorithm is not optimal but does a pretty good job in +// practice. The output does not always use the maximum number of cells +// allowed, both because this would not always yield a better approximation, +// and because MaxCells is a limit on how much work is done exploring the +// possible covering as well as a limit on the final output size. +// +// Because it is an approximation algorithm, one should not rely on the +// stability of the output. In particular, the output of the covering algorithm +// may change across different versions of the library. +// +// One can also generate interior coverings, which are sets of cells which +// are entirely contained within a region. Interior coverings can be +// empty, even for non-empty regions, if there are no cells that satisfy +// the provided constraints and are contained by the region. Note that for +// performance reasons, it is wise to specify a MaxLevel when computing +// interior coverings - otherwise for regions with small or zero area, the +// algorithm may spend a lot of time subdividing cells all the way to leaf +// level to try to find contained cells. +type RegionCoverer struct { + MinLevel int // the minimum cell level to be used. + MaxLevel int // the maximum cell level to be used. + LevelMod int // the LevelMod to be used. + MaxCells int // the maximum desired number of cells in the approximation. +} + +type coverer struct { + minLevel int // the minimum cell level to be used. + maxLevel int // the maximum cell level to be used. + levelMod int // the LevelMod to be used. + maxCells int // the maximum desired number of cells in the approximation. + region Region + result CellUnion + pq priorityQueue + interiorCovering bool +} + +type candidate struct { + cell Cell + terminal bool // Cell should not be expanded further. + numChildren int // Number of children that intersect the region. + children []*candidate // Actual size may be 0, 4, 16, or 64 elements. + priority int // Priority of the candidate. +} + +type priorityQueue []*candidate + +func (pq priorityQueue) Len() int { + return len(pq) +} + +func (pq priorityQueue) Less(i, j int) bool { + // We want Pop to give us the highest, not lowest, priority so we use greater than here. + return pq[i].priority > pq[j].priority +} + +func (pq priorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] +} + +func (pq *priorityQueue) Push(x interface{}) { + item := x.(*candidate) + *pq = append(*pq, item) +} + +func (pq *priorityQueue) Pop() interface{} { + item := (*pq)[len(*pq)-1] + *pq = (*pq)[:len(*pq)-1] + return item +} + +func (pq *priorityQueue) Reset() { + *pq = (*pq)[:0] +} + +// newCandidate returns a new candidate with no children if the cell intersects the given region. +// The candidate is marked as terminal if it should not be expanded further. 
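Expanding the "typical usage" shown in the RegionCoverer type comment above into a runnable sketch, placed here before the candidate machinery (the center point and sizes are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s1"
	"github.com/golang/geo/s2"
)

func main() {
	// Cover a cap of ~100km radius (Earth radius ~6371km) with at most 8 cells.
	center := s2.PointFromLatLng(s2.LatLngFromDegrees(48.86, 2.35))
	region := s2.CapFromCenterAngle(center, s1.Angle(100.0/6371.0))

	rc := &s2.RegionCoverer{MinLevel: 4, MaxLevel: 16, LevelMod: 1, MaxCells: 8}
	for _, id := range rc.Covering(region) {
		fmt.Println(id.ToToken(), "level", id.Level())
	}
}
```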
+func (c *coverer) newCandidate(cell Cell) *candidate {
+	if !c.region.IntersectsCell(cell) {
+		return nil
+	}
+	cand := &candidate{cell: cell}
+	level := int(cell.level)
+	if level >= c.minLevel {
+		if c.interiorCovering {
+			if c.region.ContainsCell(cell) {
+				cand.terminal = true
+			} else if level+c.levelMod > c.maxLevel {
+				return nil
+			}
+		} else if level+c.levelMod > c.maxLevel || c.region.ContainsCell(cell) {
+			cand.terminal = true
+		}
+	}
+	return cand
+}
+
+// expandChildren populates the children of the candidate by expanding the given number of
+// levels from the given cell. Returns the number of children that were marked "terminal".
+func (c *coverer) expandChildren(cand *candidate, cell Cell, numLevels int) int {
+	numLevels--
+	var numTerminals int
+	last := cell.id.ChildEnd()
+	for ci := cell.id.ChildBegin(); ci != last; ci = ci.Next() {
+		childCell := CellFromCellID(ci)
+		if numLevels > 0 {
+			if c.region.IntersectsCell(childCell) {
+				numTerminals += c.expandChildren(cand, childCell, numLevels)
+			}
+			continue
+		}
+		if child := c.newCandidate(childCell); child != nil {
+			cand.children = append(cand.children, child)
+			cand.numChildren++
+			if child.terminal {
+				numTerminals++
+			}
+		}
+	}
+	return numTerminals
+}
+
+// addCandidate adds the given candidate to the result if it is marked as "terminal",
+// otherwise expands its children and inserts it into the priority queue.
+// Passing an argument of nil does nothing.
+func (c *coverer) addCandidate(cand *candidate) {
+	if cand == nil {
+		return
+	}
+
+	if cand.terminal {
+		c.result = append(c.result, cand.cell.id)
+		return
+	}
+
+	// Expand one level at a time until we hit minLevel to ensure that we don't skip over it.
+	numLevels := c.levelMod
+	level := int(cand.cell.level)
+	if level < c.minLevel {
+		numLevels = 1
+	}
+
+	numTerminals := c.expandChildren(cand, cand.cell, numLevels)
+	maxChildrenShift := uint(2 * c.levelMod)
+	if cand.numChildren == 0 {
+		return
+	} else if !c.interiorCovering && numTerminals == 1<<maxChildrenShift && level >= c.minLevel {
+		// Optimization: add the parent cell rather than all of its children.
+		// We can't do this for interior coverings, since the children just
+		// intersect the region, but may not be contained by it - we need to
+		// subdivide them further.
+		cand.terminal = true
+		c.addCandidate(cand)
+	} else {
+		// We negate the priority so that smaller absolute priorities are returned
+		// first. The heuristic is designed to refine the largest cells first,
+		// since those are where we have the largest potential gain. Among cells
+		// of the same size, we prefer the cells with the fewest children.
+		// Finally, among cells with equal numbers of children we prefer those
+		// with the smallest number of children that cannot be refined further.
+		cand.priority = -(((level<<maxChildrenShift)+cand.numChildren)<<maxChildrenShift + numTerminals)
+		heap.Push(&c.pq, cand)
+	}
+}
+
+// adjustLevel returns the reduced "level" so that it satisfies levelMod. Levels
+// smaller than minLevel are not affected (since cells at these levels are
+// eventually expanded).
+func (c *coverer) adjustLevel(level int) int {
+	if c.levelMod > 1 && level > c.minLevel {
+		level -= (level - c.minLevel) % c.levelMod
+	}
+	return level
+}
+
+// adjustCellLevels ensures that all cells with level > minLevel also satisfy levelMod,
+// by replacing them with an ancestor if necessary. Cell levels smaller
+// than minLevel are not modified (see AdjustLevel). The output is
+// then normalized to ensure that no redundant cells are present.
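+// For example, with minLevel=4 and levelMod=3, a level-9 cell is replaced by
+// its level-7 ancestor, since adjustLevel(9) = 9 - ((9-4) % 3) = 7.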
+func (c *coverer) adjustCellLevels(cells *CellUnion) { + if c.levelMod == 1 { + return + } + + var out int + for _, ci := range *cells { + level := ci.Level() + newLevel := c.adjustLevel(level) + if newLevel != level { + ci = ci.Parent(newLevel) + } + if out > 0 && (*cells)[out-1].Contains(ci) { + continue + } + for out > 0 && ci.Contains((*cells)[out-1]) { + out-- + } + (*cells)[out] = ci + out++ + } + *cells = (*cells)[:out] +} + +// initialCandidates computes a set of initial candidates that cover the given region. +func (c *coverer) initialCandidates() { + // Optimization: start with a small (usually 4 cell) covering of the region's bounding cap. + temp := &RegionCoverer{MaxLevel: c.maxLevel, LevelMod: 1, MaxCells: minInt(4, c.maxCells)} + + cells := temp.FastCovering(c.region) + c.adjustCellLevels(&cells) + for _, ci := range cells { + c.addCandidate(c.newCandidate(CellFromCellID(ci))) + } +} + +// coveringInternal generates a covering and stores it in result. +// Strategy: Start with the 6 faces of the cube. Discard any +// that do not intersect the shape. Then repeatedly choose the +// largest cell that intersects the shape and subdivide it. +// +// result contains the cells that will be part of the output, while pq +// contains cells that we may still subdivide further. Cells that are +// entirely contained within the region are immediately added to the output, +// while cells that do not intersect the region are immediately discarded. +// Therefore pq only contains cells that partially intersect the region. +// Candidates are prioritized first according to cell size (larger cells +// first), then by the number of intersecting children they have (fewest +// children first), and then by the number of fully contained children +// (fewest children first). +func (c *coverer) coveringInternal(region Region) { + c.region = region + + c.initialCandidates() + for c.pq.Len() > 0 && (!c.interiorCovering || len(c.result) < c.maxCells) { + cand := heap.Pop(&c.pq).(*candidate) + + // For interior covering we keep subdividing no matter how many children + // candidate has. If we reach MaxCells before expanding all children, + // we will just use some of them. + // For exterior covering we cannot do this, because result has to cover the + // whole region, so all children have to be used. + // candidate.numChildren == 1 case takes care of the situation when we + // already have more than MaxCells in result (minLevel is too high). + // Subdividing of the candidate with one child does no harm in this case. + if c.interiorCovering || int(cand.cell.level) < c.minLevel || cand.numChildren == 1 || len(c.result)+c.pq.Len()+cand.numChildren <= c.maxCells { + for _, child := range cand.children { + if !c.interiorCovering || len(c.result) < c.maxCells { + c.addCandidate(child) + } + } + } else { + cand.terminal = true + c.addCandidate(cand) + } + } + c.pq.Reset() + c.region = nil +} + +// newCoverer returns an instance of coverer. +func (rc *RegionCoverer) newCoverer() *coverer { + return &coverer{ + minLevel: maxInt(0, minInt(maxLevel, rc.MinLevel)), + maxLevel: maxInt(0, minInt(maxLevel, rc.MaxLevel)), + levelMod: maxInt(1, minInt(3, rc.LevelMod)), + maxCells: rc.MaxCells, + } +} + +// Covering returns a CellUnion that covers the given region and satisfies the various restrictions. 
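+//
+// A minimal usage sketch (the Cap here stands in for any Region value):
+//
+//	rc := &RegionCoverer{MaxLevel: 20, LevelMod: 1, MaxCells: 8}
+//	c := CapFromCenterAngle(PointFromCoords(1, 0, 0), 0.1)
+//	covering := rc.Covering(c)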
+func (rc *RegionCoverer) Covering(region Region) CellUnion { + covering := rc.CellUnion(region) + covering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod))) + return covering +} + +// InteriorCovering returns a CellUnion that is contained within the given region and satisfies the various restrictions. +func (rc *RegionCoverer) InteriorCovering(region Region) CellUnion { + intCovering := rc.InteriorCellUnion(region) + intCovering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod))) + return intCovering +} + +// CellUnion returns a normalized CellUnion that covers the given region and +// satisfies the restrictions except for minLevel and levelMod. These criteria +// cannot be satisfied using a cell union because cell unions are +// automatically normalized by replacing four child cells with their parent +// whenever possible. (Note that the list of cell ids passed to the CellUnion +// constructor does in fact satisfy all the given restrictions.) +func (rc *RegionCoverer) CellUnion(region Region) CellUnion { + c := rc.newCoverer() + c.coveringInternal(region) + cu := c.result + cu.Normalize() + return cu +} + +// InteriorCellUnion returns a normalized CellUnion that is contained within the given region and +// satisfies the restrictions except for minLevel and levelMod. These criteria +// cannot be satisfied using a cell union because cell unions are +// automatically normalized by replacing four child cells with their parent +// whenever possible. (Note that the list of cell ids passed to the CellUnion +// constructor does in fact satisfy all the given restrictions.) +func (rc *RegionCoverer) InteriorCellUnion(region Region) CellUnion { + c := rc.newCoverer() + c.interiorCovering = true + c.coveringInternal(region) + cu := c.result + cu.Normalize() + return cu +} + +// FastCovering returns a CellUnion that covers the given region similar to Covering, +// except that this method is much faster and the coverings are not as tight. +// All of the usual parameters are respected (MaxCells, MinLevel, MaxLevel, and LevelMod), +// except that the implementation makes no attempt to take advantage of large values of +// MaxCells. (A small number of cells will always be returned.) +// +// This function is useful as a starting point for algorithms that +// recursively subdivide cells. +func (rc *RegionCoverer) FastCovering(region Region) CellUnion { + c := rc.newCoverer() + cu := CellUnion(region.CellUnionBound()) + c.normalizeCovering(&cu) + return cu +} + +// normalizeCovering normalizes the "covering" so that it conforms to the current covering +// parameters (MaxCells, minLevel, maxLevel, and levelMod). +// This method makes no attempt to be optimal. In particular, if +// minLevel > 0 or levelMod > 1 then it may return more than the +// desired number of cells even when this isn't necessary. +// +// Note that when the covering parameters have their default values, almost +// all of the code in this function is skipped. +func (c *coverer) normalizeCovering(covering *CellUnion) { + // If any cells are too small, or don't satisfy levelMod, then replace them with ancestors. + if c.maxLevel < maxLevel || c.levelMod > 1 { + for i, ci := range *covering { + level := ci.Level() + newLevel := c.adjustLevel(minInt(level, c.maxLevel)) + if newLevel != level { + (*covering)[i] = ci.Parent(newLevel) + } + } + } + // Sort the cells and simplify them. 
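+	// (Normalize sorts the cells, removes duplicates and cells contained by
+	// other cells, and replaces groups of four sibling cells by their parent.)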
+ covering.Normalize() + + // If there are still too many cells, then repeatedly replace two adjacent + // cells in CellID order by their lowest common ancestor. + for len(*covering) > c.maxCells { + bestIndex := -1 + bestLevel := -1 + for i := 0; i+1 < len(*covering); i++ { + level, ok := (*covering)[i].CommonAncestorLevel((*covering)[i+1]) + if !ok { + continue + } + level = c.adjustLevel(level) + if level > bestLevel { + bestLevel = level + bestIndex = i + } + } + + if bestLevel < c.minLevel { + break + } + (*covering)[bestIndex] = (*covering)[bestIndex].Parent(bestLevel) + covering.Normalize() + } + // Make sure that the covering satisfies minLevel and levelMod, + // possibly at the expense of satisfying MaxCells. + if c.minLevel > 0 || c.levelMod > 1 { + covering.Denormalize(c.minLevel, c.levelMod) + } +} + +// SimpleRegionCovering returns a set of cells at the given level that cover +// the connected region and a starting point on the boundary or inside the +// region. The cells are returned in arbitrary order. +// +// Note that this method is not faster than the regular Covering +// method for most region types, such as Cap or Polygon, and in fact it +// can be much slower when the output consists of a large number of cells. +// Currently it can be faster at generating coverings of long narrow regions +// such as polylines, but this may change in the future. +func SimpleRegionCovering(region Region, start Point, level int) []CellID { + return FloodFillRegionCovering(region, cellIDFromPoint(start).Parent(level)) +} + +// FloodFillRegionCovering returns all edge-connected cells at the same level as +// the given CellID that intersect the given region, in arbitrary order. +func FloodFillRegionCovering(region Region, start CellID) []CellID { + var output []CellID + all := map[CellID]bool{ + start: true, + } + frontier := []CellID{start} + for len(frontier) > 0 { + id := frontier[len(frontier)-1] + frontier = frontier[:len(frontier)-1] + if !region.IntersectsCell(CellFromCellID(id)) { + continue + } + output = append(output, id) + for _, nbr := range id.EdgeNeighbors() { + if !all[nbr] { + all[nbr] = true + frontier = append(frontier, nbr) + } + } + } + + return output +} + +// TODO(roberts): The differences from the C++ version +// finish up FastCovering to match C++ +// IsCanonical +// CanonicalizeCovering +// containsAllChildren +// replaceCellsWithAncestor diff --git a/vendor/github.com/golang/geo/s2/shape.go b/vendor/github.com/golang/geo/s2/shape.go new file mode 100644 index 000000000..2cbf170c3 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/shape.go @@ -0,0 +1,263 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "sort" +) + +// Edge represents a geodesic edge consisting of two vertices. Zero-length edges are +// allowed, and can be used to represent points. 
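+// A point p can be represented as the degenerate edge Edge{V0: p, V1: p}.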
+type Edge struct {
+	V0, V1 Point
+}
+
+// Cmp compares the two edges using the underlying Point's Cmp method and returns
+//
+// -1 if e < other
+// 0 if e == other
+// +1 if e > other
+//
+// The two edges are compared by first vertex, and then by the second vertex.
+func (e Edge) Cmp(other Edge) int {
+	if v0cmp := e.V0.Cmp(other.V0.Vector); v0cmp != 0 {
+		return v0cmp
+	}
+	return e.V1.Cmp(other.V1.Vector)
+}
+
+// sortEdges sorts the slice of Edges in place.
+func sortEdges(e []Edge) {
+	sort.Sort(edges(e))
+}
+
+// edges implements the Sort interface for slices of Edge.
+type edges []Edge
+
+func (e edges) Len() int { return len(e) }
+func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e edges) Less(i, j int) bool { return e[i].Cmp(e[j]) == -1 }
+
+// ShapeEdgeID is a unique identifier for an Edge within a ShapeIndex,
+// consisting of a (shapeID, edgeID) pair.
+type ShapeEdgeID struct {
+	ShapeID int32
+	EdgeID int32
+}
+
+// Cmp compares the two ShapeEdgeIDs and returns
+//
+// -1 if s < other
+// 0 if s == other
+// +1 if s > other
+//
+// The two are compared first by shape id and then by edge id.
+func (s ShapeEdgeID) Cmp(other ShapeEdgeID) int {
+	switch {
+	case s.ShapeID < other.ShapeID:
+		return -1
+	case s.ShapeID > other.ShapeID:
+		return 1
+	}
+	switch {
+	case s.EdgeID < other.EdgeID:
+		return -1
+	case s.EdgeID > other.EdgeID:
+		return 1
+	}
+	return 0
+}
+
+// ShapeEdge represents a ShapeEdgeID with the two endpoints of that Edge.
+type ShapeEdge struct {
+	ID ShapeEdgeID
+	Edge Edge
+}
+
+// Chain represents a range of edge IDs corresponding to a chain of connected
+// edges, specified as a (start, length) pair. The chain is defined to consist of
+// edge IDs {start, start + 1, ..., start + length - 1}.
+type Chain struct {
+	Start, Length int
+}
+
+// ChainPosition represents the position of an edge within a given edge chain,
+// specified as a (chainID, offset) pair. Chains are numbered sequentially
+// starting from zero, and offsets are measured from the start of each chain.
+type ChainPosition struct {
+	ChainID, Offset int
+}
+
+// A ReferencePoint consists of a point and a boolean indicating whether the point
+// is contained by a particular shape.
+type ReferencePoint struct {
+	Point Point
+	Contained bool
+}
+
+// OriginReferencePoint returns a ReferencePoint with the given value for
+// contained and the origin point. It should be used when all points or no
+// points are contained.
+func OriginReferencePoint(contained bool) ReferencePoint {
+	return ReferencePoint{Point: OriginPoint(), Contained: contained}
+}
+
+// typeTag is a 32-bit tag that can be used to identify the type of an encoded
+// Shape. All encodable types have a non-zero type tag. The tag associated with
+// a given shape type is reported by its typeTag method.
+type typeTag uint32
+
+const (
+	// Indicates that a given Shape type cannot be encoded.
+	typeTagNone typeTag = 0
+	typeTagPolygon typeTag = 1
+	typeTagPolyline typeTag = 2
+	typeTagPointVector typeTag = 3
+	typeTagLaxPolyline typeTag = 4
+	typeTagLaxPolygon typeTag = 5
+
+	// The minimum allowable tag for future user-defined Shape types.
+	typeTagMinUser typeTag = 8192
+)
+
+// Shape represents polygonal geometry in a flexible way. It is organized as a
+// collection of edges that optionally defines an interior. All geometry
+// represented by a given Shape must have the same dimension, which means that
+// a Shape can represent either a set of points, a set of polylines, or a set
+// of polygons.
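+// (Mixed collections, such as points together with polygons, are represented
+// by storing several shapes of different dimensions in one ShapeIndex.)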
+//
+// Shape is defined as an interface in order to give clients control over the
+// underlying data representation. Sometimes a Shape does not have any data of
+// its own, but instead wraps some other type.
+//
+// Shape operations are typically defined on a ShapeIndex rather than
+// individual shapes. A ShapeIndex is simply a collection of Shapes,
+// possibly of different dimensions (e.g. 10 points and 3 polygons), organized
+// into a data structure for efficient edge access.
+//
+// The edges of a Shape are indexed by a contiguous range of edge IDs
+// starting at 0. The edges are further subdivided into chains, where each
+// chain consists of a sequence of edges connected end-to-end (a polyline).
+// For example, a Shape representing two polylines AB and CDE would have
+// three edges (AB, CD, DE) grouped into two chains: (AB) and (CD, DE).
+// Similarly, a Shape representing 5 points would have 5 chains consisting
+// of one edge each.
+//
+// Shape has methods that allow edges to be accessed either using the global
+// numbering (edge ID) or within a particular chain. The global numbering is
+// sufficient for most purposes, but the chain representation is useful for
+// certain algorithms such as intersection (see BooleanOperation).
+type Shape interface {
+	// NumEdges returns the number of edges in this shape.
+	NumEdges() int
+
+	// Edge returns the edge for the given edge index.
+	Edge(i int) Edge
+
+	// ReferencePoint returns an arbitrary reference point for the shape. (The
+	// containment boolean value must be false for shapes that do not have an interior.)
+	//
+	// This reference point may then be used to compute the containment of other
+	// points by counting edge crossings.
+	ReferencePoint() ReferencePoint
+
+	// NumChains reports the number of contiguous edge chains in the shape.
+	// For example, a shape whose edges are [AB, BC, CD, AE, EF] would consist
+	// of two chains (AB,BC,CD and AE,EF). Every chain is assigned a chain ID
+	// numbered sequentially starting from zero.
+	//
+	// Note that it is always acceptable to implement this method by returning
+	// NumEdges, i.e. every chain consists of a single edge, but this may
+	// reduce the efficiency of some algorithms.
+	NumChains() int
+
+	// Chain returns the range of edge IDs corresponding to the given edge chain.
+	// Edge chains must form contiguous, non-overlapping ranges that cover
+	// the entire range of edge IDs. This is spelled out more formally below:
+	//
+	// 0 <= i < NumChains()
+	// Chain(i).length > 0, for all i
+	// Chain(0).start == 0
+	// Chain(i).start + Chain(i).length == Chain(i+1).start, for i < NumChains()-1
+	// Chain(i).start + Chain(i).length == NumEdges(), for i == NumChains()-1
+	Chain(chainID int) Chain
+
+	// ChainEdge returns the edge at offset "offset" within edge chain "chainID".
+	// Equivalent to "shape.Edge(shape.Chain(chainID).start + offset)"
+	// but more efficient.
+	ChainEdge(chainID, offset int) Edge
+
+	// ChainPosition finds the chain containing the given edge, and returns the
+	// position of that edge as a ChainPosition(chainID, offset) pair.
+	//
+	// shape.Chain(pos.chainID).start + pos.offset == edgeID
+	// shape.Chain(pos.chainID+1).start > edgeID
+	//
+	// where pos == shape.ChainPosition(edgeID).
+	ChainPosition(edgeID int) ChainPosition
+
+	// Dimension returns the dimension of the geometry represented by this shape,
+	// either 0, 1 or 2 for point, polyline and polygon geometry respectively.
+	//
+	// 0 - Point geometry. Each point is represented as a degenerate edge.
+	//
+	// 1 - Polyline geometry. Polyline edges may be degenerate. A shape may
+	// represent any number of polylines. Polyline edges may intersect.
+	//
+	// 2 - Polygon geometry. Edges should be oriented such that the polygon
+	// interior is always on the left. In theory the edges may be returned
+	// in any order, but typically the edges are organized as a collection
+	// of edge chains where each chain represents one polygon loop.
+	// Polygons may have degeneracies (e.g., degenerate edges or sibling
+	// pairs consisting of an edge and its corresponding reversed edge).
+	// A polygon loop may also be full (containing all points on the
+	// sphere); by convention this is represented as a chain with no edges.
+	// (See laxPolygon for details.)
+	//
+	// This method allows degenerate geometry of different dimensions
+	// to be distinguished, e.g. it allows a point to be distinguished from a
+	// polyline or polygon that has been simplified to a single point.
+	Dimension() int
+
+	// IsEmpty reports whether the Shape contains no points. (Note that the full
+	// polygon is represented as a chain with zero edges.)
+	IsEmpty() bool
+
+	// IsFull reports whether the Shape contains all points on the sphere.
+	IsFull() bool
+
+	// typeTag returns a value that can be used to identify the type of an
+	// encoded Shape.
+	typeTag() typeTag
+
+	// We do not support implementations of this interface outside this package.
+	privateInterface()
+}
+
+// defaultShapeIsEmpty reports whether this shape contains no points.
+func defaultShapeIsEmpty(s Shape) bool {
+	return s.NumEdges() == 0 && (s.Dimension() != 2 || s.NumChains() == 0)
+}
+
+// defaultShapeIsFull reports whether this shape contains all points on the sphere.
+func defaultShapeIsFull(s Shape) bool {
+	return s.NumEdges() == 0 && s.Dimension() == 2 && s.NumChains() > 0
+}
+
+// A minimal check for types that should satisfy the Shape interface.
+var (
+	_ Shape = &Loop{}
+	_ Shape = &Polygon{}
+	_ Shape = &Polyline{}
+)
diff --git a/vendor/github.com/golang/geo/s2/shapeindex.go b/vendor/github.com/golang/geo/s2/shapeindex.go
new file mode 100644
index 000000000..8da299d06
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/shapeindex.go
@@ -0,0 +1,1507 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+	"math"
+	"sort"
+	"sync"
+	"sync/atomic"
+
+	"github.com/golang/geo/r1"
+	"github.com/golang/geo/r2"
+)
+
+// CellRelation describes the possible relationships between a target cell
+// and the cells of the ShapeIndex. If the target is an index cell or is
+// contained by an index cell, it is Indexed. If the target is subdivided
+// into one or more index cells, it is Subdivided. Otherwise it is Disjoint.
+type CellRelation int
+
+// The possible CellRelations for a ShapeIndex.
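+// For example, a target cell that exactly matches an index cell is Indexed,
+// while a target whose children are stored as separate index cells is Subdivided.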
+const ( + Indexed CellRelation = iota + Subdivided + Disjoint +) + +const ( + // cellPadding defines the total error when clipping an edge which comes + // from two sources: + // (1) Clipping the original spherical edge to a cube face (the face edge). + // The maximum error in this step is faceClipErrorUVCoord. + // (2) Clipping the face edge to the u- or v-coordinate of a cell boundary. + // The maximum error in this step is edgeClipErrorUVCoord. + // Finally, since we encounter the same errors when clipping query edges, we + // double the total error so that we only need to pad edges during indexing + // and not at query time. + cellPadding = 2.0 * (faceClipErrorUVCoord + edgeClipErrorUVCoord) + + // cellSizeToLongEdgeRatio defines the cell size relative to the length of an + // edge at which it is first considered to be long. Long edges do not + // contribute toward the decision to subdivide a cell further. For example, + // a value of 2.0 means that the cell must be at least twice the size of the + // edge in order for that edge to be counted. There are two reasons for not + // counting long edges: (1) such edges typically need to be propagated to + // several children, which increases time and memory costs without much benefit, + // and (2) in pathological cases, many long edges close together could force + // subdivision to continue all the way to the leaf cell level. + cellSizeToLongEdgeRatio = 1.0 +) + +// clippedShape represents the part of a shape that intersects a Cell. +// It consists of the set of edge IDs that intersect that cell and a boolean +// indicating whether the center of the cell is inside the shape (for shapes +// that have an interior). +// +// Note that the edges themselves are not clipped; we always use the original +// edges for intersection tests so that the results will be the same as the +// original shape. +type clippedShape struct { + // shapeID is the index of the shape this clipped shape is a part of. + shapeID int32 + + // containsCenter indicates if the center of the CellID this shape has been + // clipped to falls inside this shape. This is false for shapes that do not + // have an interior. + containsCenter bool + + // edges is the ordered set of ShapeIndex original edge IDs. Edges + // are stored in increasing order of edge ID. + edges []int +} + +// newClippedShape returns a new clipped shape for the given shapeID and number of expected edges. +func newClippedShape(id int32, numEdges int) *clippedShape { + return &clippedShape{ + shapeID: id, + edges: make([]int, numEdges), + } +} + +// numEdges returns the number of edges that intersect the CellID of the Cell this was clipped to. +func (c *clippedShape) numEdges() int { + return len(c.edges) +} + +// containsEdge reports if this clipped shape contains the given edge ID. +func (c *clippedShape) containsEdge(id int) bool { + // Linear search is fast because the number of edges per shape is typically + // very small (less than 10). + for _, e := range c.edges { + if e == id { + return true + } + } + return false +} + +// ShapeIndexCell stores the index contents for a particular CellID. +type ShapeIndexCell struct { + shapes []*clippedShape +} + +// NewShapeIndexCell creates a new cell that is sized to hold the given number of shapes. +func NewShapeIndexCell(numShapes int) *ShapeIndexCell { + return &ShapeIndexCell{ + shapes: make([]*clippedShape, numShapes), + } +} + +// numEdges reports the total number of edges in all clipped shapes in this cell. 
+func (s *ShapeIndexCell) numEdges() int {
+	var e int
+	for _, cs := range s.shapes {
+		e += cs.numEdges()
+	}
+	return e
+}
+
+// add adds the given clipped shape to this index cell.
+func (s *ShapeIndexCell) add(c *clippedShape) {
+	// C++ uses a set, so it's ordered and unique. We don't currently catch
+	// the case when a duplicate value is added.
+	s.shapes = append(s.shapes, c)
+}
+
+// findByShapeID returns the clipped shape that contains the given shapeID,
+// or nil if none of the clipped shapes contain it.
+func (s *ShapeIndexCell) findByShapeID(shapeID int32) *clippedShape {
+	// Linear search is fine because the number of shapes per cell is typically
+	// very small (most often 1), and is large only for pathological inputs
+	// (e.g. very deeply nested loops).
+	for _, clipped := range s.shapes {
+		if clipped.shapeID == shapeID {
+			return clipped
+		}
+	}
+	return nil
+}
+
+// faceEdge and clippedEdge store temporary edge data while the index is being
+// updated.
+//
+// While it would be possible to combine all the edge information into one
+// structure, there are two good reasons for separating it:
+//
+// - Memory usage. Separating the two means that we only need to
+// store one copy of the per-face data no matter how many times an edge is
+// subdivided, and it also lets us delay computing bounding boxes until
+// they are needed for processing each face (when the dataset spans
+// multiple faces).
+//
+// - Performance. UpdateEdges is significantly faster on large polygons when
+// the data is separated, because it often only needs to access the data in
+// clippedEdge and this data is cached more successfully.
+
+// faceEdge represents an edge that has been projected onto a given face.
+type faceEdge struct {
+	shapeID int32 // The ID of shape that this edge belongs to
+	edgeID int // Edge ID within that shape
+	maxLevel int // Not desirable to subdivide this edge beyond this level
+	hasInterior bool // Belongs to a shape that has a dimension of 2
+	a, b r2.Point // The edge endpoints, clipped to a given face
+	edge Edge // The original edge.
+}
+
+// clippedEdge represents the portion of that edge that has been clipped to a given Cell.
+type clippedEdge struct {
+	faceEdge *faceEdge // The original unclipped edge
+	bound r2.Rect // Bounding box for the clipped portion
+}
+
+// ShapeIndexIteratorPos defines the set of possible iterator starting positions. By
+// default iterators are unpositioned, since this avoids an extra seek in the common
+// situation where one of the seek methods (such as LocatePoint) is immediately called.
+type ShapeIndexIteratorPos int
+
+const (
+	// IteratorBegin specifies the iterator should be positioned at the beginning of the index.
+	IteratorBegin ShapeIndexIteratorPos = iota
+	// IteratorEnd specifies the iterator should be positioned at the end of the index.
+	IteratorEnd
+)
+
+// ShapeIndexIterator is an iterator that provides low-level access to
+// the cells of the index. Cells are returned in increasing order of CellID.
+//
+// for it := index.Iterator(); !it.Done(); it.Next() {
+// fmt.Print(it.CellID())
+// }
+//
+type ShapeIndexIterator struct {
+	index *ShapeIndex
+	position int
+	id CellID
+	cell *ShapeIndexCell
+}
+
+// NewShapeIndexIterator creates a new iterator for the given index. If a starting
+// position is specified, the iterator is positioned at the given spot.
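+//
+// A minimal usage sketch (index here is any previously built ShapeIndex):
+//
+//	it := NewShapeIndexIterator(index, IteratorBegin)
+//	for ; !it.Done(); it.Next() {
+//		_ = it.CellID()
+//	}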
+func NewShapeIndexIterator(index *ShapeIndex, pos ...ShapeIndexIteratorPos) *ShapeIndexIterator { + s := &ShapeIndexIterator{ + index: index, + } + + if len(pos) > 0 { + if len(pos) > 1 { + panic("too many ShapeIndexIteratorPos arguments") + } + switch pos[0] { + case IteratorBegin: + s.Begin() + case IteratorEnd: + s.End() + default: + panic("unknown ShapeIndexIteratorPos value") + } + } + + return s +} + +// CellID returns the CellID of the current index cell. +// If s.Done() is true, a value larger than any valid CellID is returned. +func (s *ShapeIndexIterator) CellID() CellID { + return s.id +} + +// IndexCell returns the current index cell. +func (s *ShapeIndexIterator) IndexCell() *ShapeIndexCell { + // TODO(roberts): C++ has this call a virtual method to allow subclasses + // of ShapeIndexIterator to do other work before returning the cell. Do + // we need such a thing? + return s.cell +} + +// Center returns the Point at the center of the current position of the iterator. +func (s *ShapeIndexIterator) Center() Point { + return s.CellID().Point() +} + +// Begin positions the iterator at the beginning of the index. +func (s *ShapeIndexIterator) Begin() { + if !s.index.IsFresh() { + s.index.maybeApplyUpdates() + } + s.position = 0 + s.refresh() +} + +// Next positions the iterator at the next index cell. +func (s *ShapeIndexIterator) Next() { + s.position++ + s.refresh() +} + +// Prev advances the iterator to the previous cell in the index and returns true to +// indicate it was not yet at the beginning of the index. If the iterator is at the +// first cell the call does nothing and returns false. +func (s *ShapeIndexIterator) Prev() bool { + if s.position <= 0 { + return false + } + + s.position-- + s.refresh() + return true +} + +// End positions the iterator at the end of the index. +func (s *ShapeIndexIterator) End() { + s.position = len(s.index.cells) + s.refresh() +} + +// Done reports if the iterator is positioned at or after the last index cell. +func (s *ShapeIndexIterator) Done() bool { + return s.id == SentinelCellID +} + +// refresh updates the stored internal iterator values. +func (s *ShapeIndexIterator) refresh() { + if s.position < len(s.index.cells) { + s.id = s.index.cells[s.position] + s.cell = s.index.cellMap[s.CellID()] + } else { + s.id = SentinelCellID + s.cell = nil + } +} + +// seek positions the iterator at the first cell whose ID >= target, or at the +// end of the index if no such cell exists. +func (s *ShapeIndexIterator) seek(target CellID) { + s.position = sort.Search(len(s.index.cells), func(i int) bool { + return s.index.cells[i] >= target + }) + s.refresh() +} + +// LocatePoint positions the iterator at the cell that contains the given Point. +// If no such cell exists, the iterator position is unspecified, and false is returned. +// The cell at the matched position is guaranteed to contain all edges that might +// intersect the line segment between target and the cell's center. +func (s *ShapeIndexIterator) LocatePoint(p Point) bool { + // Let I = cellMap.LowerBound(T), where T is the leaf cell containing + // point P. Then if T is contained by an index cell, then the + // containing cell is either I or I'. We test for containment by comparing + // the ranges of leaf cells spanned by T, I, and I'. 
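+	// (Here I' denotes the predecessor of I in the index, reached via Prev below.)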
+	target := cellIDFromPoint(p)
+	s.seek(target)
+	if !s.Done() && s.CellID().RangeMin() <= target {
+		return true
+	}
+
+	if s.Prev() && s.CellID().RangeMax() >= target {
+		return true
+	}
+	return false
+}
+
+// LocateCellID attempts to position the iterator at the first matching index cell
+// in the index that has some relation to the given CellID. Let T be the target CellID.
+// If T is contained by (or equal to) some index cell I, then the iterator is positioned
+// at I and returns Indexed. Otherwise if T contains one or more (smaller) index cells,
+// then the iterator is positioned at the first such cell I and returns Subdivided.
+// Otherwise Disjoint is returned and the iterator position is undefined.
+func (s *ShapeIndexIterator) LocateCellID(target CellID) CellRelation {
+	// Let T be the target, let I = cellMap.LowerBound(T.RangeMin()), and
+	// let I' be the predecessor of I. If T contains any index cells, then T
+	// contains I. Similarly, if T is contained by an index cell, then the
+	// containing cell is either I or I'. We test for containment by comparing
+	// the ranges of leaf cells spanned by T, I, and I'.
+	s.seek(target.RangeMin())
+	if !s.Done() {
+		if s.CellID() >= target && s.CellID().RangeMin() <= target {
+			return Indexed
+		}
+		if s.CellID() <= target.RangeMax() {
+			return Subdivided
+		}
+	}
+	if s.Prev() && s.CellID().RangeMax() >= target {
+		return Indexed
+	}
+	return Disjoint
+}
+
+// tracker keeps track of which shapes in a given set contain a particular point
+// (the focus). It provides an efficient way to move the focus from one point
+// to another and incrementally update the set of shapes which contain it. We use
+// this to compute which shapes contain the center of every CellID in the index,
+// by advancing the focus from one cell center to the next.
+//
+// Initially the focus is at the start of the CellID space-filling curve. We then
+// visit all the cells that are being added to the ShapeIndex in increasing order
+// of CellID. For each cell, we draw two edges: one from the entry vertex to the
+// center, and another from the center to the exit vertex (where entry and exit
+// refer to the points where the space-filling curve enters and exits the cell).
+// By counting edge crossings we can incrementally compute which shapes contain
+// the cell center. Note that the same set of shapes will always contain the exit
+// point of one cell and the entry point of the next cell in the index, because
+// either (a) these two points are actually the same, or (b) the intervening
+// cells in CellID order are all empty, and therefore there are no edge crossings
+// if we follow this path from one cell to the other.
+//
+// In C++, this is S2ShapeIndex::InteriorTracker.
+type tracker struct {
+	isActive bool
+	a Point
+	b Point
+	nextCellID CellID
+	crosser *EdgeCrosser
+	shapeIDs []int32
+
+	// Shape ids saved by saveAndClearStateBefore. The state is never saved
+	// recursively so we don't need to worry about maintaining a stack.
+	savedIDs []int32
+}
+
+// newTracker returns a new tracker with the appropriate defaults.
+func newTracker() *tracker {
+	// As shapes are added, we compute which ones contain the start of the
+	// CellID space-filling curve by drawing an edge from OriginPoint to this
+	// point and counting how many shape edges cross this edge.
+	t := &tracker{
+		isActive: false,
+		b: trackerOrigin(),
+		nextCellID: CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
+	}
+	t.drawTo(Point{faceUVToXYZ(0, -1, -1).Normalize()}) // CellID curve start
+
+	return t
+}
+
+// trackerOrigin returns the initial focus point when the tracker is created
+// (corresponding to the start of the CellID space-filling curve).
+func trackerOrigin() Point {
+	// The start of the S2CellId space-filling curve.
+	return Point{faceUVToXYZ(0, -1, -1).Normalize()}
+}
+
+// focus returns the current focus point of the tracker.
+func (t *tracker) focus() Point { return t.b }
+
+// addShape adds a shape whose interior should be tracked. containsFocus indicates
+// whether the current focus point is inside the shape. Alternatively, if
+// the focus point is in the process of being moved (via moveTo/drawTo), you
+// can also specify containsFocus at the old focus point and call testEdge
+// for every edge of the shape that might cross the current drawTo line.
+// This updates the state to correspond to the new focus point.
+//
+// This requires the shape to have an interior.
+func (t *tracker) addShape(shapeID int32, containsFocus bool) {
+	t.isActive = true
+	if containsFocus {
+		t.toggleShape(shapeID)
+	}
+}
+
+// moveTo moves the focus of the tracker to the given point. This method should
+// only be used when it is known that there are no edge crossings between the old
+// and new focus locations; otherwise use drawTo.
+func (t *tracker) moveTo(b Point) { t.b = b }
+
+// drawTo moves the focus of the tracker to the given point. After this method is
+// called, testEdge should be called with all edges that may cross the line
+// segment between the old and new focus locations.
+func (t *tracker) drawTo(b Point) {
+	t.a = t.b
+	t.b = b
+	// TODO: the edge crosser may need an in-place Init method if this gets expensive
+	t.crosser = NewEdgeCrosser(t.a, t.b)
+}
+
+// testEdge checks if the given edge crosses the current edge, and if so,
+// toggles the state of the given shapeID.
+// This requires shape to have an interior.
+func (t *tracker) testEdge(shapeID int32, edge Edge) {
+	if t.crosser.EdgeOrVertexCrossing(edge.V0, edge.V1) {
+		t.toggleShape(shapeID)
+	}
+}
+
+// setNextCellID is used to indicate that the last argument to moveTo or drawTo
+// was the entry vertex of the given CellID, i.e. the tracker is positioned at the
+// start of this cell. By using this method together with atCellID, the caller
+// can avoid calling moveTo in cases where the exit vertex of the previous cell
+// is the same as the entry vertex of the current cell.
+func (t *tracker) setNextCellID(nextCellID CellID) {
+	t.nextCellID = nextCellID.RangeMin()
+}
+
+// atCellID reports if the focus is already at the entry vertex of the given
+// CellID (provided that the caller calls setNextCellID as each cell is processed).
+func (t *tracker) atCellID(cellid CellID) bool {
+	return cellid.RangeMin() == t.nextCellID
+}
+
+// toggleShape adds or removes the given shapeID from the set of IDs it is tracking.
+func (t *tracker) toggleShape(shapeID int32) {
+	// Most shapeIDs slices are small, so special case the common steps.
+
+	// If there is nothing here, add it.
+	if len(t.shapeIDs) == 0 {
+		t.shapeIDs = append(t.shapeIDs, shapeID)
+		return
+	}
+
+	// If it's the first element, drop it from the slice.
+	if t.shapeIDs[0] == shapeID {
+		t.shapeIDs = t.shapeIDs[1:]
+		return
+	}
+
+	for i, s := range t.shapeIDs {
+		if s < shapeID {
+			continue
+		}
+
+		// If it's in the set, cut it out.
+		if s == shapeID {
+			copy(t.shapeIDs[i:], t.shapeIDs[i+1:]) // overwrite the ith element
+			t.shapeIDs = t.shapeIDs[:len(t.shapeIDs)-1]
+			return
+		}
+
+		// We've reached a point in the slice where the new ID should be inserted.
+		// (the given shapeID is now less than the current position's ID.)
+		t.shapeIDs = append(t.shapeIDs[0:i],
+			append([]int32{shapeID}, t.shapeIDs[i:len(t.shapeIDs)]...)...)
+		return
+	}
+
+	// We got to the end and didn't find it, so add it to the list.
+	t.shapeIDs = append(t.shapeIDs, shapeID)
+}
+
+// saveAndClearStateBefore makes an internal copy of the state for shape ids below
+// the given limit, and then clears the state for those shapes. This is used during
+// incremental updates to track the state of added and removed shapes separately.
+func (t *tracker) saveAndClearStateBefore(limitShapeID int32) {
+	limit := t.lowerBound(limitShapeID)
+	t.savedIDs = append([]int32(nil), t.shapeIDs[:limit]...)
+	t.shapeIDs = t.shapeIDs[limit:]
+}
+
+// restoreStateBefore restores the state previously saved by saveAndClearStateBefore.
+// This only affects the state for shapeIDs below "limitShapeID".
+func (t *tracker) restoreStateBefore(limitShapeID int32) {
+	limit := t.lowerBound(limitShapeID)
+	t.shapeIDs = append(append([]int32(nil), t.savedIDs...), t.shapeIDs[limit:]...)
+	t.savedIDs = nil
+}
+
+// lowerBound returns the position of the first entry x where x >= shapeID.
+func (t *tracker) lowerBound(shapeID int32) int32 {
+	panic("not implemented")
+}
+
+// removedShape represents a set of edges from the given shape that is queued for removal.
+type removedShape struct {
+	shapeID int32
+	hasInterior bool
+	containsTrackerOrigin bool
+	edges []Edge
+}
+
+// There are three basic states the index can be in.
+const (
+	stale int32 = iota // There are pending updates.
+	updating // Updates are currently being applied.
+	fresh // There are no pending updates.
+)
+
+// ShapeIndex indexes a set of Shapes, where a Shape is some collection of edges
+// that optionally defines an interior. It can be used to represent a set of
+// points, a set of polylines, or a set of polygons. For Shapes that have
+// interiors, the index makes it very fast to determine which Shape(s) contain
+// a given point or region.
+//
+// The index can be updated incrementally by adding or removing shapes. It is
+// designed to handle up to hundreds of millions of edges. All data structures
+// are designed to be small, so the index is compact; generally it is smaller
+// than the underlying data being indexed. The index is also fast to construct.
+//
+// Polygon, Loop, and Polyline implement Shape which allows these objects to
+// be indexed easily. You can find useful query methods in CrossingEdgeQuery
+// and ClosestEdgeQuery (Not yet implemented in Go).
+//
+// Example showing how to build an index of Polylines:
+//
+// index := NewShapeIndex()
+// for _, polyline := range polylines {
+// index.Add(polyline)
+// }
+// // Now you can use a CrossingEdgeQuery or ClosestEdgeQuery here.
+//
+type ShapeIndex struct {
+	// shapes is a map of shape ID to shape.
+	shapes map[int32]Shape
+
+	// The maximum number of edges per cell.
+	// TODO(roberts): Update the comments when the usage of this is implemented.
+	maxEdgesPerCell int
+
+	// nextID tracks the next ID to hand out. IDs are not reused when shapes
+	// are removed from the index.
+	nextID int32
+
+	// cellMap is a map from CellID to the set of clipped shapes that intersect that
+	// cell. The cell IDs cover a set of non-overlapping regions on the sphere.
+	// In C++, this is a BTree, so the cells are ordered naturally by the data structure.
+	cellMap map[CellID]*ShapeIndexCell
+	// Track the ordered list of cell IDs.
+	cells []CellID
+
+	// The current status of the index; accessed atomically.
+	status int32
+
+	// Additions and removals are queued and processed on the first subsequent
+	// query. There are several reasons to do this:
+	//
+	// - It is significantly more efficient to process updates in batches as
+	// the number of entities added grows.
+	// - Often the index will never be queried, in which case we can save both
+	// the time and memory required to build it. Examples:
+	// + Loops that are created simply to pass to a Polygon. (We don't
+	// need the Loop index, because Polygon builds its own index.)
+	// + Applications that load a database of geometry and then query only
+	// a small fraction of it.
+	//
+	// The main drawback is that we need to go to some extra work to ensure that
+	// some methods are still thread-safe. Note that the goal is *not* to
+	// make this thread-safe in general, but simply to hide the fact that
+	// we defer some of the indexing work until query time.
+	//
+	// This mutex protects all of the following fields in the index.
+	mu sync.RWMutex
+
+	// pendingAdditionsPos is the index of the first entry that has not been processed
+	// via applyUpdatesInternal.
+	pendingAdditionsPos int32
+
+	// The set of shapes that have been queued for removal but not processed yet by
+	// applyUpdatesInternal.
+	pendingRemovals []*removedShape
+}
+
+// NewShapeIndex creates a new ShapeIndex.
+func NewShapeIndex() *ShapeIndex {
+	return &ShapeIndex{
+		maxEdgesPerCell: 10,
+		shapes: make(map[int32]Shape),
+		cellMap: make(map[CellID]*ShapeIndexCell),
+		cells: nil,
+		status: fresh,
+	}
+}
+
+// Iterator returns an iterator for this index.
+func (s *ShapeIndex) Iterator() *ShapeIndexIterator {
+	s.maybeApplyUpdates()
+	return NewShapeIndexIterator(s, IteratorBegin)
+}
+
+// Begin positions the iterator at the first cell in the index.
+func (s *ShapeIndex) Begin() *ShapeIndexIterator {
+	s.maybeApplyUpdates()
+	return NewShapeIndexIterator(s, IteratorBegin)
+}
+
+// End positions the iterator at the end of the index.
+func (s *ShapeIndex) End() *ShapeIndexIterator {
+	// TODO(roberts): It's possible that updates could happen to the index between
+	// the time this is called and the time the iterator's position is used and this
+	// will be invalid or not the end. For now, things will be undefined if this
+	// happens. See about referencing the IsFresh to guard for this in the future.
+	s.maybeApplyUpdates()
+	return NewShapeIndexIterator(s, IteratorEnd)
+}
+
+// Len reports the number of Shapes in this index.
+func (s *ShapeIndex) Len() int {
+	return len(s.shapes)
+}
+
+// Reset resets the index to its original state.
+func (s *ShapeIndex) Reset() {
+	s.shapes = make(map[int32]Shape)
+	s.nextID = 0
+	s.cellMap = make(map[CellID]*ShapeIndexCell)
+	s.cells = nil
+	atomic.StoreInt32(&s.status, fresh)
+}
+
+// NumEdges returns the number of edges in this index.
+func (s *ShapeIndex) NumEdges() int {
+	numEdges := 0
+	for _, shape := range s.shapes {
+		numEdges += shape.NumEdges()
+	}
+	return numEdges
+}
+
+// NumEdgesUpTo returns the number of edges in the given index, up to the given
+// limit. If the limit is encountered, the current running total is returned,
+// which may be more than the limit.
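+//
+// For example, with a limit of 100 and two shapes of 60 edges each, the
+// returned total is 120, since the limit is only checked between shapes.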
+func (s *ShapeIndex) NumEdgesUpTo(limit int) int {
+	var numEdges int
+	// We choose to iterate over the shapes in order to match the counting
+	// up behavior in C++ and for test compatibility instead of using a
+	// more idiomatic range over the shape map.
+	for i := int32(0); i <= s.nextID; i++ {
+		s := s.Shape(i)
+		if s == nil {
+			continue
+		}
+		numEdges += s.NumEdges()
+		if numEdges >= limit {
+			break
+		}
+	}
+
+	return numEdges
+}
+
+// Shape returns the shape with the given ID, or nil if the shape has been removed from the index.
+func (s *ShapeIndex) Shape(id int32) Shape { return s.shapes[id] }
+
+// idForShape returns the id of the given shape in this index, or -1 if it is
+// not in the index.
+//
+// TODO(roberts): Need to figure out an appropriate way to expose this on a Shape.
+// C++ allows a given S2 type (Loop, Polygon, etc) to be part of multiple indexes.
+// By having each type extend S2Shape which has an id element, they all inherit their
+// own id field rather than having to track it themselves.
+func (s *ShapeIndex) idForShape(shape Shape) int32 {
+	for k, v := range s.shapes {
+		if v == shape {
+			return k
+		}
+	}
+	return -1
+}
+
+// Add adds the given shape to the index and returns the assigned ID.
+func (s *ShapeIndex) Add(shape Shape) int32 {
+	s.shapes[s.nextID] = shape
+	s.nextID++
+	atomic.StoreInt32(&s.status, stale)
+	return s.nextID - 1
+}
+
+// Remove removes the given shape from the index.
+func (s *ShapeIndex) Remove(shape Shape) {
+	// The index updates itself lazily because it is much more efficient to
+	// process additions and removals in batches.
+	id := s.idForShape(shape)
+
+	// If the shape wasn't found, it's already been removed or was not in the index.
+	if s.shapes[id] == nil {
+		return
+	}
+
+	// Remove the shape from the shapes map.
+	delete(s.shapes, id)
+
+	// We are removing a shape that has not yet been added to the index,
+	// so there is nothing else to do.
+	if id >= s.pendingAdditionsPos {
+		return
+	}
+
+	numEdges := shape.NumEdges()
+	removed := &removedShape{
+		shapeID: id,
+		hasInterior: shape.Dimension() == 2,
+		containsTrackerOrigin: shape.ReferencePoint().Contained,
+		edges: make([]Edge, numEdges),
+	}
+
+	for e := 0; e < numEdges; e++ {
+		removed.edges[e] = shape.Edge(e)
+	}
+
+	s.pendingRemovals = append(s.pendingRemovals, removed)
+	atomic.StoreInt32(&s.status, stale)
+}
+
+// IsFresh reports if there are no pending updates that need to be applied.
+// This can be useful to avoid building the index unnecessarily, or for
+// choosing between two different algorithms depending on whether the index
+// is available.
+//
+// The returned index status may be slightly out of date if the index was
+// built in a different thread. This is fine for the intended use (as an
+// efficiency hint), but it should not be used by internal methods.
+func (s *ShapeIndex) IsFresh() bool {
+	return atomic.LoadInt32(&s.status) == fresh
+}
+
+// isFirstUpdate reports if this is the first update to the index.
+func (s *ShapeIndex) isFirstUpdate() bool {
+	// Note that it is not sufficient to check whether cellMap is empty, since
+	// entries are added to it during the update process.
+	return s.pendingAdditionsPos == 0
+}
+
+// isShapeBeingRemoved reports if the shape with the given ID is currently slated for removal.
+func (s *ShapeIndex) isShapeBeingRemoved(shapeID int32) bool {
+	// All shape ids being removed fall below the index position of shapes being added.
+	return shapeID < s.pendingAdditionsPos
+}
+
+// maybeApplyUpdates checks if the index pieces have changed, and if so, applies pending updates.
+func (s *ShapeIndex) maybeApplyUpdates() {
+	// TODO(roberts): To avoid acquiring and releasing the mutex on every
+	// query, we should use atomic operations when testing whether the status
+	// is fresh and when updating the status to be fresh. This guarantees
+	// that any thread that sees a status of fresh will also see the
+	// corresponding index updates.
+	if atomic.LoadInt32(&s.status) != fresh {
+		s.mu.Lock()
+		s.applyUpdatesInternal()
+		atomic.StoreInt32(&s.status, fresh)
+		s.mu.Unlock()
+	}
+}
+
+// applyUpdatesInternal does the actual work of updating the index by applying all
+// pending additions and removals. It does *not* update the index's status.
+func (s *ShapeIndex) applyUpdatesInternal() {
+	// TODO(roberts): Building the index can use up to 20x as much memory per
+	// edge as the final index memory size. If this causes issues, add in
+	// batched updating to limit the amount of items per batch to a
+	// configurable memory footprint overhead.
+	t := newTracker()
+
+	// allEdges maps a Face to a collection of faceEdges.
+	allEdges := make([][]faceEdge, 6)
+
+	for _, p := range s.pendingRemovals {
+		s.removeShapeInternal(p, allEdges, t)
+	}
+
+	for id := s.pendingAdditionsPos; id < int32(len(s.shapes)); id++ {
+		s.addShapeInternal(id, allEdges, t)
+	}
+
+	for face := 0; face < 6; face++ {
+		s.updateFaceEdges(face, allEdges[face], t)
+	}
+
+	s.pendingRemovals = s.pendingRemovals[:0]
+	s.pendingAdditionsPos = int32(len(s.shapes))
+	// It is the caller's responsibility to update the index status.
+}
+
+// addShapeInternal clips all edges of the given shape to the six cube faces,
+// adds the clipped edges to the set of allEdges, and starts tracking its
+// interior if necessary.
+func (s *ShapeIndex) addShapeInternal(shapeID int32, allEdges [][]faceEdge, t *tracker) {
+	shape, ok := s.shapes[shapeID]
+	if !ok {
+		// This shape has already been removed.
+		return
+	}
+
+	faceEdge := faceEdge{
+		shapeID: shapeID,
+		hasInterior: shape.Dimension() == 2,
+	}
+
+	if faceEdge.hasInterior {
+		t.addShape(shapeID, containsBruteForce(shape, t.focus()))
+	}
+
+	numEdges := shape.NumEdges()
+	for e := 0; e < numEdges; e++ {
+		edge := shape.Edge(e)
+
+		faceEdge.edgeID = e
+		faceEdge.edge = edge
+		faceEdge.maxLevel = maxLevelForEdge(edge)
+		s.addFaceEdge(faceEdge, allEdges)
+	}
+}
+
+// addFaceEdge adds the given faceEdge into the collection of all edges.
+func (s *ShapeIndex) addFaceEdge(fe faceEdge, allEdges [][]faceEdge) {
+	aFace := face(fe.edge.V0.Vector)
+	// See if both endpoints are on the same face, and are far enough from
+	// the edge of the face that they don't intersect any (padded) adjacent face.
+	if aFace == face(fe.edge.V1.Vector) {
+		x, y := validFaceXYZToUV(aFace, fe.edge.V0.Vector)
+		fe.a = r2.Point{x, y}
+		x, y = validFaceXYZToUV(aFace, fe.edge.V1.Vector)
+		fe.b = r2.Point{x, y}
+
+		maxUV := 1 - cellPadding
+		if math.Abs(fe.a.X) <= maxUV && math.Abs(fe.a.Y) <= maxUV &&
+			math.Abs(fe.b.X) <= maxUV && math.Abs(fe.b.Y) <= maxUV {
+			allEdges[aFace] = append(allEdges[aFace], fe)
+			return
+		}
+	}
+
+	// Otherwise, we simply clip the edge to all six faces.
+ for face := 0; face < 6; face++ { + if aClip, bClip, intersects := ClipToPaddedFace(fe.edge.V0, fe.edge.V1, face, cellPadding); intersects { + fe.a = aClip + fe.b = bClip + allEdges[face] = append(allEdges[face], fe) + } + } +} + +// updateFaceEdges adds or removes the various edges from the index. +// An edge is added if shapes[id] is not nil, and removed otherwise. +func (s *ShapeIndex) updateFaceEdges(face int, faceEdges []faceEdge, t *tracker) { + numEdges := len(faceEdges) + if numEdges == 0 && len(t.shapeIDs) == 0 { + return + } + + // Create the initial clippedEdge for each faceEdge. Additional clipped + // edges are created when edges are split between child cells. We create + // two arrays, one containing the edge data and another containing pointers + // to those edges, so that during the recursion we only need to copy + // pointers in order to propagate an edge to the correct child. + clippedEdges := make([]*clippedEdge, numEdges) + bound := r2.EmptyRect() + for e := 0; e < numEdges; e++ { + clipped := &clippedEdge{ + faceEdge: &faceEdges[e], + } + clipped.bound = r2.RectFromPoints(faceEdges[e].a, faceEdges[e].b) + clippedEdges[e] = clipped + bound = bound.AddRect(clipped.bound) + } + + // Construct the initial face cell containing all the edges, and then update + // all the edges in the index recursively. + faceID := CellIDFromFace(face) + pcell := PaddedCellFromCellID(faceID, cellPadding) + + disjointFromIndex := s.isFirstUpdate() + if numEdges > 0 { + shrunkID := s.shrinkToFit(pcell, bound) + if shrunkID != pcell.id { + // All the edges are contained by some descendant of the face cell. We + // can save a lot of work by starting directly with that cell, but if we + // are in the interior of at least one shape then we need to create + // index entries for the cells we are skipping over. + s.skipCellRange(faceID.RangeMin(), shrunkID.RangeMin(), t, disjointFromIndex) + pcell = PaddedCellFromCellID(shrunkID, cellPadding) + s.updateEdges(pcell, clippedEdges, t, disjointFromIndex) + s.skipCellRange(shrunkID.RangeMax().Next(), faceID.RangeMax().Next(), t, disjointFromIndex) + return + } + } + + // Otherwise (no edges, or no shrinking is possible), subdivide normally. + s.updateEdges(pcell, clippedEdges, t, disjointFromIndex) +} + +// shrinkToFit shrinks the PaddedCell to fit within the given bounds. +func (s *ShapeIndex) shrinkToFit(pcell *PaddedCell, bound r2.Rect) CellID { + shrunkID := pcell.ShrinkToFit(bound) + + if !s.isFirstUpdate() && shrunkID != pcell.CellID() { + // Don't shrink any smaller than the existing index cells, since we need + // to combine the new edges with those cells. + iter := s.Iterator() + if iter.LocateCellID(shrunkID) == Indexed { + shrunkID = iter.CellID() + } + } + return shrunkID +} + +// skipCellRange skips over the cells in the given range, creating index cells if we are +// currently in the interior of at least one shape. +func (s *ShapeIndex) skipCellRange(begin, end CellID, t *tracker, disjointFromIndex bool) { + // If we aren't in the interior of a shape, then skipping over cells is easy. + if len(t.shapeIDs) == 0 { + return + } + + // Otherwise generate the list of cell ids that we need to visit, and create + // an index entry for each one. 
+ skipped := CellUnionFromRange(begin, end) + for _, cell := range skipped { + var clippedEdges []*clippedEdge + s.updateEdges(PaddedCellFromCellID(cell, cellPadding), clippedEdges, t, disjointFromIndex) + } +} + +// updateEdges adds or removes the given edges whose bounding boxes intersect a +// given cell. disjointFromIndex is an optimization hint indicating that cellMap +// does not contain any entries that overlap the given cell. +func (s *ShapeIndex) updateEdges(pcell *PaddedCell, edges []*clippedEdge, t *tracker, disjointFromIndex bool) { + // This function is recursive with a maximum recursion depth of 30 (maxLevel). + + // Incremental updates are handled as follows. All edges being added or + // removed are combined together in edges, and all shapes with interiors + // are tracked using tracker. We subdivide recursively as usual until we + // encounter an existing index cell. At this point we absorb the index + // cell as follows: + // + // - Edges and shapes that are being removed are deleted from edges and + // tracker. + // - All remaining edges and shapes from the index cell are added to + // edges and tracker. + // - Continue subdividing recursively, creating new index cells as needed. + // - When the recursion gets back to the cell that was absorbed, we + // restore edges and tracker to their previous state. + // + // Note that the only reason that we include removed shapes in the recursive + // subdivision process is so that we can find all of the index cells that + // contain those shapes efficiently, without maintaining an explicit list of + // index cells for each shape (which would be expensive in terms of memory). + indexCellAbsorbed := false + if !disjointFromIndex { + // There may be existing index cells contained inside pcell. If we + // encounter such a cell, we need to combine the edges being updated with + // the existing cell contents by absorbing the cell. + iter := s.Iterator() + r := iter.LocateCellID(pcell.id) + if r == Disjoint { + disjointFromIndex = true + } else if r == Indexed { + // Absorb the index cell by transferring its contents to edges and + // deleting it. We also start tracking the interior of any new shapes. + s.absorbIndexCell(pcell, iter, edges, t) + indexCellAbsorbed = true + disjointFromIndex = true + } else { + // DCHECK_EQ(SUBDIVIDED, r) + } + } + + // If there are existing index cells below us, then we need to keep + // subdividing so that we can merge with those cells. Otherwise, + // makeIndexCell checks if the number of edges is small enough, and creates + // an index cell if possible (returning true when it does so). + if !disjointFromIndex || !s.makeIndexCell(pcell, edges, t) { + // TODO(roberts): If it turns out to have memory problems when there + // are 10M+ edges in the index, look into pre-allocating space so we + // are not always appending. + childEdges := [2][2][]*clippedEdge{} // [i][j] + + // Compute the middle of the padded cell, defined as the rectangle in + // (u,v)-space that belongs to all four (padded) children. By comparing + // against the four boundaries of middle we can determine which children + // each edge needs to be propagated to. + middle := pcell.Middle() + + // Build up a vector edges to be passed to each child cell. The (i,j) + // directions are left (i=0), right (i=1), lower (j=0), and upper (j=1). + // Note that the vast majority of edges are propagated to a single child. + for _, edge := range edges { + if edge.bound.X.Hi <= middle.X.Lo { + // Edge is entirely contained in the two left children. 
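+			// (clipVAxis splits the edge between the lower (j=0) and upper
+			// (j=1) children, returning nil for any child it does not reach.)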
+ a, b := s.clipVAxis(edge, middle.Y) + if a != nil { + childEdges[0][0] = append(childEdges[0][0], a) + } + if b != nil { + childEdges[0][1] = append(childEdges[0][1], b) + } + } else if edge.bound.X.Lo >= middle.X.Hi { + // Edge is entirely contained in the two right children. + a, b := s.clipVAxis(edge, middle.Y) + if a != nil { + childEdges[1][0] = append(childEdges[1][0], a) + } + if b != nil { + childEdges[1][1] = append(childEdges[1][1], b) + } + } else if edge.bound.Y.Hi <= middle.Y.Lo { + // Edge is entirely contained in the two lower children. + if a := s.clipUBound(edge, 1, middle.X.Hi); a != nil { + childEdges[0][0] = append(childEdges[0][0], a) + } + if b := s.clipUBound(edge, 0, middle.X.Lo); b != nil { + childEdges[1][0] = append(childEdges[1][0], b) + } + } else if edge.bound.Y.Lo >= middle.Y.Hi { + // Edge is entirely contained in the two upper children. + if a := s.clipUBound(edge, 1, middle.X.Hi); a != nil { + childEdges[0][1] = append(childEdges[0][1], a) + } + if b := s.clipUBound(edge, 0, middle.X.Lo); b != nil { + childEdges[1][1] = append(childEdges[1][1], b) + } + } else { + // The edge bound spans all four children. The edge + // itself intersects either three or four padded children. + left := s.clipUBound(edge, 1, middle.X.Hi) + a, b := s.clipVAxis(left, middle.Y) + if a != nil { + childEdges[0][0] = append(childEdges[0][0], a) + } + if b != nil { + childEdges[0][1] = append(childEdges[0][1], b) + } + right := s.clipUBound(edge, 0, middle.X.Lo) + a, b = s.clipVAxis(right, middle.Y) + if a != nil { + childEdges[1][0] = append(childEdges[1][0], a) + } + if b != nil { + childEdges[1][1] = append(childEdges[1][1], b) + } + } + } + + // Now recursively update the edges in each child. We call the children in + // increasing order of CellID so that when the index is first constructed, + // all insertions into cellMap are at the end (which is much faster). + for pos := 0; pos < 4; pos++ { + i, j := pcell.ChildIJ(pos) + if len(childEdges[i][j]) > 0 || len(t.shapeIDs) > 0 { + s.updateEdges(PaddedCellFromParentIJ(pcell, i, j), childEdges[i][j], + t, disjointFromIndex) + } + } + } + + if indexCellAbsorbed { + // Restore the state for any edges being removed that we are tracking. + t.restoreStateBefore(s.pendingAdditionsPos) + } +} + +// makeIndexCell builds an indexCell from the given padded cell and set of edges and adds +// it to the index. If the cell or edges are empty, no cell is added. +func (s *ShapeIndex) makeIndexCell(p *PaddedCell, edges []*clippedEdge, t *tracker) bool { + // If the cell is empty, no index cell is needed. (In most cases this + // situation is detected before we get to this point, but this can happen + // when all shapes in a cell are removed.) + if len(edges) == 0 && len(t.shapeIDs) == 0 { + return true + } + + // Count the number of edges that have not reached their maximum level yet. + // Return false if there are too many such edges. + count := 0 + for _, ce := range edges { + if p.Level() < ce.faceEdge.maxLevel { + count++ + } + + if count > s.maxEdgesPerCell { + return false + } + } + + // Possible optimization: Continue subdividing as long as exactly one child + // of the padded cell intersects the given edges. 
This can be done by finding
+	// the bounding box of all the edges and calling ShrinkToFit:
+	//
+	// cellID = p.ShrinkToFit(RectBound(edges));
+	//
+	// Currently this is not beneficial; it slows down construction by 4-25%
+	// (mainly computing the union of the bounding rectangles) and also slows
+	// down queries (since more recursive clipping is required to get down to
+	// the level of a spatial index cell). But it may be worth trying again
+	// once containsCenter is computed and all algorithms are modified to
+	// take advantage of it.
+
+	// We update the InteriorTracker as follows. For every Cell in the index
+	// we construct two edges: one edge from the entry vertex of the cell to its
+	// center, and one from the cell center to its exit vertex. Here entry
+	// and exit refer to the CellID ordering, i.e. the order in which points
+	// are encountered along the S2 space-filling curve. The exit vertex then
+	// becomes the entry vertex for the next cell in the index, unless there are
+	// one or more empty intervening cells, in which case the InteriorTracker
+	// state is unchanged because the intervening cells have no edges.
+
+	// Shift the InteriorTracker focus point to the center of the current cell.
+	if t.isActive && len(edges) != 0 {
+		if !t.atCellID(p.id) {
+			t.moveTo(p.EntryVertex())
+		}
+		t.drawTo(p.Center())
+		s.testAllEdges(edges, t)
+	}
+
+	// Allocate and fill a new index cell. To get the total number of shapes we
+	// need to merge the shapes associated with the intersecting edges together
+	// with the shapes that happen to contain the cell center.
+	cshapeIDs := t.shapeIDs
+	numShapes := s.countShapes(edges, cshapeIDs)
+	cell := NewShapeIndexCell(numShapes)
+
+	// To fill the index cell we merge the two sources of shapes: edge shapes
+	// (those that have at least one edge that intersects this cell), and
+	// containing shapes (those that contain the cell center). We keep track
+	// of the index of the next intersecting edge and the next containing shape
+	// as we go along. Both sets of shape ids are already sorted.
+	eNext := 0
+	cNextIdx := 0
+	for i := 0; i < numShapes; i++ {
+		var clipped *clippedShape
+		// Determine the next shape id from the edge list and from the
+		// containing-shapes list.
+		eshapeID := int32(s.Len())
+		cshapeID := eshapeID // Sentinels
+
+		if eNext != len(edges) {
+			eshapeID = edges[eNext].faceEdge.shapeID
+		}
+		if cNextIdx < len(cshapeIDs) {
+			cshapeID = cshapeIDs[cNextIdx]
+		}
+		eBegin := eNext
+		if cshapeID < eshapeID {
+			// The entire cell is in the shape interior.
+			clipped = newClippedShape(cshapeID, 0)
+			clipped.containsCenter = true
+			cNextIdx++
+		} else {
+			// Count the number of edges for this shape and allocate space for them.
+			for eNext < len(edges) && edges[eNext].faceEdge.shapeID == eshapeID {
+				eNext++
+			}
+			clipped = newClippedShape(eshapeID, eNext-eBegin)
+			for e := eBegin; e < eNext; e++ {
+				clipped.edges[e-eBegin] = edges[e].faceEdge.edgeID
+			}
+			if cshapeID == eshapeID {
+				clipped.containsCenter = true
+				cNextIdx++
+			}
+		}
+		cell.shapes[i] = clipped
+	}
+
+	// Add this cell to the map.
+	s.cellMap[p.id] = cell
+	s.cells = append(s.cells, p.id)
+
+	// Shift the tracker focus point to the exit vertex of this cell.
+	if t.isActive && len(edges) != 0 {
+		t.drawTo(p.ExitVertex())
+		s.testAllEdges(edges, t)
+		t.setNextCellID(p.id.Next())
+	}
+	return true
+}
+
+// updateBound updates the specified endpoint of the given clipped edge and returns the
+// resulting clipped edge.
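+//
+// For example (editor's sketch, not part of the upstream source): clipping
+// the high u-end of an edge to u=0.5 while moving the high v-end to an
+// interpolated v=0.3 would be
+//
+//	c := s.updateBound(edge, 1, 0.5, 1, 0.3)
+//	// c.bound.X.Hi == 0.5 and c.bound.Y.Hi == 0.3; both Lo ends are
+//	// copied unchanged from edge.bound.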
+func (s *ShapeIndex) updateBound(edge *clippedEdge, uEnd int, u float64, vEnd int, v float64) *clippedEdge {
+	c := &clippedEdge{faceEdge: edge.faceEdge}
+	if uEnd == 0 {
+		c.bound.X.Lo = u
+		c.bound.X.Hi = edge.bound.X.Hi
+	} else {
+		c.bound.X.Lo = edge.bound.X.Lo
+		c.bound.X.Hi = u
+	}
+
+	if vEnd == 0 {
+		c.bound.Y.Lo = v
+		c.bound.Y.Hi = edge.bound.Y.Hi
+	} else {
+		c.bound.Y.Lo = edge.bound.Y.Lo
+		c.bound.Y.Hi = v
+	}
+
+	return c
+}
+
+// clipUBound clips the given endpoint (lo=0, hi=1) of the u-axis so that
+// it does not extend past the given value of the given edge.
+func (s *ShapeIndex) clipUBound(edge *clippedEdge, uEnd int, u float64) *clippedEdge {
+	// First check whether the edge actually requires any clipping. (Sometimes
+	// this method is called when clipping is not necessary, e.g. when one edge
+	// endpoint is in the overlap area between two padded child cells.)
+	if uEnd == 0 {
+		if edge.bound.X.Lo >= u {
+			return edge
+		}
+	} else {
+		if edge.bound.X.Hi <= u {
+			return edge
+		}
+	}
+	// We interpolate the new v-value from the endpoints of the original edge.
+	// This has two advantages: (1) we don't need to store the clipped endpoints
+	// at all, just their bounding box; and (2) it avoids the accumulation of
+	// roundoff errors due to repeated interpolations. The result needs to be
+	// clamped to ensure that it is in the appropriate range.
+	e := edge.faceEdge
+	v := edge.bound.Y.ClampPoint(interpolateFloat64(u, e.a.X, e.b.X, e.a.Y, e.b.Y))
+
+	// Determine which endpoint of the v-axis bound to update. If the edge
+	// slope is positive we update the same endpoint, otherwise we update the
+	// opposite endpoint.
+	var vEnd int
+	positiveSlope := (e.a.X > e.b.X) == (e.a.Y > e.b.Y)
+	if (uEnd == 1) == positiveSlope {
+		vEnd = 1
+	}
+	return s.updateBound(edge, uEnd, u, vEnd, v)
+}
+
+// clipVBound clips the given endpoint (lo=0, hi=1) of the v-axis so that
+// it does not extend past the given value of the given edge.
+func (s *ShapeIndex) clipVBound(edge *clippedEdge, vEnd int, v float64) *clippedEdge {
+	if vEnd == 0 {
+		if edge.bound.Y.Lo >= v {
+			return edge
+		}
+	} else {
+		if edge.bound.Y.Hi <= v {
+			return edge
+		}
+	}
+
+	// We interpolate the new u-value from the endpoints of the original edge.
+	// This has two advantages: (1) we don't need to store the clipped endpoints
+	// at all, just their bounding box; and (2) it avoids the accumulation of
+	// roundoff errors due to repeated interpolations. The result needs to be
+	// clamped to ensure that it is in the appropriate range.
+	e := edge.faceEdge
+	u := edge.bound.X.ClampPoint(interpolateFloat64(v, e.a.Y, e.b.Y, e.a.X, e.b.X))
+
+	// Determine which endpoint of the u-axis bound to update. If the edge
+	// slope is positive we update the same endpoint, otherwise we update the
+	// opposite endpoint.
+	var uEnd int
+	positiveSlope := (e.a.X > e.b.X) == (e.a.Y > e.b.Y)
+	if (vEnd == 1) == positiveSlope {
+		uEnd = 1
+	}
+	return s.updateBound(edge, uEnd, u, vEnd, v)
+}
+
+// clipVAxis returns the given edge clipped to within the boundaries of the middle
+// interval along the v-axis: a is the piece that belongs to the lower child
+// (or nil), and b is the piece that belongs to the upper child (or nil).
+func (s *ShapeIndex) clipVAxis(edge *clippedEdge, middle r1.Interval) (a, b *clippedEdge) {
+	if edge.bound.Y.Hi <= middle.Lo {
+		// Edge is entirely contained in the lower child.
+		return edge, nil
+	} else if edge.bound.Y.Lo >= middle.Hi {
+		// Edge is entirely contained in the upper child.
+		return nil, edge
+	}
+	// The edge bound spans both children.
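+	// (The first clipVBound call trims the lower child's piece so it ends at
+	// middle.Hi; the second trims the upper child's piece so it starts at
+	// middle.Lo. Both pieces share the same underlying faceEdge.)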
+ return s.clipVBound(edge, 1, middle.Hi), s.clipVBound(edge, 0, middle.Lo) +} + +// absorbIndexCell absorbs an index cell by transferring its contents to edges +// and/or "tracker", and then delete this cell from the index. If edges includes +// any edges that are being removed, this method also updates their +// InteriorTracker state to correspond to the exit vertex of this cell. +func (s *ShapeIndex) absorbIndexCell(p *PaddedCell, iter *ShapeIndexIterator, edges []*clippedEdge, t *tracker) { + // When we absorb a cell, we erase all the edges that are being removed. + // However when we are finished with this cell, we want to restore the state + // of those edges (since that is how we find all the index cells that need + // to be updated). The edges themselves are restored automatically when + // UpdateEdges returns from its recursive call, but the InteriorTracker + // state needs to be restored explicitly. + // + // Here we first update the InteriorTracker state for removed edges to + // correspond to the exit vertex of this cell, and then save the + // InteriorTracker state. This state will be restored by UpdateEdges when + // it is finished processing the contents of this cell. + if t.isActive && len(edges) != 0 && s.isShapeBeingRemoved(edges[0].faceEdge.shapeID) { + // We probably need to update the tracker. ("Probably" because + // it's possible that all shapes being removed do not have interiors.) + if !t.atCellID(p.id) { + t.moveTo(p.EntryVertex()) + } + t.drawTo(p.ExitVertex()) + t.setNextCellID(p.id.Next()) + for _, edge := range edges { + fe := edge.faceEdge + if !s.isShapeBeingRemoved(fe.shapeID) { + break // All shapes being removed come first. + } + if fe.hasInterior { + t.testEdge(fe.shapeID, fe.edge) + } + } + } + + // Save the state of the edges being removed, so that it can be restored + // when we are finished processing this cell and its children. We don't + // need to save the state of the edges being added because they aren't being + // removed from "edges" and will therefore be updated normally as we visit + // this cell and its children. + t.saveAndClearStateBefore(s.pendingAdditionsPos) + + // Create a faceEdge for each edge in this cell that isn't being removed. + var faceEdges []*faceEdge + trackerMoved := false + + cell := iter.IndexCell() + for _, clipped := range cell.shapes { + shapeID := clipped.shapeID + shape := s.Shape(shapeID) + if shape == nil { + continue // This shape is being removed. + } + + numClipped := clipped.numEdges() + + // If this shape has an interior, start tracking whether we are inside the + // shape. updateEdges wants to know whether the entry vertex of this + // cell is inside the shape, but we only know whether the center of the + // cell is inside the shape, so we need to test all the edges against the + // line segment from the cell center to the entry vertex. + edge := &faceEdge{ + shapeID: shapeID, + hasInterior: shape.Dimension() == 2, + } + + if edge.hasInterior { + t.addShape(shapeID, clipped.containsCenter) + // There might not be any edges in this entire cell (i.e., it might be + // in the interior of all shapes), so we delay updating the tracker + // until we see the first edge. 
+ if !trackerMoved && numClipped > 0 { + t.moveTo(p.Center()) + t.drawTo(p.EntryVertex()) + t.setNextCellID(p.id) + trackerMoved = true + } + } + for i := 0; i < numClipped; i++ { + edgeID := clipped.edges[i] + edge.edgeID = edgeID + edge.edge = shape.Edge(edgeID) + edge.maxLevel = maxLevelForEdge(edge.edge) + if edge.hasInterior { + t.testEdge(shapeID, edge.edge) + } + var ok bool + edge.a, edge.b, ok = ClipToPaddedFace(edge.edge.V0, edge.edge.V1, p.id.Face(), cellPadding) + if !ok { + panic("invariant failure in ShapeIndex") + } + faceEdges = append(faceEdges, edge) + } + } + // Now create a clippedEdge for each faceEdge, and put them in "new_edges". + var newEdges []*clippedEdge + for _, faceEdge := range faceEdges { + clipped := &clippedEdge{ + faceEdge: faceEdge, + bound: clippedEdgeBound(faceEdge.a, faceEdge.b, p.bound), + } + newEdges = append(newEdges, clipped) + } + + // Discard any edges from "edges" that are being removed, and append the + // remainder to "newEdges" (This keeps the edges sorted by shape id.) + for i, clipped := range edges { + if !s.isShapeBeingRemoved(clipped.faceEdge.shapeID) { + newEdges = append(newEdges, edges[i:]...) + break + } + } + + // Update the edge list and delete this cell from the index. + edges, newEdges = newEdges, edges + delete(s.cellMap, p.id) + // TODO(roberts): delete from s.Cells +} + +// testAllEdges calls the trackers testEdge on all edges from shapes that have interiors. +func (s *ShapeIndex) testAllEdges(edges []*clippedEdge, t *tracker) { + for _, edge := range edges { + if edge.faceEdge.hasInterior { + t.testEdge(edge.faceEdge.shapeID, edge.faceEdge.edge) + } + } +} + +// countShapes reports the number of distinct shapes that are either associated with the +// given edges, or that are currently stored in the InteriorTracker. +func (s *ShapeIndex) countShapes(edges []*clippedEdge, shapeIDs []int32) int { + count := 0 + lastShapeID := int32(-1) + + // next clipped shape id in the shapeIDs list. + clippedNext := int32(0) + // index of the current element in the shapeIDs list. + shapeIDidx := 0 + for _, edge := range edges { + if edge.faceEdge.shapeID == lastShapeID { + continue + } + + count++ + lastShapeID = edge.faceEdge.shapeID + + // Skip over any containing shapes up to and including this one, + // updating count as appropriate. + for ; shapeIDidx < len(shapeIDs); shapeIDidx++ { + clippedNext = shapeIDs[shapeIDidx] + if clippedNext > lastShapeID { + break + } + if clippedNext < lastShapeID { + count++ + } + } + } + + // Count any remaining containing shapes. + count += len(shapeIDs) - shapeIDidx + return count +} + +// maxLevelForEdge reports the maximum level for a given edge. +func maxLevelForEdge(edge Edge) int { + // Compute the maximum cell size for which this edge is considered long. + // The calculation does not need to be perfectly accurate, so we use Norm + // rather than Angle for speed. + cellSize := edge.V0.Sub(edge.V1.Vector).Norm() * cellSizeToLongEdgeRatio + // Now return the first level encountered during subdivision where the + // average cell size is at most cellSize. + return AvgEdgeMetric.MinLevel(cellSize) +} + +// removeShapeInternal does the actual work for removing a given shape from the index. +func (s *ShapeIndex) removeShapeInternal(removed *removedShape, allEdges [][]faceEdge, t *tracker) { + // TODO(roberts): finish the implementation of this. 
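+	// (Editor's note: a full implementation would presumably mirror
+	// addShapeInternal, rebuilding the removed shape's clipped edges from the
+	// saved removedShape data and appending them to allEdges so that
+	// updateFaceEdges can erase them from the affected index cells.)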
+}
diff --git a/vendor/github.com/golang/geo/s2/shapeutil.go b/vendor/github.com/golang/geo/s2/shapeutil.go
new file mode 100644
index 000000000..64245dfa1
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/shapeutil.go
@@ -0,0 +1,228 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// CrossingType defines different ways of reporting edge intersections.
+type CrossingType int
+
+const (
+	// CrossingTypeInterior reports intersections that occur at a point
+	// interior to both edges (i.e., not at a vertex).
+	CrossingTypeInterior CrossingType = iota
+
+	// CrossingTypeAll reports all intersections, even those where two edges
+	// intersect only because they share a common vertex.
+	CrossingTypeAll
+
+	// CrossingTypeNonAdjacent reports all intersections except for pairs of
+	// the form (AB, BC) where both edges are from the same ShapeIndex.
+	CrossingTypeNonAdjacent
+)
+
+// rangeIterator is a wrapper over ShapeIndexIterator with extra methods
+// that are useful for merging the contents of two or more ShapeIndexes.
+type rangeIterator struct {
+	it *ShapeIndexIterator
+	// The min and max leaf cell ids covered by the current cell. If done() is
+	// true, these values are larger than any valid cell id.
+	rangeMin CellID
+	rangeMax CellID
+}
+
+// newRangeIterator creates a new rangeIterator positioned at the first cell of the given index.
+func newRangeIterator(index *ShapeIndex) *rangeIterator {
+	r := &rangeIterator{
+		it: index.Iterator(),
+	}
+	r.refresh()
+	return r
+}
+
+func (r *rangeIterator) cellID() CellID             { return r.it.CellID() }
+func (r *rangeIterator) indexCell() *ShapeIndexCell { return r.it.IndexCell() }
+func (r *rangeIterator) next()                      { r.it.Next(); r.refresh() }
+func (r *rangeIterator) done() bool                 { return r.it.Done() }
+
+// seekTo positions the iterator at the first cell that overlaps or follows
+// the current range minimum of the target iterator, i.e. such that its
+// rangeMax >= target.rangeMin.
+func (r *rangeIterator) seekTo(target *rangeIterator) {
+	r.it.seek(target.rangeMin)
+	// If the current cell does not overlap target, it is possible that the
+	// previous cell is the one we are looking for. This can only happen when
+	// the previous cell contains target but has a smaller CellID.
+	if r.it.Done() || r.it.CellID().RangeMin() > target.rangeMax {
+		if r.it.Prev() && r.it.CellID().RangeMax() < target.cellID() {
+			r.it.Next()
+		}
+	}
+	r.refresh()
+}
+
+// seekBeyond positions the iterator at the first cell that follows the current
+// range of the target iterator, i.e. the first cell such that its
+// rangeMin > target.rangeMax.
+func (r *rangeIterator) seekBeyond(target *rangeIterator) {
+	r.it.seek(target.rangeMax.Next())
+	if !r.it.Done() && r.it.CellID().RangeMin() <= target.rangeMax {
+		r.it.Next()
+	}
+	r.refresh()
+}
+
+// refresh updates the iterator's rangeMin and rangeMax values.
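+//
+// As a rough illustration of how rangeIterator as a whole is meant to be
+// used when merging indexes (an editor's sketch, not upstream code), two
+// indexes can be walked in tandem like so:
+//
+//	ra, rb := newRangeIterator(indexA), newRangeIterator(indexB)
+//	for !ra.done() && !rb.done() {
+//		if ra.rangeMax < rb.rangeMin {
+//			ra.seekTo(rb) // ra is entirely before rb; catch up
+//			continue
+//		}
+//		// ... process the overlapping cells, then skip past them ...
+//		ra.seekBeyond(rb)
+//	}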
+func (r *rangeIterator) refresh() {
+	r.rangeMin = r.cellID().RangeMin()
+	r.rangeMax = r.cellID().RangeMax()
+}
+
+// referencePointForShape is a helper function for implementing various Shapes'
+// ReferencePoint functions.
+//
+// Given a shape consisting of closed polygonal loops, the interior of the
+// shape is defined as the region to the left of all edges (which must be
+// oriented consistently). This function then chooses an arbitrary point and
+// returns a ReferencePoint recording whether that point is contained by the
+// shape.
+//
+// Unlike Loop and Polygon, this method allows duplicate vertices and
+// edges, which requires some extra care with definitions. The rule that we
+// apply is that an edge and its reverse edge cancel each other: the result
+// is the same as if that edge pair were not present. Therefore shapes that
+// consist only of degenerate loop(s) are either empty or full; by convention,
+// the shape is considered full if and only if it contains an empty loop (see
+// laxPolygon for details).
+//
+// Determining whether a loop on the sphere contains a point is harder than
+// the corresponding problem in 2D plane geometry. It cannot be implemented
+// just by counting edge crossings because there is no such thing as a point
+// at infinity that is guaranteed to be outside the loop.
+//
+// This function requires that the given Shape have an interior.
+func referencePointForShape(shape Shape) ReferencePoint {
+	if shape.NumEdges() == 0 {
+		// A shape with no edges is defined to be full if and only if it
+		// contains at least one chain.
+		return OriginReferencePoint(shape.NumChains() > 0)
+	}
+	// Define a "matched" edge as one that can be paired with a corresponding
+	// reversed edge. Define a vertex as "balanced" if all of its edges are
+	// matched. In order to determine containment, we must find an unbalanced
+	// vertex. Often every vertex is unbalanced, so we start by trying an
+	// arbitrary vertex.
+	edge := shape.Edge(0)
+
+	if ref, ok := referencePointAtVertex(shape, edge.V0); ok {
+		return ref
+	}
+
+	// That didn't work, so now we do some extra work to find an unbalanced
+	// vertex (if any). Essentially we gather a list of edges and a list of
+	// reversed edges, and then sort them. The first edge that appears in one
+	// list but not the other is guaranteed to be unmatched.
+	n := shape.NumEdges()
+	var edges = make([]Edge, n)
+	var revEdges = make([]Edge, n)
+	for i := 0; i < n; i++ {
+		edge := shape.Edge(i)
+		edges[i] = edge
+		revEdges[i] = Edge{V0: edge.V1, V1: edge.V0}
+	}
+
+	sortEdges(edges)
+	sortEdges(revEdges)
+
+	for i := 0; i < n; i++ {
+		if edges[i].Cmp(revEdges[i]) == -1 { // edges[i] is unmatched
+			if ref, ok := referencePointAtVertex(shape, edges[i].V0); ok {
+				return ref
+			}
+		}
+		if revEdges[i].Cmp(edges[i]) == -1 { // revEdges[i] is unmatched
+			if ref, ok := referencePointAtVertex(shape, revEdges[i].V0); ok {
+				return ref
+			}
+		}
+	}
+
+	// All vertices are balanced, so this polygon is either empty or full except
+	// for degeneracies. By convention it is defined to be full if it contains
+	// any chain with no edges.
+	for i := 0; i < shape.NumChains(); i++ {
+		if shape.Chain(i).Length == 0 {
+			return OriginReferencePoint(true)
+		}
+	}
+
+	return OriginReferencePoint(false)
+}
+
+// referencePointAtVertex checks whether the given vertex is unbalanced; if it
+// is, it returns a ReferencePoint indicating whether the point is contained,
+// along with true. Otherwise it returns false.
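+//
+// (Editor's note: the +1/-1 weights below mark outgoing and incoming edges
+// at vTest; a vertex is "balanced" when every weight cancels, which is the
+// containsSign == 0 case handled in the body.)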
+func referencePointAtVertex(shape Shape, vTest Point) (ReferencePoint, bool) {
+	var ref ReferencePoint
+
+	// Let P be an unbalanced vertex. Vertex P is defined to be inside the
+	// region if the region contains a particular direction vector starting from
+	// P, namely the direction p.Ortho(). This can be calculated using
+	// ContainsVertexQuery.
+
+	containsQuery := NewContainsVertexQuery(vTest)
+	n := shape.NumEdges()
+	for e := 0; e < n; e++ {
+		edge := shape.Edge(e)
+		if edge.V0 == vTest {
+			containsQuery.AddEdge(edge.V1, 1)
+		}
+		if edge.V1 == vTest {
+			containsQuery.AddEdge(edge.V0, -1)
+		}
+	}
+	containsSign := containsQuery.ContainsVertex()
+	if containsSign == 0 {
+		return ref, false // There are no unmatched edges incident to this vertex.
+	}
+	ref.Point = vTest
+	ref.Contained = containsSign > 0
+
+	return ref, true
+}
+
+// containsBruteForce reports whether the given shape contains the given point.
+// Most clients should not use this method, since its running time is linear in
+// the number of shape edges. Instead clients should create a ShapeIndex and use
+// ContainsPointQuery, since this strategy is much more efficient when many
+// points need to be tested.
+//
+// Polygon boundaries are treated as being semi-open (see ContainsPointQuery
+// and VertexModel for other options).
+func containsBruteForce(shape Shape, point Point) bool {
+	if shape.Dimension() != 2 {
+		return false
+	}
+
+	refPoint := shape.ReferencePoint()
+	if refPoint.Point == point {
+		return refPoint.Contained
+	}
+
+	crosser := NewEdgeCrosser(refPoint.Point, point)
+	inside := refPoint.Contained
+	for e := 0; e < shape.NumEdges(); e++ {
+		edge := shape.Edge(e)
+		inside = inside != crosser.EdgeOrVertexCrossing(edge.V0, edge.V1)
+	}
+	return inside
+}
diff --git a/vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go b/vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go
new file mode 100644
index 000000000..2a0d82361
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go
@@ -0,0 +1,72 @@
+// Copyright 2020 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// EdgeIterator is an iterator that advances through all edges in a ShapeIndex.
+// This is different from the ShapeIndexIterator, which advances through the cells in the
+// ShapeIndex.
+type EdgeIterator struct {
+	index    *ShapeIndex
+	shapeID  int32
+	numEdges int32
+	edgeID   int32
+}
+
+// NewEdgeIterator creates a new edge iterator for the given index.
+func NewEdgeIterator(index *ShapeIndex) *EdgeIterator {
+	e := &EdgeIterator{
+		index:   index,
+		shapeID: -1,
+		edgeID:  -1,
+	}
+
+	e.Next()
+	return e
+}
+
+// ShapeID returns the current shape ID.
+func (e *EdgeIterator) ShapeID() int32 { return e.shapeID }
+
+// EdgeID returns the current edge ID.
+func (e *EdgeIterator) EdgeID() int32 { return e.edgeID }
+
+// ShapeEdgeID returns the current (shapeID, edgeID).
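+//
+// A typical loop over every edge in an index (editor's sketch, assuming an
+// existing *ShapeIndex named index) looks like:
+//
+//	for it := NewEdgeIterator(index); !it.Done(); it.Next() {
+//		id := it.ShapeEdgeID()
+//		_ = id // process the edge identified by id
+//	}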
+func (e *EdgeIterator) ShapeEdgeID() ShapeEdgeID { return ShapeEdgeID{e.shapeID, e.edgeID} } + +// Edge returns the current edge. +func (e *EdgeIterator) Edge() Edge { + return e.index.Shape(e.shapeID).Edge(int(e.edgeID)) +} + +// Done reports if the iterator is positioned at or after the last index edge. +func (e *EdgeIterator) Done() bool { return e.shapeID >= int32(len(e.index.shapes)) } + +// Next positions the iterator at the next index edge. +func (e *EdgeIterator) Next() { + e.edgeID++ + for ; e.edgeID >= e.numEdges; e.edgeID++ { + e.shapeID++ + if e.shapeID >= int32(len(e.index.shapes)) { + break + } + shape := e.index.Shape(e.shapeID) + if shape == nil { + e.numEdges = 0 + } else { + e.numEdges = int32(shape.NumEdges()) + } + e.edgeID = -1 + } +} diff --git a/vendor/github.com/golang/geo/s2/stuv.go b/vendor/github.com/golang/geo/s2/stuv.go new file mode 100644 index 000000000..7663bb398 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/stuv.go @@ -0,0 +1,427 @@ +// Copyright 2014 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import ( + "math" + + "github.com/golang/geo/r3" +) + +// +// This file contains documentation of the various coordinate systems used +// throughout the library. Most importantly, S2 defines a framework for +// decomposing the unit sphere into a hierarchy of "cells". Each cell is a +// quadrilateral bounded by four geodesics. The top level of the hierarchy is +// obtained by projecting the six faces of a cube onto the unit sphere, and +// lower levels are obtained by subdividing each cell into four children +// recursively. Cells are numbered such that sequentially increasing cells +// follow a continuous space-filling curve over the entire sphere. The +// transformation is designed to make the cells at each level fairly uniform +// in size. +// +////////////////////////// S2 Cell Decomposition ///////////////////////// +// +// The following methods define the cube-to-sphere projection used by +// the Cell decomposition. +// +// In the process of converting a latitude-longitude pair to a 64-bit cell +// id, the following coordinate systems are used: +// +// (id) +// An CellID is a 64-bit encoding of a face and a Hilbert curve position +// on that face. The Hilbert curve position implicitly encodes both the +// position of a cell and its subdivision level (see s2cellid.go). +// +// (face, i, j) +// Leaf-cell coordinates. "i" and "j" are integers in the range +// [0,(2**30)-1] that identify a particular leaf cell on the given face. +// The (i, j) coordinate system is right-handed on each face, and the +// faces are oriented such that Hilbert curves connect continuously from +// one face to the next. +// +// (face, s, t) +// Cell-space coordinates. "s" and "t" are real numbers in the range +// [0,1] that identify a point on the given face. For example, the point +// (s, t) = (0.5, 0.5) corresponds to the center of the top-level face +// cell. 
This point is also a vertex of exactly four cells at each
+// subdivision level greater than zero.
+//
+// (face, si, ti)
+//   Discrete cell-space coordinates. These are obtained by multiplying
+//   "s" and "t" by 2**31 and rounding to the nearest unsigned integer.
+//   Discrete coordinates lie in the range [0,2**31]. This coordinate
+//   system can represent the edge and center positions of all cells with
+//   no loss of precision (including non-leaf cells). In binary, each
+//   coordinate of a level-k cell center ends with a 1 followed by
+//   (30 - k) 0s. The coordinates of its edges end with (at least)
+//   (31 - k) 0s.
+//
+// (face, u, v)
+//   Cube-space coordinates in the range [-1,1]. To make the cells at each
+//   level more uniform in size after they are projected onto the sphere,
+//   we apply a nonlinear transformation of the form u=f(s), v=f(t).
+//   The (u, v) coordinates after this transformation give the actual
+//   coordinates on the cube face (modulo some 90 degree rotations) before
+//   it is projected onto the unit sphere.
+//
+// (face, u, v, w)
+//   Per-face coordinate frame. This is an extension of the (face, u, v)
+//   cube-space coordinates that adds a third axis "w" in the direction of
+//   the face normal. It is always a right-handed 3D coordinate system.
+//   Cube-space coordinates can be converted to this frame by setting w=1,
+//   while (u,v,w) coordinates can be projected onto the cube face by
+//   dividing by w, i.e. (face, u/w, v/w).
+//
+// (x, y, z)
+//   Direction vector (Point). Direction vectors are not necessarily unit
+//   length, and are often chosen to be points on the biunit cube
+//   [-1,+1]x[-1,+1]x[-1,+1]. They can be normalized to obtain the
+//   corresponding point on the unit sphere.
+//
+// (lat, lng)
+//   Latitude and longitude (LatLng). Latitudes must be between -90 and
+//   90 degrees inclusive, and longitudes must be between -180 and 180
+//   degrees inclusive.
+//
+// Note that the (i, j), (s, t), (si, ti), and (u, v) coordinate systems are
+// right-handed on all six faces.
+//
+//
+// There are a number of different projections from cell-space (s,t) to
+// cube-space (u,v): linear, quadratic, and tangent. They have the following
+// tradeoffs:
+//
+//   Linear - This is the fastest transformation, but also produces the least
+//   uniform cell sizes. Cell areas vary by a factor of about 5.2, with the
+//   largest cells at the center of each face and the smallest cells in
+//   the corners.
+//
+//   Tangent - Transforming the coordinates via Atan makes the cell sizes
+//   more uniform. The areas vary by a maximum ratio of 1.4 as opposed to a
+//   maximum ratio of 5.2. However, each call to Atan is about as expensive
+//   as all of the other calculations combined when converting from points to
+//   cell ids, i.e. it reduces performance by a factor of 3.
+//
+//   Quadratic - This is an approximation of the tangent projection that
+//   is much faster and produces cells that are almost as uniform in size.
+//   It is about 3 times faster than the tangent projection for converting
+//   cell ids to points or vice versa. Cell areas vary by a maximum ratio of
+//   about 2.1.
+//
+// Here is a table comparing the cell uniformity using each projection.
Area +// Ratio is the maximum ratio over all subdivision levels of the largest cell +// area to the smallest cell area at that level, Edge Ratio is the maximum +// ratio of the longest edge of any cell to the shortest edge of any cell at +// the same level, and Diag Ratio is the ratio of the longest diagonal of +// any cell to the shortest diagonal of any cell at the same level. +// +// Area Edge Diag +// Ratio Ratio Ratio +// ----------------------------------- +// Linear: 5.200 2.117 2.959 +// Tangent: 1.414 1.414 1.704 +// Quadratic: 2.082 1.802 1.932 +// +// The worst-case cell aspect ratios are about the same with all three +// projections. The maximum ratio of the longest edge to the shortest edge +// within the same cell is about 1.4 and the maximum ratio of the diagonals +// within the same cell is about 1.7. +// +// For Go we have chosen to use only the Quadratic approach. Other language +// implementations may offer other choices. + +const ( + // maxSiTi is the maximum value of an si- or ti-coordinate. + // It is one shift more than maxSize. The range of valid (si,ti) + // values is [0..maxSiTi]. + maxSiTi = maxSize << 1 +) + +// siTiToST converts an si- or ti-value to the corresponding s- or t-value. +// Value is capped at 1.0 because there is no DCHECK in Go. +func siTiToST(si uint32) float64 { + if si > maxSiTi { + return 1.0 + } + return float64(si) / float64(maxSiTi) +} + +// stToSiTi converts the s- or t-value to the nearest si- or ti-coordinate. +// The result may be outside the range of valid (si,ti)-values. Value of +// 0.49999999999999994 (math.NextAfter(0.5, -1)), will be incorrectly rounded up. +func stToSiTi(s float64) uint32 { + if s < 0 { + return uint32(s*maxSiTi - 0.5) + } + return uint32(s*maxSiTi + 0.5) +} + +// stToUV converts an s or t value to the corresponding u or v value. +// This is a non-linear transformation from [-1,1] to [-1,1] that +// attempts to make the cell sizes more uniform. +// This uses what the C++ version calls 'the quadratic transform'. +func stToUV(s float64) float64 { + if s >= 0.5 { + return (1 / 3.) * (4*s*s - 1) + } + return (1 / 3.) * (1 - 4*(1-s)*(1-s)) +} + +// uvToST is the inverse of the stToUV transformation. Note that it +// is not always true that uvToST(stToUV(x)) == x due to numerical +// errors. +func uvToST(u float64) float64 { + if u >= 0 { + return 0.5 * math.Sqrt(1+3*u) + } + return 1 - 0.5*math.Sqrt(1-3*u) +} + +// face returns face ID from 0 to 5 containing the r. For points on the +// boundary between faces, the result is arbitrary but deterministic. +func face(r r3.Vector) int { + f := r.LargestComponent() + switch { + case f == r3.XAxis && r.X < 0: + f += 3 + case f == r3.YAxis && r.Y < 0: + f += 3 + case f == r3.ZAxis && r.Z < 0: + f += 3 + } + return int(f) +} + +// validFaceXYZToUV given a valid face for the given point r (meaning that +// dot product of r with the face normal is positive), returns +// the corresponding u and v values, which may lie outside the range [-1,1]. +func validFaceXYZToUV(face int, r r3.Vector) (float64, float64) { + switch face { + case 0: + return r.Y / r.X, r.Z / r.X + case 1: + return -r.X / r.Y, r.Z / r.Y + case 2: + return -r.X / r.Z, -r.Y / r.Z + case 3: + return r.Z / r.X, r.Y / r.X + case 4: + return r.Z / r.Y, -r.X / r.Y + } + return -r.Y / r.Z, -r.X / r.Z +} + +// xyzToFaceUV converts a direction vector (not necessarily unit length) to +// (face, u, v) coordinates. 
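+//
+// For example (editor's sketch): the vector (1, 0.5, -0.25) has largest
+// component +x, so it lands on face 0:
+//
+//	f, u, v := xyzToFaceUV(r3.Vector{X: 1, Y: 0.5, Z: -0.25})
+//	// f == 0, u == 0.5 (Y/X), v == -0.25 (Z/X)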
+func xyzToFaceUV(r r3.Vector) (f int, u, v float64) { + f = face(r) + u, v = validFaceXYZToUV(f, r) + return f, u, v +} + +// faceUVToXYZ turns face and UV coordinates into an unnormalized 3 vector. +func faceUVToXYZ(face int, u, v float64) r3.Vector { + switch face { + case 0: + return r3.Vector{1, u, v} + case 1: + return r3.Vector{-u, 1, v} + case 2: + return r3.Vector{-u, -v, 1} + case 3: + return r3.Vector{-1, -v, -u} + case 4: + return r3.Vector{v, -1, -u} + default: + return r3.Vector{v, u, -1} + } +} + +// faceXYZToUV returns the u and v values (which may lie outside the range +// [-1, 1]) if the dot product of the point p with the given face normal is positive. +func faceXYZToUV(face int, p Point) (u, v float64, ok bool) { + switch face { + case 0: + if p.X <= 0 { + return 0, 0, false + } + case 1: + if p.Y <= 0 { + return 0, 0, false + } + case 2: + if p.Z <= 0 { + return 0, 0, false + } + case 3: + if p.X >= 0 { + return 0, 0, false + } + case 4: + if p.Y >= 0 { + return 0, 0, false + } + default: + if p.Z >= 0 { + return 0, 0, false + } + } + + u, v = validFaceXYZToUV(face, p.Vector) + return u, v, true +} + +// faceXYZtoUVW transforms the given point P to the (u,v,w) coordinate frame of the given +// face where the w-axis represents the face normal. +func faceXYZtoUVW(face int, p Point) Point { + // The result coordinates are simply the dot products of P with the (u,v,w) + // axes for the given face (see faceUVWAxes). + switch face { + case 0: + return Point{r3.Vector{p.Y, p.Z, p.X}} + case 1: + return Point{r3.Vector{-p.X, p.Z, p.Y}} + case 2: + return Point{r3.Vector{-p.X, -p.Y, p.Z}} + case 3: + return Point{r3.Vector{-p.Z, -p.Y, -p.X}} + case 4: + return Point{r3.Vector{-p.Z, p.X, -p.Y}} + default: + return Point{r3.Vector{p.Y, p.X, -p.Z}} + } +} + +// faceSiTiToXYZ transforms the (si, ti) coordinates to a (not necessarily +// unit length) Point on the given face. +func faceSiTiToXYZ(face int, si, ti uint32) Point { + return Point{faceUVToXYZ(face, stToUV(siTiToST(si)), stToUV(siTiToST(ti)))} +} + +// xyzToFaceSiTi transforms the (not necessarily unit length) Point to +// (face, si, ti) coordinates and the level the Point is at. +func xyzToFaceSiTi(p Point) (face int, si, ti uint32, level int) { + face, u, v := xyzToFaceUV(p.Vector) + si = stToSiTi(uvToST(u)) + ti = stToSiTi(uvToST(v)) + + // If the levels corresponding to si,ti are not equal, then p is not a cell + // center. The si,ti values of 0 and maxSiTi need to be handled specially + // because they do not correspond to cell centers at any valid level; they + // are mapped to level -1 by the code at the end. + level = maxLevel - findLSBSetNonZero64(uint64(si|maxSiTi)) + if level < 0 || level != maxLevel-findLSBSetNonZero64(uint64(ti|maxSiTi)) { + return face, si, ti, -1 + } + + // In infinite precision, this test could be changed to ST == SiTi. However, + // due to rounding errors, uvToST(xyzToFaceUV(faceUVToXYZ(stToUV(...)))) is + // not idempotent. On the other hand, the center is computed exactly the same + // way p was originally computed (if it is indeed the center of a Cell); + // the comparison can be exact. + if p.Vector == faceSiTiToXYZ(face, si, ti).Normalize() { + return face, si, ti, level + } + + return face, si, ti, -1 +} + +// uNorm returns the right-handed normal (not necessarily unit length) for an +// edge in the direction of the positive v-axis at the given u-value on +// the given face. (This vector is perpendicular to the plane through +// the sphere origin that contains the given edge.) 
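+//
+// For example (editor's note): uNorm(0, 0) is (0, -1, 0), the normal of the
+// plane through the origin that contains the u=0 edge of face 0.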
+func uNorm(face int, u float64) r3.Vector { + switch face { + case 0: + return r3.Vector{u, -1, 0} + case 1: + return r3.Vector{1, u, 0} + case 2: + return r3.Vector{1, 0, u} + case 3: + return r3.Vector{-u, 0, 1} + case 4: + return r3.Vector{0, -u, 1} + default: + return r3.Vector{0, -1, -u} + } +} + +// vNorm returns the right-handed normal (not necessarily unit length) for an +// edge in the direction of the positive u-axis at the given v-value on +// the given face. +func vNorm(face int, v float64) r3.Vector { + switch face { + case 0: + return r3.Vector{-v, 0, 1} + case 1: + return r3.Vector{0, -v, 1} + case 2: + return r3.Vector{0, -1, -v} + case 3: + return r3.Vector{v, -1, 0} + case 4: + return r3.Vector{1, v, 0} + default: + return r3.Vector{1, 0, v} + } +} + +// faceUVWAxes are the U, V, and W axes for each face. +var faceUVWAxes = [6][3]Point{ + {Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{0, 0, 1}}, Point{r3.Vector{1, 0, 0}}}, + {Point{r3.Vector{-1, 0, 0}}, Point{r3.Vector{0, 0, 1}}, Point{r3.Vector{0, 1, 0}}}, + {Point{r3.Vector{-1, 0, 0}}, Point{r3.Vector{0, -1, 0}}, Point{r3.Vector{0, 0, 1}}}, + {Point{r3.Vector{0, 0, -1}}, Point{r3.Vector{0, -1, 0}}, Point{r3.Vector{-1, 0, 0}}}, + {Point{r3.Vector{0, 0, -1}}, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, -1, 0}}}, + {Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, 0, -1}}}, +} + +// faceUVWFaces are the precomputed neighbors of each face. +var faceUVWFaces = [6][3][2]int{ + {{4, 1}, {5, 2}, {3, 0}}, + {{0, 3}, {5, 2}, {4, 1}}, + {{0, 3}, {1, 4}, {5, 2}}, + {{2, 5}, {1, 4}, {0, 3}}, + {{2, 5}, {3, 0}, {1, 4}}, + {{4, 1}, {3, 0}, {2, 5}}, +} + +// uvwAxis returns the given axis of the given face. +func uvwAxis(face, axis int) Point { + return faceUVWAxes[face][axis] +} + +// uvwFaces returns the face in the (u,v,w) coordinate system on the given axis +// in the given direction. +func uvwFace(face, axis, direction int) int { + return faceUVWFaces[face][axis][direction] +} + +// uAxis returns the u-axis for the given face. +func uAxis(face int) Point { + return uvwAxis(face, 0) +} + +// vAxis returns the v-axis for the given face. +func vAxis(face int) Point { + return uvwAxis(face, 1) +} + +// Return the unit-length normal for the given face. +func unitNorm(face int) Point { + return uvwAxis(face, 2) +} diff --git a/vendor/github.com/golang/geo/s2/util.go b/vendor/github.com/golang/geo/s2/util.go new file mode 100644 index 000000000..7cab746d8 --- /dev/null +++ b/vendor/github.com/golang/geo/s2/util.go @@ -0,0 +1,125 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +import "github.com/golang/geo/s1" + +// roundAngle returns the value rounded to nearest as an int32. +// This does not match C++ exactly for the case of x.5. +func roundAngle(val s1.Angle) int32 { + if val < 0 { + return int32(val - 0.5) + } + return int32(val + 0.5) +} + +// minAngle returns the smallest of the given values. 
+func minAngle(x s1.Angle, others ...s1.Angle) s1.Angle { + min := x + for _, y := range others { + if y < min { + min = y + } + } + return min +} + +// maxAngle returns the largest of the given values. +func maxAngle(x s1.Angle, others ...s1.Angle) s1.Angle { + max := x + for _, y := range others { + if y > max { + max = y + } + } + return max +} + +// minChordAngle returns the smallest of the given values. +func minChordAngle(x s1.ChordAngle, others ...s1.ChordAngle) s1.ChordAngle { + min := x + for _, y := range others { + if y < min { + min = y + } + } + return min +} + +// maxChordAngle returns the largest of the given values. +func maxChordAngle(x s1.ChordAngle, others ...s1.ChordAngle) s1.ChordAngle { + max := x + for _, y := range others { + if y > max { + max = y + } + } + return max +} + +// minFloat64 returns the smallest of the given values. +func minFloat64(x float64, others ...float64) float64 { + min := x + for _, y := range others { + if y < min { + min = y + } + } + return min +} + +// maxFloat64 returns the largest of the given values. +func maxFloat64(x float64, others ...float64) float64 { + max := x + for _, y := range others { + if y > max { + max = y + } + } + return max +} + +// minInt returns the smallest of the given values. +func minInt(x int, others ...int) int { + min := x + for _, y := range others { + if y < min { + min = y + } + } + return min +} + +// maxInt returns the largest of the given values. +func maxInt(x int, others ...int) int { + max := x + for _, y := range others { + if y > max { + max = y + } + } + return max +} + +// clampInt returns the number closest to x within the range min..max. +func clampInt(x, min, max int) int { + if x < min { + return min + } + if x > max { + return max + } + return x +} diff --git a/vendor/github.com/golang/geo/s2/wedge_relations.go b/vendor/github.com/golang/geo/s2/wedge_relations.go new file mode 100644 index 000000000..d637bb68c --- /dev/null +++ b/vendor/github.com/golang/geo/s2/wedge_relations.go @@ -0,0 +1,97 @@ +// Copyright 2017 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package s2 + +// WedgeRel enumerates the possible relation between two wedges A and B. +type WedgeRel int + +// Define the different possible relationships between two wedges. +// +// Given an edge chain (x0, x1, x2), the wedge at x1 is the region to the +// left of the edges. More precisely, it is the set of all rays from x1x0 +// (inclusive) to x1x2 (exclusive) in the *clockwise* direction. +const ( + WedgeEquals WedgeRel = iota // A and B are equal. + WedgeProperlyContains // A is a strict superset of B. + WedgeIsProperlyContained // A is a strict subset of B. + WedgeProperlyOverlaps // A-B, B-A, and A intersect B are non-empty. + WedgeIsDisjoint // A and B are disjoint. +) + +// WedgeRelation reports the relation between two non-empty wedges +// A=(a0, ab1, a2) and B=(b0, ab1, b2). 
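+//
+// For example (editor's sketch): a caller checking that loop A's wedge at a
+// shared vertex ab1 strictly contains loop B's wedge might write
+//
+//	if WedgeRelation(a0, ab1, a2, b0, b2) == WedgeProperlyContains {
+//		// A's wedge at ab1 is a strict superset of B's.
+//	}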
+func WedgeRelation(a0, ab1, a2, b0, b2 Point) WedgeRel { + // There are 6 possible edge orderings at a shared vertex (all + // of these orderings are circular, i.e. abcd == bcda): + // + // (1) a2 b2 b0 a0: A contains B + // (2) a2 a0 b0 b2: B contains A + // (3) a2 a0 b2 b0: A and B are disjoint + // (4) a2 b0 a0 b2: A and B intersect in one wedge + // (5) a2 b2 a0 b0: A and B intersect in one wedge + // (6) a2 b0 b2 a0: A and B intersect in two wedges + // + // We do not distinguish between 4, 5, and 6. + // We pay extra attention when some of the edges overlap. When edges + // overlap, several of these orderings can be satisfied, and we take + // the most specific. + if a0 == b0 && a2 == b2 { + return WedgeEquals + } + + // Cases 1, 2, 5, and 6 + if OrderedCCW(a0, a2, b2, ab1) { + // The cases with this vertex ordering are 1, 5, and 6, + if OrderedCCW(b2, b0, a0, ab1) { + return WedgeProperlyContains + } + + // We are in case 5 or 6, or case 2 if a2 == b2. + if a2 == b2 { + return WedgeIsProperlyContained + } + return WedgeProperlyOverlaps + + } + // We are in case 2, 3, or 4. + if OrderedCCW(a0, b0, b2, ab1) { + return WedgeIsProperlyContained + } + + if OrderedCCW(a0, b0, a2, ab1) { + return WedgeIsDisjoint + } + return WedgeProperlyOverlaps +} + +// WedgeContains reports whether non-empty wedge A=(a0, ab1, a2) contains B=(b0, ab1, b2). +// Equivalent to WedgeRelation == WedgeProperlyContains || WedgeEquals. +func WedgeContains(a0, ab1, a2, b0, b2 Point) bool { + // For A to contain B (where each loop interior is defined to be its left + // side), the CCW edge order around ab1 must be a2 b2 b0 a0. We split + // this test into two parts that test three vertices each. + return OrderedCCW(a2, b2, b0, ab1) && OrderedCCW(b0, a0, a2, ab1) +} + +// WedgeIntersects reports whether non-empty wedge A=(a0, ab1, a2) intersects B=(b0, ab1, b2). +// Equivalent but faster than WedgeRelation != WedgeIsDisjoint +func WedgeIntersects(a0, ab1, a2, b0, b2 Point) bool { + // For A not to intersect B (where each loop interior is defined to be + // its left side), the CCW edge order around ab1 must be a0 b2 b0 a2. + // Note that it's important to write these conditions as negatives + // (!OrderedCCW(a,b,c,o) rather than Ordered(c,b,a,o)) to get correct + // results when two vertices are the same. + return !(OrderedCCW(a0, b2, b0, ab1) && OrderedCCW(b0, a2, a0, ab1)) +} diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/.MODULE_ROOT b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/.MODULE_ROOT new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/LICENSE b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/LICENSE new file mode 100644 index 000000000..163291ed6 --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/LICENSE @@ -0,0 +1,9 @@ +MIT LICENSE + +Copyright 2020 Dustin Oprea + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/README.md b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/README.md new file mode 100644 index 000000000..bf60ef504 --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/README.md @@ -0,0 +1,10 @@ +[![Build Status](https://travis-ci.org/dsoprea/go-jpeg-image-structure/v2.svg?branch=master)](https://travis-ci.org/dsoprea/go-jpeg-image-structure/v2) +[![codecov](https://codecov.io/gh/dsoprea/go-jpeg-image-structure/branch/master/graph/badge.svg)](https://codecov.io/gh/dsoprea/go-jpeg-image-structure) +[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-jpeg-image-structure/v2)](https://goreportcard.com/report/github.com/dsoprea/go-jpeg-image-structure/v2) +[![GoDoc](https://godoc.org/github.com/dsoprea/go-jpeg-image-structure/v2?status.svg)](https://godoc.org/github.com/dsoprea/go-jpeg-image-structure/v2) + +## Overview + +Parse raw JPEG data into individual segments of data. You can print or export this data, including hash digests for each. You can also parse/modify the EXIF data and write an updated image. + +EXIF, XMP, and IPTC data can also be extracted. The provided CLI tool can print this data as well. diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/markers.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/markers.go new file mode 100644 index 000000000..a12171bd8 --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/markers.go @@ -0,0 +1,212 @@ +package jpegstructure + +import ( + "github.com/dsoprea/go-logging" +) + +const ( + // MARKER_SOI marker + MARKER_SOI = 0xd8 + + // MARKER_EOI marker + MARKER_EOI = 0xd9 + + // MARKER_SOS marker + MARKER_SOS = 0xda + + // MARKER_SOD marker + MARKER_SOD = 0x93 + + // MARKER_DQT marker + MARKER_DQT = 0xdb + + // MARKER_APP0 marker + MARKER_APP0 = 0xe0 + + // MARKER_APP1 marker + MARKER_APP1 = 0xe1 + + // MARKER_APP2 marker + MARKER_APP2 = 0xe2 + + // MARKER_APP3 marker + MARKER_APP3 = 0xe3 + + // MARKER_APP4 marker + MARKER_APP4 = 0xe4 + + // MARKER_APP5 marker + MARKER_APP5 = 0xe5 + + // MARKER_APP6 marker + MARKER_APP6 = 0xe6 + + // MARKER_APP7 marker + MARKER_APP7 = 0xe7 + + // MARKER_APP8 marker + MARKER_APP8 = 0xe8 + + // MARKER_APP10 marker + MARKER_APP10 = 0xea + + // MARKER_APP12 marker + MARKER_APP12 = 0xec + + // MARKER_APP13 marker + MARKER_APP13 = 0xed + + // MARKER_APP14 marker + MARKER_APP14 = 0xee + + // MARKER_APP15 marker + MARKER_APP15 = 0xef + + // MARKER_COM marker + MARKER_COM = 0xfe + + // MARKER_CME marker + MARKER_CME = 0x64 + + // MARKER_SIZ marker + MARKER_SIZ = 0x51 + + // MARKER_DHT marker + MARKER_DHT = 0xc4 + + // MARKER_JPG marker + MARKER_JPG = 0xc8 + + // MARKER_DAC marker + MARKER_DAC = 0xcc + + // MARKER_SOF0 marker + MARKER_SOF0 = 0xc0 + + // MARKER_SOF1 marker + MARKER_SOF1 = 0xc1 + + // MARKER_SOF2 marker + MARKER_SOF2 = 0xc2 + + // MARKER_SOF3 marker + MARKER_SOF3 = 0xc3 + + // MARKER_SOF5 marker + MARKER_SOF5 = 0xc5 + + 
// MARKER_SOF6 marker
+    MARKER_SOF6 = 0xc6
+
+    // MARKER_SOF7 marker
+    MARKER_SOF7 = 0xc7
+
+    // MARKER_SOF9 marker
+    MARKER_SOF9 = 0xc9
+
+    // MARKER_SOF10 marker
+    MARKER_SOF10 = 0xca
+
+    // MARKER_SOF11 marker
+    MARKER_SOF11 = 0xcb
+
+    // MARKER_SOF13 marker
+    MARKER_SOF13 = 0xcd
+
+    // MARKER_SOF14 marker
+    MARKER_SOF14 = 0xce
+
+    // MARKER_SOF15 marker
+    MARKER_SOF15 = 0xcf
+)
+
+var (
+    jpegLogger        = log.NewLogger("jpegstructure.jpeg")
+    jpegMagicStandard = []byte{0xff, MARKER_SOI, 0xff}
+    jpegMagic2000     = []byte{0xff, 0x4f, 0xff}
+
+    markerLen = map[byte]int{
+        0x00: 0,
+        0x01: 0,
+        0xd0: 0,
+        0xd1: 0,
+        0xd2: 0,
+        0xd3: 0,
+        0xd4: 0,
+        0xd5: 0,
+        0xd6: 0,
+        0xd7: 0,
+        0xd8: 0,
+        0xd9: 0,
+        0xda: 0,
+
+        // J2C
+        0x30: 0,
+        0x31: 0,
+        0x32: 0,
+        0x33: 0,
+        0x34: 0,
+        0x35: 0,
+        0x36: 0,
+        0x37: 0,
+        0x38: 0,
+        0x39: 0,
+        0x3a: 0,
+        0x3b: 0,
+        0x3c: 0,
+        0x3d: 0,
+        0x3e: 0,
+        0x3f: 0,
+        0x4f: 0,
+        0x92: 0,
+        0x93: 0,
+
+        // J2C extensions
+        0x74: 4,
+        0x75: 4,
+        0x77: 4,
+    }
+
+    markerNames = map[byte]string{
+        MARKER_SOI:   "SOI",
+        MARKER_EOI:   "EOI",
+        MARKER_SOS:   "SOS",
+        MARKER_SOD:   "SOD",
+        MARKER_DQT:   "DQT",
+        MARKER_APP0:  "APP0",
+        MARKER_APP1:  "APP1",
+        MARKER_APP2:  "APP2",
+        MARKER_APP3:  "APP3",
+        MARKER_APP4:  "APP4",
+        MARKER_APP5:  "APP5",
+        MARKER_APP6:  "APP6",
+        MARKER_APP7:  "APP7",
+        MARKER_APP8:  "APP8",
+        MARKER_APP10: "APP10",
+        MARKER_APP12: "APP12",
+        MARKER_APP13: "APP13",
+        MARKER_APP14: "APP14",
+        MARKER_APP15: "APP15",
+        MARKER_COM:   "COM",
+        MARKER_CME:   "CME",
+        MARKER_SIZ:   "SIZ",
+
+        MARKER_DHT: "DHT",
+        MARKER_JPG: "JPG",
+        MARKER_DAC: "DAC",
+
+        MARKER_SOF0:  "SOF0",
+        MARKER_SOF1:  "SOF1",
+        MARKER_SOF2:  "SOF2",
+        MARKER_SOF3:  "SOF3",
+        MARKER_SOF5:  "SOF5",
+        MARKER_SOF6:  "SOF6",
+        MARKER_SOF7:  "SOF7",
+        MARKER_SOF9:  "SOF9",
+        MARKER_SOF10: "SOF10",
+        MARKER_SOF11: "SOF11",
+        MARKER_SOF13: "SOF13",
+        MARKER_SOF14: "SOF14",
+        MARKER_SOF15: "SOF15",
+    }
+)
diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/media_parser.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/media_parser.go
new file mode 100644
index 000000000..e6fc60bc4
--- /dev/null
+++ b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/media_parser.go
@@ -0,0 +1,139 @@
+package jpegstructure
+
+import (
+    "bufio"
+    "bytes"
+    "image"
+    "io"
+    "os"
+
+    "image/jpeg"
+
+    "github.com/dsoprea/go-logging"
+    "github.com/dsoprea/go-utility/v2/image"
+)
+
+// JpegMediaParser is a `riimage.MediaParser` that knows how to parse JPEG
+// images.
+type JpegMediaParser struct {
+}
+
+// NewJpegMediaParser returns a new JpegMediaParser.
+func NewJpegMediaParser() *JpegMediaParser {
+
+    // TODO(dustin): Add test
+
+    return new(JpegMediaParser)
+}
+
+// Parse parses a JPEG using an `io.ReadSeeker`. Even if it fails, it will
+// return the list of segments encountered prior to the failure.
+func (jmp *JpegMediaParser) Parse(rs io.ReadSeeker, size int) (ec riimage.MediaContext, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    s := bufio.NewScanner(rs)
+
+    // Since each segment can be any size, our buffer must be allowed to grow
+    // as large as the file.
+    buffer := []byte{}
+    s.Buffer(buffer, size)
+
+    js := NewJpegSplitter(nil)
+    s.Split(js.Split)
+
+    for s.Scan() != false {
+    }
+
+    // Always return the segments that were parsed, at least until there was an
+    // error.
+    ec = js.Segments()
+
+    log.PanicIf(s.Err())
+
+    return ec, nil
+}
+
+// ParseFile parses a JPEG file.
Even if it fails, it will return the list of +// segments encountered prior to the failure. +func (jmp *JpegMediaParser) ParseFile(filepath string) (ec riimage.MediaContext, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test + + f, err := os.Open(filepath) + log.PanicIf(err) + + defer f.Close() + + stat, err := f.Stat() + log.PanicIf(err) + + size := stat.Size() + + sl, err := jmp.Parse(f, int(size)) + + // Always return the segments that were parsed, at least until there was an + // error. + ec = sl + + log.PanicIf(err) + + return ec, nil +} + +// ParseBytes parses a JPEG byte-slice. Even if it fails, it will return the +// list of segments encountered prior to the failure. +func (jmp *JpegMediaParser) ParseBytes(data []byte) (ec riimage.MediaContext, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + br := bytes.NewReader(data) + + sl, err := jmp.Parse(br, len(data)) + + // Always return the segments that were parsed, at least until there was an + // error. + ec = sl + + log.PanicIf(err) + + return ec, nil +} + +// LooksLikeFormat indicates whether the data looks like a JPEG image. +func (jmp *JpegMediaParser) LooksLikeFormat(data []byte) bool { + if len(data) < 4 { + return false + } + + l := len(data) + if data[0] != 0xff || data[1] != MARKER_SOI || data[l-2] != 0xff || data[l-1] != MARKER_EOI { + return false + } + + return true +} + +// GetImage returns an image.Image-compatible struct. +func (jmp *JpegMediaParser) GetImage(r io.Reader) (img image.Image, err error) { + img, err = jpeg.Decode(r) + log.PanicIf(err) + + return img, nil +} + +var ( + // Enforce interface conformance. + _ riimage.MediaParser = new(JpegMediaParser) +) diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment.go new file mode 100644 index 000000000..6b433bf1f --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment.go @@ -0,0 +1,352 @@ +package jpegstructure + +import ( + "bytes" + "errors" + "fmt" + + "crypto/sha1" + "encoding/hex" + + "github.com/dsoprea/go-exif/v3" + "github.com/dsoprea/go-exif/v3/common" + "github.com/dsoprea/go-iptc" + "github.com/dsoprea/go-logging" + "github.com/dsoprea/go-photoshop-info-format" + "github.com/dsoprea/go-utility/v2/image" +) + +const ( + pirIptcImageResourceId = uint16(0x0404) +) + +var ( + // exifPrefix is the prefix found at the top of an EXIF slice. This is JPEG- + // specific. + exifPrefix = []byte{'E', 'x', 'i', 'f', 0, 0} + + xmpPrefix = []byte("http://ns.adobe.com/xap/1.0/\000") + + ps30Prefix = []byte("Photoshop 3.0\000") +) + +var ( + // ErrNoXmp is returned if XMP data was requested but not found. + ErrNoXmp = errors.New("no XMP data") + + // ErrNoIptc is returned if IPTC data was requested but not found. + ErrNoIptc = errors.New("no IPTC data") + + // ErrNoPhotoshopData is returned if Photoshop info was requested but not + // found. + ErrNoPhotoshopData = errors.New("no photoshop data") +) + +// SofSegment has info read from a SOF segment. +type SofSegment struct { + // BitsPerSample is the bits-per-sample. + BitsPerSample byte + + // Width is the image width. + Width uint16 + + // Height is the image height. + Height uint16 + + // ComponentCount is the number of color components. 
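+    // (Typically 3 for YCbCr color images and 1 for grayscale.)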
+    ComponentCount byte
+}
+
+// String returns a string representation of the SOF segment.
+func (ss SofSegment) String() string {

+    // TODO(dustin): Add test
+
+    return fmt.Sprintf("SOF<BITSPERSAMPLE=(%d) WIDTH=(%d) HEIGHT=(%d) COMPONENTCOUNT=(%d)>", ss.BitsPerSample, ss.Width, ss.Height, ss.ComponentCount)
+}
+
+// SegmentVisitor describes a segment-visitor struct.
+type SegmentVisitor interface {
+    // HandleSegment is triggered for each segment encountered as well as the
+    // scan-data.
+    HandleSegment(markerId byte, markerName string, counter int, lastIsScanData bool) error
+}
+
+// SofSegmentVisitor describes a visitor that is only called for each SOF
+// segment.
+type SofSegmentVisitor interface {
+    // HandleSof is called for each encountered SOF segment.
+    HandleSof(sof *SofSegment) error
+}
+
+// Segment describes a single segment.
+type Segment struct {
+    MarkerId   byte
+    MarkerName string
+    Offset     int
+    Data       []byte
+
+    photoshopInfo map[uint16]photoshopinfo.Photoshop30InfoRecord
+    iptcTags      map[iptc.StreamTagKey][]iptc.TagData
+}
+
+// SetExif encodes and sets EXIF data into this segment.
+func (s *Segment) SetExif(ib *exif.IfdBuilder) (err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    ibe := exif.NewIfdByteEncoder()
+
+    exifData, err := ibe.EncodeToExif(ib)
+    log.PanicIf(err)
+
+    l := len(exifPrefix)
+
+    s.Data = make([]byte, l+len(exifData))
+    copy(s.Data[0:], exifPrefix)
+    copy(s.Data[l:], exifData)
+
+    return nil
+}
+
+// Exif returns an `exif.Ifd` instance for the EXIF data we currently have.
+func (s *Segment) Exif() (rootIfd *exif.Ifd, data []byte, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    l := len(exifPrefix)
+
+    rawExif := s.Data[l:]
+
+    jpegLogger.Debugf(nil, "Attempting to parse (%d) byte EXIF blob (Exif).", len(rawExif))
+
+    im, err := exifcommon.NewIfdMappingWithStandard()
+    log.PanicIf(err)
+
+    ti := exif.NewTagIndex()
+
+    _, index, err := exif.Collect(im, ti, rawExif)
+    log.PanicIf(err)
+
+    return index.RootIfd, rawExif, nil
+}
+
+// FlatExif parses the EXIF data and just returns a list of tags.
+func (s *Segment) FlatExif() (exifTags []exif.ExifTag, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    // TODO(dustin): Add test
+
+    l := len(exifPrefix)
+
+    rawExif := s.Data[l:]
+
+    jpegLogger.Debugf(nil, "Attempting to parse (%d) byte EXIF blob (FlatExif).", len(rawExif))
+
+    exifTags, _, err = exif.GetFlatExifData(rawExif, nil)
+    log.PanicIf(err)
+
+    return exifTags, nil
+}
+
+// EmbeddedString returns a string of properties that can be embedded into a
+// longer string of properties.
+func (s *Segment) EmbeddedString() string {
+    h := sha1.New()
+    h.Write(s.Data)
+
+    // TODO(dustin): Add test
+
+    digestString := hex.EncodeToString(h.Sum(nil))
+
+    return fmt.Sprintf("OFFSET=(0x%08x %10d) ID=(0x%02x) NAME=[%-5s] SIZE=(%10d) SHA1=[%s]", s.Offset, s.Offset, s.MarkerId, markerNames[s.MarkerId], len(s.Data), digestString)
+}
+
+// String returns a descriptive string.
+func (s *Segment) String() string {
+
+    // TODO(dustin): Add test
+
+    return fmt.Sprintf("Segment<%s>", s.EmbeddedString())
+}
+
+// IsExif returns true if this segment contains EXIF data.
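+// An EXIF segment is an APP1 segment whose payload begins with the six-byte
+// prefix "Exif\x00\x00"; XMP is also carried in APP1 but with a URI prefix,
+// which is why both checks below compare the payload prefix.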
+func (s *Segment) IsExif() bool {
+    if s.MarkerId != MARKER_APP1 {
+        return false
+    }
+
+    // TODO(dustin): Add test
+
+    l := len(exifPrefix)
+
+    if len(s.Data) < l {
+        return false
+    }
+
+    if bytes.Equal(s.Data[:l], exifPrefix) == false {
+        return false
+    }
+
+    return true
+}
+
+// IsXmp returns true if XMP data.
+func (s *Segment) IsXmp() bool {
+    if s.MarkerId != MARKER_APP1 {
+        return false
+    }
+
+    // TODO(dustin): Add test
+
+    l := len(xmpPrefix)
+
+    if len(s.Data) < l {
+        return false
+    }
+
+    if bytes.Equal(s.Data[:l], xmpPrefix) == false {
+        return false
+    }
+
+    return true
+}
+
+// FormattedXmp returns a formatted XML string. This only makes sense for a
+// segment comprised of XML data (like XMP).
+func (s *Segment) FormattedXmp() (formatted string, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    // TODO(dustin): Add test
+
+    if s.IsXmp() != true {
+        log.Panicf("not an XMP segment")
+    }
+
+    l := len(xmpPrefix)
+
+    raw := string(s.Data[l:])
+
+    formatted, err = FormatXml(raw)
+    log.PanicIf(err)
+
+    return formatted, nil
+}
+
+func (s *Segment) parsePhotoshopInfo() (photoshopInfo map[uint16]photoshopinfo.Photoshop30InfoRecord, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    if s.photoshopInfo != nil {
+        return s.photoshopInfo, nil
+    }
+
+    if s.MarkerId != MARKER_APP13 {
+        return nil, ErrNoPhotoshopData
+    }
+
+    l := len(ps30Prefix)
+
+    if len(s.Data) < l {
+        return nil, ErrNoPhotoshopData
+    }
+
+    if bytes.Equal(s.Data[:l], ps30Prefix) == false {
+        return nil, ErrNoPhotoshopData
+    }
+
+    data := s.Data[l:]
+    b := bytes.NewBuffer(data)
+
+    // Parse it.
+
+    pirIndex, err := photoshopinfo.ReadPhotoshop30Info(b)
+    log.PanicIf(err)
+
+    s.photoshopInfo = pirIndex
+
+    return s.photoshopInfo, nil
+}
+
+// IsIptc returns true if IPTC data.
+func (s *Segment) IsIptc() bool {
+    // TODO(dustin): Add test
+
+    // There's a cost to determining if there's IPTC data, so we won't do it
+    // more than once.
+    if s.iptcTags != nil {
+        return true
+    }
+
+    photoshopInfo, err := s.parsePhotoshopInfo()
+    if err != nil {
+        if err == ErrNoPhotoshopData {
+            return false
+        }
+
+        log.Panic(err)
+    }
+
+    // Bail if the Photoshop info doesn't have IPTC data.
+
+    _, found := photoshopInfo[pirIptcImageResourceId]
+    if found == false {
+        return false
+    }
+
+    return true
+}
+
+// Iptc parses Photoshop info (if present) and then parses the IPTC info inside
+// it (if present).
+func (s *Segment) Iptc() (tags map[iptc.StreamTagKey][]iptc.TagData, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    // Cache the parse.
+    if s.iptcTags != nil {
+        return s.iptcTags, nil
+    }
+
+    photoshopInfo, err := s.parsePhotoshopInfo()
+    log.PanicIf(err)
+
+    iptcPir, found := photoshopInfo[pirIptcImageResourceId]
+    if found == false {
+        return nil, ErrNoIptc
+    }
+
+    b := bytes.NewBuffer(iptcPir.Data)
+
+    tags, err = iptc.ParseStream(b)
+    log.PanicIf(err)
+
+    s.iptcTags = tags
+
+    return tags, nil
+}
+
+var (
+    // Enforce interface conformance.
+    _ riimage.MediaContext = new(Segment)
+)
diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment_list.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment_list.go
new file mode 100644
index 000000000..b4f4d5810
--- /dev/null
+++ b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment_list.go
@@ -0,0 +1,416 @@
+package jpegstructure
+
+import (
+    "bytes"
+    "fmt"
+    "io"
+
+    "crypto/sha1"
+    "encoding/binary"
+
+    "github.com/dsoprea/go-exif/v3"
+    "github.com/dsoprea/go-exif/v3/common"
+    "github.com/dsoprea/go-iptc"
+    "github.com/dsoprea/go-logging"
+)
+
+// SegmentList contains a slice of segments.
+type SegmentList struct {
+    segments []*Segment
+}
+
+// NewSegmentList returns a new SegmentList struct.
+func NewSegmentList(segments []*Segment) (sl *SegmentList) {
+    if segments == nil {
+        segments = make([]*Segment, 0)
+    }
+
+    return &SegmentList{
+        segments: segments,
+    }
+}
+
+// OffsetsEqual returns true if all segments have the same marker-IDs and were
+// found at the same offsets.
+func (sl *SegmentList) OffsetsEqual(o *SegmentList) bool {
+    if len(o.segments) != len(sl.segments) {
+        return false
+    }
+
+    for i, s := range o.segments {
+        if s.MarkerId != sl.segments[i].MarkerId || s.Offset != sl.segments[i].Offset {
+            return false
+        }
+    }
+
+    return true
+}
+
+// Segments returns the underlying slice of segments.
+func (sl *SegmentList) Segments() []*Segment {
+    return sl.segments
+}
+
+// Add adds another segment.
+func (sl *SegmentList) Add(s *Segment) {
+    sl.segments = append(sl.segments, s)
+}
+
+// Print prints segment info.
+func (sl *SegmentList) Print() {
+    if len(sl.segments) == 0 {
+        fmt.Printf("No segments.\n")
+    } else {
+        exifIndex, _, err := sl.FindExif()
+        if err != nil {
+            if err == exif.ErrNoExif {
+                exifIndex = -1
+            } else {
+                log.Panic(err)
+            }
+        }
+
+        xmpIndex, _, err := sl.FindXmp()
+        if err != nil {
+            if err == ErrNoXmp {
+                xmpIndex = -1
+            } else {
+                log.Panic(err)
+            }
+        }
+
+        iptcIndex, _, err := sl.FindIptc()
+        if err != nil {
+            if err == ErrNoIptc {
+                iptcIndex = -1
+            } else {
+                log.Panic(err)
+            }
+        }
+
+        for i, s := range sl.segments {
+            fmt.Printf("%2d: %s", i, s.EmbeddedString())
+
+            if i == exifIndex {
+                fmt.Printf(" [EXIF]")
+            } else if i == xmpIndex {
+                fmt.Printf(" [XMP]")
+            } else if i == iptcIndex {
+                fmt.Printf(" [IPTC]")
+            }
+
+            fmt.Printf("\n")
+        }
+    }
+}
+
+// Validate checks that all of the markers are actually located at all of the
+// recorded offsets.
+func (sl *SegmentList) Validate(data []byte) (err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    if len(sl.segments) < 2 {
+        log.Panicf("minimum segments not found")
+    }
+
+    if sl.segments[0].MarkerId != MARKER_SOI {
+        log.Panicf("first segment not SOI")
+    } else if sl.segments[len(sl.segments)-1].MarkerId != MARKER_EOI {
+        log.Panicf("last segment not EOI")
+    }
+
+    lastOffset := 0
+    for i, s := range sl.segments {
+        if lastOffset != 0 && s.Offset <= lastOffset {
+            log.Panicf("segment offset not greater than the last: SEGMENT=(%d) (0x%08x) <= (0x%08x)", i, s.Offset, lastOffset)
+        }
+
+        // The scan-data doesn't start with a marker.
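+        // Scan-data is recorded with a pseudo marker-ID of zero, so it is
+        // skipped when checking marker offsets.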
+        if s.MarkerId == 0x0 {
+            continue
+        }
+
+        o := s.Offset
+        if bytes.Compare(data[o:o+2], []byte{0xff, s.MarkerId}) != 0 {
+            log.Panicf("segment offset does not point to the start of a segment: SEGMENT=(%d) (0x%08x)", i, s.Offset)
+        }
+
+        lastOffset = o
+    }
+
+    return nil
+}
+
+// FindExif returns the segment that hosts the EXIF data (if present).
+func (sl *SegmentList) FindExif() (index int, segment *Segment, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    for i, s := range sl.segments {
+        if s.IsExif() == true {
+            return i, s, nil
+        }
+    }
+
+    return -1, nil, exif.ErrNoExif
+}
+
+// FindXmp returns the segment that hosts the XMP data (if present).
+func (sl *SegmentList) FindXmp() (index int, segment *Segment, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    for i, s := range sl.segments {
+        if s.IsXmp() == true {
+            return i, s, nil
+        }
+    }
+
+    return -1, nil, ErrNoXmp
+}
+
+// FindIptc returns the segment that hosts the IPTC data (if present).
+func (sl *SegmentList) FindIptc() (index int, segment *Segment, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    for i, s := range sl.segments {
+        if s.IsIptc() == true {
+            return i, s, nil
+        }
+    }
+
+    return -1, nil, ErrNoIptc
+}
+
+// Exif returns an `exif.Ifd` instance for the EXIF data we currently have.
+func (sl *SegmentList) Exif() (rootIfd *exif.Ifd, rawExif []byte, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    _, s, err := sl.FindExif()
+    log.PanicIf(err)
+
+    rootIfd, rawExif, err = s.Exif()
+    log.PanicIf(err)
+
+    return rootIfd, rawExif, nil
+}
+
+// Iptc returns embedded IPTC data if present.
+func (sl *SegmentList) Iptc() (tags map[iptc.StreamTagKey][]iptc.TagData, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    // TODO(dustin): Add comment and return data.
+
+    _, s, err := sl.FindIptc()
+    log.PanicIf(err)
+
+    tags, err = s.Iptc()
+    log.PanicIf(err)
+
+    return tags, nil
+}
+
+// ConstructExifBuilder returns an `exif.IfdBuilder` instance (needed for
+// modifying) preloaded with all existing tags.
+func (sl *SegmentList) ConstructExifBuilder() (rootIb *exif.IfdBuilder, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    rootIfd, _, err := sl.Exif()
+    if log.Is(err, exif.ErrNoExif) == true {
+        // No EXIF. Just create a boilerplate builder.
+
+        im := exifcommon.NewIfdMapping()
+
+        err := exifcommon.LoadStandardIfds(im)
+        log.PanicIf(err)
+
+        ti := exif.NewTagIndex()
+
+        rootIb :=
+            exif.NewIfdBuilder(
+                im,
+                ti,
+                exifcommon.IfdStandardIfdIdentity,
+                exifcommon.EncodeDefaultByteOrder)
+
+        return rootIb, nil
+    } else if err != nil {
+        log.Panic(err)
+    }
+
+    rootIb = exif.NewIfdBuilderFromExistingChain(rootIfd)
+
+    return rootIb, nil
+}
+
+// DumpExif returns an unstructured list of tags (useful when just reviewing).
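+// If no EXIF segment is present, the underlying exif.ErrNoExif is returned
+// so that callers can distinguish absence from a parse failure.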
+func (sl *SegmentList) DumpExif() (segmentIndex int, segment *Segment, exifTags []exif.ExifTag, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + segmentIndex, s, err := sl.FindExif() + if err != nil { + if err == exif.ErrNoExif { + return 0, nil, nil, err + } + + log.Panic(err) + } + + exifTags, err = s.FlatExif() + log.PanicIf(err) + + return segmentIndex, s, exifTags, nil +} + +func makeEmptyExifSegment() (s *Segment) { + + // TODO(dustin): Add test + + return &Segment{ + MarkerId: MARKER_APP1, + } +} + +// SetExif encodes and sets EXIF data into the given segment. If `index` is -1, +// append a new segment. +func (sl *SegmentList) SetExif(ib *exif.IfdBuilder) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + _, s, err := sl.FindExif() + if err != nil { + if log.Is(err, exif.ErrNoExif) == false { + log.Panic(err) + } + + s = makeEmptyExifSegment() + + prefix := sl.segments[:1] + + // Install it near the beginning where we know it's safe. We can't + // insert it after the EOI segment, and there might be more than one + // depending on implementation and/or lax adherence to the standard. + tail := append([]*Segment{s}, sl.segments[1:]...) + + sl.segments = append(prefix, tail...) + } + + err = s.SetExif(ib) + log.PanicIf(err) + + return nil +} + +// DropExif will drop the EXIF data if present. +func (sl *SegmentList) DropExif() (wasDropped bool, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + // TODO(dustin): Add test + + i, _, err := sl.FindExif() + if err == nil { + // Found. + sl.segments = append(sl.segments[:i], sl.segments[i+1:]...) + + return true, nil + } else if log.Is(err, exif.ErrNoExif) == false { + log.Panic(err) + } + + // Not found. + return false, nil +} + +// Write writes the segment data to the given `io.Writer`. +func (sl *SegmentList) Write(w io.Writer) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + offset := 0 + + for i, s := range sl.segments { + h := sha1.New() + h.Write(s.Data) + + // The scan-data will have a marker-ID of (0) because it doesn't have a + // marker-ID or length. 
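+        // Every real segment is re-emitted as 0xff, the marker-ID, and, for
+        // sized markers, a big-endian length that counts its own bytes.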
+        if s.MarkerId != 0 {
+            _, err := w.Write([]byte{0xff})
+            log.PanicIf(err)
+
+            offset++
+
+            _, err = w.Write([]byte{s.MarkerId})
+            log.PanicIf(err)
+
+            offset++
+
+            sizeLen, found := markerLen[s.MarkerId]
+            if found == false || sizeLen == 2 {
+                sizeLen = 2
+                l := uint16(len(s.Data) + sizeLen)
+
+                err = binary.Write(w, binary.BigEndian, &l)
+                log.PanicIf(err)
+
+                offset += 2
+            } else if sizeLen == 4 {
+                l := uint32(len(s.Data) + sizeLen)
+
+                err = binary.Write(w, binary.BigEndian, &l)
+                log.PanicIf(err)
+
+                offset += 4
+            } else if sizeLen != 0 {
+                log.Panicf("not a supported marker-size: SEGMENT-INDEX=(%d) MARKER-ID=(0x%02x) MARKER-SIZE-LEN=(%d)", i, s.MarkerId, sizeLen)
+            }
+        }
+
+        _, err := w.Write(s.Data)
+        log.PanicIf(err)
+
+        offset += len(s.Data)
+    }
+
+    return nil
+}
diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/splitter.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/splitter.go
new file mode 100644
index 000000000..1856beddf
--- /dev/null
+++ b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/splitter.go
@@ -0,0 +1,437 @@
+package jpegstructure
+
+import (
+    "bufio"
+    "bytes"
+    "io"
+
+    "encoding/binary"
+
+    "github.com/dsoprea/go-logging"
+)
+
+// JpegSplitter uses the Go stream splitter to divide the JPEG stream into
+// segments.
+type JpegSplitter struct {
+    lastMarkerId   byte
+    lastMarkerName string
+    counter        int
+    lastIsScanData bool
+    visitor        interface{}
+
+    currentOffset int
+    segments      *SegmentList
+
+    scandataOffset int
+}
+
+// NewJpegSplitter returns a new JpegSplitter.
+func NewJpegSplitter(visitor interface{}) *JpegSplitter {
+    return &JpegSplitter{
+        segments: NewSegmentList(nil),
+        visitor:  visitor,
+    }
+}
+
+// Segments returns all found segments.
+func (js *JpegSplitter) Segments() *SegmentList {
+    return js.segments
+}
+
+// MarkerId returns the ID of the last processed marker.
+func (js *JpegSplitter) MarkerId() byte {
+    return js.lastMarkerId
+}
+
+// MarkerName returns the name of the last-processed marker.
+func (js *JpegSplitter) MarkerName() string {
+    return js.lastMarkerName
+}
+
+// Counter returns the number of processed segments.
+func (js *JpegSplitter) Counter() int {
+    return js.counter
+}
+
+// IsScanData returns whether the last processed segment was scan-data.
+func (js *JpegSplitter) IsScanData() bool {
+    return js.lastIsScanData
+}
+
+func (js *JpegSplitter) processScanData(data []byte) (advanceBytes int, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    // Search through the segment, past all 0xff's therein, until we encounter
+    // the EOI segment.
+
+    dataLength := -1
+    for i := js.scandataOffset; i < len(data); i++ {
+        thisByte := data[i]
+
+        if i == 0 {
+            continue
+        }
+
+        lastByte := data[i-1]
+        if lastByte != 0xff {
+            continue
+        }
+
+        if thisByte == 0x00 || thisByte >= 0xd0 && thisByte <= 0xd8 {
+            continue
+        }
+
+        // After all of the other checks, this means that we're on the EOI
+        // segment.
+        if thisByte != MARKER_EOI {
+            continue
+        }
+
+        dataLength = i - 1
+        break
+    }
+
+    if dataLength == -1 {
+        // On the next pass, start on the last byte of this pass, just in case
+        // the first byte of the two-byte sequence is here.
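+        // (That is, a trailing 0xff whose companion EOI byte has not been
+        // read yet.)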
+ js.scandataOffset = len(data) - 1 + + jpegLogger.Debugf(nil, "Scan-data not fully available (%d).", len(data)) + return 0, nil + } + + js.lastIsScanData = true + js.lastMarkerId = 0 + js.lastMarkerName = "" + + // Note that we don't increment the counter since this isn't an actual + // segment. + + jpegLogger.Debugf(nil, "End of scan-data.") + + err = js.handleSegment(0x0, "!SCANDATA", 0x0, data[:dataLength]) + log.PanicIf(err) + + return dataLength, nil +} + +func (js *JpegSplitter) readSegment(data []byte) (count int, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + if js.counter == 0 { + // Verify magic bytes. + + if len(data) < 3 { + jpegLogger.Debugf(nil, "Not enough (1)") + return 0, nil + } + + if data[0] == jpegMagic2000[0] && data[1] == jpegMagic2000[1] && data[2] == jpegMagic2000[2] { + // TODO(dustin): Revisit JPEG2000 support. + log.Panicf("JPEG2000 not supported") + } + + if data[0] != jpegMagicStandard[0] || data[1] != jpegMagicStandard[1] || data[2] != jpegMagicStandard[2] { + log.Panicf("file does not look like a JPEG: (%02x) (%02x) (%02x)", data[0], data[1], data[2]) + } + } + + chunkLength := len(data) + + jpegLogger.Debugf(nil, "SPLIT: LEN=(%d) COUNTER=(%d)", chunkLength, js.counter) + + if js.scanDataIsNext() == true { + // If the last segment was the SOS, we're currently sitting on scan data. + // Search for the EOI marker afterward in order to know how much data + // there is. Return this as its own token. + // + // REF: https://stackoverflow.com/questions/26715684/parsing-jpeg-sos-marker + + advanceBytes, err := js.processScanData(data) + log.PanicIf(err) + + // This will either return 0 and implicitly request that we need more + // data and then need to run again or will return an actual byte count + // to progress by. + + return advanceBytes, nil + } else if js.lastMarkerId == MARKER_EOI { + // We have more data following the EOI, which is unexpected. There + // might be non-standard cruft at the end of the file. Terminate the + // parse because the file-structure is, technically, complete at this + // point. + + return 0, io.EOF + } else { + js.lastIsScanData = false + } + + // If we're here, we're supposed to be sitting on the 0xff bytes at the + // beginning of a segment (just before the marker). + + if data[0] != 0xff { + log.Panicf("not on new segment marker @ (%d): (%02X)", js.currentOffset, data[0]) + } + + i := 0 + found := false + for ; i < chunkLength; i++ { + jpegLogger.Debugf(nil, "Prefix check: (%d) %02X", i, data[i]) + + if data[i] != 0xff { + found = true + break + } + } + + jpegLogger.Debugf(nil, "Skipped over leading 0xFF bytes: (%d)", i) + + if found == false || i >= chunkLength { + jpegLogger.Debugf(nil, "Not enough (3)") + return 0, nil + } + + markerId := data[i] + + js.lastMarkerName = markerNames[markerId] + + sizeLen, found := markerLen[markerId] + jpegLogger.Debugf(nil, "MARKER-ID=%x SIZELEN=%v FOUND=%v", markerId, sizeLen, found) + + i++ + + b := bytes.NewBuffer(data[i:]) + payloadLength := 0 + + // marker-ID + size => 2 + + headerSize := 2 + sizeLen + + if found == false { + + // It's not one of the static-length markers. Read the length. + // + // The length is an unsigned 16-bit network/big-endian. 
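+        // For example, an APP1 segment with a 100-byte payload stores a
+        // length of 102 (0x0066), because the two length bytes count
+        // themselves.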
+
+        // marker-ID + size => 2 + 2
+        headerSize = 2 + 2
+
+        if i+2 >= chunkLength {
+            jpegLogger.Debugf(nil, "Not enough (4)")
+            return 0, nil
+        }
+
+        l := uint16(0)
+        err = binary.Read(b, binary.BigEndian, &l)
+        log.PanicIf(err)
+
+        if l < 2 {
+            log.Panicf("length of size read for non-special marker (%02x) is unexpectedly less than two.", markerId)
+        }
+
+        // (l includes the bytes of the length itself.)
+        payloadLength = int(l) - 2
+        jpegLogger.Debugf(nil, "DataLength (dynamically-sized segment): (%d)", payloadLength)
+
+        i += 2
+    } else if sizeLen > 0 {
+
+        // Accommodates the non-zero markers in our marker index, which only
+        // represent J2C extensions.
+        //
+        // The length is an unsigned 32-bit network/big-endian.
+
+        // TODO(dustin): !! This needs to be tested, but we need an image.
+
+        if sizeLen != 4 {
+            log.Panicf("known non-zero marker is not four bytes, which is not currently handled: M=(%x)", markerId)
+        }
+
+        if i+4 >= chunkLength {
+            jpegLogger.Debugf(nil, "Not enough (5)")
+            return 0, nil
+        }
+
+        l := uint32(0)
+        err = binary.Read(b, binary.BigEndian, &l)
+        log.PanicIf(err)
+
+        payloadLength = int(l) - 4
+        jpegLogger.Debugf(nil, "DataLength (four-byte-length segment): (%d)", l)
+
+        i += 4
+    }
+
+    jpegLogger.Debugf(nil, "PAYLOAD-LENGTH: %d", payloadLength)
+
+    payload := data[i:]
+
+    if payloadLength < 0 {
+        log.Panicf("payload length less than zero: (%d)", payloadLength)
+    }
+
+    i += int(payloadLength)
+
+    if i > chunkLength {
+        jpegLogger.Debugf(nil, "Not enough (6)")
+        return 0, nil
+    }
+
+    jpegLogger.Debugf(nil, "Found whole segment.")
+
+    js.lastMarkerId = markerId
+
+    payloadWindow := payload[:payloadLength]
+    err = js.handleSegment(markerId, js.lastMarkerName, headerSize, payloadWindow)
+    log.PanicIf(err)
+
+    js.counter++
+
+    jpegLogger.Debugf(nil, "Returning advance of (%d)", i)
+
+    return i, nil
+}
+
+func (js *JpegSplitter) scanDataIsNext() bool {
+    return js.lastMarkerId == MARKER_SOS
+}
+
+// Split is the base splitting function that satisfies `bufio.SplitFunc`.
+func (js *JpegSplitter) Split(data []byte, atEOF bool) (advance int, token []byte, err error) {
+    defer func() {
+        if state := recover(); state != nil {
+            err = log.Wrap(state.(error))
+        }
+    }()
+
+    for len(data) > 0 {
+        currentAdvance, err := js.readSegment(data)
+        if err != nil {
+            if err == io.EOF {
+                // We've encountered an EOI marker.
+                return 0, nil, err
+            }
+
+            log.Panic(err)
+        }
+
+        if currentAdvance == 0 {
+            if len(data) > 0 && atEOF == true {
+                // Provide a little context in the error message.
+
+                if js.scanDataIsNext() == true {
+                    // Yes, we've run into this.
+
+                    log.Panicf("scan-data is unbounded; EOI not encountered before EOF")
+                } else {
+                    log.Panicf("partial segment data encountered before scan-data")
+                }
+            }
+
+            // We don't have enough data for another segment.
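+            // Return the advance accumulated so far; the scanner will call
+            // us again with more data.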
+ break + } + + data = data[currentAdvance:] + advance += currentAdvance + } + + return advance, nil, nil +} + +func (js *JpegSplitter) parseSof(data []byte) (sof *SofSegment, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + stream := bytes.NewBuffer(data) + buffer := bufio.NewReader(stream) + + bitsPerSample, err := buffer.ReadByte() + log.PanicIf(err) + + height := uint16(0) + err = binary.Read(buffer, binary.BigEndian, &height) + log.PanicIf(err) + + width := uint16(0) + err = binary.Read(buffer, binary.BigEndian, &width) + log.PanicIf(err) + + componentCount, err := buffer.ReadByte() + log.PanicIf(err) + + sof = &SofSegment{ + BitsPerSample: bitsPerSample, + Width: width, + Height: height, + ComponentCount: componentCount, + } + + return sof, nil +} + +func (js *JpegSplitter) parseAppData(markerId byte, data []byte) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + return nil +} + +func (js *JpegSplitter) handleSegment(markerId byte, markerName string, headerSize int, payload []byte) (err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + cloned := make([]byte, len(payload)) + copy(cloned, payload) + + s := &Segment{ + MarkerId: markerId, + MarkerName: markerName, + Offset: js.currentOffset, + Data: cloned, + } + + jpegLogger.Debugf(nil, "Encountered marker (0x%02x) [%s] at offset (%d)", markerId, markerName, js.currentOffset) + + js.currentOffset += headerSize + len(payload) + + js.segments.Add(s) + + sv, ok := js.visitor.(SegmentVisitor) + if ok == true { + err = sv.HandleSegment(js.lastMarkerId, js.lastMarkerName, js.counter, js.lastIsScanData) + log.PanicIf(err) + } + + if markerId >= MARKER_SOF0 && markerId <= MARKER_SOF15 { + ssv, ok := js.visitor.(SofSegmentVisitor) + if ok == true { + sof, err := js.parseSof(payload) + log.PanicIf(err) + + err = ssv.HandleSof(sof) + log.PanicIf(err) + } + } else if markerId >= MARKER_APP0 && markerId <= MARKER_APP15 { + err := js.parseAppData(markerId, payload) + log.PanicIf(err) + } + + return nil +} diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/testing_common.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/testing_common.go new file mode 100644 index 000000000..e7169c2f0 --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/testing_common.go @@ -0,0 +1,73 @@ +package jpegstructure + +import ( + "os" + "path" + + "github.com/dsoprea/go-logging" +) + +var ( + testImageRelFilepath = "NDM_8901.jpg" +) + +var ( + moduleRootPath = "" + assetsPath = "" +) + +// GetModuleRootPath returns the root-path of the module. 
+func GetModuleRootPath() string { + if moduleRootPath == "" { + moduleRootPath = os.Getenv("JPEG_MODULE_ROOT_PATH") + if moduleRootPath != "" { + return moduleRootPath + } + + currentWd, err := os.Getwd() + log.PanicIf(err) + + currentPath := currentWd + visited := make([]string, 0) + + for { + tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") + + _, err := os.Stat(tryStampFilepath) + if err != nil && os.IsNotExist(err) != true { + log.Panic(err) + } else if err == nil { + break + } + + visited = append(visited, tryStampFilepath) + + currentPath = path.Dir(currentPath) + if currentPath == "/" { + log.Panicf("could not find module-root: %v", visited) + } + } + + moduleRootPath = currentPath + } + + return moduleRootPath +} + +// GetTestAssetsPath returns the path of the test-assets. +func GetTestAssetsPath() string { + if assetsPath == "" { + moduleRootPath := GetModuleRootPath() + assetsPath = path.Join(moduleRootPath, "assets") + } + + return assetsPath +} + +// GetTestImageFilepath returns the file-path of the common test-image. +func GetTestImageFilepath() string { + assetsPath := GetTestAssetsPath() + filepath := path.Join(assetsPath, testImageRelFilepath) + + return filepath +} diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/utility.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/utility.go new file mode 100644 index 000000000..1c618ba6d --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/utility.go @@ -0,0 +1,110 @@ +package jpegstructure + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/dsoprea/go-logging" + "github.com/go-xmlfmt/xmlfmt" +) + +// DumpBytes prints the hex for a given byte-slice. +func DumpBytes(data []byte) { + fmt.Printf("DUMP: ") + for _, x := range data { + fmt.Printf("%02x ", x) + } + + fmt.Printf("\n") +} + +// DumpBytesClause prints a Go-formatted byte-slice expression. +func DumpBytesClause(data []byte) { + fmt.Printf("DUMP: ") + + fmt.Printf("[]byte { ") + + for i, x := range data { + fmt.Printf("0x%02x", x) + + if i < len(data)-1 { + fmt.Printf(", ") + } + } + + fmt.Printf(" }\n") +} + +// DumpBytesToString returns a string of hex-encoded bytes. +func DumpBytesToString(data []byte) string { + b := new(bytes.Buffer) + + for i, x := range data { + _, err := b.WriteString(fmt.Sprintf("%02x", x)) + log.PanicIf(err) + + if i < len(data)-1 { + _, err := b.WriteRune(' ') + log.PanicIf(err) + } + } + + return b.String() +} + +// DumpBytesClauseToString returns a string of Go-formatted byte values. +func DumpBytesClauseToString(data []byte) string { + b := new(bytes.Buffer) + + for i, x := range data { + _, err := b.WriteString(fmt.Sprintf("0x%02x", x)) + log.PanicIf(err) + + if i < len(data)-1 { + _, err := b.WriteString(", ") + log.PanicIf(err) + } + } + + return b.String() +} + +// FormatXml prettifies XML data. +func FormatXml(raw string) (formatted string, err error) { + defer func() { + if state := recover(); state != nil { + err = log.Wrap(state.(error)) + } + }() + + formatted = xmlfmt.FormatXML(raw, " ", " ") + formatted = strings.TrimSpace(formatted) + + return formatted, nil +} + +// SortStringStringMap sorts a string-string dictionary and returns it as a list +// of 2-tuples. +func SortStringStringMap(data map[string]string) (sorted [][2]string) { + // Sort keys. + + sortedKeys := make([]string, len(data)) + i := 0 + for key := range data { + sortedKeys[i] = key + i++ + } + + sort.Strings(sortedKeys) + + // Build result. 
+ + sorted = make([][2]string, len(sortedKeys)) + for i, key := range sortedKeys { + sorted[i] = [2]string{key, data[key]} + } + + return sorted +} diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/.MODULE_ROOT b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/.MODULE_ROOT new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/LICENSE b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/LICENSE new file mode 100644 index 000000000..163291ed6 --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/LICENSE @@ -0,0 +1,9 @@ +MIT LICENSE + +Copyright 2020 Dustin Oprea + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go new file mode 100644 index 000000000..518bc91ad --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go @@ -0,0 +1,81 @@ +package pngstructure + +import ( + "bytes" + "fmt" + + "encoding/binary" +) + +type ChunkDecoder struct { +} + +func NewChunkDecoder() *ChunkDecoder { + return new(ChunkDecoder) +} + +func (cd *ChunkDecoder) Decode(c *Chunk) (decoded interface{}, err error) { + switch c.Type { + case "IHDR": + return cd.decodeIHDR(c) + } + + // We don't decode this type. 
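+    // (Only IHDR decoding is implemented; every other chunk type is passed
+    // through untouched.)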
+    return nil, nil
+}
+
+type ChunkIHDR struct {
+    Width             uint32
+    Height            uint32
+    BitDepth          uint8
+    ColorType         uint8
+    CompressionMethod uint8
+    FilterMethod      uint8
+    InterlaceMethod   uint8
+}
+
+func (ihdr *ChunkIHDR) String() string {
+    return fmt.Sprintf("IHDR<WIDTH=(%d) HEIGHT=(%d) BIT-DEPTH=(%d) COLOR-TYPE=(%d) COMPRESSION-METHOD=(%d) FILTER-METHOD=(%d) INTERLACE-METHOD=(%d)>",
+        ihdr.Width, ihdr.Height, ihdr.BitDepth, ihdr.ColorType, ihdr.CompressionMethod, ihdr.FilterMethod, ihdr.InterlaceMethod,
+    )
+}
+
+func (cd *ChunkDecoder) decodeIHDR(c *Chunk) (*ChunkIHDR, error) {
+    var (
+        b     = bytes.NewBuffer(c.Data)
+        ihdr  = new(ChunkIHDR)
+        readf = func(data interface{}) error {
+            return binary.Read(b, binary.BigEndian, data)
+        }
+    )
+
+    if err := readf(&ihdr.Width); err != nil {
+        return nil, err
+    }
+
+    if err := readf(&ihdr.Height); err != nil {
+        return nil, err
+    }
+
+    if err := readf(&ihdr.BitDepth); err != nil {
+        return nil, err
+    }
+
+    if err := readf(&ihdr.ColorType); err != nil {
+        return nil, err
+    }
+
+    if err := readf(&ihdr.CompressionMethod); err != nil {
+        return nil, err
+    }
+
+    if err := readf(&ihdr.FilterMethod); err != nil {
+        return nil, err
+    }
+
+    if err := readf(&ihdr.InterlaceMethod); err != nil {
+        return nil, err
+    }
+
+    return ihdr, nil
+}
diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go
new file mode 100644
index 000000000..4c8421905
--- /dev/null
+++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go
@@ -0,0 +1,85 @@
+package pngstructure
+
+import (
+    "bufio"
+    "bytes"
+    "image"
+    "io"
+    "os"
+
+    "image/png"
+
+    riimage "github.com/dsoprea/go-utility/v2/image"
+)
+
+// PngMediaParser knows how to parse a PNG stream.
+type PngMediaParser struct {
+}
+
+// NewPngMediaParser returns a new `PngMediaParser`.
+func NewPngMediaParser() riimage.MediaParser {
+    return new(PngMediaParser)
+}
+
+// Parse parses a PNG stream given a `io.ReadSeeker`.
+func (pmp *PngMediaParser) Parse(
+    rs io.ReadSeeker,
+    size int,
+) (riimage.MediaContext, error) {
+    ps := NewPngSplitter()
+    if err := ps.readHeader(rs); err != nil {
+        return nil, err
+    }
+
+    s := bufio.NewScanner(rs)
+
+    // Since each segment can be any
+    // size, our buffer must be allowed
+    // to grow as large as the file.
+    buffer := []byte{}
+    s.Buffer(buffer, size)
+    s.Split(ps.Split)
+
+    for s.Scan() {
+    }
+
+    if err := s.Err(); err != nil {
+        return nil, err
+    }
+
+    return ps.Chunks()
+}
+
+// ParseFile parses a PNG stream given a file-path.
+func (pmp *PngMediaParser) ParseFile(filepath string) (riimage.MediaContext, error) {
+    f, err := os.Open(filepath)
+    if err != nil {
+        return nil, err
+    }
+    defer f.Close()
+
+    stat, err := f.Stat()
+    if err != nil {
+        return nil, err
+    }
+
+    size := stat.Size()
+    return pmp.Parse(f, int(size))
+}
+
+// ParseBytes parses a PNG stream given a byte-slice.
+func (pmp *PngMediaParser) ParseBytes(data []byte) (riimage.MediaContext, error) {
+    br := bytes.NewReader(data)
+    return pmp.Parse(br, len(data))
+}
+
+// LooksLikeFormat returns a boolean indicating
+// whether the stream looks like a PNG image.
+func (pmp *PngMediaParser) LooksLikeFormat(data []byte) bool {
+    if len(data) < len(PngSignature) {
+        // Guard the slice below against short input.
+        return false
+    }
+
+    return bytes.Equal(data[:len(PngSignature)], PngSignature[:])
+}
+
+// GetImage returns an image.Image-compatible struct.
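+// Note that this is a full pixel decode via image/png, independent of the
+// chunk-level parsing above.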
+func (pmp *PngMediaParser) GetImage(r io.Reader) (img image.Image, err error) {
+    return png.Decode(r)
+}
diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go
new file mode 100644
index 000000000..dfe773b71
--- /dev/null
+++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go
@@ -0,0 +1,386 @@
+package pngstructure
+
+import (
+    "bytes"
+    "errors"
+    "fmt"
+    "io"
+
+    "encoding/binary"
+    "hash/crc32"
+
+    "github.com/dsoprea/go-exif/v3"
+    exifcommon "github.com/dsoprea/go-exif/v3/common"
+    riimage "github.com/dsoprea/go-utility/v2/image"
+)
+
+var (
+    PngSignature  = [8]byte{137, 'P', 'N', 'G', '\r', '\n', 26, '\n'}
+    EXifChunkType = "eXIf"
+    IHDRChunkType = "IHDR"
+)
+
+var (
+    ErrNotPng     = errors.New("not png data")
+    ErrCrcFailure = errors.New("crc failure")
+)
+
+// ChunkSlice encapsulates a slice of chunks.
+type ChunkSlice struct {
+    chunks []*Chunk
+}
+
+func NewChunkSlice(chunks []*Chunk) (*ChunkSlice, error) {
+    if len(chunks) == 0 {
+        err := errors.New("ChunkSlice must be initialized with at least one chunk (IHDR)")
+        return nil, err
+    } else if chunks[0].Type != IHDRChunkType {
+        err := errors.New("first chunk in any ChunkSlice must be an IHDR")
+        return nil, err
+    }
+
+    return &ChunkSlice{chunks}, nil
+}
+
+func NewPngChunkSlice() (*ChunkSlice, error) {
+    ihdrChunk := &Chunk{
+        Type: IHDRChunkType,
+    }
+
+    ihdrChunk.UpdateCrc32()
+
+    return NewChunkSlice([]*Chunk{ihdrChunk})
+}
+
+func (cs *ChunkSlice) String() string {
+    return fmt.Sprintf("ChunkSlice<COUNT=(%d)>", len(cs.chunks))
+}
+
+// Chunks exposes the actual slice.
+func (cs *ChunkSlice) Chunks() []*Chunk {
+    return cs.chunks
+}
+
+// WriteTo encodes and writes all chunks.
+func (cs *ChunkSlice) WriteTo(w io.Writer) error {
+    if _, err := w.Write(PngSignature[:]); err != nil {
+        return err
+    }
+
+    // TODO(dustin): !! This should respect
+    // the safe-to-copy characteristic.
+    for _, c := range cs.chunks {
+        if _, err := c.WriteTo(w); err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+// Index returns a map of chunk types to chunk slices, grouping all like chunks.
+func (cs *ChunkSlice) Index() (index map[string][]*Chunk) {
+    index = make(map[string][]*Chunk)
+    for _, c := range cs.chunks {
+        if grouped, found := index[c.Type]; found {
+            index[c.Type] = append(grouped, c)
+        } else {
+            index[c.Type] = []*Chunk{c}
+        }
+    }
+
+    return index
+}
+
+// FindExif returns the segment that hosts the EXIF data.
+func (cs *ChunkSlice) FindExif() (chunk *Chunk, err error) {
+    index := cs.Index()
+    if chunks, found := index[EXifChunkType]; found {
+        return chunks[0], nil
+    }
+
+    return nil, exif.ErrNoExif
+}
+
+// Exif returns an `exif.Ifd` instance with the existing tags.
+func (cs *ChunkSlice) Exif() (*exif.Ifd, []byte, error) {
+    chunk, err := cs.FindExif()
+    if err != nil {
+        return nil, nil, err
+    }
+
+    im, err := exifcommon.NewIfdMappingWithStandard()
+    if err != nil {
+        return nil, nil, err
+    }
+
+    ti := exif.NewTagIndex()
+
+    _, index, err := exif.Collect(im, ti, chunk.Data)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    return index.RootIfd, chunk.Data, nil
+}
+
+// ConstructExifBuilder returns an `exif.IfdBuilder` instance
+// (needed for modifying) preloaded with all existing tags.
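+// Unlike the JPEG equivalent, no boilerplate builder is created when the
+// stream has no EXIF; the underlying exif.ErrNoExif is simply propagated.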
+func (cs *ChunkSlice) ConstructExifBuilder() (*exif.IfdBuilder, error) {
+    rootIfd, _, err := cs.Exif()
+    if err != nil {
+        return nil, err
+    }
+
+    return exif.NewIfdBuilderFromExistingChain(rootIfd), nil
+}
+
+// SetExif encodes and sets EXIF data into this segment.
+func (cs *ChunkSlice) SetExif(ib *exif.IfdBuilder) error {
+    // Encode.
+
+    ibe := exif.NewIfdByteEncoder()
+
+    exifData, err := ibe.EncodeToExif(ib)
+    if err != nil {
+        return err
+    }
+
+    // Set.
+
+    exifChunk, err := cs.FindExif()
+
+    switch {
+    case err == nil:
+        // EXIF chunk already exists.
+        exifChunk.Data = exifData
+        exifChunk.Length = uint32(len(exifData))
+
+    case errors.Is(err, exif.ErrNoExif):
+        // Add an EXIF chunk for the first time.
+        exifChunk = &Chunk{
+            Type:   EXifChunkType,
+            Data:   exifData,
+            Length: uint32(len(exifData)),
+        }
+
+        // Insert exif after the IHDR chunk; it's
+        // a reliably appropriate place to put it.
+        cs.chunks = append(
+            cs.chunks[:1],
+            append(
+                []*Chunk{exifChunk},
+                cs.chunks[1:]...,
+            )...,
+        )
+
+    default:
+        return err
+    }
+
+    exifChunk.UpdateCrc32()
+    return nil
+}
+
+// PngSplitter hosts the principal `Split()`
+// method used by `bufio.Scanner`.
+type PngSplitter struct {
+    chunks        []*Chunk
+    currentOffset int
+
+    doCheckCrc bool
+    crcErrors  []string
+}
+
+func (ps *PngSplitter) Chunks() (*ChunkSlice, error) {
+    return NewChunkSlice(ps.chunks)
+}
+
+func (ps *PngSplitter) DoCheckCrc(doCheck bool) {
+    ps.doCheckCrc = doCheck
+}
+
+func (ps *PngSplitter) CrcErrors() []string {
+    return ps.crcErrors
+}
+
+func NewPngSplitter() *PngSplitter {
+    return &PngSplitter{
+        chunks:     make([]*Chunk, 0),
+        doCheckCrc: true,
+        crcErrors:  make([]string, 0),
+    }
+}
+
+// Chunk describes a single chunk.
+type Chunk struct {
+    Offset int
+    Length uint32
+    Type   string
+    Data   []byte
+    Crc    uint32
+}
+
+func (c *Chunk) String() string {
+    return fmt.Sprintf("Chunk<OFFSET=(%d) LENGTH=(%d) TYPE=[%s] CRC=(%08x)>", c.Offset, c.Length, c.Type, c.Crc)
+}
+
+func calculateCrc32(chunk *Chunk) uint32 {
+    c := crc32.NewIEEE()
+
+    c.Write([]byte(chunk.Type))
+    c.Write(chunk.Data)
+
+    return c.Sum32()
+}
+
+func (c *Chunk) UpdateCrc32() {
+    c.Crc = calculateCrc32(c)
+}
+
+func (c *Chunk) CheckCrc32() bool {
+    expected := calculateCrc32(c)
+    return c.Crc == expected
+}
+
+// Bytes encodes and returns the bytes for this chunk.
+func (c *Chunk) Bytes() ([]byte, error) {
+    if len(c.Data) != int(c.Length) {
+        return nil, errors.New("length of data not correct")
+    }
+    b := make([]byte, 0, 4+4+c.Length+4)
+    b = binary.BigEndian.AppendUint32(b, c.Length)
+    b = append(b, c.Type...)
+    b = append(b, c.Data...)
+    b = binary.BigEndian.AppendUint32(b, c.Crc)
+    return b, nil
+}
+
+// WriteTo encodes and writes the bytes for this chunk.
+func (c *Chunk) WriteTo(w io.Writer) (int, error) {
+    if len(c.Data) != int(c.Length) {
+        return 0, errors.New("length of data not correct")
+    }
+
+    var n int
+
+    b := make([]byte, 4) // uint32 buf
+
+    binary.BigEndian.PutUint32(b, c.Length)
+    if nn, err := w.Write(b); err != nil {
+        return n + nn, err
+    }
+
+    n += len(b)
+
+    if nn, err := io.WriteString(w, c.Type); err != nil {
+        return n + nn, err
+    }
+
+    n += len(c.Type)
+
+    if nn, err := w.Write(c.Data); err != nil {
+        return n + nn, err
+    }
+
+    n += len(c.Data)
+
+    binary.BigEndian.PutUint32(b, c.Crc)
+    if nn, err := w.Write(b); err != nil {
+        return n + nn, err
+    }
+
+    n += len(b)
+
+    return n, nil
+}
+
+// readHeader verifies that the PNG header bytes appear next.
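+// The signature is consumed from the reader and counted toward
+// currentOffset, so subsequent chunk offsets are absolute file offsets.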
+func (ps *PngSplitter) readHeader(r io.Reader) error {
+    var (
+        sigLen = len(PngSignature)
+        header = make([]byte, sigLen)
+    )
+
+    if _, err := r.Read(header); err != nil {
+        return err
+    }
+
+    ps.currentOffset += sigLen
+    if !bytes.Equal(header, PngSignature[:]) {
+        return ErrNotPng
+    }
+
+    return nil
+}
+
+// Split fulfills the `bufio.SplitFunc`
+// function definition for `bufio.Scanner`.
+func (ps *PngSplitter) Split(
+    data []byte,
+    atEOF bool,
+) (
+    advance int,
+    token []byte,
+    err error,
+) {
+    // We might have more than one chunk's worth, and,
+    // if `atEOF` is true, we won't be called again.
+    // We'll repeatedly try to read additional chunks,
+    // but, when we run out of the data we were given
+    // then we'll return the number of bytes for the
+    // chunks we've already completely read. Then, we'll
+    // be called again from the end of those bytes, at
+    // which point we'll indicate that we don't yet have
+    // enough for another chunk, and we should then be
+    // called with more.
+    for {
+        len_ := len(data)
+        if len_ < 8 {
+            return advance, nil, nil
+        }
+
+        length := binary.BigEndian.Uint32(data[:4])
+        type_ := string(data[4:8])
+        chunkSize := (8 + int(length) + 4)
+
+        if len_ < chunkSize {
+            return advance, nil, nil
+        }
+
+        crcIndex := 8 + length
+        crc := binary.BigEndian.Uint32(data[crcIndex : crcIndex+4])
+
+        content := make([]byte, length)
+        copy(content, data[8:8+length])
+
+        c := &Chunk{
+            Length: length,
+            Type:   type_,
+            Data:   content,
+            Crc:    crc,
+            Offset: ps.currentOffset,
+        }
+
+        ps.chunks = append(ps.chunks, c)
+
+        if !c.CheckCrc32() {
+            ps.crcErrors = append(ps.crcErrors, type_)
+
+            if ps.doCheckCrc {
+                err = ErrCrcFailure
+                return
+            }
+        }
+
+        advance += chunkSize
+        ps.currentOffset += chunkSize
+
+        data = data[chunkSize:]
+    }
+}
+
+var (
+    // Enforce interface conformance.
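+    // (This compile-time assertion fails the build if ChunkSlice ever stops
+    // satisfying riimage.MediaContext.)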
+ _ riimage.MediaContext = new(ChunkSlice) +) diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go new file mode 100644 index 000000000..42f28d282 --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go @@ -0,0 +1,77 @@ +package pngstructure + +import ( + "fmt" + "os" + "path" +) + +var ( + assetsPath = "assets" +) + +func getModuleRootPath() (string, error) { + moduleRootPath := os.Getenv("PNG_MODULE_ROOT_PATH") + if moduleRootPath != "" { + return moduleRootPath, nil + } + + currentWd, err := os.Getwd() + if err != nil { + return "", err + } + + currentPath := currentWd + visited := make([]string, 0) + + for { + tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") + + _, err := os.Stat(tryStampFilepath) + if err != nil && !os.IsNotExist(err) { + return "", err + } else if err == nil { + break + } + + visited = append(visited, tryStampFilepath) + + currentPath = path.Dir(currentPath) + if currentPath == "/" { + return "", fmt.Errorf("could not find module-root: %v", visited) + } + } + + return currentPath, nil +} + +func getTestAssetsPath() (string, error) { + if assetsPath == "" { + moduleRootPath, err := getModuleRootPath() + if err != nil { + return "", err + } + + assetsPath = path.Join(moduleRootPath, "assets") + } + + return assetsPath, nil +} + +func getTestBasicImageFilepath() (string, error) { + assetsPath, err := getTestAssetsPath() + if err != nil { + return "", err + } + + return path.Join(assetsPath, "libpng.png"), nil +} + +func getTestExifImageFilepath() (string, error) { + assetsPath, err := getTestAssetsPath() + if err != nil { + return "", err + } + + return path.Join(assetsPath, "exif.png"), nil +} diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/utility.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/utility.go new file mode 100644 index 000000000..cac6020f2 --- /dev/null +++ b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/utility.go @@ -0,0 +1,67 @@ +package pngstructure + +import ( + "bytes" + "fmt" +) + +func DumpBytes(data []byte) { + fmt.Printf("DUMP: ") + for _, x := range data { + fmt.Printf("%02x ", x) + } + + fmt.Printf("\n") +} + +func DumpBytesClause(data []byte) { + fmt.Printf("DUMP: ") + + fmt.Printf("[]byte { ") + + for i, x := range data { + fmt.Printf("0x%02x", x) + + if i < len(data)-1 { + fmt.Printf(", ") + } + } + + fmt.Printf(" }\n") +} + +func DumpBytesToString(data []byte) (string, error) { + b := new(bytes.Buffer) + + for i, x := range data { + if _, err := b.WriteString(fmt.Sprintf("%02x", x)); err != nil { + return "", err + } + + if i < len(data)-1 { + if _, err := b.WriteRune(' '); err != nil { + return "", err + } + } + } + + return b.String(), nil +} + +func DumpBytesClauseToString(data []byte) (string, error) { + b := new(bytes.Buffer) + + for i, x := range data { + if _, err := b.WriteString(fmt.Sprintf("0x%02x", x)); err != nil { + return "", err + } + + if i < len(data)-1 { + if _, err := b.WriteString(", "); err != nil { + return "", err + } + } + } + + return b.String(), nil +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 000000000..cf66309c4 --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 000000000..0c1b86793 --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, f +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, f +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 000000000..e31e35a90 --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 000000000..065ff3dfa --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. 
It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. 
+ + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 000000000..ec5a63803 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. 
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context. Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value. A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	// 	// Package user defines a User type that's stored in Contexts.
+	// 	package user
+	//
+	// 	import "golang.org/x/net/context"
+	//
+	// 	// User is the type of value stored in the Contexts.
+	// 	type User struct {...}
+	//
+	// 	// key is an unexported type for keys defined in this package.
+	// 	// This prevents collisions with keys defined in other packages.
+	// 	type key int
+	//
+	// 	// userKey is the key for user.User values in Contexts. It is
+	// 	// unexported; clients use user.NewContext and user.FromContext
+	// 	// instead of using this key directly.
+	// 	var userKey key = 0
+	//
+	// 	// NewContext returns a new Context that carries value u.
+	// 	func NewContext(ctx context.Context, u *User) context.Context {
+	// 		return context.WithValue(ctx, userKey, u)
+	// 	}
+	//
+	// 	// FromContext returns the User value stored in ctx, if any.
+	// 	func FromContext(ctx context.Context) (*User, bool) {
+	// 		u, ok := ctx.Value(userKey).(*User)
+	// 		return u, ok
+	// 	}
+	Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
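+//
+// A typical pattern (an editor's illustration, not upstream text) is to defer
+// the CancelFunc as soon as it is obtained, so the context is always released:
+//
+//	ctx, cancel := WithTimeout(Background(), 50*time.Millisecond)
+//	defer cancel() // later calls are no-ops, so this is always safe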
+type CancelFunc func() diff --git a/vendor/modules.txt b/vendor/modules.txt index a2ffe58bc..fb6f64486 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -79,6 +79,9 @@ codeberg.org/gruf/go-storage/s3 # codeberg.org/gruf/go-structr v0.8.7 ## explicit; go 1.21 codeberg.org/gruf/go-structr +# codeberg.org/superseriousbusiness/exif-terminator v0.9.0 +## explicit; go 1.21 +codeberg.org/superseriousbusiness/exif-terminator # github.com/DmitriyVTitov/size v1.5.0 ## explicit; go 1.14 github.com/DmitriyVTitov/size @@ -179,6 +182,24 @@ github.com/disintegration/imaging # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units +# github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b +## explicit; go 1.12 +github.com/dsoprea/go-exif/v3 +github.com/dsoprea/go-exif/v3/common +github.com/dsoprea/go-exif/v3/undefined +# github.com/dsoprea/go-iptc v0.0.0-20200609062250-162ae6b44feb +## explicit; go 1.13 +github.com/dsoprea/go-iptc +# github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd +## explicit; go 1.13 +github.com/dsoprea/go-logging +# github.com/dsoprea/go-photoshop-info-format v0.0.0-20200609050348-3db9b63b202c +## explicit; go 1.13 +github.com/dsoprea/go-photoshop-info-format +# github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e +## explicit; go 1.12 +github.com/dsoprea/go-utility/v2/filesystem +github.com/dsoprea/go-utility/v2/image # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize @@ -214,6 +235,9 @@ github.com/gin-gonic/gin/binding github.com/gin-gonic/gin/internal/bytesconv github.com/gin-gonic/gin/internal/json github.com/gin-gonic/gin/render +# github.com/go-errors/errors v1.1.1 +## explicit; go 1.14 +github.com/go-errors/errors # github.com/go-fed/httpsig v1.1.0 ## explicit; go 1.13 github.com/go-fed/httpsig @@ -301,6 +325,9 @@ github.com/go-swagger/go-swagger/cmd/swagger/commands/generate github.com/go-swagger/go-swagger/cmd/swagger/commands/initcmd github.com/go-swagger/go-swagger/codescan github.com/go-swagger/go-swagger/generator +# github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b +## explicit +github.com/go-xmlfmt/xmlfmt # github.com/goccy/go-json v0.10.3 ## explicit; go 1.19 github.com/goccy/go-json @@ -318,6 +345,13 @@ github.com/godbus/dbus/v5 # github.com/golang-jwt/jwt v3.2.2+incompatible ## explicit github.com/golang-jwt/jwt +# github.com/golang/geo v0.0.0-20200319012246-673a6f80352d +## explicit; go 1.12 +github.com/golang/geo/r1 +github.com/golang/geo/r2 +github.com/golang/geo/r3 +github.com/golang/geo/s1 +github.com/golang/geo/s2 # github.com/google/go-cmp v0.6.0 ## explicit; go 1.13 github.com/google/go-cmp/cmp @@ -778,6 +812,12 @@ github.com/superseriousbusiness/activity/streams/values/rfc2045 github.com/superseriousbusiness/activity/streams/values/rfc5988 github.com/superseriousbusiness/activity/streams/values/string github.com/superseriousbusiness/activity/streams/vocab +# github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe +## explicit; go 1.17 +github.com/superseriousbusiness/go-jpeg-image-structure/v2 +# github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB +## explicit; go 1.12 +github.com/superseriousbusiness/go-png-image-structure/v2 # github.com/superseriousbusiness/httpsig v1.2.0-SSB ## explicit; go 1.21 github.com/superseriousbusiness/httpsig @@ -1059,6 +1099,7 @@ golang.org/x/mod/semver # golang.org/x/net v0.27.0 ## explicit; go 1.18 golang.org/x/net/bpf +golang.org/x/net/context 
golang.org/x/net/html golang.org/x/net/html/atom golang.org/x/net/http/httpguts
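
The modules.txt entries above wire the new EXIF-stripping stack into the build: codeberg.org/superseriousbusiness/exif-terminator together with the dsoprea go-exif/go-iptc packages, the superseriousbusiness forks of go-jpeg-image-structure and go-png-image-structure, and the golang.org/x/net/context shim those packages still depend on. As a rough sketch of how a caller might drive the vendored terminator (hedged: the Terminate signature, the "jpeg" type string, and all file handling below are assumptions for illustration, not code from this patch):

```go
package main

import (
	"io"
	"os"

	terminator "codeberg.org/superseriousbusiness/exif-terminator"
)

func main() {
	in, err := os.Open("photo.jpeg") // hypothetical input path
	if err != nil {
		panic(err)
	}
	defer in.Close()

	info, err := in.Stat()
	if err != nil {
		panic(err)
	}

	// Terminate is assumed here to wrap the reader and blank out EXIF data
	// as it streams; "jpeg" selects the parser (jpeg, png, and webp being
	// the types this patch routes through the terminator).
	out, err := terminator.Terminate(in, int(info.Size()), "jpeg")
	if err != nil {
		panic(err)
	}

	dst, err := os.Create("clean.jpeg") // hypothetical output path
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	if _, err := io.Copy(dst, out); err != nil {
		panic(err)
	}
}
```

Streaming the result through an io.Reader rather than buffering the whole image keeps memory use flat for large files, which is the main reason to prefer this shape over decode-and-re-encode approaches.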