Mirror of https://github.com/superseriousbusiness/gotosocial.git, synced 2024-12-25 03:32:12 +00:00.
Grand test fixup (#138)
* start fixing up tests
* fix up tests + automate with drone
* fiddle with linting
* messing about with drone.yml
* some more fiddling
* hmmm
* add cache
* add vendor directory
* verbose
* ci updates
* update some little things
* update sig
Parent: 329a5e8144
Commit: 98263a7de6
.drone.yml: 45 changed lines

@@ -1,13 +1,37 @@
+---
+### Drone configuration file for GoToSocial.
+### Connects to https://drone.superseriousbusiness.org to perform testing, linting, and automatic builds/pushes to docker.
+###
+### For documentation on drone, see: https://docs.drone.io/
+### For documentation on drone docker pipelines in particular: https://docs.drone.io/pipeline/docker/overview/
+
 kind: pipeline
 type: docker
-name: dockerpublish
+name: default
 steps:
-- name: publish image
+# We use golangci-lint for linting.
+# See: https://golangci-lint.run/
+- name: lint
+  image: golangci/golangci-lint:v1.41.1
+  commands:
+  - golangci-lint run --timeout 5m0s --tests=false --verbose
+
+- name: test
+  image: golang:1.16.4
+  environment:
+    GTS_DB_ADDRESS: postgres
+  commands:
+  # `-count 1` => run all tests at least once
+  # `-p 1` => run maximum one test at a time
+  # `./...` => run all tests
+  - go test -count 1 -p 1 ./...
+
+- name: publish
   image: plugins/docker
   settings:
     auto_tag: true
-    username:
-      from_secret: gts_docker_username
+    username: gotosocial
    password:
      from_secret: gts_docker_password
    repo: superseriousbusiness/gotosocial
@@ -16,3 +40,16 @@ steps:
    event:
      exclude:
      - pull_request
+
+services:
+# We need this postgres service running for the test step.
+# See: https://docs.drone.io/pipeline/docker/syntax/services/
+- name: postgres
+  image: postgres
+  environment:
+    POSTGRES_PASSWORD: postgres
+---
+kind: signature
+hmac: 78dd20d97444a9e2904552d56eb52f43ad30ba27e1d897a5ea6808971f9a0ae2
+
+...

.github/workflows/golangci-lint.yml (vendored): 37 changed lines (file deleted)

@@ -1,37 +0,0 @@
-name: golangci-lint
-on:
-  push:
-    tags:
-      - v*
-    branches:
-      - main
-  pull_request:
-jobs:
-  golangci:
-    name: lint
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v2
-        with:
-          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
-          version: v1.29
-
-          # Optional: working directory, useful for monorepos
-          # working-directory: somedir
-
-          # Optional: golangci-lint command line arguments.
-          # args: --issues-exit-code=0
-
-          # Optional: show only new issues if it's a pull request. The default value is `false`.
-          # only-new-issues: true
-
-          # Optional: if set to true then the action will use pre-installed Go.
-          # skip-go-installation: true
-
-          # Optional: if set to true then the action don't cache or restore ~/go/pkg.
-          # skip-pkg-cache: true
-
-          # Optional: if set to true then the action don't cache or restore ~/.cache/go-build.
-          # skip-build-cache: true

CONTRIBUTING.md

@@ -10,9 +10,7 @@ Check the [issues](https://github.com/superseriousbusiness/gotosocial/issues) to
 
 ## Communications
 
-Before starting on something, please comment on an issue to say that you're working on it, and send a message to `@dumpsterqueer@ondergrond.org` (Mastodon) to let them know.
-
-You can also drop into the GoToSocial Matrix room [here](https://matrix.to/#/!mdShFtfScQvVSmjIKX:ondergrond.org?via=ondergrond.org).
+Before starting on something, please comment on an issue to say that you're working on it, and/or drop into the GoToSocial Matrix room [here](https://matrix.to/#/#gotosocial:superseriousbusiness.org).
 
 This is the recommended way of keeping in touch with other developers, asking direct questions about code, and letting everyone know what you're up to.
 
@@ -36,6 +34,38 @@ If there are no errors, great, you're good to go!
 
 To work with the stylesheet for templates, you need [Node.js](https://nodejs.org/en/download/), then run `yarn install` in `web/source/`. Recompiling the bundle.css is `node build.js` but can be automated with [nodemon](https://www.npmjs.com/package/nodemon) on file change: `nodemon -w style.css build.js`.
 
+### Golang forking quirks
+
+One of the quirks of Golang is that it relies on the source management path being the same as the one used within `go.mod` and in package imports within individual Go files. This makes working with forks a bit awkward.
+
+Let's say you fork GoToSocial to `github.com/yourgithubname/gotosocial`, and then clone that repository to `~/go/src/github.com/yourgithubname/gotosocial`. You will probably run into errors trying to run tests or build, so you might change your `go.mod` file so that the module is called `github.com/yourgithubname/gotosocial` instead of `github.com/superseriousbusiness/gotosocial`. But then this breaks all the imports within the project. Nightmare! So now you have to go through the source files and painstakingly replace `github.com/superseriousbusiness/gotosocial` with `github.com/yourgithubname/gotosocial`. This works OK, but when you decide to make a pull request against the original repo, all the changed paths are included! Argh!
+
+The correct solution to this is to fork, then clone the upstream repository, then set `origin` of the upstream repository to that of your fork.
+
+See [this blogpost](https://blog.sgmansfield.com/2016/06/working-with-forks-in-go/) for more details.
+
+In case this post disappears, here are the steps (slightly modified):
+
+>
+> Pull the original package from the canonical place with the standard go get command:
+>
+> `go get github.com/superseriousbusiness/gotosocial`
+>
+> Fork the repository on Github or set up whatever other remote git repo you will be using. In this case, I would go to Github and fork the repository.
+>
+> Navigate to the top level of the repository on your computer. Note that this might not be the specific package you're using:
+>
+> `cd $GOPATH/src/github.com/superseriousbusiness/gotosocial`
+>
+> Rename the current origin remote to upstream:
+>
+> `git remote rename origin upstream`
+>
+> Add your fork as origin:
+>
+> `git remote add origin git@github.com/yourgithubname/gotosocial`
+>
+
 ## Setting up your test environment
 
 GoToSocial provides a [testrig](https://github.com/superseriousbusiness/gotosocial/tree/main/testrig) with a bunch of mock packages you can use in integration tests.
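
To make the quirk above concrete, here is a small illustrative file (a hypothetical example, not part of the repository): even inside a clone of your fork, imports must keep the upstream module path, which is why renaming the remotes beats editing `go.mod`.

package main

import (
    "fmt"

    // Upstream module path, even inside a clone of your fork. Changing this
    // (and go.mod) to github.com/yourgithubname/gotosocial is exactly the
    // path churn that pollutes pull requests.
    "github.com/superseriousbusiness/gotosocial/internal/config"
)

func main() {
    // config.Empty() appears elsewhere in this commit's test code; it is
    // used here only to give the import something to do.
    fmt.Printf("%+v\n", config.Empty())
}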
Dockerfile

@@ -12,8 +12,11 @@ ADD cmd /go/src/github.com/superseriousbusiness/gotosocial/cmd
 ADD internal /go/src/github.com/superseriousbusiness/gotosocial/internal
 ADD testrig /go/src/github.com/superseriousbusiness/gotosocial/testrig
 ADD docs/swagger.go /go/src/github.com/superseriousbusiness/gotosocial/docs/swagger.go
 
+# dependencies and vendor
 ADD go.mod /go/src/github.com/superseriousbusiness/gotosocial/go.mod
 ADD go.sum /go/src/github.com/superseriousbusiness/gotosocial/go.sum
+ADD vendor /go/src/github.com/superseriousbusiness/gotosocial/vendor
 
 # move .git dir and version for versioning
 ADD .git /go/src/github.com/superseriousbusiness/gotosocial/.git
README.md

@@ -107,6 +107,10 @@ Proper documentation for running and maintaining GoToSocial will be forthcoming
 
 For now (if you want to run it pre-alpha, like a beast), check out the [quick and dirty getting started guide](https://docs.gotosocial.org/en/latest/installation_guide/quick_and_dirty/).
 
+## Contributing
+
+You wanna contribute to GtS? Great! ❤️❤️❤️ Check out the issues page to see if there's anything you wanna jump in on, and read the [CONTRIBUTING.md](./CONTRIBUTING.md) file for guidelines and setting up your dev environment.
+
 ## Contact
 
 For questions and comments, you can [join our Matrix channel](https://matrix.to/#/#gotosocial:superseriousbusiness.org) at `#gotosocial:superseriousbusiness.org`. This is the quickest way to reach the devs. You can also mail [admin@gotosocial.org](mailto:admin@gotosocial.org).
@@ -156,7 +156,7 @@ func (suite *MediaCreateTestSuite) TestStatusCreatePOSTImageHandlerSuccessful()
    }
 
    // check response
-   suite.EqualValues(http.StatusAccepted, recorder.Code)
+   suite.EqualValues(http.StatusOK, recorder.Code)
 
    result := recorder.Result()
    defer result.Body.Close()
@@ -96,7 +96,7 @@ func (suite *StatusCreateTestSuite) TestPostNewStatus() {
    "status":       {"this is a brand new status! #helloworld"},
    "spoiler_text": {"hello hello"},
    "sensitive":    {"true"},
-   "visibility_advanced": {"mutuals_only"},
+   "visibility":   {string(model.VisibilityMutualsOnly)},
    "likeable":     {"false"},
    "replyable":    {"false"},
    "federated":    {"false"},
@@ -120,7 +120,7 @@ func (suite *StatusCreateTestSuite) TestPostNewStatus() {
    assert.Equal(suite.T(), "hello hello", statusReply.SpoilerText)
    assert.Equal(suite.T(), "<p>this is a brand new status! <a href=\"http://localhost:8080/tags/helloworld\" class=\"mention hashtag\" rel=\"tag nofollow noreferrer noopener\" target=\"_blank\">#<span>helloworld</span></a></p>", statusReply.Content)
    assert.True(suite.T(), statusReply.Sensitive)
-   assert.Equal(suite.T(), model.VisibilityPrivate, statusReply.Visibility)
+   assert.Equal(suite.T(), model.VisibilityPrivate, statusReply.Visibility) // even though we set this status to mutuals only, it should serialize to private, because masto has no idea about mutuals_only
    assert.Len(suite.T(), statusReply.Tags, 1)
    assert.Equal(suite.T(), model.Tag{
        Name: "helloworld",
@@ -161,13 +161,11 @@ func (suite *StatusCreateTestSuite) TestPostAnotherNewStatus() {
    b, err := ioutil.ReadAll(result.Body)
    assert.NoError(suite.T(), err)
 
-   fmt.Println(string(b))
-
    statusReply := &model.Status{}
    err = json.Unmarshal(b, statusReply)
    assert.NoError(suite.T(), err)
 
-   assert.Equal(suite.T(), "<p><a href=\"http://localhost:8080/tags/test\" class=\"mention hashtag\" rel=\"tag nofollow noreferrer noopener\" target=\"_blank\">#<span>test</span></a> alright, should be able to post <a href=\"http://localhost:8080/tags/links\" class=\"mention hashtag\" rel=\"tag nofollow noreferrer noopener\" target=\"_blank\">#<span>links</span></a> with fragments in them now, let's see........<br/><br/><a href=\"https://docs.gotosocial.org/en/latest/user_guide/posts/#links\" rel=\"noopener nofollow noreferrer\" target=\"_blank\">docs.gotosocial.org/en/latest/user_guide/posts/#links</a><br/><a href=\"http://localhost:8080/tags/gotosocial\" class=\"mention hashtag\" rel=\"tag nofollow noreferrer noopener\" target=\"_blank\">#<span>gotosocial</span></a><br/><br/>(tobi remember to pull the docker image challenge)</p>", statusReply.Content)
+   assert.Equal(suite.T(), "\u003cp\u003e\u003ca href=\"http://localhost:8080/tags/test\" class=\"mention hashtag\" rel=\"tag nofollow noreferrer noopener\" target=\"_blank\"\u003e#\u003cspan\u003etest\u003c/span\u003e\u003c/a\u003e alright, should be able to post \u003ca href=\"http://localhost:8080/tags/links\" class=\"mention hashtag\" rel=\"tag nofollow noreferrer noopener\" target=\"_blank\"\u003e#\u003cspan\u003elinks\u003c/span\u003e\u003c/a\u003e with fragments in them now, let\u0026#39;s see........\u003cbr/\u003e\u003cbr/\u003e\u003ca href=\"https://docs.gotosocial.org/en/latest/user_guide/posts/#links\" rel=\"noopener nofollow noreferrer\" target=\"_blank\"\u003edocs.gotosocial.org/en/latest/user_guide/posts/#links\u003c/a\u003e\u003cbr/\u003e\u003cbr/\u003e\u003ca href=\"http://localhost:8080/tags/gotosocial\" class=\"mention hashtag\" rel=\"tag nofollow noreferrer noopener\" target=\"_blank\"\u003e#\u003cspan\u003egotosocial\u003c/span\u003e\u003c/a\u003e\u003cbr/\u003e\u003cbr/\u003e(tobi remember to pull the docker image challenge)\u003c/p\u003e", statusReply.Content)
 }
 
 func (suite *StatusCreateTestSuite) TestPostNewStatusWithEmoji() {
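
A note on the final hunk, since the new expected string looks more different than it is: in Go interpreted string literals, \u003c and \u0026 are just alternate spellings of < and &, so most of the new literal is byte-for-byte the old one. The visible behavioral change is the apostrophe now arriving HTML-escaped (let&#39;s), presumably from sanitization changes elsewhere in this commit. A standalone demonstration (not project code):

package main

import "fmt"

func main() {
    // Identical string values, different literal spellings:
    a := "<p>let's see</p>"
    b := "\u003cp\u003elet's see\u003c/p\u003e"
    fmt.Println(a == b) // true

    // The actual change in the assertion: the apostrophe is now the HTML
    // entity &#39; ("\u0026#39;" spells out "&#39;").
    fmt.Println("let\u0026#39;s see") // let&#39;s see
}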
@@ -19,18 +19,13 @@
 package federation_test
 
 import (
-   "bytes"
    "context"
-   "crypto/x509"
-   "encoding/pem"
-   "fmt"
-   "io/ioutil"
    "net/http"
    "net/http/httptest"
-   "strings"
    "testing"
 
    "github.com/go-fed/activity/pub"
+   "github.com/go-fed/httpsig"
    "github.com/sirupsen/logrus"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"
@@ -117,63 +112,31 @@ func (suite *ProtocolTestSuite) TestAuthenticatePostInbox() {
    sendingAccount := suite.accounts["remote_account_1"]
    inboxAccount := suite.accounts["local_account_1"]
 
-   encodedPublicKey, err := x509.MarshalPKIXPublicKey(sendingAccount.PublicKey)
-   assert.NoError(suite.T(), err)
-   publicKeyBytes := pem.EncodeToMemory(&pem.Block{
-       Type:  "PUBLIC KEY",
-       Bytes: encodedPublicKey,
-   })
-   publicKeyString := strings.ReplaceAll(string(publicKeyBytes), "\n", "\\n")
-
-   // for this test we need the client to return the public key of the activity creator on the 'remote' instance
-   responseBodyString := fmt.Sprintf(`
-   {
-       "@context": [
-           "https://www.w3.org/ns/activitystreams",
-           "https://w3id.org/security/v1"
-       ],
-
-       "id": "%s",
-       "type": "Person",
-       "preferredUsername": "%s",
-       "inbox": "%s",
-
-       "publicKey": {
-           "id": "%s",
-           "owner": "%s",
-           "publicKeyPem": "%s"
-       }
-   }`, sendingAccount.URI, sendingAccount.Username, sendingAccount.InboxURI, sendingAccount.PublicKeyURI, sendingAccount.URI, publicKeyString)
-
-   // create a transport controller whose client will just return the response body string we specified above
-   tc := testrig.NewTestTransportController(testrig.NewMockHTTPClient(func(req *http.Request) (*http.Response, error) {
-       r := ioutil.NopCloser(bytes.NewReader([]byte(responseBodyString)))
-       return &http.Response{
-           StatusCode: 200,
-           Body:       r,
-       }, nil
-   }), suite.db)
+   tc := testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil), suite.db)
 
    // now setup module being tested, with the mock transport controller
    federator := federation.NewFederator(suite.db, testrig.NewTestFederatingDB(suite.db), tc, suite.config, suite.log, suite.typeConverter, testrig.NewTestMediaHandler(suite.db, suite.storage))
 
+   // setup request
+   request := httptest.NewRequest(http.MethodPost, "http://localhost:8080/users/the_mighty_zork/inbox", nil)
+   // we need these headers for the request to be validated
+   request.Header.Set("Signature", activity.SignatureHeader)
+   request.Header.Set("Date", activity.DateHeader)
+   request.Header.Set("Digest", activity.DigestHeader)
+
+   verifier, err := httpsig.NewVerifier(request)
+   assert.NoError(suite.T(), err)
+
    ctx := context.Background()
    // by the time AuthenticatePostInbox is called, PostInboxRequestBodyHook should have already been called,
    // which should have set the account and username onto the request. We can replicate that behavior here:
    ctxWithAccount := context.WithValue(ctx, util.APAccount, inboxAccount)
    ctxWithActivity := context.WithValue(ctxWithAccount, util.APActivity, activity)
+   ctxWithVerifier := context.WithValue(ctxWithActivity, util.APRequestingPublicKeyVerifier, verifier)
 
-   request := httptest.NewRequest(http.MethodPost, "http://localhost:8080/users/the_mighty_zork/inbox", nil) // the endpoint we're hitting
-   // we need these headers for the request to be validated
-   request.Header.Set("Signature", activity.SignatureHeader)
-   request.Header.Set("Date", activity.DateHeader)
-   request.Header.Set("Digest", activity.DigestHeader)
    // we can pass this recorder as a writer and read it back after
    recorder := httptest.NewRecorder()
 
    // trigger the function being tested, and return the new context it creates
-   newContext, authed, err := federator.AuthenticatePostInbox(ctxWithActivity, recorder, request)
+   newContext, authed, err := federator.AuthenticatePostInbox(ctxWithVerifier, recorder, request)
    assert.NoError(suite.T(), err)
    assert.True(suite.T(), authed)
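
For context on the verifier handoff in the rewritten test: httpsig.NewVerifier parses the request's Signature header, and the resulting verifier travels on the context under util.APRequestingPublicKeyVerifier so that AuthenticatePostInbox can dereference the signer's public key and check the signature. A minimal sketch of that handoff, using only calls already visible above (verifierFromRequest is a hypothetical helper, not project code):

package federation_test

import (
    "context"
    "net/http"

    "github.com/go-fed/httpsig"

    "github.com/superseriousbusiness/gotosocial/internal/util"
)

// verifierFromRequest shows the handoff the test replicates: parse the
// Signature header into a verifier, then pass it onward via the context.
func verifierFromRequest(ctx context.Context, r *http.Request) (context.Context, error) {
    verifier, err := httpsig.NewVerifier(r) // fails if the Signature header is missing or malformed
    if err != nil {
        return ctx, err
    }
    _ = verifier.KeyId() // the URL of the public key to dereference for verification
    return context.WithValue(ctx, util.APRequestingPublicKeyVerifier, verifier), nil
}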
@@ -21,12 +21,10 @@
    "context"
    "testing"
 
-   "github.com/sirupsen/logrus"
    "github.com/stretchr/testify/suite"
-   "github.com/superseriousbusiness/gotosocial/internal/config"
    "github.com/superseriousbusiness/gotosocial/internal/db"
-   "github.com/superseriousbusiness/gotosocial/internal/db/pg"
    "github.com/superseriousbusiness/gotosocial/internal/oauth"
+   "github.com/superseriousbusiness/gotosocial/testrig"
    "github.com/superseriousbusiness/oauth2/v4/models"
 )
 
@@ -43,7 +41,7 @@ type PgClientStoreTestSuite struct {
 
 // SetupSuite sets some variables on the suite that we can use as consts (more or less) throughout
 func (suite *PgClientStoreTestSuite) SetupSuite() {
-   suite.testClientID = "test-client-id"
+   suite.testClientID = "01FCVB74EW6YBYAEY7QG9CQQF6"
    suite.testClientSecret = "test-client-secret"
    suite.testClientDomain = "https://example.org"
    suite.testClientUserID = "test-client-user-id"
@@ -51,50 +49,13 @@ func (suite *PgClientStoreTestSuite) SetupSuite() {
 
 // SetupTest creates a postgres connection and creates the oauth_clients table before each test
 func (suite *PgClientStoreTestSuite) SetupTest() {
-   log := logrus.New()
-   log.SetLevel(logrus.TraceLevel)
-   c := config.Empty()
-   c.DBConfig = &config.DBConfig{
-       Type:            "postgres",
-       Address:         "localhost",
-       Port:            5432,
-       User:            "postgres",
-       Password:        "postgres",
-       Database:        "postgres",
-       ApplicationName: "gotosocial",
-   }
-   db, err := pg.NewPostgresService(context.Background(), c, log)
-   if err != nil {
-       logrus.Panicf("error creating database connection: %s", err)
-   }
-
-   suite.db = db
-
-   models := []interface{}{
-       &oauth.Client{},
-   }
-
-   for _, m := range models {
-       if err := suite.db.CreateTable(m); err != nil {
-           logrus.Panicf("db connection error: %s", err)
-       }
-   }
+   suite.db = testrig.NewTestDB()
+   testrig.StandardDBSetup(suite.db, nil)
 }
 
 // TearDownTest drops the oauth_clients table and closes the pg connection after each test
 func (suite *PgClientStoreTestSuite) TearDownTest() {
-   models := []interface{}{
-       &oauth.Client{},
-   }
-   for _, m := range models {
-       if err := suite.db.DropTable(m); err != nil {
-           logrus.Panicf("error dropping table: %s", err)
-       }
-   }
-   if err := suite.db.Stop(context.Background()); err != nil {
-       logrus.Panicf("error closing db connection: %s", err)
-   }
-   suite.db = nil
+   testrig.StandardDBTeardown(suite.db)
 }
 
 func (suite *PgClientStoreTestSuite) TestClientStoreSetAndGet() {
@@ -25,7 +25,6 @@
    "testing"
 
    "github.com/go-fed/activity/streams"
-   "github.com/go-fed/activity/streams/vocab"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"
    "github.com/superseriousbusiness/gotosocial/internal/ap"
@@ -375,62 +374,6 @@ func (suite *ASToInternalTestSuite) TestParseGargron() {
    // TODO: write assertions here, rn we're just eyeballing the output
 }
 
-func (suite *ASToInternalTestSuite) TestParseStatus() {
-   m := make(map[string]interface{})
-   err := json.Unmarshal([]byte(statusWithEmojisAndTagsAsActivityJson), &m)
-   assert.NoError(suite.T(), err)
-
-   t, err := streams.ToType(context.Background(), m)
-   assert.NoError(suite.T(), err)
-
-   create, ok := t.(vocab.ActivityStreamsCreate)
-   assert.True(suite.T(), ok)
-
-   obj := create.GetActivityStreamsObject()
-   assert.NotNil(suite.T(), obj)
-
-   first := obj.Begin()
-   assert.NotNil(suite.T(), first)
-
-   rep, ok := first.GetType().(ap.Statusable)
-   assert.True(suite.T(), ok)
-
-   status, err := suite.typeconverter.ASStatusToStatus(rep)
-   assert.NoError(suite.T(), err)
-
-   assert.Len(suite.T(), status.GTSEmojis, 3)
-   // assert.Len(suite.T(), status.GTSTags, 2) TODO: implement this first so that it can pick up tags
-}
-
-func (suite *ASToInternalTestSuite) TestParseStatusWithMention() {
-   m := make(map[string]interface{})
-   err := json.Unmarshal([]byte(statusWithMentionsActivityJson), &m)
-   assert.NoError(suite.T(), err)
-
-   t, err := streams.ToType(context.Background(), m)
-   assert.NoError(suite.T(), err)
-
-   create, ok := t.(vocab.ActivityStreamsCreate)
-   assert.True(suite.T(), ok)
-
-   obj := create.GetActivityStreamsObject()
-   assert.NotNil(suite.T(), obj)
-
-   first := obj.Begin()
-   assert.NotNil(suite.T(), first)
-
-   rep, ok := first.GetType().(ap.Statusable)
-   assert.True(suite.T(), ok)
-
-   status, err := suite.typeconverter.ASStatusToStatus(rep)
-   assert.NoError(suite.T(), err)
-
-   fmt.Printf("%+v", status)
-
-   assert.Len(suite.T(), status.GTSMentions, 1)
-   fmt.Println(status.GTSMentions[0])
-}
-
 func (suite *ASToInternalTestSuite) TearDownTest() {
    testrig.StandardDBTeardown(suite.db)
 }
@@ -32,6 +32,8 @@ func (c *converter) MastoVisToVis(m model.Visibility) gtsmodel.Visibility {
        return gtsmodel.VisibilityUnlocked
    case model.VisibilityPrivate:
        return gtsmodel.VisibilityFollowersOnly
+   case model.VisibilityMutualsOnly:
+       return gtsmodel.VisibilityMutualsOnly
    case model.VisibilityDirect:
        return gtsmodel.VisibilityDirect
    }
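
As the status test comment above notes, this mapping is one-way: mutuals_only is accepted from the client API, but on the way back out it serializes as private, because Mastodon-compatible clients have no mutuals_only visibility. A self-contained sketch of that asymmetry, using stand-in types rather than the project's real model/gtsmodel packages:

package main

import "fmt"

// Stand-in visibility types, not the project's real ones.
type mastoVis string
type gtsVis string

const (
    mastoPrivate     mastoVis = "private"
    mastoMutualsOnly mastoVis = "mutuals_only" // GtS extension; unknown to Mastodon clients
    gtsMutualsOnly   gtsVis   = "mutuals_only"
)

// Inbound: the new case added above gives mutuals_only a first-class mapping.
func mastoToGts(m mastoVis) gtsVis {
    if m == mastoMutualsOnly {
        return gtsMutualsOnly
    }
    return gtsVis(m)
}

// Outbound: mutuals_only has no Mastodon equivalent, so it degrades to private.
func gtsToMasto(g gtsVis) mastoVis {
    if g == gtsMutualsOnly {
        return mastoPrivate
    }
    return mastoVis(g)
}

func main() {
    fmt.Println(gtsToMasto(mastoToGts(mastoMutualsOnly))) // private
}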
@@ -20,6 +20,7 @@
 
 import (
    "context"
+   "os"
 
    "github.com/sirupsen/logrus"
    "github.com/superseriousbusiness/gotosocial/internal/db"
@@ -52,9 +53,17 @@
    &oauth.Client{},
 }
 
-// NewTestDB returns a new initialized, empty database for testing
+// NewTestDB returns a new initialized, empty database for testing.
+//
+// If the environment variable GTS_DB_ADDRESS is set, it will take that
+// value as the database address instead.
 func NewTestDB() db.DB {
    config := NewTestConfig()
+   alternateAddress := os.Getenv("GTS_DB_ADDRESS")
+   if alternateAddress != "" {
+       config.DBConfig.Address = alternateAddress
+   }
+
    l := logrus.New()
    l.SetLevel(logrus.TraceLevel)
    testDB, err := pg.NewPostgresService(context.Background(), config, l)
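
This is the hook that ties the drone pipeline above together: the test step sets GTS_DB_ADDRESS: postgres so the testrig connects to the postgres service container, while local runs fall back to the address in NewTestConfig(). A sketch of the intended usage, mirroring the setup/teardown calls from the refactored pg client store test (the test name is hypothetical):

package example_test

import (
    "testing"

    "github.com/superseriousbusiness/gotosocial/testrig"
)

func TestWithTestrigDB(t *testing.T) {
    // NewTestDB honors GTS_DB_ADDRESS, e.g. "postgres" under drone.
    db := testrig.NewTestDB()
    testrig.StandardDBSetup(db, nil)     // load the standard test fixtures
    defer testrig.StandardDBTeardown(db) // and drop them again afterwards

    // ... exercise code that needs a real database here ...
}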
22
vendor/github.com/aymerick/douceur/LICENSE
generated
vendored
Normal file
22
vendor/github.com/aymerick/douceur/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2015 Aymerick JEHANNE
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
60
vendor/github.com/aymerick/douceur/css/declaration.go
generated
vendored
Normal file
60
vendor/github.com/aymerick/douceur/css/declaration.go
generated
vendored
Normal file
|
@ -0,0 +1,60 @@
|
||||||
|
package css
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// Declaration represents a parsed style property
|
||||||
|
type Declaration struct {
|
||||||
|
Property string
|
||||||
|
Value string
|
||||||
|
Important bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDeclaration instanciates a new Declaration
|
||||||
|
func NewDeclaration() *Declaration {
|
||||||
|
return &Declaration{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns string representation of the Declaration
|
||||||
|
func (decl *Declaration) String() string {
|
||||||
|
return decl.StringWithImportant(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringWithImportant returns string representation with optional !important part
|
||||||
|
func (decl *Declaration) StringWithImportant(option bool) string {
|
||||||
|
result := fmt.Sprintf("%s: %s", decl.Property, decl.Value)
|
||||||
|
|
||||||
|
if option && decl.Important {
|
||||||
|
result += " !important"
|
||||||
|
}
|
||||||
|
|
||||||
|
result += ";"
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal returns true if both Declarations are equals
|
||||||
|
func (decl *Declaration) Equal(other *Declaration) bool {
|
||||||
|
return (decl.Property == other.Property) && (decl.Value == other.Value) && (decl.Important == other.Important)
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// DeclarationsByProperty
|
||||||
|
//
|
||||||
|
|
||||||
|
// DeclarationsByProperty represents sortable style declarations
|
||||||
|
type DeclarationsByProperty []*Declaration
|
||||||
|
|
||||||
|
// Implements sort.Interface
|
||||||
|
func (declarations DeclarationsByProperty) Len() int {
|
||||||
|
return len(declarations)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implements sort.Interface
|
||||||
|
func (declarations DeclarationsByProperty) Swap(i, j int) {
|
||||||
|
declarations[i], declarations[j] = declarations[j], declarations[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implements sort.Interface
|
||||||
|
func (declarations DeclarationsByProperty) Less(i, j int) bool {
|
||||||
|
return declarations[i].Property < declarations[j].Property
|
||||||
|
}
|
230
vendor/github.com/aymerick/douceur/css/rule.go
generated
vendored
Normal file
230
vendor/github.com/aymerick/douceur/css/rule.go
generated
vendored
Normal file
|
@ -0,0 +1,230 @@
|
||||||
|
package css
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
indentSpace = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
// RuleKind represents a Rule kind
|
||||||
|
type RuleKind int
|
||||||
|
|
||||||
|
// Rule kinds
|
||||||
|
const (
|
||||||
|
QualifiedRule RuleKind = iota
|
||||||
|
AtRule
|
||||||
|
)
|
||||||
|
|
||||||
|
// At Rules than have Rules inside their block instead of Declarations
|
||||||
|
var atRulesWithRulesBlock = []string{
|
||||||
|
"@document", "@font-feature-values", "@keyframes", "@media", "@supports",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rule represents a parsed CSS rule
|
||||||
|
type Rule struct {
|
||||||
|
Kind RuleKind
|
||||||
|
|
||||||
|
// At Rule name (eg: "@media")
|
||||||
|
Name string
|
||||||
|
|
||||||
|
// Raw prelude
|
||||||
|
Prelude string
|
||||||
|
|
||||||
|
// Qualified Rule selectors parsed from prelude
|
||||||
|
Selectors []string
|
||||||
|
|
||||||
|
// Style properties
|
||||||
|
Declarations []*Declaration
|
||||||
|
|
||||||
|
// At Rule embedded rules
|
||||||
|
Rules []*Rule
|
||||||
|
|
||||||
|
// Current rule embedding level
|
||||||
|
EmbedLevel int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRule instanciates a new Rule
|
||||||
|
func NewRule(kind RuleKind) *Rule {
|
||||||
|
return &Rule{
|
||||||
|
Kind: kind,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns string representation of rule kind
|
||||||
|
func (kind RuleKind) String() string {
|
||||||
|
switch kind {
|
||||||
|
case QualifiedRule:
|
||||||
|
return "Qualified Rule"
|
||||||
|
case AtRule:
|
||||||
|
return "At Rule"
|
||||||
|
default:
|
||||||
|
return "WAT"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EmbedsRules returns true if this rule embeds another rules
|
||||||
|
func (rule *Rule) EmbedsRules() bool {
|
||||||
|
if rule.Kind == AtRule {
|
||||||
|
for _, atRuleName := range atRulesWithRulesBlock {
|
||||||
|
if rule.Name == atRuleName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal returns true if both rules are equals
|
||||||
|
func (rule *Rule) Equal(other *Rule) bool {
|
||||||
|
if (rule.Kind != other.Kind) ||
|
||||||
|
(rule.Prelude != other.Prelude) ||
|
||||||
|
(rule.Name != other.Name) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if (len(rule.Selectors) != len(other.Selectors)) ||
|
||||||
|
(len(rule.Declarations) != len(other.Declarations)) ||
|
||||||
|
(len(rule.Rules) != len(other.Rules)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, sel := range rule.Selectors {
|
||||||
|
if sel != other.Selectors[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, decl := range rule.Declarations {
|
||||||
|
if !decl.Equal(other.Declarations[i]) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, rule := range rule.Rules {
|
||||||
|
if !rule.Equal(other.Rules[i]) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Diff returns a string representation of rules differences
|
||||||
|
func (rule *Rule) Diff(other *Rule) []string {
|
||||||
|
result := []string{}
|
||||||
|
|
||||||
|
if rule.Kind != other.Kind {
|
||||||
|
result = append(result, fmt.Sprintf("Kind: %s | %s", rule.Kind.String(), other.Kind.String()))
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.Prelude != other.Prelude {
|
||||||
|
result = append(result, fmt.Sprintf("Prelude: \"%s\" | \"%s\"", rule.Prelude, other.Prelude))
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.Name != other.Name {
|
||||||
|
result = append(result, fmt.Sprintf("Name: \"%s\" | \"%s\"", rule.Name, other.Name))
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rule.Selectors) != len(other.Selectors) {
|
||||||
|
result = append(result, fmt.Sprintf("Selectors: %v | %v", strings.Join(rule.Selectors, ", "), strings.Join(other.Selectors, ", ")))
|
||||||
|
} else {
|
||||||
|
for i, sel := range rule.Selectors {
|
||||||
|
if sel != other.Selectors[i] {
|
||||||
|
result = append(result, fmt.Sprintf("Selector: \"%s\" | \"%s\"", sel, other.Selectors[i]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rule.Declarations) != len(other.Declarations) {
|
||||||
|
result = append(result, fmt.Sprintf("Declarations Nb: %d | %d", len(rule.Declarations), len(other.Declarations)))
|
||||||
|
} else {
|
||||||
|
for i, decl := range rule.Declarations {
|
||||||
|
if !decl.Equal(other.Declarations[i]) {
|
||||||
|
result = append(result, fmt.Sprintf("Declaration: \"%s\" | \"%s\"", decl.String(), other.Declarations[i].String()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rule.Rules) != len(other.Rules) {
|
||||||
|
result = append(result, fmt.Sprintf("Rules Nb: %d | %d", len(rule.Rules), len(other.Rules)))
|
||||||
|
} else {
|
||||||
|
|
||||||
|
for i, rule := range rule.Rules {
|
||||||
|
if !rule.Equal(other.Rules[i]) {
|
||||||
|
result = append(result, fmt.Sprintf("Rule: \"%s\" | \"%s\"", rule.String(), other.Rules[i].String()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the string representation of a rule
|
||||||
|
func (rule *Rule) String() string {
|
||||||
|
result := ""
|
||||||
|
|
||||||
|
if rule.Kind == QualifiedRule {
|
||||||
|
for i, sel := range rule.Selectors {
|
||||||
|
if i != 0 {
|
||||||
|
result += ", "
|
||||||
|
}
|
||||||
|
result += sel
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// AtRule
|
||||||
|
result += fmt.Sprintf("%s", rule.Name)
|
||||||
|
|
||||||
|
if rule.Prelude != "" {
|
||||||
|
if result != "" {
|
||||||
|
result += " "
|
||||||
|
}
|
||||||
|
result += fmt.Sprintf("%s", rule.Prelude)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (len(rule.Declarations) == 0) && (len(rule.Rules) == 0) {
|
||||||
|
result += ";"
|
||||||
|
} else {
|
||||||
|
result += " {\n"
|
||||||
|
|
||||||
|
if rule.EmbedsRules() {
|
||||||
|
for _, subRule := range rule.Rules {
|
||||||
|
result += fmt.Sprintf("%s%s\n", rule.indent(), subRule.String())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for _, decl := range rule.Declarations {
|
||||||
|
result += fmt.Sprintf("%s%s\n", rule.indent(), decl.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result += fmt.Sprintf("%s}", rule.indentEndBlock())
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns identation spaces for declarations and rules
|
||||||
|
func (rule *Rule) indent() string {
|
||||||
|
result := ""
|
||||||
|
|
||||||
|
for i := 0; i < ((rule.EmbedLevel + 1) * indentSpace); i++ {
|
||||||
|
result += " "
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns identation spaces for end of block character
|
||||||
|
func (rule *Rule) indentEndBlock() string {
|
||||||
|
result := ""
|
||||||
|
|
||||||
|
for i := 0; i < (rule.EmbedLevel * indentSpace); i++ {
|
||||||
|
result += " "
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
25
vendor/github.com/aymerick/douceur/css/stylesheet.go
generated
vendored
Normal file
25
vendor/github.com/aymerick/douceur/css/stylesheet.go
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
package css
|
||||||
|
|
||||||
|
// Stylesheet represents a parsed stylesheet
|
||||||
|
type Stylesheet struct {
|
||||||
|
Rules []*Rule
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStylesheet instanciate a new Stylesheet
|
||||||
|
func NewStylesheet() *Stylesheet {
|
||||||
|
return &Stylesheet{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns string representation of the Stylesheet
|
||||||
|
func (sheet *Stylesheet) String() string {
|
||||||
|
result := ""
|
||||||
|
|
||||||
|
for _, rule := range sheet.Rules {
|
||||||
|
if result != "" {
|
||||||
|
result += "\n"
|
||||||
|
}
|
||||||
|
result += rule.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
409
vendor/github.com/aymerick/douceur/parser/parser.go
generated
vendored
Normal file
409
vendor/github.com/aymerick/douceur/parser/parser.go
generated
vendored
Normal file
|
@ -0,0 +1,409 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gorilla/css/scanner"
|
||||||
|
|
||||||
|
"github.com/aymerick/douceur/css"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
importantSuffixRegexp = `(?i)\s*!important\s*$`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
importantRegexp *regexp.Regexp
|
||||||
|
)
|
||||||
|
|
||||||
|
// Parser represents a CSS parser
|
||||||
|
type Parser struct {
|
||||||
|
scan *scanner.Scanner // Tokenizer
|
||||||
|
|
||||||
|
// Tokens parsed but not consumed yet
|
||||||
|
tokens []*scanner.Token
|
||||||
|
|
||||||
|
// Rule embedding level
|
||||||
|
embedLevel int
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
importantRegexp = regexp.MustCompile(importantSuffixRegexp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewParser instanciates a new parser
|
||||||
|
func NewParser(txt string) *Parser {
|
||||||
|
return &Parser{
|
||||||
|
scan: scanner.New(txt),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse parses a whole stylesheet
|
||||||
|
func Parse(text string) (*css.Stylesheet, error) {
|
||||||
|
result, err := NewParser(text).ParseStylesheet()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseDeclarations parses CSS declarations
|
||||||
|
func ParseDeclarations(text string) ([]*css.Declaration, error) {
|
||||||
|
result, err := NewParser(text).ParseDeclarations()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseStylesheet parses a stylesheet
|
||||||
|
func (parser *Parser) ParseStylesheet() (*css.Stylesheet, error) {
|
||||||
|
result := css.NewStylesheet()
|
||||||
|
|
||||||
|
// Parse BOM
|
||||||
|
if _, err := parser.parseBOM(); err != nil {
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse list of rules
|
||||||
|
rules, err := parser.ParseRules()
|
||||||
|
if err != nil {
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Rules = rules
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseRules parses a list of rules
|
||||||
|
func (parser *Parser) ParseRules() ([]*css.Rule, error) {
|
||||||
|
result := []*css.Rule{}
|
||||||
|
|
||||||
|
inBlock := false
|
||||||
|
if parser.tokenChar("{") {
|
||||||
|
// parsing a block of rules
|
||||||
|
inBlock = true
|
||||||
|
parser.embedLevel++
|
||||||
|
|
||||||
|
parser.shiftToken()
|
||||||
|
}
|
||||||
|
|
||||||
|
for parser.tokenParsable() {
|
||||||
|
if parser.tokenIgnorable() {
|
||||||
|
parser.shiftToken()
|
||||||
|
} else if parser.tokenChar("}") {
|
||||||
|
if !inBlock {
|
||||||
|
errMsg := fmt.Sprintf("Unexpected } character: %s", parser.nextToken().String())
|
||||||
|
return result, errors.New(errMsg)
|
||||||
|
}
|
||||||
|
|
||||||
|
parser.shiftToken()
|
||||||
|
parser.embedLevel--
|
||||||
|
|
||||||
|
// finished
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
rule, err := parser.ParseRule()
|
||||||
|
if err != nil {
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
rule.EmbedLevel = parser.embedLevel
|
||||||
|
result = append(result, rule)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, parser.err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseRule parses a rule
|
||||||
|
func (parser *Parser) ParseRule() (*css.Rule, error) {
|
||||||
|
if parser.tokenAtKeyword() {
|
||||||
|
return parser.parseAtRule()
|
||||||
|
}
|
||||||
|
|
||||||
|
return parser.parseQualifiedRule()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseDeclarations parses a list of declarations
|
||||||
|
func (parser *Parser) ParseDeclarations() ([]*css.Declaration, error) {
|
||||||
|
result := []*css.Declaration{}
|
||||||
|
|
||||||
|
if parser.tokenChar("{") {
|
||||||
|
parser.shiftToken()
|
||||||
|
}
|
||||||
|
|
||||||
|
for parser.tokenParsable() {
|
||||||
|
if parser.tokenIgnorable() {
|
||||||
|
parser.shiftToken()
|
||||||
|
} else if parser.tokenChar("}") {
|
||||||
|
// end of block
|
||||||
|
parser.shiftToken()
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
declaration, err := parser.ParseDeclaration()
|
||||||
|
if err != nil {
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result = append(result, declaration)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, parser.err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseDeclaration parses a declaration
|
||||||
|
func (parser *Parser) ParseDeclaration() (*css.Declaration, error) {
|
||||||
|
result := css.NewDeclaration()
|
||||||
|
curValue := ""
|
||||||
|
|
||||||
|
for parser.tokenParsable() {
|
||||||
|
if parser.tokenChar(":") {
|
||||||
|
result.Property = strings.TrimSpace(curValue)
|
||||||
|
curValue = ""
|
||||||
|
|
||||||
|
parser.shiftToken()
|
||||||
|
} else if parser.tokenChar(";") || parser.tokenChar("}") {
|
||||||
|
if result.Property == "" {
|
||||||
|
errMsg := fmt.Sprintf("Unexpected ; character: %s", parser.nextToken().String())
|
||||||
|
return result, errors.New(errMsg)
|
||||||
|
}
|
||||||
|
|
||||||
|
if importantRegexp.MatchString(curValue) {
|
||||||
|
result.Important = true
|
||||||
|
curValue = importantRegexp.ReplaceAllString(curValue, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Value = strings.TrimSpace(curValue)
|
||||||
|
|
||||||
|
if parser.tokenChar(";") {
|
||||||
|
parser.shiftToken()
|
||||||
|
}
|
||||||
|
|
||||||
|
// finished
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
token := parser.shiftToken()
|
||||||
|
curValue += token.Value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// log.Printf("[parsed] Declaration: %s", result.String())
|
||||||
|
|
||||||
|
return result, parser.err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse an At Rule
|
||||||
|
func (parser *Parser) parseAtRule() (*css.Rule, error) {
|
||||||
|
// parse rule name (eg: "@import")
|
||||||
|
token := parser.shiftToken()
|
||||||
|
|
||||||
|
result := css.NewRule(css.AtRule)
|
||||||
|
result.Name = token.Value
|
||||||
|
|
||||||
|
for parser.tokenParsable() {
|
||||||
|
if parser.tokenChar(";") {
|
||||||
|
parser.shiftToken()
|
||||||
|
|
||||||
|
// finished
|
||||||
|
break
|
||||||
|
} else if parser.tokenChar("{") {
|
||||||
|
if result.EmbedsRules() {
|
||||||
|
// parse rules block
|
||||||
|
rules, err := parser.ParseRules()
|
||||||
|
if err != nil {
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Rules = rules
|
||||||
|
} else {
|
||||||
|
// parse declarations block
|
||||||
|
declarations, err := parser.ParseDeclarations()
|
||||||
|
if err != nil {
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Declarations = declarations
|
||||||
|
}
|
||||||
|
|
||||||
|
// finished
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
// parse prelude
|
||||||
|
prelude, err := parser.parsePrelude()
|
||||||
|
if err != nil {
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Prelude = prelude
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// log.Printf("[parsed] Rule: %s", result.String())
|
||||||
|
|
||||||
|
return result, parser.err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse a Qualified Rule
|
||||||
|
func (parser *Parser) parseQualifiedRule() (*css.Rule, error) {
|
||||||
|
result := css.NewRule(css.QualifiedRule)
|
||||||
|
|
||||||
|
for parser.tokenParsable() {
|
||||||
|
if parser.tokenChar("{") {
|
||||||
|
if result.Prelude == "" {
|
||||||
|
errMsg := fmt.Sprintf("Unexpected { character: %s", parser.nextToken().String())
|
||||||
|
return result, errors.New(errMsg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse declarations block
|
||||||
|
declarations, err := parser.ParseDeclarations()
|
||||||
|
if err != nil {
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Declarations = declarations
|
||||||
|
|
||||||
|
// finished
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
// parse prelude
|
||||||
|
prelude, err := parser.parsePrelude()
|
||||||
|
if err != nil {
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Prelude = prelude
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Selectors = strings.Split(result.Prelude, ",")
|
||||||
|
for i, sel := range result.Selectors {
|
||||||
|
result.Selectors[i] = strings.TrimSpace(sel)
|
||||||
|
}
|
||||||
|
|
||||||
|
// log.Printf("[parsed] Rule: %s", result.String())
|
||||||
|
|
||||||
|
return result, parser.err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse Rule prelude
|
||||||
|
func (parser *Parser) parsePrelude() (string, error) {
|
||||||
|
result := ""
|
||||||
|
|
||||||
|
for parser.tokenParsable() && !parser.tokenEndOfPrelude() {
|
||||||
|
token := parser.shiftToken()
|
||||||
|
result += token.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
result = strings.TrimSpace(result)
|
||||||
|
|
||||||
|
// log.Printf("[parsed] prelude: %s", result)
|
||||||
|
|
||||||
|
return result, parser.err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse BOM
|
||||||
|
func (parser *Parser) parseBOM() (bool, error) {
|
||||||
|
if parser.nextToken().Type == scanner.TokenBOM {
|
||||||
|
parser.shiftToken()
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, parser.err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns next token without removing it from tokens buffer
|
||||||
|
func (parser *Parser) nextToken() *scanner.Token {
|
||||||
|
if len(parser.tokens) == 0 {
|
||||||
|
// fetch next token
|
||||||
|
nextToken := parser.scan.Next()
|
||||||
|
|
||||||
|
// log.Printf("[token] %s => %v", nextToken.Type.String(), nextToken.Value)
|
||||||
|
|
||||||
|
// queue it
|
||||||
|
parser.tokens = append(parser.tokens, nextToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
return parser.tokens[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns next token and remove it from the tokens buffer
|
||||||
|
func (parser *Parser) shiftToken() *scanner.Token {
|
||||||
|
var result *scanner.Token
|
||||||
|
|
||||||
|
result, parser.tokens = parser.tokens[0], parser.tokens[1:]
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns tokenizer error, or nil if no error
|
||||||
|
func (parser *Parser) err() error {
|
||||||
|
if parser.tokenError() {
|
||||||
|
token := parser.nextToken()
|
||||||
|
return fmt.Errorf("Tokenizer error: %s", token.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if next token is Error
|
||||||
|
func (parser *Parser) tokenError() bool {
|
||||||
|
return parser.nextToken().Type == scanner.TokenError
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if next token is EOF
|
||||||
|
func (parser *Parser) tokenEOF() bool {
|
||||||
|
return parser.nextToken().Type == scanner.TokenEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if next token is a whitespace
|
||||||
|
func (parser *Parser) tokenWS() bool {
|
||||||
|
return parser.nextToken().Type == scanner.TokenS
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if next token is a comment
|
||||||
|
func (parser *Parser) tokenComment() bool {
|
||||||
|
return parser.nextToken().Type == scanner.TokenComment
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if next token is a CDO or a CDC
|
||||||
|
func (parser *Parser) tokenCDOorCDC() bool {
|
||||||
|
switch parser.nextToken().Type {
|
||||||
|
case scanner.TokenCDO, scanner.TokenCDC:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if next token is ignorable
|
||||||
|
func (parser *Parser) tokenIgnorable() bool {
|
||||||
|
return parser.tokenWS() || parser.tokenComment() || parser.tokenCDOorCDC()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if next token is parsable
|
||||||
|
func (parser *Parser) tokenParsable() bool {
|
||||||
|
return !parser.tokenEOF() && !parser.tokenError()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if next token is an At Rule keyword
|
||||||
|
func (parser *Parser) tokenAtKeyword() bool {
|
||||||
|
return parser.nextToken().Type == scanner.TokenAtKeyword
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if next token is given character
|
||||||
|
func (parser *Parser) tokenChar(value string) bool {
|
||||||
|
token := parser.nextToken()
|
||||||
|
return (token.Type == scanner.TokenChar) && (token.Value == value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if next token marks the end of a prelude
|
||||||
|
func (parser *Parser) tokenEndOfPrelude() bool {
|
||||||
|
return parser.tokenChar(";") || parser.tokenChar("{")
|
||||||
|
}
|
2
vendor/github.com/buckket/go-blurhash/.gitignore
generated
vendored
Normal file
2
vendor/github.com/buckket/go-blurhash/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
.idea
|
||||||
|
coverage.txt
|
14
vendor/github.com/buckket/go-blurhash/.travis.yml
generated
vendored
Normal file
14
vendor/github.com/buckket/go-blurhash/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,14 @@
language: go

go:
  - 1.13.x
  - 1.14.x

install:
  - go get -t -v ./...

script:
  - go test -race -coverprofile=coverage.txt -covermode=atomic ./...

after_success:
  - bash <(curl -s https://codecov.io/bash)
674
vendor/github.com/buckket/go-blurhash/LICENSE
generated vendored Normal file
@@ -0,0 +1,674 @@
                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

[Full, unmodified text of the GNU General Public License v3; see https://www.gnu.org/licenses/gpl-3.0.txt]
96
vendor/github.com/buckket/go-blurhash/README.md
generated vendored Normal file
@@ -0,0 +1,96 @@
# go-blurhash [![Build Status](https://travis-ci.org/buckket/go-blurhash.svg)](https://travis-ci.org/buckket/go-blurhash) [![Go Report Card](https://goreportcard.com/badge/github.com/buckket/go-blurhash)](https://goreportcard.com/report/github.com/buckket/go-blurhash) [![codecov](https://codecov.io/gh/buckket/go-blurhash/branch/master/graph/badge.svg)](https://codecov.io/gh/buckket/go-blurhash) [![GoDoc](https://godoc.org/github.com/buckket/go-blurhash?status.svg)](https://pkg.go.dev/github.com/buckket/go-blurhash)

**go-blurhash** is a pure Go implementation of the [BlurHash](https://github.com/woltapp/blurhash) algorithm, which is used by
[Mastodon](https://github.com/tootsuite/mastodon) and other Fediverse software to implement a swift way of preloading placeholder images as well
as hiding sensitive media. Read more about it [here](https://blog.joinmastodon.org/2019/05/improving-support-for-adult-content-on-mastodon/).

**tl;dr:** BlurHash is a compact representation of a placeholder for an image.

This library allows generating the BlurHash of a given image, as well as
reconstructing a blurred version with specified dimensions from a given BlurHash.

This library is based on the following reference implementations:
- Encoder: [https://github.com/woltapp/blurhash/blob/master/C](https://github.com/woltapp/blurhash/blob/master/C) (C)
- Decoder: [https://github.com/woltapp/blurhash/blob/master/TypeScript](https://github.com/woltapp/blurhash/blob/master/TypeScript) (TypeScript)

BlurHash is written by [Dag Ågren](https://github.com/DagAgren) / [Wolt](https://github.com/woltapp).

|            | Before                         | After                          |
| ---------- |:------------------------------:|:------------------------------:|
| **Image**  | ![alt text][test]              | "LFE.@D9F01_2%L%MIVD*9Goe-;WB" |
| **Hash**   | "LFE.@D9F01_2%L%MIVD*9Goe-;WB" | ![alt text][test_blur]         |

[test]: test.png "Blurhash example input."
[test_blur]: test_blur.png "Blurhash example output"

## Installation

### From source

    go get -u github.com/buckket/go-blurhash

## Usage

go-blurhash exports three functions:

```go
func blurhash.Encode(xComponents, yComponents int, rgba image.Image) (string, error)
func blurhash.Decode(hash string, width, height, punch int) (image.Image, error)
func blurhash.Components(hash string) (xComponents, yComponents int, err error)
```

Here’s a simple demonstration. Check [pkg.go.dev](https://pkg.go.dev/github.com/buckket/go-blurhash) for the full documentation.
```go
package main

import (
	"fmt"
	"image/png"
	"os"

	"github.com/buckket/go-blurhash"
)

func main() {
	// Generate the BlurHash for a given image
	imageFile, _ := os.Open("test.png")
	loadedImage, err := png.Decode(imageFile)
	if err != nil {
		// Handle errors
	}
	str, err := blurhash.Encode(4, 3, loadedImage)
	if err != nil {
		// Handle errors
	}
	fmt.Printf("Hash: %s\n", str)

	// Generate an image for a given BlurHash
	// Width will be 300px and Height will be 500px
	// Punch specifies the contrast and defaults to 1
	img, err := blurhash.Decode(str, 300, 500, 1)
	if err != nil {
		// Handle errors
	}
	f, _ := os.Create("test_blur.png")
	_ = png.Encode(f, img)

	// Get the x and y components used for encoding a given BlurHash
	x, y, err := blurhash.Components("LFE.@D9F01_2%L%MIVD*9Goe-;WB")
	if err != nil {
		// Handle errors
	}
	fmt.Printf("xComponents: %d, yComponents: %d", x, y)
}
```
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
- Presumably a bit slower than the C implementation (TODO: Benchmarks)
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- As mentioned [here](https://github.com/woltapp/blurhash#how-fast-is-encoding-decoding), it’s best to
|
||||||
|
generate very small images (~32x32px) via BlurHash and scale them up to the desired dimensions afterwards for optimal performance.
|
||||||
|
- Since [#2](https://github.com/buckket/go-blurhash/pull/2) we diverted from the reference implementation by memorizing sRGBtoLinear values, thus increasing encoding speed at the cost of higher memory usage.
|
||||||
|
- Starting with v1.1.0 the signature of blurhash.Encode() has changed slightly (see [#3](https://github.com/buckket/go-blurhash/issues/3)).
|
||||||
|
|
||||||
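A minimal sketch of the decode-small-then-scale pattern from the first note. The scaler used here, golang.org/x/image/draw, is an assumption — any image scaler works — and the hash is the README example:

```go
package main

import (
	"image"
	"image/png"
	"log"
	"os"

	"github.com/buckket/go-blurhash"
	"golang.org/x/image/draw"
)

func main() {
	// Decode a tiny 32x32 placeholder first (cheap) ...
	small, err := blurhash.Decode("LFE.@D9F01_2%L%MIVD*9Goe-;WB", 32, 32, 1)
	if err != nil {
		log.Fatal(err)
	}

	// ... then scale it up to the display size.
	dst := image.NewRGBA(image.Rect(0, 0, 300, 500))
	draw.ApproxBiLinear.Scale(dst, dst.Bounds(), small, small.Bounds(), draw.Over, nil)

	f, err := os.Create("placeholder.png")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := png.Encode(f, dst); err != nil {
		log.Fatal(err)
	}
}
```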
## License

GNU GPLv3+
58
vendor/github.com/buckket/go-blurhash/base83/base83.go
generated vendored Normal file
@@ -0,0 +1,58 @@
package base83

import (
	"fmt"
	"math"
	"strings"
)

const characters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz#$%*+,-.:;=?@[]^_{|}~"

// An InvalidCharacterError occurs when a character is found which is not part of the Base83 character set.
type InvalidCharacterError rune

func (e InvalidCharacterError) Error() string {
	return fmt.Sprintf("base83: invalid string (character %q out of range)", rune(e))
}

// An InvalidLengthError occurs when a given value cannot be encoded to a string of given length.
type InvalidLengthError int

func (e InvalidLengthError) Error() string {
	return fmt.Sprintf("base83: invalid length (%d)", int(e))
}

// Encode will encode the given integer value to a Base83 string with given length.
// If length is too short to encode the given value InvalidLengthError will be returned.
func Encode(value, length int) (string, error) {
	divisor := int(math.Pow(83, float64(length)))
	if value/divisor != 0 {
		return "", InvalidLengthError(length)
	}
	divisor /= 83

	var str strings.Builder
	str.Grow(length)
	for i := 0; i < length; i++ {
		if divisor <= 0 {
			return "", InvalidLengthError(length)
		}
		digit := (value / divisor) % 83
		divisor /= 83
		str.WriteRune(rune(characters[digit]))
	}

	return str.String(), nil
}

// Decode will decode the given Base83 string to an integer.
func Decode(str string) (value int, err error) {
	for _, r := range str {
		idx := strings.IndexRune(characters, r)
		if idx == -1 {
			return 0, InvalidCharacterError(r)
		}
		value = value*83 + idx
	}
	return value, nil
}
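For reference, a small round-trip through the two functions above; the value 1092 and the two-digit length are arbitrary illustration choices:

```go
package main

import (
	"fmt"
	"log"

	"github.com/buckket/go-blurhash/base83"
)

func main() {
	// Encode the integer 1092 into two base83 digits, then decode it back.
	// 1092 = 13*83 + 13, and index 13 in the character set is 'D'.
	s, err := base83.Encode(1092, 2)
	if err != nil {
		log.Fatal(err)
	}
	v, err := base83.Decode(s)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s, v) // DD 1092
}
```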
109
vendor/github.com/buckket/go-blurhash/decode.go
generated vendored Normal file
@@ -0,0 +1,109 @@
package blurhash

import (
	"fmt"
	"github.com/buckket/go-blurhash/base83"
	"image"
	"image/color"
	"math"
)

// An InvalidHashError occurs when the given hash is either too short or the length does not match its size flag.
type InvalidHashError string

func (e InvalidHashError) Error() string {
	return fmt.Sprintf("blurhash: %s", string(e))
}

// Components decodes and returns the number of x and y components in the given BlurHash.
func Components(hash string) (xComponents, yComponents int, err error) {
	if len(hash) < 6 {
		return 0, 0, InvalidHashError("hash is invalid (too short)")
	}

	sizeFlag, err := base83.Decode(string(hash[0]))
	if err != nil {
		return 0, 0, err
	}

	yComponents = (sizeFlag / 9) + 1
	xComponents = (sizeFlag % 9) + 1

	if len(hash) != 4+2*xComponents*yComponents {
		return 0, 0, InvalidHashError("hash is invalid (length mismatch)")
	}

	return xComponents, yComponents, nil
}

// Decode generates an image of the given BlurHash with a size of width and height.
// Punch is a multiplier that adjusts the contrast of the resulting image.
func Decode(hash string, width, height, punch int) (image.Image, error) {
	xComp, yComp, err := Components(hash)
	if err != nil {
		return nil, err
	}

	quantisedMaximumValue, err := base83.Decode(string(hash[1]))
	if err != nil {
		return nil, err
	}
	maximumValue := (float64(quantisedMaximumValue) + 1) / 166

	if punch == 0 {
		punch = 1
	}

	colors := make([][3]float64, xComp*yComp)

	for i := range colors {
		if i == 0 {
			value, err := base83.Decode(hash[2:6])
			if err != nil {
				return nil, err
			}
			colors[i] = decodeDC(value)
		} else {
			value, err := base83.Decode(hash[4+i*2 : 6+i*2])
			if err != nil {
				return nil, err
			}
			colors[i] = decodeAC(value, maximumValue*float64(punch))
		}
	}

	img := image.NewNRGBA(image.Rect(0, 0, width, height))

	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			var r, g, b float64
			for j := 0; j < yComp; j++ {
				for i := 0; i < xComp; i++ {
					basis := math.Cos(math.Pi*float64(x)*float64(i)/float64(width)) *
						math.Cos(math.Pi*float64(y)*float64(j)/float64(height))
					pcolor := colors[i+j*xComp]
					r += pcolor[0] * basis
					g += pcolor[1] * basis
					b += pcolor[2] * basis
				}
			}
			img.SetNRGBA(x, y, color.NRGBA{R: uint8(linearTosRGB(r)), G: uint8(linearTosRGB(g)), B: uint8(linearTosRGB(b)), A: 255})
		}
	}

	return img, nil
}

func decodeDC(value int) [3]float64 {
	return [3]float64{sRGBToLinear(value >> 16), sRGBToLinear(value >> 8 & 255), sRGBToLinear(value & 255)}
}

func decodeAC(value int, maximumValue float64) [3]float64 {
	quantR := math.Floor(float64(value) / (19 * 19))
	quantG := math.Mod(math.Floor(float64(value)/19), 19)
	quantB := math.Mod(float64(value), 19)
	sp := func(quant float64) float64 {
		return signPow((quant-9)/9, 2.0) * maximumValue
	}
	return [3]float64{sp(quantR), sp(quantG), sp(quantB)}
}
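A quick check of the size-flag arithmetic in Components above, using the README example hash: its first character 'L' decodes to 21 in base83, giving x = 21%9 + 1 = 4 and y = 21/9 + 1 = 3, so a valid hash must be 4 + 2*4*3 = 28 characters long.

```go
package main

import (
	"fmt"
	"log"

	"github.com/buckket/go-blurhash"
)

func main() {
	const hash = "LFE.@D9F01_2%L%MIVD*9Goe-;WB"

	// The first character is the size flag: x = flag%9 + 1, y = flag/9 + 1.
	x, y, err := blurhash.Components(hash)
	if err != nil {
		log.Fatal(err)
	}

	// A valid hash is always 4 + 2*x*y characters long: here 4 + 2*4*3 = 28.
	fmt.Printf("x=%d y=%d len=%d\n", x, y, len(hash)) // x=4 y=3 len=28
}
```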
164
vendor/github.com/buckket/go-blurhash/encode.go
generated vendored Normal file
|
@ -0,0 +1,164 @@
|
||||||
|
package blurhash

import (
	"fmt"
	"github.com/buckket/go-blurhash/base83"
	"image"
	"math"
	"strings"
)

func init() {
	initLinearTable(channelToLinear[:])
}

var channelToLinear [256]float64

func initLinearTable(table []float64) {
	for i := range table {
		channelToLinear[i] = sRGBToLinear(i)
	}
}

// An InvalidParameterError occurs when an invalid argument is passed to either the Decode or Encode function.
type InvalidParameterError struct {
	Value     int
	Parameter string
}

func (e InvalidParameterError) Error() string {
	return fmt.Sprintf("blurhash: %sComponents (%d) must be element of [1-9]", e.Parameter, e.Value)
}

// An EncodingError represents an error that occurred during the encoding of the given value.
// This most likely means that your input image is invalid and can not be processed.
type EncodingError string

func (e EncodingError) Error() string {
	return fmt.Sprintf("blurhash: %s", string(e))
}

// Encode calculates the Blurhash for an image using the given x and y component counts.
// The x and y components have to be between 1 and 9 respectively.
// The image must be of image.Image type.
func Encode(xComponents int, yComponents int, rgba image.Image) (string, error) {
	if xComponents < 1 || xComponents > 9 {
		return "", InvalidParameterError{xComponents, "x"}
	}
	if yComponents < 1 || yComponents > 9 {
		return "", InvalidParameterError{yComponents, "y"}
	}

	var blurhash strings.Builder
	blurhash.Grow(4 + 2*xComponents*yComponents)

	// Size Flag
	str, err := base83.Encode((xComponents-1)+(yComponents-1)*9, 1)
	if err != nil {
		return "", EncodingError("could not encode size flag")
	}
	blurhash.WriteString(str)

	factors := make([]float64, yComponents*xComponents*3)
	multiplyBasisFunction(rgba, factors, xComponents, yComponents)

	var maximumValue float64
	var quantisedMaximumValue int
	var acCount = xComponents*yComponents - 1
	if acCount > 0 {
		var actualMaximumValue float64
		for i := 0; i < acCount*3; i++ {
			actualMaximumValue = math.Max(math.Abs(factors[i+3]), actualMaximumValue)
		}
		quantisedMaximumValue = int(math.Max(0, math.Min(82, math.Floor(actualMaximumValue*166-0.5))))
		maximumValue = (float64(quantisedMaximumValue) + 1) / 166
	} else {
		maximumValue = 1
	}

	// Quantised max AC component
	str, err = base83.Encode(quantisedMaximumValue, 1)
	if err != nil {
		return "", EncodingError("could not encode quantised max AC component")
	}
	blurhash.WriteString(str)

	// DC value
	str, err = base83.Encode(encodeDC(factors[0], factors[1], factors[2]), 4)
	if err != nil {
		return "", EncodingError("could not encode DC value")
	}
	blurhash.WriteString(str)

	// AC values
	for i := 0; i < acCount; i++ {
		str, err = base83.Encode(encodeAC(factors[3+(i*3+0)], factors[3+(i*3+1)], factors[3+(i*3+2)], maximumValue), 2)
		if err != nil {
			return "", EncodingError("could not encode AC value")
		}
		blurhash.WriteString(str)
	}

	if blurhash.Len() != 4+2*xComponents*yComponents {
		return "", EncodingError("hash does not match expected size")
	}

	return blurhash.String(), nil
}

func multiplyBasisFunction(rgba image.Image, factors []float64, xComponents int, yComponents int) {
	height := rgba.Bounds().Max.Y
	width := rgba.Bounds().Max.X

	xvalues := make([][]float64, xComponents)
	for xComponent := 0; xComponent < xComponents; xComponent++ {
		xvalues[xComponent] = make([]float64, width)
		for x := 0; x < width; x++ {
			xvalues[xComponent][x] = math.Cos(math.Pi * float64(xComponent) * float64(x) / float64(width))
		}
	}

	yvalues := make([][]float64, yComponents)
	for yComponent := 0; yComponent < yComponents; yComponent++ {
		yvalues[yComponent] = make([]float64, height)
		for y := 0; y < height; y++ {
			yvalues[yComponent][y] = math.Cos(math.Pi * float64(yComponent) * float64(y) / float64(height))
		}
	}

	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			rt, gt, bt, _ := rgba.At(x, y).RGBA()
			lr := channelToLinear[rt>>8]
			lg := channelToLinear[gt>>8]
			lb := channelToLinear[bt>>8]

			for yc := 0; yc < yComponents; yc++ {
				for xc := 0; xc < xComponents; xc++ {

					scale := 1 / float64(width*height)

					if xc != 0 || yc != 0 {
						scale = 2 / float64(width*height)
					}

					basis := xvalues[xc][x] * yvalues[yc][y]
					factors[0+xc*3+yc*3*xComponents] += lr * basis * scale
					factors[1+xc*3+yc*3*xComponents] += lg * basis * scale
					factors[2+xc*3+yc*3*xComponents] += lb * basis * scale
				}
			}
		}
	}
}

func encodeDC(r, g, b float64) int {
	return (linearTosRGB(r) << 16) + (linearTosRGB(g) << 8) + linearTosRGB(b)
}

func encodeAC(r, g, b, maximumValue float64) int {
	quant := func(f float64) int {
		return int(math.Max(0, math.Min(18, math.Floor(signPow(f/maximumValue, 0.5)*9+9.5))))
	}
	return quant(r)*19*19 + quant(g)*19 + quant(b)
}
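
A minimal usage sketch for Encode (the file name and the 4x3 component counts are illustrative, not part of the library; the output string depends on the input image):

package main

import (
	"fmt"
	"image/png"
	"log"
	"os"

	"github.com/buckket/go-blurhash"
)

func main() {
	f, err := os.Open("test.png") // any local PNG; the name is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	img, err := png.Decode(f)
	if err != nil {
		log.Fatal(err)
	}

	// 4x3 components is a common choice; both counts must be in [1,9].
	hash, err := blurhash.Encode(4, 3, img)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hash) // e.g. "LFE.@D9F01_2%L%MIVD*9Goe-;WB"
}
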
3 vendor/github.com/buckket/go-blurhash/go.mod generated vendored Normal file
@@ -0,0 +1,3 @@
module github.com/buckket/go-blurhash

go 1.14
BIN vendor/github.com/buckket/go-blurhash/test.png generated vendored Normal file (binary file not shown; after: 61 KiB)
BIN vendor/github.com/buckket/go-blurhash/test_blur.png generated vendored Normal file (binary file not shown; after: 18 KiB)
23 vendor/github.com/buckket/go-blurhash/utils.go generated vendored Normal file
@@ -0,0 +1,23 @@
package blurhash

import "math"

func linearTosRGB(value float64) int {
	v := math.Max(0, math.Min(1, value))
	if v <= 0.0031308 {
		return int(v*12.92*255 + 0.5)
	}
	return int((1.055*math.Pow(v, 1/2.4)-0.055)*255 + 0.5)
}

func sRGBToLinear(value int) float64 {
	v := float64(value) / 255
	if v <= 0.04045 {
		return v / 12.92
	}
	return math.Pow((v+0.055)/1.055, 2.4)
}

func signPow(value, exp float64) float64 {
	return math.Copysign(math.Pow(math.Abs(value), exp), value)
}
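
linearTosRGB and sRGBToLinear are mutual inverses over the 8-bit range, which is what makes the channelToLinear lookup table in encode.go lossless. A quick self-contained check (the helpers are re-stated here because the library's versions are unexported):

package main

import (
	"fmt"
	"math"
)

func linearTosRGB(value float64) int {
	v := math.Max(0, math.Min(1, value))
	if v <= 0.0031308 {
		return int(v*12.92*255 + 0.5)
	}
	return int((1.055*math.Pow(v, 1/2.4)-0.055)*255 + 0.5)
}

func sRGBToLinear(value int) float64 {
	v := float64(value) / 255
	if v <= 0.04045 {
		return v / 12.92
	}
	return math.Pow((v+0.055)/1.055, 2.4)
}

func main() {
	// Every 8-bit channel value should survive the round trip exactly.
	for c := 0; c < 256; c++ {
		if got := linearTosRGB(sRGBToLinear(c)); got != c {
			fmt.Println("mismatch at", c, "->", got)
			return
		}
	}
	fmt.Println("all 256 sRGB values round-trip exactly")
}
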
202 vendor/github.com/coreos/go-oidc/v3/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
5 vendor/github.com/coreos/go-oidc/v3/NOTICE generated vendored Normal file
@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2014 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
16 vendor/github.com/coreos/go-oidc/v3/oidc/jose.go generated vendored Normal file
@@ -0,0 +1,16 @@
package oidc

// JOSE asymmetric signing algorithm values as defined by RFC 7518
//
// see: https://tools.ietf.org/html/rfc7518#section-3.1
const (
	RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
	RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
	RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
	ES256 = "ES256" // ECDSA using P-256 and SHA-256
	ES384 = "ES384" // ECDSA using P-384 and SHA-384
	ES512 = "ES512" // ECDSA using P-521 and SHA-512
	PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
	PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
	PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
)
208 vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go generated vendored Normal file
@@ -0,0 +1,208 @@
package oidc

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"sync"
	"time"

	jose "gopkg.in/square/go-jose.v2"
)

// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP
// GETs to fetch JSON web key sets hosted at a remote URL. This is automatically
// used by NewProvider using the URLs returned by OpenID Connect discovery, but is
// exposed for providers that don't support discovery or to prevent round trips to the
// discovery URL.
//
// The returned KeySet is a long lived verifier that caches keys based on cache-control
// headers. Reuse a common remote key set instead of creating new ones as needed.
func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet {
	return newRemoteKeySet(ctx, jwksURL, time.Now)
}

func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *RemoteKeySet {
	if now == nil {
		now = time.Now
	}
	return &RemoteKeySet{jwksURL: jwksURL, ctx: cloneContext(ctx), now: now}
}

// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
// a jwks_uri endpoint.
type RemoteKeySet struct {
	jwksURL string
	ctx     context.Context
	now     func() time.Time

	// guard all other fields
	mu sync.Mutex

	// inflight suppresses parallel execution of updateKeys and allows
	// multiple goroutines to wait for its result.
	inflight *inflight

	// A set of cached keys.
	cachedKeys []jose.JSONWebKey
}

// inflight is used to wait on some in-flight request from multiple goroutines.
type inflight struct {
	doneCh chan struct{}

	keys []jose.JSONWebKey
	err  error
}

func newInflight() *inflight {
	return &inflight{doneCh: make(chan struct{})}
}

// wait returns a channel that multiple goroutines can receive on. Once it returns
// a value, the inflight request is done and result() can be inspected.
func (i *inflight) wait() <-chan struct{} {
	return i.doneCh
}

// done can only be called by a single goroutine. It records the result of the
// inflight request and signals other goroutines that the result is safe to
// inspect.
func (i *inflight) done(keys []jose.JSONWebKey, err error) {
	i.keys = keys
	i.err = err
	close(i.doneCh)
}

// result cannot be called until the wait() channel has returned a value.
func (i *inflight) result() ([]jose.JSONWebKey, error) {
	return i.keys, i.err
}

// VerifySignature validates a payload against a signature from the jwks_uri.
//
// Users MUST NOT call this method directly and should use an IDTokenVerifier
// instead. This method skips critical validations such as 'alg' values and is
// only exported to implement the KeySet interface.
func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
	jws, err := jose.ParseSigned(jwt)
	if err != nil {
		return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
	}
	return r.verify(ctx, jws)
}

func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
	// We don't support JWTs signed with multiple signatures.
	keyID := ""
	for _, sig := range jws.Signatures {
		keyID = sig.Header.KeyID
		break
	}

	keys := r.keysFromCache()
	for _, key := range keys {
		if keyID == "" || key.KeyID == keyID {
			if payload, err := jws.Verify(&key); err == nil {
				return payload, nil
			}
		}
	}

	// If the kid doesn't match, check for new keys from the remote. This is the
	// strategy recommended by the spec.
	//
	// https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
	keys, err := r.keysFromRemote(ctx)
	if err != nil {
		return nil, fmt.Errorf("fetching keys %v", err)
	}

	for _, key := range keys {
		if keyID == "" || key.KeyID == keyID {
			if payload, err := jws.Verify(&key); err == nil {
				return payload, nil
			}
		}
	}
	return nil, errors.New("failed to verify id token signature")
}

func (r *RemoteKeySet) keysFromCache() (keys []jose.JSONWebKey) {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.cachedKeys
}

// keysFromRemote syncs the key set from the remote set, records the values in the
// cache, and returns the key set.
func (r *RemoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
	// Need to lock to inspect the inflight request field.
	r.mu.Lock()
	// If there's not a current inflight request, create one.
	if r.inflight == nil {
		r.inflight = newInflight()

		// This goroutine has exclusive ownership over the current inflight
		// request. It releases the resource by nil'ing the inflight field
		// once the goroutine is done.
		go func() {
			// Sync keys and finish inflight when that's done.
			keys, err := r.updateKeys()

			r.inflight.done(keys, err)

			// Lock to update the keys and indicate that there is no longer an
			// inflight request.
			r.mu.Lock()
			defer r.mu.Unlock()

			if err == nil {
				r.cachedKeys = keys
			}

			// Free inflight so a different request can run.
			r.inflight = nil
		}()
	}
	inflight := r.inflight
	r.mu.Unlock()

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-inflight.wait():
		return inflight.result()
	}
}

func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) {
	req, err := http.NewRequest("GET", r.jwksURL, nil)
	if err != nil {
		return nil, fmt.Errorf("oidc: can't create request: %v", err)
	}

	resp, err := doRequest(r.ctx, req)
	if err != nil {
		return nil, fmt.Errorf("oidc: get keys failed %v", err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("unable to read response body: %v", err)
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
	}

	var keySet jose.JSONWebKeySet
	err = unmarshalResp(resp, body, &keySet)
	if err != nil {
		return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
	}
	return keySet.Keys, nil
}
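
A usage sketch tying RemoteKeySet to the verifier API defined in verify.go below. The Google URLs are taken from the doc comments in this package; the client ID and raw ID token are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	oidc "github.com/coreos/go-oidc/v3/oidc"
)

func main() {
	ctx := context.Background()

	// Fetch and cache signing keys from a known jwks_uri, skipping discovery.
	keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")

	// The key set plugs into an IDTokenVerifier (see verify.go).
	verifier := oidc.NewVerifier("https://accounts.google.com", keySet, &oidc.Config{
		ClientID: "my-client-id", // placeholder
	})

	rawIDToken := "eyJ..." // placeholder: an ID token obtained from the provider
	idToken, err := verifier.Verify(ctx, rawIDToken)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(idToken.Subject)
}
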
459 vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go generated vendored Normal file
@@ -0,0 +1,459 @@
// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package.
package oidc

import (
	"context"
	"crypto/sha256"
	"crypto/sha512"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"hash"
	"io/ioutil"
	"mime"
	"net/http"
	"strings"
	"time"

	"golang.org/x/oauth2"
	jose "gopkg.in/square/go-jose.v2"
)

const (
	// ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests.
	ScopeOpenID = "openid"

	// ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting
	// OAuth2 refresh tokens.
	//
	// Support for this scope differs between OpenID Connect providers. For instance
	// Google rejects it, favoring appending "access_type=offline" as part of the
	// authorization request instead.
	//
	// See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
	ScopeOfflineAccess = "offline_access"
)

var (
	errNoAtHash      = errors.New("id token did not have an access token hash")
	errInvalidAtHash = errors.New("access token hash does not match value in ID token")
)

// ClientContext returns a new Context that carries the provided HTTP client.
//
// This method sets the same context key used by the golang.org/x/oauth2 package,
// so the returned context works for that package too.
//
//    myClient := &http.Client{}
//    ctx := oidc.ClientContext(parentContext, myClient)
//
//    // This will use the custom client
//    provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
//
func ClientContext(ctx context.Context, client *http.Client) context.Context {
	return context.WithValue(ctx, oauth2.HTTPClient, client)
}

// cloneContext copies a context's bag-of-values into a new context that isn't
// associated with its cancelation. This is used to initialize remote key sets
// which run in the background and aren't associated with the initial context.
func cloneContext(ctx context.Context) context.Context {
	cp := context.Background()
	if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
		cp = ClientContext(cp, c)
	}
	return cp
}

func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
	client := http.DefaultClient
	if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
		client = c
	}
	return client.Do(req.WithContext(ctx))
}

// Provider represents an OpenID Connect server's configuration.
type Provider struct {
	issuer      string
	authURL     string
	tokenURL    string
	userInfoURL string
	algorithms  []string

	// Raw claims returned by the server.
	rawClaims []byte

	remoteKeySet KeySet
}

type cachedKeys struct {
	keys   []jose.JSONWebKey
	expiry time.Time
}

type providerJSON struct {
	Issuer      string   `json:"issuer"`
	AuthURL     string   `json:"authorization_endpoint"`
	TokenURL    string   `json:"token_endpoint"`
	JWKSURL     string   `json:"jwks_uri"`
	UserInfoURL string   `json:"userinfo_endpoint"`
	Algorithms  []string `json:"id_token_signing_alg_values_supported"`
}

// supportedAlgorithms is a list of algorithms explicitly supported by this
// package. If a provider supports other algorithms, such as HS256 or none,
// those values won't be passed to the IDTokenVerifier.
var supportedAlgorithms = map[string]bool{
	RS256: true,
	RS384: true,
	RS512: true,
	ES256: true,
	ES384: true,
	ES512: true,
	PS256: true,
	PS384: true,
	PS512: true,
}

// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
//
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
// or "https://login.salesforce.com".
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
	wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
	req, err := http.NewRequest("GET", wellKnown, nil)
	if err != nil {
		return nil, err
	}
	resp, err := doRequest(ctx, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("unable to read response body: %v", err)
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%s: %s", resp.Status, body)
	}

	var p providerJSON
	err = unmarshalResp(resp, body, &p)
	if err != nil {
		return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
	}

	if p.Issuer != issuer {
		return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
	}
	var algs []string
	for _, a := range p.Algorithms {
		if supportedAlgorithms[a] {
			algs = append(algs, a)
		}
	}
	return &Provider{
		issuer:       p.Issuer,
		authURL:      p.AuthURL,
		tokenURL:     p.TokenURL,
		userInfoURL:  p.UserInfoURL,
		algorithms:   algs,
		rawClaims:    body,
		remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL),
	}, nil
}
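
A hedged sketch of the discovery flow NewProvider enables, wiring the discovered endpoints into an oauth2.Config; the issuer, client credentials, and redirect URL are placeholders:

package main

import (
	"context"
	"log"

	oidc "github.com/coreos/go-oidc/v3/oidc"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// Discovery: fetches /.well-known/openid-configuration from the issuer.
	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com") // placeholder issuer
	if err != nil {
		log.Fatal(err)
	}

	// Wire the discovered auth/token endpoints into an OAuth2 config.
	conf := oauth2.Config{
		ClientID:     "my-client-id",     // placeholder
		ClientSecret: "my-client-secret", // placeholder
		RedirectURL:  "https://example.com/callback",
		Endpoint:     provider.Endpoint(),
		Scopes:       []string{oidc.ScopeOpenID, "profile", "email"},
	}

	// The verifier inherits the provider's supported signing algorithms.
	verifier := provider.Verifier(&oidc.Config{ClientID: conf.ClientID})
	_ = verifier // used after exchanging the auth code for tokens
}
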

// Claims unmarshals raw fields returned by the server during discovery.
//
//    var claims struct {
//        ScopesSupported []string `json:"scopes_supported"`
//        ClaimsSupported []string `json:"claims_supported"`
//    }
//
//    if err := provider.Claims(&claims); err != nil {
//        // handle unmarshaling error
//    }
//
// For a list of fields defined by the OpenID Connect spec see:
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
func (p *Provider) Claims(v interface{}) error {
	if p.rawClaims == nil {
		return errors.New("oidc: claims not set")
	}
	return json.Unmarshal(p.rawClaims, v)
}

// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
func (p *Provider) Endpoint() oauth2.Endpoint {
	return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
}

// UserInfo represents the OpenID Connect userinfo claims.
type UserInfo struct {
	Subject       string `json:"sub"`
	Profile       string `json:"profile"`
	Email         string `json:"email"`
	EmailVerified bool   `json:"email_verified"`

	claims []byte
}

type userInfoRaw struct {
	Subject string `json:"sub"`
	Profile string `json:"profile"`
	Email   string `json:"email"`
	// Handle providers that return email_verified as a string
	// https://forums.aws.amazon.com/thread.jspa?messageID=949441 and
	// https://discuss.elastic.co/t/openid-error-after-authenticating-against-aws-cognito/206018/11
	EmailVerified stringAsBool `json:"email_verified"`
}

// Claims unmarshals the raw JSON object claims into the provided object.
func (u *UserInfo) Claims(v interface{}) error {
	if u.claims == nil {
		return errors.New("oidc: claims not set")
	}
	return json.Unmarshal(u.claims, v)
}

// UserInfo uses the token source to query the provider's user info endpoint.
func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
	if p.userInfoURL == "" {
		return nil, errors.New("oidc: user info endpoint is not supported by this provider")
	}

	req, err := http.NewRequest("GET", p.userInfoURL, nil)
	if err != nil {
		return nil, fmt.Errorf("oidc: create GET request: %v", err)
	}

	token, err := tokenSource.Token()
	if err != nil {
		return nil, fmt.Errorf("oidc: get access token: %v", err)
	}
	token.SetAuthHeader(req)

	resp, err := doRequest(ctx, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%s: %s", resp.Status, body)
	}

	ct := resp.Header.Get("Content-Type")
	mediaType, _, parseErr := mime.ParseMediaType(ct)
	if parseErr == nil && mediaType == "application/jwt" {
		payload, err := p.remoteKeySet.VerifySignature(ctx, string(body))
		if err != nil {
			return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err)
		}
		body = payload
	}

	var userInfo userInfoRaw
	if err := json.Unmarshal(body, &userInfo); err != nil {
		return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
	}
	return &UserInfo{
		Subject:       userInfo.Subject,
		Profile:       userInfo.Profile,
		Email:         userInfo.Email,
		EmailVerified: bool(userInfo.EmailVerified),
		claims:        body,
	}, nil
}
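
A short sketch of calling UserInfo with a static token source; the issuer and access token are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	oidc "github.com/coreos/go-oidc/v3/oidc"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com") // placeholder issuer
	if err != nil {
		log.Fatal(err)
	}

	// Wrap an already-obtained access token in a static token source.
	token := &oauth2.Token{AccessToken: "ya29..."} // placeholder
	userInfo, err := provider.UserInfo(ctx, oauth2.StaticTokenSource(token))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(userInfo.Email, userInfo.EmailVerified)

	// Provider-specific claims can be decoded from the raw payload.
	var claims struct {
		Name string `json:"name"`
	}
	if err := userInfo.Claims(&claims); err != nil {
		log.Fatal(err)
	}
	fmt.Println(claims.Name)
}
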

// IDToken is an OpenID Connect extension that provides a predictable representation
// of an authorization event.
//
// The ID Token only holds fields OpenID Connect requires. To access additional
// claims returned by the server, use the Claims method.
type IDToken struct {
	// The URL of the server which issued this token. OpenID Connect
	// requires this value always be identical to the URL used for
	// initial discovery.
	//
	// Note: Because of a known issue with Google Accounts' implementation
	// this value may differ when using Google.
	//
	// See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo
	Issuer string

	// The client ID, or set of client IDs, that this token is issued for. For
	// common uses, this is the client that initialized the auth flow.
	//
	// This package ensures the audience contains an expected value.
	Audience []string

	// A unique string which identifies the end user.
	Subject string

	// Expiry of the token. This package will not process tokens that have
	// expired unless that validation is explicitly turned off.
	Expiry time.Time
	// When the token was issued by the provider.
	IssuedAt time.Time

	// Initial nonce provided during the authentication redirect.
	//
	// This package does NOT provide verification on the value of this field
	// and it's the user's responsibility to ensure it contains a valid value.
	Nonce string

	// at_hash claim, if set in the ID token. Callers can verify an access token
	// that corresponds to the ID token using the VerifyAccessToken method.
	AccessTokenHash string

	// signature algorithm used for ID token, needed to compute a verification hash of an
	// access token
	sigAlgorithm string

	// Raw payload of the id_token.
	claims []byte

	// Map of distributed claim names to claim sources
	distributedClaims map[string]claimSource
}

// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
//
//    idToken, err := idTokenVerifier.Verify(rawIDToken)
//    if err != nil {
//        // handle error
//    }
//    var claims struct {
//        Email         string `json:"email"`
//        EmailVerified bool   `json:"email_verified"`
//    }
//    if err := idToken.Claims(&claims); err != nil {
//        // handle error
//    }
//
func (i *IDToken) Claims(v interface{}) error {
	if i.claims == nil {
		return errors.New("oidc: claims not set")
	}
	return json.Unmarshal(i.claims, v)
}

// VerifyAccessToken verifies that the hash of the access token that corresponds to the ID token
// matches the hash in the ID token. It returns an error if the hashes don't match.
// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
func (i *IDToken) VerifyAccessToken(accessToken string) error {
	if i.AccessTokenHash == "" {
		return errNoAtHash
	}
	var h hash.Hash
	switch i.sigAlgorithm {
	case RS256, ES256, PS256:
		h = sha256.New()
	case RS384, ES384, PS384:
		h = sha512.New384()
	case RS512, ES512, PS512:
		h = sha512.New()
	default:
		return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
	}
	h.Write([]byte(accessToken)) // hash documents that Write will never return an error
	sum := h.Sum(nil)[:h.Size()/2]
	actual := base64.RawURLEncoding.EncodeToString(sum)
	if actual != i.AccessTokenHash {
		return errInvalidAtHash
	}
	return nil
}
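
The at_hash check slots in right after the authorization-code exchange. A fragment-style sketch, assuming ctx, verifier, and an oauth2Token from conf.Exchange as in the sketches above:

// Exchange the authorization code, then bind the access token to the ID token.
rawIDToken, ok := oauth2Token.Extra("id_token").(string)
if !ok {
	log.Fatal("no id_token field in oauth2 token")
}
idToken, err := verifier.Verify(ctx, rawIDToken)
if err != nil {
	log.Fatal(err)
}
// Fails with errNoAtHash when the provider set no at_hash claim, and with
// errInvalidAtHash when the access token doesn't belong to this ID token.
if err := idToken.VerifyAccessToken(oauth2Token.AccessToken); err != nil {
	log.Fatal(err)
}
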

type idToken struct {
	Issuer       string                 `json:"iss"`
	Subject      string                 `json:"sub"`
	Audience     audience               `json:"aud"`
	Expiry       jsonTime               `json:"exp"`
	IssuedAt     jsonTime               `json:"iat"`
	NotBefore    *jsonTime              `json:"nbf"`
	Nonce        string                 `json:"nonce"`
	AtHash       string                 `json:"at_hash"`
	ClaimNames   map[string]string      `json:"_claim_names"`
	ClaimSources map[string]claimSource `json:"_claim_sources"`
}

type claimSource struct {
	Endpoint    string `json:"endpoint"`
	AccessToken string `json:"access_token"`
}

type stringAsBool bool

func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
	switch string(b) {
	case "true", `"true"`:
		*sb = stringAsBool(true)
	case "false", `"false"`:
		*sb = stringAsBool(false)
	default:
		return errors.New("invalid value for boolean")
	}
	return nil
}

type audience []string

func (a *audience) UnmarshalJSON(b []byte) error {
	var s string
	if json.Unmarshal(b, &s) == nil {
		*a = audience{s}
		return nil
	}
	var auds []string
	if err := json.Unmarshal(b, &auds); err != nil {
		return err
	}
	*a = audience(auds)
	return nil
}

type jsonTime time.Time

func (j *jsonTime) UnmarshalJSON(b []byte) error {
	var n json.Number
	if err := json.Unmarshal(b, &n); err != nil {
		return err
	}
	var unix int64

	if t, err := n.Int64(); err == nil {
		unix = t
	} else {
		f, err := n.Float64()
		if err != nil {
			return err
		}
		unix = int64(f)
	}
	*j = jsonTime(time.Unix(unix, 0))
	return nil
}

func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
	err := json.Unmarshal(body, &v)
	if err == nil {
		return nil
	}
	ct := r.Header.Get("Content-Type")
	mediaType, _, parseErr := mime.ParseMediaType(ct)
	if parseErr == nil && mediaType == "application/json" {
		return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
	}
	return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
}
336 vendor/github.com/coreos/go-oidc/v3/oidc/verify.go generated vendored Normal file
@@ -0,0 +1,336 @@
package oidc

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"time"

	"golang.org/x/oauth2"
	jose "gopkg.in/square/go-jose.v2"
)

const (
	issuerGoogleAccounts         = "https://accounts.google.com"
	issuerGoogleAccountsNoScheme = "accounts.google.com"
)

// KeySet is a set of public JSON Web Keys that can be used to validate the signature
// of JSON web tokens. This is expected to be backed by a remote key set through
// provider metadata discovery or an in-memory set of keys delivered out-of-band.
type KeySet interface {
	// VerifySignature parses the JSON web token, verifies the signature, and returns
	// the raw payload. Header and claim fields are validated by other parts of the
	// package. For example, the KeySet does not need to check values such as signature
	// algorithm, issuer, and audience since the IDTokenVerifier validates these values
	// independently.
	//
	// If VerifySignature makes HTTP requests to verify the token, it's expected to
	// use any HTTP client associated with the context through ClientContext.
	VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
}

// IDTokenVerifier provides verification for ID Tokens.
type IDTokenVerifier struct {
	keySet KeySet
	config *Config
	issuer string
}

// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
//
// It's easier to use provider discovery to construct an IDTokenVerifier than creating
// one directly. This method is intended to be used with providers that don't support
// metadata discovery, or avoiding round trips when the key set URL is already known.
//
// This constructor can be used to create a verifier directly using the issuer URL and
// JSON Web Key Set URL without using discovery:
//
//    keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
//    verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
//
// Since KeySet is an interface, this constructor can also be used to supply custom
// public key sources. For example, if a user wanted to supply public keys out-of-band
// and hold them statically in-memory:
//
//    // Custom KeySet implementation.
//    keySet := newStaticKeySet(publicKeys...)
//
//    // Verifier uses the custom KeySet implementation.
//    verifier := oidc.NewVerifier("https://auth.example.com", keySet, config)
//
func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
	return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
}

// Config is the configuration for an IDTokenVerifier.
type Config struct {
	// Expected audience of the token. For a majority of the cases this is expected to be
	// the ID of the client that initialized the login flow. It may occasionally differ if
	// the provider supports the authorizing party (azp) claim.
	//
	// If not provided, users must explicitly set SkipClientIDCheck.
	ClientID string
	// If specified, only this set of algorithms may be used to sign the JWT.
	//
	// If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this
	// defaults to the set of algorithms the provider supports. Otherwise this value
	// defaults to RS256.
	SupportedSigningAlgs []string

	// If true, no ClientID check performed. Must be true if ClientID field is empty.
	SkipClientIDCheck bool
	// If true, token expiry is not checked.
	SkipExpiryCheck bool

	// SkipIssuerCheck is intended for specialized cases where the caller wishes to
	// defer issuer validation. When enabled, callers MUST independently verify the Token's
	// Issuer is a known good value.
	//
	// Mismatched issuers often indicate client mis-configuration. If mismatches are
	// unexpected, evaluate if the provided issuer URL is incorrect instead of enabling
	// this option.
	SkipIssuerCheck bool

	// Time function to check Token expiry. Defaults to time.Now
	Now func() time.Time
}

// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
//
// The returned IDTokenVerifier is tied to the Provider's context and its behavior is
// undefined once the Provider's context is canceled.
func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
	if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 {
		// Make a copy so we don't modify the config values.
		cp := &Config{}
		*cp = *config
		cp.SupportedSigningAlgs = p.algorithms
		config = cp
	}
	return NewVerifier(p.issuer, p.remoteKeySet, config)
}

func parseJWT(p string) ([]byte, error) {
	parts := strings.Split(p, ".")
	if len(parts) < 2 {
		return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
	}
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
	}
	return payload, nil
}

func contains(sli []string, ele string) bool {
	for _, s := range sli {
		if s == ele {
			return true
		}
	}
	return false
}

// Returns the Claims from the distributed JWT token
func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) {
	req, err := http.NewRequest("GET", src.Endpoint, nil)
	if err != nil {
		return nil, fmt.Errorf("malformed request: %v", err)
	}
	if src.AccessToken != "" {
		req.Header.Set("Authorization", "Bearer "+src.AccessToken)
	}

	resp, err := doRequest(ctx, req)
	if err != nil {
		return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("unable to read response body: %v", err)
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode)
	}

	token, err := verifier.Verify(ctx, string(body))
	if err != nil {
		return nil, fmt.Errorf("malformed response body: %v", err)
	}

	return token.claims, nil
}

func parseClaim(raw []byte, name string, v interface{}) error {
	var parsed map[string]json.RawMessage
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return err
	}

	val, ok := parsed[name]
	if !ok {
		return fmt.Errorf("claim doesn't exist: %s", name)
	}

	return json.Unmarshal([]byte(val), v)
}

// Verify parses a raw ID Token, verifies it's been signed by the provider, performs
// any additional checks depending on the Config, and returns the payload.
//
// Verify does NOT do nonce validation, which is the caller's responsibility.
//
// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
//
//    oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
//    if err != nil {
//        // handle error
//    }
//
//    // Extract the ID Token from oauth2 token.
//    rawIDToken, ok := oauth2Token.Extra("id_token").(string)
//    if !ok {
//        // handle error
//    }
//
//    token, err := verifier.Verify(ctx, rawIDToken)
//
func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
	jws, err := jose.ParseSigned(rawIDToken)
	if err != nil {
		return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
	}

	// Throw out tokens with invalid claims before trying to verify the token. This lets
	// us do cheap checks before possibly re-syncing keys.
	payload, err := parseJWT(rawIDToken)
	if err != nil {
		return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
	}
	var token idToken
	if err := json.Unmarshal(payload, &token); err != nil {
		return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
	}

	distributedClaims := make(map[string]claimSource)

	// Step through the token to map claim names to claim sources.
	for cn, src := range token.ClaimNames {
		if src == "" {
			return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
		}
		s, ok := token.ClaimSources[src]
		if !ok {
			return nil, fmt.Errorf("oidc: source does not exist")
		}
		distributedClaims[cn] = s
	}

	t := &IDToken{
		Issuer:            token.Issuer,
		Subject:           token.Subject,
		Audience:          []string(token.Audience),
		Expiry:            time.Time(token.Expiry),
		IssuedAt:          time.Time(token.IssuedAt),
		Nonce:             token.Nonce,
		AccessTokenHash:   token.AtHash,
		claims:            payload,
		distributedClaims: distributedClaims,
	}

	// Check issuer.
	if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
		// Google sometimes returns "accounts.google.com" as the issuer claim instead of
		// the required "https://accounts.google.com". Detect this case and allow it only
		// for Google.
		//
		// We will not add hooks to let other providers go off spec like this.
		if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
			return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
		}
	}

	// If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
	//
	// This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
	if !v.config.SkipClientIDCheck {
		if v.config.ClientID != "" {
			if !contains(t.Audience, v.config.ClientID) {
				return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
			}
		} else {
			return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
		}
	}

	// If SkipExpiryCheck is false, make sure the token is not expired.
	if !v.config.SkipExpiryCheck {
		now := time.Now
		if v.config.Now != nil {
			now = v.config.Now
		}
		nowTime := now()

		if t.Expiry.Before(nowTime) {
			return nil, fmt.Errorf("oidc: token is expired (Token Expiry: %v)", t.Expiry)
		}

		// If nbf claim is provided in token, ensure that it is indeed in the past.
		if token.NotBefore != nil {
			nbfTime := time.Time(*token.NotBefore)
			leeway := 1 * time.Minute

			if nowTime.Add(leeway).Before(nbfTime) {
				return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime)
			}
		}
	}

	switch len(jws.Signatures) {
	case 0:
		return nil, fmt.Errorf("oidc: id token not signed")
	case 1:
	default:
		return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
	}

	sig := jws.Signatures[0]
	supportedSigAlgs := v.config.SupportedSigningAlgs
	if len(supportedSigAlgs) == 0 {
		supportedSigAlgs = []string{RS256}
	}

	if !contains(supportedSigAlgs, sig.Header.Algorithm) {
		return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm)
	}

	t.sigAlgorithm = sig.Header.Algorithm

	gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
	if err != nil {
		return nil, fmt.Errorf("failed to verify signature: %v", err)
	}

	// Ensure that the payload returned by the square (go-jose) library actually matches the payload parsed earlier.
	if !bytes.Equal(gotPayload, payload) {
		return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
	}

	return t, nil
}

// Nonce returns an auth code option which requires the ID Token created by the
// OpenID Connect provider to contain the specified nonce.
func Nonce(nonce string) oauth2.AuthCodeOption {
	return oauth2.SetAuthURLParam("nonce", nonce)
}
|
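For orientation, here is a minimal sketch of how application code typically drives this verifier end to end. The issuer URL, client ID, raw token, and expected nonce below are hypothetical placeholders, not values from this change:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/go-oidc"
)

func main() {
	ctx := context.Background()

	// Hypothetical issuer and client ID, for illustration only.
	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
	if err != nil {
		log.Fatal(err)
	}
	verifier := provider.Verifier(&oidc.Config{ClientID: "my-client-id"})

	// rawIDToken would normally come from oauth2Token.Extra("id_token").
	rawIDToken := "eyJ..."

	idToken, err := verifier.Verify(ctx, rawIDToken)
	if err != nil {
		log.Fatal(err)
	}

	// Verify does not check the nonce; that remains the caller's job.
	if idToken.Nonce != "expected-nonce" {
		log.Fatal("oidc: nonce did not match")
	}
	fmt.Println("verified subject:", idToken.Subject)
}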
21 vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Brian Goff

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
14 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go generated vendored Normal file
@@ -0,0 +1,14 @@
package md2man

import (
	"github.com/russross/blackfriday/v2"
)

// Render converts a markdown document into a roff formatted document.
func Render(doc []byte) []byte {
	renderer := NewRoffRenderer()

	return blackfriday.Run(doc,
		[]blackfriday.Option{blackfriday.WithRenderer(renderer),
			blackfriday.WithExtensions(renderer.GetExtensions())}...)
}
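As a quick illustration of this entry point, a small sketch that feeds a tiny markdown man page through Render and prints the resulting roff source (the document content is made up):

package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// Render returns the roff-formatted equivalent of the markdown input.
	doc := []byte("# EXAMPLE 1 \"June 2021\"\n\n## NAME\nexample - do nothing\n")
	fmt.Print(string(md2man.Render(doc)))
}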
336 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go generated vendored Normal file
@@ -0,0 +1,336 @@
package md2man

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/russross/blackfriday/v2"
)

// roffRenderer implements the blackfriday.Renderer interface for creating
// roff format (manpages) from markdown text
type roffRenderer struct {
	extensions   blackfriday.Extensions
	listCounters []int
	firstHeader  bool
	firstDD      bool
	listDepth    int
}

const (
	titleHeader      = ".TH "
	topLevelHeader   = "\n\n.SH "
	secondLevelHdr   = "\n.SH "
	otherHeader      = "\n.SS "
	crTag            = "\n"
	emphTag          = "\\fI"
	emphCloseTag     = "\\fP"
	strongTag        = "\\fB"
	strongCloseTag   = "\\fP"
	breakTag         = "\n.br\n"
	paraTag          = "\n.PP\n"
	hruleTag         = "\n.ti 0\n\\l'\\n(.lu'\n"
	linkTag          = "\n\\[la]"
	linkCloseTag     = "\\[ra]"
	codespanTag      = "\\fB\\fC"
	codespanCloseTag = "\\fR"
	codeTag          = "\n.PP\n.RS\n\n.nf\n"
	codeCloseTag     = "\n.fi\n.RE\n"
	quoteTag         = "\n.PP\n.RS\n"
	quoteCloseTag    = "\n.RE\n"
	listTag          = "\n.RS\n"
	listCloseTag     = "\n.RE\n"
	dtTag            = "\n.TP\n"
	dd2Tag           = "\n"
	tableStart       = "\n.TS\nallbox;\n"
	tableEnd         = ".TE\n"
	tableCellStart   = "T{\n"
	tableCellEnd     = "\nT}\n"
)

// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
// from markdown
func NewRoffRenderer() *roffRenderer { // nolint: golint
	var extensions blackfriday.Extensions

	extensions |= blackfriday.NoIntraEmphasis
	extensions |= blackfriday.Tables
	extensions |= blackfriday.FencedCode
	extensions |= blackfriday.SpaceHeadings
	extensions |= blackfriday.Footnotes
	extensions |= blackfriday.Titleblock
	extensions |= blackfriday.DefinitionLists
	return &roffRenderer{
		extensions: extensions,
	}
}

// GetExtensions returns the list of extensions used by this renderer implementation
func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
	return r.extensions
}

// RenderHeader handles outputting the header at document start
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
	// disable hyphenation
	out(w, ".nh\n")
}

// RenderFooter handles outputting the footer at the document end; the roff
// renderer has no footer information
func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
}

// RenderNode is called for each node in a markdown document; based on the node
// type the equivalent roff output is sent to the writer
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {

	var walkAction = blackfriday.GoToNext

	switch node.Type {
	case blackfriday.Text:
		escapeSpecialChars(w, node.Literal)
	case blackfriday.Softbreak:
		out(w, crTag)
	case blackfriday.Hardbreak:
		out(w, breakTag)
	case blackfriday.Emph:
		if entering {
			out(w, emphTag)
		} else {
			out(w, emphCloseTag)
		}
	case blackfriday.Strong:
		if entering {
			out(w, strongTag)
		} else {
			out(w, strongCloseTag)
		}
	case blackfriday.Link:
		if !entering {
			out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
		}
	case blackfriday.Image:
		// ignore images
		walkAction = blackfriday.SkipChildren
	case blackfriday.Code:
		out(w, codespanTag)
		escapeSpecialChars(w, node.Literal)
		out(w, codespanCloseTag)
	case blackfriday.Document:
		break
	case blackfriday.Paragraph:
		// roff .PP markers break lists
		if r.listDepth > 0 {
			return blackfriday.GoToNext
		}
		if entering {
			out(w, paraTag)
		} else {
			out(w, crTag)
		}
	case blackfriday.BlockQuote:
		if entering {
			out(w, quoteTag)
		} else {
			out(w, quoteCloseTag)
		}
	case blackfriday.Heading:
		r.handleHeading(w, node, entering)
	case blackfriday.HorizontalRule:
		out(w, hruleTag)
	case blackfriday.List:
		r.handleList(w, node, entering)
	case blackfriday.Item:
		r.handleItem(w, node, entering)
	case blackfriday.CodeBlock:
		out(w, codeTag)
		escapeSpecialChars(w, node.Literal)
		out(w, codeCloseTag)
	case blackfriday.Table:
		r.handleTable(w, node, entering)
	case blackfriday.TableHead:
	case blackfriday.TableBody:
	case blackfriday.TableRow:
		// no action as cell entries do all the nroff formatting
		return blackfriday.GoToNext
	case blackfriday.TableCell:
		r.handleTableCell(w, node, entering)
	case blackfriday.HTMLSpan:
		// ignore other HTML tags
	default:
		fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
	}
	return walkAction
}

func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
	if entering {
		switch node.Level {
		case 1:
			if !r.firstHeader {
				out(w, titleHeader)
				r.firstHeader = true
				break
			}
			out(w, topLevelHeader)
		case 2:
			out(w, secondLevelHdr)
		default:
			out(w, otherHeader)
		}
	}
}

func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
	openTag := listTag
	closeTag := listCloseTag
	if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
		// tags for definition lists handled within Item node
		openTag = ""
		closeTag = ""
	}
	if entering {
		r.listDepth++
		if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
			r.listCounters = append(r.listCounters, 1)
		}
		out(w, openTag)
	} else {
		if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
			r.listCounters = r.listCounters[:len(r.listCounters)-1]
		}
		out(w, closeTag)
		r.listDepth--
	}
}

func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
	if entering {
		if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
			out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
			r.listCounters[len(r.listCounters)-1]++
		} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
			// DT (definition term): line just before DD (see below).
			out(w, dtTag)
			r.firstDD = true
		} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
			// DD (definition description): line that starts with ": ".
			//
			// We have to distinguish between the first DD and the
			// subsequent ones, as there should be no vertical
			// whitespace between the DT and the first DD.
			if r.firstDD {
				r.firstDD = false
			} else {
				out(w, dd2Tag)
			}
		} else {
			out(w, ".IP \\(bu 2\n")
		}
	} else {
		out(w, "\n")
	}
}

func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
	if entering {
		out(w, tableStart)
		// call walker to count cells (and rows?) so format section can be produced
		columns := countColumns(node)
		out(w, strings.Repeat("l ", columns)+"\n")
		out(w, strings.Repeat("l ", columns)+".\n")
	} else {
		out(w, tableEnd)
	}
}

func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
	if entering {
		var start string
		if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
			start = "\t"
		}
		if node.IsHeader {
			start += codespanTag
		} else if nodeLiteralSize(node) > 30 {
			start += tableCellStart
		}
		out(w, start)
	} else {
		var end string
		if node.IsHeader {
			end = codespanCloseTag
		} else if nodeLiteralSize(node) > 30 {
			end = tableCellEnd
		}
		if node.Next == nil && end != tableCellEnd {
			// Last cell: need to carriage return if we are at the end of the
			// header row and content isn't wrapped in a "tablecell"
			end += crTag
		}
		out(w, end)
	}
}

func nodeLiteralSize(node *blackfriday.Node) int {
	total := 0
	for n := node.FirstChild; n != nil; n = n.FirstChild {
		total += len(n.Literal)
	}
	return total
}

// because roff format requires knowing the column count before outputting any table
// data we need to walk a table tree and count the columns
func countColumns(node *blackfriday.Node) int {
	var columns int

	node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		switch node.Type {
		case blackfriday.TableRow:
			if !entering {
				return blackfriday.Terminate
			}
		case blackfriday.TableCell:
			if entering {
				columns++
			}
		default:
		}
		return blackfriday.GoToNext
	})
	return columns
}

func out(w io.Writer, output string) {
	io.WriteString(w, output) // nolint: errcheck
}

func escapeSpecialChars(w io.Writer, text []byte) {
	for i := 0; i < len(text); i++ {
		// escape initial apostrophe or period
		if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
			out(w, "\\&")
		}

		// directly copy normal characters
		org := i

		for i < len(text) && text[i] != '\\' {
			i++
		}
		if i > org {
			w.Write(text[org:i]) // nolint: errcheck
		}

		// escape a character
		if i >= len(text) {
			break
		}

		w.Write([]byte{'\\', text[i]}) // nolint: errcheck
	}
}
15 vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file
@@ -0,0 +1,15 @@
ISC License

Copyright (c) 2012-2016 Dave Collins <dave@davec.name>

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
145 vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file
@@ -0,0 +1,145 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which makes the implementation of unsafeReflectValue more complex.
// +build !js,!appengine,!safe,!disableunsafe,go1.4

package spew

import (
	"reflect"
	"unsafe"
)

const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.
	UnsafeDisabled = false

	// ptrSize is the size of a pointer on the current arch.
	ptrSize = unsafe.Sizeof((*byte)(nil))
)

type flag uintptr

var (
	// flagRO indicates whether the value field of a reflect.Value
	// is read-only.
	flagRO flag

	// flagAddr indicates whether the address of the reflect.Value's
	// value may be taken.
	flagAddr flag
)

// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)

// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
	ro, addr flag
}{{
	// From Go 1.4 to 1.5
	ro:   1 << 5,
	addr: 1 << 7,
}, {
	// Up to Go tip.
	ro:   1<<5 | 1<<6,
	addr: 1 << 8,
}}

var flagValOffset = func() uintptr {
	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	return field.Offset
}()

// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}

// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
		return v
	}
	flagFieldPtr := flagField(&v)
	*flagFieldPtr &^= flagRO
	*flagFieldPtr |= flagAddr
	return v
}

// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
func init() {
	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
		panic("reflect.Value flag field has changed kind")
	}
	type t0 int
	var t struct {
		A t0
		// t0 will have flagEmbedRO set.
		t0
		// a will have flagStickyRO set
		a t0
	}
	vA := reflect.ValueOf(t).FieldByName("A")
	va := reflect.ValueOf(t).FieldByName("a")
	vt0 := reflect.ValueOf(t).FieldByName("t0")

	// Infer flagRO from the difference between the flags
	// for the (otherwise identical) fields in t.
	flagPublic := *flagField(&vA)
	flagWithRO := *flagField(&va) | *flagField(&vt0)
	flagRO = flagPublic ^ flagWithRO

	// Infer flagAddr from the difference between a value
	// taken from a pointer and not.
	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
	flagNoPtr := *flagField(&vA)
	flagPtr := *flagField(&vPtrA)
	flagAddr = flagNoPtr ^ flagPtr

	// Check that the inferred flags tally with one of the known versions.
	for _, f := range okFlags {
		if flagRO == f.ro && flagAddr == f.addr {
			return
		}
	}
	panic("reflect.Value read-only flag has changed semantics")
}
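To make concrete the restriction this file works around, here is a small sketch using only the standard reflect package; pair, Exported, and unexported are made-up names for illustration:

package main

import (
	"fmt"
	"reflect"
)

type pair struct {
	Exported   int
	unexported int
}

func main() {
	v := reflect.ValueOf(pair{Exported: 1, unexported: 2})

	// The exported field can be read back as an interface{}...
	fmt.Println(v.Field(0).CanInterface()) // true

	// ...but the unexported one cannot; calling Interface() on it would
	// panic. unsafeReflectValue above clears the read-only flag so spew
	// can still pretty-print such fields.
	fmt.Println(v.Field(1).CanInterface()) // false
}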
38 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file
@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe !go1.4

package spew

import "reflect"

const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.
	UnsafeDisabled = true
)

// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	return v
}
341 vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file
@@ -0,0 +1,341 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"bytes"
	"fmt"
	"io"
	"reflect"
	"sort"
	"strconv"
)

// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
	panicBytes            = []byte("(PANIC=")
	plusBytes             = []byte("+")
	iBytes                = []byte("i")
	trueBytes             = []byte("true")
	falseBytes            = []byte("false")
	interfaceBytes        = []byte("(interface {})")
	commaNewlineBytes     = []byte(",\n")
	newlineBytes          = []byte("\n")
	openBraceBytes        = []byte("{")
	openBraceNewlineBytes = []byte("{\n")
	closeBraceBytes       = []byte("}")
	asteriskBytes         = []byte("*")
	colonBytes            = []byte(":")
	colonSpaceBytes       = []byte(": ")
	openParenBytes        = []byte("(")
	closeParenBytes       = []byte(")")
	spaceBytes            = []byte(" ")
	pointerChainBytes     = []byte("->")
	nilAngleBytes         = []byte("<nil>")
	maxNewlineBytes       = []byte("<max depth reached>\n")
	maxShortBytes         = []byte("<max>")
	circularBytes         = []byte("<already shown>")
	circularShortBytes    = []byte("<shown>")
	invalidAngleBytes     = []byte("<invalid>")
	openBracketBytes      = []byte("[")
	closeBracketBytes     = []byte("]")
	percentBytes          = []byte("%")
	precisionBytes        = []byte(".")
	openAngleBytes        = []byte("<")
	closeAngleBytes       = []byte(">")
	openMapBytes          = []byte("map[")
	closeMapBytes         = []byte("]")
	lenEqualsBytes        = []byte("len=")
	capEqualsBytes        = []byte("cap=")
)

// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"

// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
	if err := recover(); err != nil {
		w.Write(panicBytes)
		fmt.Fprintf(w, "%v", err)
		w.Write(closeParenBytes)
	}
}

// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
	// We need an interface to check if the type implements the error or
	// Stringer interface. However, the reflect package won't give us an
	// interface on certain things like unexported struct fields in order
	// to enforce visibility rules. We use unsafe, when it's available,
	// to bypass these restrictions since this package does not mutate the
	// values.
	if !v.CanInterface() {
		if UnsafeDisabled {
			return false
		}

		v = unsafeReflectValue(v)
	}

	// Choose whether or not to do error and Stringer interface lookups against
	// the base type or a pointer to the base type depending on settings.
	// Technically calling one of these methods with a pointer receiver can
	// mutate the value, however, types which choose to satisfy an error or
	// Stringer interface with a pointer receiver should not be mutating their
	// state inside these interface methods.
	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
		v = unsafeReflectValue(v)
	}
	if v.CanAddr() {
		v = v.Addr()
	}

	// Is it an error or Stringer?
	switch iface := v.Interface().(type) {
	case error:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			w.Write(openParenBytes)
			w.Write([]byte(iface.Error()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}

		w.Write([]byte(iface.Error()))
		return true

	case fmt.Stringer:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			w.Write(openParenBytes)
			w.Write([]byte(iface.String()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}
		w.Write([]byte(iface.String()))
		return true
	}
	return false
}

// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
	if val {
		w.Write(trueBytes)
	} else {
		w.Write(falseBytes)
	}
}

// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
	w.Write([]byte(strconv.FormatInt(val, base)))
}

// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
	w.Write([]byte(strconv.FormatUint(val, base)))
}

// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}

// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
	r := real(c)
	w.Write(openParenBytes)
	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
	i := imag(c)
	if i >= 0 {
		w.Write(plusBytes)
	}
	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
	w.Write(iBytes)
	w.Write(closeParenBytes)
}

// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
	// Null pointer.
	num := uint64(p)
	if num == 0 {
		w.Write(nilAngleBytes)
		return
	}

	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
	buf := make([]byte, 18)

	// It's simpler to construct the hex string right to left.
	base := uint64(16)
	i := len(buf) - 1
	for num >= base {
		buf[i] = hexDigits[num%base]
		num /= base
		i--
	}
	buf[i] = hexDigits[num]

	// Add '0x' prefix.
	i--
	buf[i] = 'x'
	i--
	buf[i] = '0'

	// Strip unused leading bytes.
	buf = buf[i:]
	w.Write(buf)
}

// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
	values  []reflect.Value
	strings []string // either nil or same len as values
	cs      *ConfigState
}

// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
	vs := &valuesSorter{values: values, cs: cs}
	if canSortSimply(vs.values[0].Kind()) {
		return vs
	}
	if !cs.DisableMethods {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			b := bytes.Buffer{}
			if !handleMethods(cs, &b, vs.values[i]) {
				vs.strings = nil
				break
			}
			vs.strings[i] = b.String()
		}
	}
	if vs.strings == nil && cs.SpewKeys {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
		}
	}
	return vs
}

// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
	// This switch parallels valueSortLess, except for the default case.
	switch kind {
	case reflect.Bool:
		return true
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return true
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		return true
	case reflect.Float32, reflect.Float64:
		return true
	case reflect.String:
		return true
	case reflect.Uintptr:
		return true
	case reflect.Array:
		return true
	}
	return false
}

// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
	return len(s.values)
}

// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
	s.values[i], s.values[j] = s.values[j], s.values[i]
	if s.strings != nil {
		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
	}
}

// valueSortLess returns whether the first value should sort before the second
// value. It is used by valuesSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
	switch a.Kind() {
	case reflect.Bool:
		return !a.Bool() && b.Bool()
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return a.Int() < b.Int()
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		return a.Uint() < b.Uint()
	case reflect.Float32, reflect.Float64:
		return a.Float() < b.Float()
	case reflect.String:
		return a.String() < b.String()
	case reflect.Uintptr:
		return a.Uint() < b.Uint()
	case reflect.Array:
		// Compare the contents of both arrays.
		l := a.Len()
		for i := 0; i < l; i++ {
			av := a.Index(i)
			bv := b.Index(i)
			if av.Interface() == bv.Interface() {
				continue
			}
			return valueSortLess(av, bv)
		}
	}
	return a.String() < b.String()
}

// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
	if s.strings == nil {
		return valueSortLess(s.values[i], s.values[j])
	}
	return s.strings[i] < s.strings[j]
}

// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
	if len(values) == 0 {
		return
	}
	sort.Sort(newValuesSorter(values, cs))
}
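To see handleMethods from the public API, a hedged sketch: celsius is a made-up Stringer type, and the comments describe the general behavior rather than exact output:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// celsius implements fmt.Stringer, so handleMethods above will pick it up.
type celsius float64

func (c celsius) String() string { return fmt.Sprintf("%.1f°C", float64(c)) }

func main() {
	// By default, the Stringer result stands in for the raw value.
	spew.Dump(celsius(21.5))

	// With ContinueOnMethod set, spew prints the Stringer result in
	// parentheses and then keeps recursing into the underlying value.
	cfg := spew.NewDefaultConfig()
	cfg.ContinueOnMethod = true
	cfg.Dump(celsius(21.5))
}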
306 vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file
@@ -0,0 +1,306 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
	// Indent specifies the string to use for each indentation level. The
	// global config instance that all top-level functions use set this to a
	// single space by default. If you would like more indentation, you might
	// set this to a tab with "\t" or perhaps two spaces with "  ".
	Indent string

	// MaxDepth controls the maximum number of levels to descend into nested
	// data structures. The default, 0, means there is no limit.
	//
	// NOTE: Circular data structures are properly detected, so it is not
	// necessary to set this value unless you specifically want to limit deeply
	// nested data structures.
	MaxDepth int

	// DisableMethods specifies whether or not error and Stringer interfaces are
	// invoked for types that implement them.
	DisableMethods bool

	// DisablePointerMethods specifies whether or not to check for and invoke
	// error and Stringer interfaces on types which only accept a pointer
	// receiver when the current type is not a pointer.
	//
	// NOTE: This might be an unsafe action since calling one of these methods
	// with a pointer receiver could technically mutate the value, however,
	// in practice, types which choose to satisfy an error or Stringer
	// interface with a pointer receiver should not be mutating their state
	// inside these interface methods. As a result, this option relies on
	// access to the unsafe package, so it will not have any effect when
	// running in environments without access to the unsafe package such as
	// Google App Engine or with the "safe" build tag specified.
	DisablePointerMethods bool

	// DisablePointerAddresses specifies whether to disable the printing of
	// pointer addresses. This is useful when diffing data structures in tests.
	DisablePointerAddresses bool

	// DisableCapacities specifies whether to disable the printing of capacities
	// for arrays, slices, maps and channels. This is useful when diffing
	// data structures in tests.
	DisableCapacities bool

	// ContinueOnMethod specifies whether or not recursion should continue once
	// a custom error or Stringer interface is invoked. The default, false,
	// means it will print the results of invoking the custom error or Stringer
	// interface and return immediately instead of continuing to recurse into
	// the internals of the data type.
	//
	// NOTE: This flag does not have any effect if method invocation is disabled
	// via the DisableMethods or DisablePointerMethods options.
	ContinueOnMethod bool

	// SortKeys specifies map keys should be sorted before being printed. Use
	// this to have a more deterministic, diffable output. Note that only
	// native types (bool, int, uint, floats, uintptr and string) and types
	// that support the error or Stringer interfaces (if methods are
	// enabled) are supported, with other types sorted according to the
	// reflect.Value.String() output which guarantees display stability.
	SortKeys bool

	// SpewKeys specifies that, as a last resort attempt, map keys should
	// be spewed to strings and sorted by those strings. This is only
	// considered if SortKeys is true.
	SpewKeys bool
}

// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}

// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, c.convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, c.convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, c.convertArgs(a)...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprintln(w, c.convertArgs(a)...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
	return fmt.Print(c.convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
	return fmt.Printf(format, c.convertArgs(a)...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
	return fmt.Println(c.convertArgs(a)...)
}

// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
	return fmt.Sprint(c.convertArgs(a)...)
}

// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, c.convertArgs(a)...)
}

// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
	return fmt.Sprintln(c.convertArgs(a)...)
}

/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Sprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(c, v)
}

// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
	fdump(c, w, a...)
}

/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.

See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
	fdump(c, os.Stdout, a...)
}

// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
	var buf bytes.Buffer
	fdump(c, &buf, a...)
	return buf.String()
}

// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with c.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
	formatters = make([]interface{}, len(args))
	for index, arg := range args {
		formatters[index] = newFormatter(c, arg)
	}
	return formatters
}

// NewDefaultConfig returns a ConfigState with the following default settings.
//
//	Indent: " "
//	MaxDepth: 0
//	DisableMethods: false
//	DisablePointerMethods: false
//	ContinueOnMethod: false
//	SortKeys: false
func NewDefaultConfig() *ConfigState {
	return &ConfigState{Indent: " "}
}
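A short usage sketch tying these options together; the map contents are arbitrary. The field docs above recommend SortKeys and DisablePointerAddresses for the diff-friendly, deterministic output this shows:

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// A per-instance ConfigState leaves the global spew.Config alone.
	// SortKeys routes map keys through the sorting helpers in common.go,
	// so repeated dumps of the same map print in a stable order.
	cfg := spew.ConfigState{Indent: " ", SortKeys: true, DisablePointerAddresses: true}
	cfg.Dump(m)
}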
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.

A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types is as follows:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output (only when using
	  Dump style)

There are two different approaches spew allows for dumping Go data structures:

	* Dump style which prints with newlines, customizable indentation,
	  and additional debug information such as types and all pointer addresses
	  used to indirect to the final value
	* A custom Formatter interface that integrates cleanly with the standard fmt
	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
	  similar to the default %v while providing the additional functionality
	  outlined above and passing unsupported format verbs such as %x and %q
	  along to fmt

Quick Start

This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.

To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:

	spew.Dump(myVar1, myVar2, ...)
	spew.Fdump(someWriter, myVar1, myVar2, ...)
	str := spew.Sdump(myVar1, myVar2, ...)

Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):

	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)

Configuration Options

Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.

It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.

The following configuration options are available (a short example of setting
them follows this list):

	* Indent
		String to use for each indentation level for Dump functions.
		It is a single space by default. A popular alternative is "\t".

	* MaxDepth
		Maximum number of levels to descend into nested data structures.
		There is no limit by default.

	* DisableMethods
		Disables invocation of error and Stringer interface methods.
		Method invocation is enabled by default.

	* DisablePointerMethods
		Disables invocation of error and Stringer interface methods on types
		which only accept pointer receivers from non-pointer variables.
		Pointer method invocation is enabled by default.

	* DisablePointerAddresses
		DisablePointerAddresses specifies whether to disable the printing of
		pointer addresses. This is useful when diffing data structures in tests.

	* DisableCapacities
		DisableCapacities specifies whether to disable the printing of
		capacities for arrays, slices, maps and channels. This is useful when
		diffing data structures in tests.

	* ContinueOnMethod
		Enables recursion into types after invoking error and Stringer interface
		methods. Recursion after method invocation is disabled by default.

	* SortKeys
		Specifies map keys should be sorted before being printed. Use
		this to have a more deterministic, diffable output. Note that
		only native types (bool, int, uint, floats, uintptr and string)
		and types which implement error or Stringer interfaces are
		supported with other types sorted according to the
		reflect.Value.String() output which guarantees display
		stability. Natural map order is used by default.

	* SpewKeys
		Specifies that, as a last resort attempt, map keys should be
		spewed to strings and sorted by those strings. This is only
		considered if SortKeys is true.
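As a minimal illustration (the values chosen here are arbitrary), the global
options can be set directly on spew.Config before calling the top-level
functions:

	spew.Config.Indent = "\t"   // tab-indent each nesting level
	spew.Config.MaxDepth = 3    // stop descending after three levels
	spew.Config.SortKeys = true // deterministic, diffable map output
	spew.Dump(myVar1)
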
Dump Usage

Simply call spew.Dump with a list of variables you want to dump:

	spew.Dump(myVar1, myVar2, ...)

You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:

	spew.Fdump(os.Stderr, myVar1, myVar2, ...)

A third option is to call spew.Sdump to get the formatted output as a string:

	str := spew.Sdump(myVar1, myVar2, ...)

Sample Dump Output

See the Dump example for details on the setup of the types and variables being
shown here.

	(main.Foo) {
	 unexportedField: (*main.Bar)(0xf84002e210)({
	  flag: (main.Flag) flagTwo,
	  data: (uintptr) <nil>
	 }),
	 ExportedField: (map[interface {}]interface {}) (len=1) {
	  (string) (len=3) "one": (bool) true
	 }
	}

Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.

	([]uint8) (len=32 cap=32) {
	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
	 00000020  31 32                                             |12|
	}

Custom Formatter

Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Custom Formatter Usage

The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:

	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
	spew.Println(myVar, myVar2)
	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)

See the Index for the full list of convenience functions.

Sample Formatter Output

Double pointer to a uint8:

	  %v: <**>5
	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
	 %#v: (**uint8)5
	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5

Pointer to circular struct with a uint8 field and a pointer to itself:

	  %v: <*>{1 <*><shown>}
	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}

See the Printf example for details on the setup of variables being shown
here.

Errors

Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
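By way of illustration only (the type below is invented for this example, and
the exact panic annotation may vary between spew versions), a panicking
Stringer is reported inline rather than crashing the caller:

	type bomb struct{}

	func (b bomb) String() string { panic("boom") }

	// spew.Dump(bomb{}) recovers the panic and prints the panic
	// information inline in place of the Stringer output.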
*/
package spew
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
Normal file

@@ -0,0 +1,509 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"reflect"
	"regexp"
	"strconv"
	"strings"
)

var (
	// uint8Type is a reflect.Type representing a uint8. It is used to
	// convert cgo types to uint8 slices for hexdumping.
	uint8Type = reflect.TypeOf(uint8(0))

	// cCharRE is a regular expression that matches a cgo char.
	// It is used to detect character arrays to hexdump them.
	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)

	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
	// char. It is used to detect unsigned character arrays to hexdump
	// them.
	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)

	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
	// It is used to detect uint8_t arrays to hexdump them.
	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)

// dumpState contains information about the state of a dump operation.
type dumpState struct {
	w                io.Writer
	depth            int
	pointers         map[uintptr]int
	ignoreNextType   bool
	ignoreNextIndent bool
	cs               *ConfigState
}

// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
	if d.ignoreNextIndent {
		d.ignoreNextIndent = false
		return
	}
	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}

// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
	if v.Kind() == reflect.Interface && !v.IsNil() {
		v = v.Elem()
	}
	return v
}

// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range d.pointers {
		if depth >= d.depth {
			delete(d.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
			cycleFound = true
			indirects--
			break
		}
		d.pointers[addr] = d.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type information.
	d.w.Write(openParenBytes)
	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
	d.w.Write([]byte(ve.Type().String()))
	d.w.Write(closeParenBytes)

	// Display pointer information.
	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
		d.w.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				d.w.Write(pointerChainBytes)
			}
			printHexPtr(d.w, addr)
		}
		d.w.Write(closeParenBytes)
	}

	// Display dereferenced value.
	d.w.Write(openParenBytes)
	switch {
	case nilFound:
		d.w.Write(nilAngleBytes)

	case cycleFound:
		d.w.Write(circularBytes)

	default:
		d.ignoreNextType = true
		d.dump(ve)
	}
	d.w.Write(closeParenBytes)
}

// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
	// Determine whether this type should be hex dumped or not. Also,
	// for types which should be hexdumped, try to use the underlying data
	// first, then fall back to trying to convert them to a uint8 slice.
	var buf []uint8
	doConvert := false
	doHexDump := false
	numEntries := v.Len()
	if numEntries > 0 {
		vt := v.Index(0).Type()
		vts := vt.String()
		switch {
		// C types that need to be converted.
		case cCharRE.MatchString(vts):
			fallthrough
		case cUnsignedCharRE.MatchString(vts):
			fallthrough
		case cUint8tCharRE.MatchString(vts):
			doConvert = true

		// Try to use existing uint8 slices and fall back to converting
		// and copying if that fails.
		case vt.Kind() == reflect.Uint8:
			// We need an addressable interface to convert the type
			// to a byte slice. However, the reflect package won't
			// give us an interface on certain things like
			// unexported struct fields in order to enforce
			// visibility rules. We use unsafe, when available, to
			// bypass these restrictions since this package does not
			// mutate the values.
			vs := v
			if !vs.CanInterface() || !vs.CanAddr() {
				vs = unsafeReflectValue(vs)
			}
			if !UnsafeDisabled {
				vs = vs.Slice(0, numEntries)

				// Use the existing uint8 slice if it can be
				// type asserted.
				iface := vs.Interface()
				if slice, ok := iface.([]uint8); ok {
					buf = slice
					doHexDump = true
					break
				}
			}

			// The underlying data needs to be converted if it can't
			// be type asserted to a uint8 slice.
			doConvert = true
		}

		// Copy and convert the underlying type if needed.
		if doConvert && vt.ConvertibleTo(uint8Type) {
			// Convert and copy each element into a uint8 byte
			// slice.
			buf = make([]uint8, numEntries)
			for i := 0; i < numEntries; i++ {
				vv := v.Index(i)
				buf[i] = uint8(vv.Convert(uint8Type).Uint())
			}
			doHexDump = true
		}
	}

	// Hexdump the entire slice as needed.
	if doHexDump {
		indent := strings.Repeat(d.cs.Indent, d.depth)
		str := indent + hex.Dump(buf)
		str = strings.Replace(str, "\n", "\n"+indent, -1)
		str = strings.TrimRight(str, d.cs.Indent)
		d.w.Write([]byte(str))
		return
	}

	// Recursively call dump for each item.
	for i := 0; i < numEntries; i++ {
		d.dump(d.unpackValue(v.Index(i)))
		if i < (numEntries - 1) {
			d.w.Write(commaNewlineBytes)
		} else {
			d.w.Write(newlineBytes)
		}
	}
}

// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		d.w.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		d.indent()
		d.dumpPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !d.ignoreNextType {
		d.indent()
		d.w.Write(openParenBytes)
		d.w.Write([]byte(v.Type().String()))
		d.w.Write(closeParenBytes)
		d.w.Write(spaceBytes)
	}
	d.ignoreNextType = false

	// Display length and capacity if the built-in len and cap functions
	// work with the value's kind and the len/cap itself is non-zero.
	valueLen, valueCap := 0, 0
	switch v.Kind() {
	case reflect.Array, reflect.Slice, reflect.Chan:
		valueLen, valueCap = v.Len(), v.Cap()
	case reflect.Map, reflect.String:
		valueLen = v.Len()
	}
	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
		d.w.Write(openParenBytes)
		if valueLen != 0 {
			d.w.Write(lenEqualsBytes)
			printInt(d.w, int64(valueLen), 10)
		}
		if !d.cs.DisableCapacities && valueCap != 0 {
			if valueLen != 0 {
				d.w.Write(spaceBytes)
			}
			d.w.Write(capEqualsBytes)
			printInt(d.w, int64(valueCap), 10)
		}
		d.w.Write(closeParenBytes)
		d.w.Write(spaceBytes)
	}

	// Call Stringer/error interfaces if they exist and the handle methods flag
	// is enabled
	if !d.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(d.cs, d.w, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(d.w, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(d.w, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(d.w, v.Uint(), 10)

	case reflect.Float32:
		printFloat(d.w, v.Float(), 32)

	case reflect.Float64:
		printFloat(d.w, v.Float(), 64)

	case reflect.Complex64:
		printComplex(d.w, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(d.w, v.Complex(), 64)

	case reflect.Slice:
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
			break
		}
		fallthrough

	case reflect.Array:
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			d.dumpSlice(v)
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.String:
		d.w.Write([]byte(strconv.Quote(v.String())))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
			break
		}

		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			numEntries := v.Len()
			keys := v.MapKeys()
			if d.cs.SortKeys {
				sortValues(keys, d.cs)
			}
			for i, key := range keys {
				d.dump(d.unpackValue(key))
				d.w.Write(colonSpaceBytes)
				d.ignoreNextIndent = true
				d.dump(d.unpackValue(v.MapIndex(key)))
				if i < (numEntries - 1) {
					d.w.Write(commaNewlineBytes)
				} else {
					d.w.Write(newlineBytes)
				}
			}
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.Struct:
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			vt := v.Type()
			numFields := v.NumField()
			for i := 0; i < numFields; i++ {
				d.indent()
				vtf := vt.Field(i)
				d.w.Write([]byte(vtf.Name))
				d.w.Write(colonSpaceBytes)
				d.ignoreNextIndent = true
				d.dump(d.unpackValue(v.Field(i)))
				if i < (numFields - 1) {
					d.w.Write(commaNewlineBytes)
				} else {
					d.w.Write(newlineBytes)
				}
			}
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.Uintptr:
		printHexPtr(d.w, uintptr(v.Uint()))

	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(d.w, v.Pointer())

	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it in case any new
	// types are added.
	default:
		if v.CanInterface() {
			fmt.Fprintf(d.w, "%v", v.Interface())
		} else {
			fmt.Fprintf(d.w, "%v", v.String())
		}
	}
}

// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
	for _, arg := range a {
		if arg == nil {
			w.Write(interfaceBytes)
			w.Write(spaceBytes)
			w.Write(nilAngleBytes)
			w.Write(newlineBytes)
			continue
		}

		d := dumpState{w: w, cs: cs}
		d.pointers = make(map[uintptr]int)
		d.dump(reflect.ValueOf(arg))
		d.w.Write(newlineBytes)
	}
}

// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
	fdump(&Config, w, a...)
}

// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
	var buf bytes.Buffer
	fdump(&Config, &buf, a...)
	return buf.String()
}

/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.

See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
	fdump(&Config, os.Stdout, a...)
}
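A brief, hedged sketch of these entry points in use (the pair type and values
are invented for illustration; printed addresses vary per run):

	package main

	import (
		"os"

		"github.com/davecgh/go-spew/spew"
	)

	type pair struct {
		Name  string
		Count int
	}

	func main() {
		p := &pair{Name: "widgets", Count: 3}
		spew.Dump(p)             // newline/indent style with types and addresses
		spew.Fdump(os.Stderr, p) // same output to an arbitrary io.Writer
		s := spew.Sdump(p)       // same output captured as a string
		_ = s
	}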
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
Normal file

@@ -0,0 +1,419 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"bytes"
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "

// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
	value          interface{}
	fs             fmt.State
	depth          int
	pointers       map[uintptr]int
	ignoreNextType bool
	cs             *ConfigState
}

// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
	buf := bytes.NewBuffer(percentBytes)

	for _, flag := range supportedFlags {
		if f.fs.Flag(int(flag)) {
			buf.WriteRune(flag)
		}
	}

	buf.WriteRune('v')

	format = buf.String()
	return format
}

// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
	buf := bytes.NewBuffer(percentBytes)

	for _, flag := range supportedFlags {
		if f.fs.Flag(int(flag)) {
			buf.WriteRune(flag)
		}
	}

	if width, ok := f.fs.Width(); ok {
		buf.WriteString(strconv.Itoa(width))
	}

	if precision, ok := f.fs.Precision(); ok {
		buf.Write(precisionBytes)
		buf.WriteString(strconv.Itoa(precision))
	}

	buf.WriteRune(verb)

	format = buf.String()
	return format
}

// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
	if v.Kind() == reflect.Interface {
		f.ignoreNextType = false
		if !v.IsNil() {
			v = v.Elem()
		}
	}
	return v
}

// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
	// Display nil if top level pointer is nil.
	showTypes := f.fs.Flag('#')
	if v.IsNil() && (!showTypes || f.ignoreNextType) {
		f.fs.Write(nilAngleBytes)
		return
	}

	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range f.pointers {
		if depth >= f.depth {
			delete(f.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to possibly show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
			cycleFound = true
			indirects--
			break
		}
		f.pointers[addr] = f.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type or indirection level depending on flags.
	if showTypes && !f.ignoreNextType {
		f.fs.Write(openParenBytes)
		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
		f.fs.Write([]byte(ve.Type().String()))
		f.fs.Write(closeParenBytes)
	} else {
		if nilFound || cycleFound {
			indirects += strings.Count(ve.Type().String(), "*")
		}
		f.fs.Write(openAngleBytes)
		f.fs.Write([]byte(strings.Repeat("*", indirects)))
		f.fs.Write(closeAngleBytes)
	}

	// Display pointer information depending on flags.
	if f.fs.Flag('+') && (len(pointerChain) > 0) {
		f.fs.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				f.fs.Write(pointerChainBytes)
			}
			printHexPtr(f.fs, addr)
		}
		f.fs.Write(closeParenBytes)
	}

	// Display dereferenced value.
	switch {
	case nilFound:
		f.fs.Write(nilAngleBytes)

	case cycleFound:
		f.fs.Write(circularShortBytes)

	default:
		f.ignoreNextType = true
		f.format(ve)
	}
}

// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		f.fs.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		f.formatPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !f.ignoreNextType && f.fs.Flag('#') {
		f.fs.Write(openParenBytes)
		f.fs.Write([]byte(v.Type().String()))
		f.fs.Write(closeParenBytes)
	}
	f.ignoreNextType = false

	// Call Stringer/error interfaces if they exist and the handle methods
	// flag is enabled.
	if !f.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(f.cs, f.fs, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(f.fs, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(f.fs, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(f.fs, v.Uint(), 10)

	case reflect.Float32:
		printFloat(f.fs, v.Float(), 32)

	case reflect.Float64:
		printFloat(f.fs, v.Float(), 64)

	case reflect.Complex64:
		printComplex(f.fs, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(f.fs, v.Complex(), 64)

	case reflect.Slice:
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		fallthrough

	case reflect.Array:
		f.fs.Write(openBracketBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			numEntries := v.Len()
			for i := 0; i < numEntries; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(v.Index(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBracketBytes)

	case reflect.String:
		f.fs.Write([]byte(v.String()))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}

		f.fs.Write(openMapBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			keys := v.MapKeys()
			if f.cs.SortKeys {
				sortValues(keys, f.cs)
			}
			for i, key := range keys {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(key))
				f.fs.Write(colonBytes)
				f.ignoreNextType = true
				f.format(f.unpackValue(v.MapIndex(key)))
			}
		}
		f.depth--
		f.fs.Write(closeMapBytes)

	case reflect.Struct:
		numFields := v.NumField()
		f.fs.Write(openBraceBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			vt := v.Type()
			for i := 0; i < numFields; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				vtf := vt.Field(i)
				if f.fs.Flag('+') || f.fs.Flag('#') {
					f.fs.Write([]byte(vtf.Name))
					f.fs.Write(colonBytes)
				}
				f.format(f.unpackValue(v.Field(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBraceBytes)

	case reflect.Uintptr:
		printHexPtr(f.fs, uintptr(v.Uint()))

	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(f.fs, v.Pointer())

	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it if any get added.
	default:
		format := f.buildDefaultFormat()
		if v.CanInterface() {
			fmt.Fprintf(f.fs, format, v.Interface())
		} else {
			fmt.Fprintf(f.fs, format, v.String())
		}
	}
}

// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
	f.fs = fs

	// Use standard formatting for verbs that are not v.
	if verb != 'v' {
		format := f.constructOrigFormat(verb)
		fmt.Fprintf(fs, format, f.value)
		return
	}

	if f.value == nil {
		if fs.Flag('#') {
			fs.Write(interfaceBytes)
		}
		fs.Write(nilAngleBytes)
		return
	}

	f.format(reflect.ValueOf(f.value))
}

// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
	fs := &formatState{value: v, cs: cs}
	fs.pointers = make(map[uintptr]int)
	return fs
}

/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(&Config, v)
}
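A short, hedged sketch of using the formatter directly with the standard fmt
package (the value is invented for illustration):

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		v := struct{ A int }{A: 7}
		// Wrapping the value makes the plain fmt verbs use spew's formatting.
		fmt.Printf("%#v\n", spew.NewFormatter(v))
		// Verbs spew does not handle, such as %q, pass through to fmt.
		fmt.Printf("%q\n", spew.NewFormatter("hello"))
	}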
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
Normal file

@@ -0,0 +1,148 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"fmt"
	"io"
)

// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, convertArgs(a)...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprintln(w, convertArgs(a)...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
	return fmt.Print(convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
	return fmt.Printf(format, convertArgs(a)...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
	return fmt.Println(convertArgs(a)...)
}

// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
	return fmt.Sprint(convertArgs(a)...)
}

// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, convertArgs(a)...)
}

// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
	return fmt.Sprintln(convertArgs(a)...)
}

// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
	formatters = make([]interface{}, len(args))
	for index, arg := range args {
		formatters[index] = NewFormatter(arg)
	}
	return formatters
}
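A minimal, hedged sketch of the wrapper functions (values invented for
illustration):

	package main

	import "github.com/davecgh/go-spew/spew"

	func main() {
		cfg := map[string]bool{"debug": true}
		spew.Printf("cfg: %+v\n", cfg)          // like fmt.Printf, spew verbs enabled
		err := spew.Errorf("bad cfg: %#v", cfg) // error value with spew formatting
		spew.Println("failed:", err)
	}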
4
vendor/github.com/dgrijalva/jwt-go/.gitignore
generated
vendored
Normal file

@@ -0,0 +1,4 @@
.DS_Store
bin

13
vendor/github.com/dgrijalva/jwt-go/.travis.yml
generated
vendored
Normal file

@@ -0,0 +1,13 @@
language: go

script:
  - go vet ./...
  - go test -v ./...

go:
  - 1.3
  - 1.4
  - 1.5
  - 1.6
  - 1.7
  - tip
8
vendor/github.com/dgrijalva/jwt-go/LICENSE
generated
vendored
Normal file

@@ -0,0 +1,8 @@
Copyright (c) 2012 Dave Grijalva

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
97
vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
generated
vendored
Normal file

@@ -0,0 +1,97 @@
## Migration Guide from v2 -> v3

Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.

### `Token.Claims` is now an interface type

The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.

`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.

The old example for parsing a token looked like this:

```go
if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
    fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
}
```

is now directly mapped to:

```go
if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
    claims := token.Claims.(jwt.MapClaims)
    fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
}
```

`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type:

```go
type MyCustomClaims struct {
    User string
    *StandardClaims
}

if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
    claims := token.Claims.(*MyCustomClaims)
    fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
}
```

### `ParseFromRequest` has been moved

To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.

`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.

This simple parsing example:

```go
if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
    fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
}
```

is directly mapped to:

```go
if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
    claims := token.Claims.(jwt.MapClaims)
    fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
}
```

There are several concrete `Extractor` types provided for your convenience (a composed example follows this list):

* `HeaderExtractor` will search a list of headers until one contains content.
* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument.
* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header.
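To make the composition concrete, here is a minimal sketch (not part of the original guide; the `X-Api-Token` header name and `auth` package name are made-up assumptions) of chaining extractors with `MultiExtractor`:

```go
package auth

import (
    "net/http"

    jwt "github.com/dgrijalva/jwt-go"
    "github.com/dgrijalva/jwt-go/request"
)

// tokenFromRequest tries a custom header first, then falls back to the
// standard OAuth2 locations (Authorization header, access_token argument).
func tokenFromRequest(req *http.Request, keyFunc jwt.Keyfunc) (*jwt.Token, error) {
    extractor := request.MultiExtractor{
        request.HeaderExtractor{"X-Api-Token"}, // hypothetical header name
        request.OAuth2Extractor,
    }
    return request.ParseFromRequest(req, extractor, keyFunc)
}
```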
### RSA signing methods no longer accept `[]byte` keys

Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.

To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.

```go
func keyLookupFunc(token *jwt.Token) (interface{}, error) {
    // Don't forget to validate the alg is what you expect:
    if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
        return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
    }

    // Look up key
    key, err := lookupPublicKey(token.Header["kid"])
    if err != nil {
        return nil, err
    }

    // Unpack key from PEM encoded PKCS8
    return jwt.ParseRSAPublicKeyFromPEM(key)
}
```

100 vendor/github.com/dgrijalva/jwt-go/README.md generated vendored Normal file
@@ -0,0 +1,100 @@
# jwt-go

[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)

A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)

**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3 (see the sketch below).
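With Go modules, such a pin might look like the following `go.mod` fragment (a sketch, not from the README; the module path `example.com/myapp` is a made-up placeholder, while `v3.2.0+incompatible` is how modules resolve the upstream v3 tag):

```
module example.com/myapp // hypothetical module path

go 1.16

require github.com/dgrijalva/jwt-go v3.2.0+incompatible
```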
**SECURITY NOTICE:** Some older versions of Go have a security issue in crypto/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.

**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.

## What the heck is a JWT?

JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.

In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.

The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.

The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.

## What's in the box?

This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.

## Examples

See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:

* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)

## Extensions

This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.

Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go

## Compliance

This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:

* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key (see the sketch below).
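As a minimal sketch of what that opt-in looks like (not from the README; `tokenString` is assumed to come from the caller, and you almost certainly only want this in tests):

```go
package auth

import jwt "github.com/dgrijalva/jwt-go"

// parseUnsigned accepts an alg=none token. Providing any key other than
// the magic constant makes the library reject "none" outright.
func parseUnsigned(tokenString string) (*jwt.Token, error) {
    return jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
        return jwt.UnsafeAllowNoneSignatureType, nil
    })
}
```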
## Project Status & Versioning

This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).

This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).

While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative import path instead: `gopkg.in/dgrijalva/jwt-go.v3` (shown below). It will do the right thing WRT semantic versioning.
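Concretely (a hedged sketch, not from the README), importing via `gopkg.in` pins you to the v3 line:

```go
package main

// Importing through gopkg.in keeps you on the v3 line even if master moves on.
import jwt "gopkg.in/dgrijalva/jwt-go.v3"

func main() {
    _ = jwt.New(jwt.SigningMethodHS256)
}
```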
**BREAKING CHANGES:**

* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.

## Usage Tips

### Signing vs Encryption

A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:

* The author of the token was in possession of the signing secret
* The data has not been modified since it was signed

It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.

### Choosing a Signing Method

There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.

Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.

Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.

### Signing Methods and Key Types

Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones (a signing/parsing sketch follows this list):

* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation
* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
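Here is a minimal end-to-end sketch using the HMAC method (not from the README; the secret and claim values are made up for illustration):

```go
package main

import (
    "fmt"

    jwt "github.com/dgrijalva/jwt-go"
)

func main() {
    secret := []byte("example-secret") // made-up secret for illustration

    // Sign: HS256 expects a []byte key.
    token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"user": "alice"})
    signed, err := token.SignedString(secret)
    if err != nil {
        panic(err)
    }

    // Parse: validate the alg before trusting the key material.
    parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
        if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
            return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
        }
        return secret, nil
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(parsed.Claims.(jwt.MapClaims)["user"])
}
```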
### JWT and OAuth

It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.

Without going too far down the rabbit hole, here's a description of the interaction of these technologies:

* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.

## More

Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).

The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.

118 vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md generated vendored Normal file
@@ -0,0 +1,118 @@
## `jwt-go` Version History

#### 3.2.0

* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.

#### 3.1.0

* Improvements to `jwt` command line tool
* Added `SkipClaimsValidation` option to `Parser`
* Documentation updates

#### 3.0.0

* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
  * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
  * `ParseFromRequest` has been moved to the `request` subpackage and usage has changed
  * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias for `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
* Other Additions and Changes
  * Added `Claims` interface type to allow users to decode the claims into a custom type
  * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
  * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
  * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
  * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
  * Added several new, more specific, validation errors to the error type bitmask
  * Moved examples from README to executable example files
  * Signing method registry is now thread safe
  * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)

#### 2.7.0

This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.

* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
* Error text for expired tokens includes how long it's been expired
* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
* Documentation updates

#### 2.6.0

* Exposed inner error within ValidationError
* Fixed validation errors when using UseJSONNumber flag
* Added several unit tests

#### 2.5.0

* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
* Updated/fixed some documentation
* Added more helpful error message when trying to parse tokens that begin with `BEARER `

#### 2.4.0

* Added new type, Parser, to allow for configuration of various parsing parameters
  * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
  * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
* Fixed some bugs with ECDSA parsing

#### 2.3.0

* Added support for ECDSA signing methods
* Added support for RSA PSS signing methods (requires go v1.4)

#### 2.2.0

* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.

#### 2.1.0

Backwards compatible API change that was missed in 2.0.0.

* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`

#### 2.0.0

There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.

The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.

It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.

* **Compatibility Breaking Changes**
  * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
  * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
  * `KeyFunc` now returns `interface{}` instead of `[]byte`
  * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
  * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
  * Added public package global `SigningMethodHS256`
  * Added public package global `SigningMethodHS384`
  * Added public package global `SigningMethodHS512`
* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
  * Added public package global `SigningMethodRS256`
  * Added public package global `SigningMethodRS384`
  * Added public package global `SigningMethodRS512`
* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
* Refactored the RSA implementation to be easier to read
* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`

#### 1.0.2

* Fixed bug in parsing public keys from certificates
* Added more tests around the parsing of keys for RS256
* Code refactoring in RS256 implementation. No functional changes

#### 1.0.1

* Fixed panic if RS256 signing method was passed an invalid key

#### 1.0.0

* First versioned release
* API stabilized
* Supports creating, signing, parsing, and validating JWT tokens
* Supports RS256 and HS256 signing methods

134 vendor/github.com/dgrijalva/jwt-go/claims.go generated vendored Normal file
@@ -0,0 +1,134 @@
package jwt

import (
    "crypto/subtle"
    "fmt"
    "time"
)

// For a type to be a Claims object, it must just have a Valid method that determines
// if the token is invalid for any supported reason
type Claims interface {
    Valid() error
}

// Structured version of Claims Section, as referenced at
// https://tools.ietf.org/html/rfc7519#section-4.1
// See examples for how to use this with your own claim types
type StandardClaims struct {
    Audience  string `json:"aud,omitempty"`
    ExpiresAt int64  `json:"exp,omitempty"`
    Id        string `json:"jti,omitempty"`
    IssuedAt  int64  `json:"iat,omitempty"`
    Issuer    string `json:"iss,omitempty"`
    NotBefore int64  `json:"nbf,omitempty"`
    Subject   string `json:"sub,omitempty"`
}

// Validates time based claims "exp, iat, nbf".
// There is no accounting for clock skew.
// As well, if any of the above claims are not in the token, it will still
// be considered a valid claim.
func (c StandardClaims) Valid() error {
    vErr := new(ValidationError)
    now := TimeFunc().Unix()

    // The claims below are optional, by default, so if they are set to the
    // default value in Go, let's not fail the verification for them.
    if c.VerifyExpiresAt(now, false) == false {
        delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
        vErr.Inner = fmt.Errorf("token is expired by %v", delta)
        vErr.Errors |= ValidationErrorExpired
    }

    if c.VerifyIssuedAt(now, false) == false {
        vErr.Inner = fmt.Errorf("Token used before issued")
        vErr.Errors |= ValidationErrorIssuedAt
    }

    if c.VerifyNotBefore(now, false) == false {
        vErr.Inner = fmt.Errorf("token is not valid yet")
        vErr.Errors |= ValidationErrorNotValidYet
    }

    if vErr.valid() {
        return nil
    }

    return vErr
}

// Compares the aud claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
    return verifyAud(c.Audience, cmp, req)
}

// Compares the exp claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
    return verifyExp(c.ExpiresAt, cmp, req)
}

// Compares the iat claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
    return verifyIat(c.IssuedAt, cmp, req)
}

// Compares the iss claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
    return verifyIss(c.Issuer, cmp, req)
}

// Compares the nbf claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
    return verifyNbf(c.NotBefore, cmp, req)
}

// ----- helpers

func verifyAud(aud string, cmp string, required bool) bool {
    if aud == "" {
        return !required
    }
    if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
        return true
    } else {
        return false
    }
}

func verifyExp(exp int64, now int64, required bool) bool {
    if exp == 0 {
        return !required
    }
    return now <= exp
}

func verifyIat(iat int64, now int64, required bool) bool {
    if iat == 0 {
        return !required
    }
    return now >= iat
}

func verifyIss(iss string, cmp string, required bool) bool {
    if iss == "" {
        return !required
    }
    if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
        return true
    } else {
        return false
    }
}

func verifyNbf(nbf int64, now int64, required bool) bool {
    if nbf == 0 {
        return !required
    }
    return now >= nbf
}

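As a usage note for the vendored file above: a minimal sketch (not part of the diff; the package name, issuer, and secret are illustrative assumptions) of embedding `StandardClaims` in a custom claims type and signing a token:

```go
package examples

import (
    "time"

    jwt "github.com/dgrijalva/jwt-go"
)

// AppClaims embeds StandardClaims, inheriting its Valid() time checks.
type AppClaims struct {
    Scope string `json:"scope"`
    jwt.StandardClaims
}

func issueToken(secret []byte) (string, error) {
    claims := AppClaims{
        Scope: "read",
        StandardClaims: jwt.StandardClaims{
            Issuer:    "example.org",
            IssuedAt:  time.Now().Unix(),
            ExpiresAt: time.Now().Add(time.Hour).Unix(),
        },
    }
    return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
}
```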
4 vendor/github.com/dgrijalva/jwt-go/doc.go generated vendored Normal file
@@ -0,0 +1,4 @@
// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
//
// See README.md for more info.
package jwt

148 vendor/github.com/dgrijalva/jwt-go/ecdsa.go generated vendored Normal file
@@ -0,0 +1,148 @@
package jwt

import (
    "crypto"
    "crypto/ecdsa"
    "crypto/rand"
    "errors"
    "math/big"
)

var (
    // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
    ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
)

// Implements the ECDSA family of signing methods
// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
type SigningMethodECDSA struct {
    Name      string
    Hash      crypto.Hash
    KeySize   int
    CurveBits int
}

// Specific instances for EC256 and company
var (
    SigningMethodES256 *SigningMethodECDSA
    SigningMethodES384 *SigningMethodECDSA
    SigningMethodES512 *SigningMethodECDSA
)

func init() {
    // ES256
    SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
    RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
        return SigningMethodES256
    })

    // ES384
    SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
    RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
        return SigningMethodES384
    })

    // ES512
    SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
    RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
        return SigningMethodES512
    })
}

func (m *SigningMethodECDSA) Alg() string {
    return m.Name
}

// Implements the Verify method from SigningMethod
// For this verify method, key must be an ecdsa.PublicKey struct
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
    var err error

    // Decode the signature
    var sig []byte
    if sig, err = DecodeSegment(signature); err != nil {
        return err
    }

    // Get the key
    var ecdsaKey *ecdsa.PublicKey
    switch k := key.(type) {
    case *ecdsa.PublicKey:
        ecdsaKey = k
    default:
        return ErrInvalidKeyType
    }

    if len(sig) != 2*m.KeySize {
        return ErrECDSAVerification
    }

    r := big.NewInt(0).SetBytes(sig[:m.KeySize])
    s := big.NewInt(0).SetBytes(sig[m.KeySize:])

    // Create hasher
    if !m.Hash.Available() {
        return ErrHashUnavailable
    }
    hasher := m.Hash.New()
    hasher.Write([]byte(signingString))

    // Verify the signature
    if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
        return nil
    } else {
        return ErrECDSAVerification
    }
}

// Implements the Sign method from SigningMethod
// For this signing method, key must be an ecdsa.PrivateKey struct
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
    // Get the key
    var ecdsaKey *ecdsa.PrivateKey
    switch k := key.(type) {
    case *ecdsa.PrivateKey:
        ecdsaKey = k
    default:
        return "", ErrInvalidKeyType
    }

    // Create the hasher
    if !m.Hash.Available() {
        return "", ErrHashUnavailable
    }

    hasher := m.Hash.New()
    hasher.Write([]byte(signingString))

    // Sign the string and return r, s
    if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
        curveBits := ecdsaKey.Curve.Params().BitSize

        if m.CurveBits != curveBits {
            return "", ErrInvalidKey
        }

        keyBytes := curveBits / 8
        if curveBits%8 > 0 {
            keyBytes += 1
        }

        // We serialize the outputs (r and s) into big-endian byte arrays and pad
        // them with zeros on the left to make sure the sizes work out. Both arrays
        // must be keyBytes long, and the output must be 2*keyBytes long.
        rBytes := r.Bytes()
        rBytesPadded := make([]byte, keyBytes)
        copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)

        sBytes := s.Bytes()
        sBytesPadded := make([]byte, keyBytes)
        copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)

        out := append(rBytesPadded, sBytesPadded...)

        return EncodeSegment(out), nil
    } else {
        return "", err
    }
}

67 vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go generated vendored Normal file
@@ -0,0 +1,67 @@
package jwt

import (
    "crypto/ecdsa"
    "crypto/x509"
    "encoding/pem"
    "errors"
)

var (
    ErrNotECPublicKey  = errors.New("Key is not a valid ECDSA public key")
    ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
)

// Parse PEM encoded Elliptic Curve Private Key Structure
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
    var err error

    // Parse PEM block
    var block *pem.Block
    if block, _ = pem.Decode(key); block == nil {
        return nil, ErrKeyMustBePEMEncoded
    }

    // Parse the key
    var parsedKey interface{}
    if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
        return nil, err
    }

    var pkey *ecdsa.PrivateKey
    var ok bool
    if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
        return nil, ErrNotECPrivateKey
    }

    return pkey, nil
}

// Parse PEM encoded PKCS1 or PKCS8 public key
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
    var err error

    // Parse PEM block
    var block *pem.Block
    if block, _ = pem.Decode(key); block == nil {
        return nil, ErrKeyMustBePEMEncoded
    }

    // Parse the key
    var parsedKey interface{}
    if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
        if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
            parsedKey = cert.PublicKey
        } else {
            return nil, err
        }
    }

    var pkey *ecdsa.PublicKey
    var ok bool
    if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
        return nil, ErrNotECPublicKey
    }

    return pkey, nil
}

59 vendor/github.com/dgrijalva/jwt-go/errors.go generated vendored Normal file
@@ -0,0 +1,59 @@
package jwt

import (
    "errors"
)

// Error constants
var (
    ErrInvalidKey      = errors.New("key is invalid")
    ErrInvalidKeyType  = errors.New("key is of invalid type")
    ErrHashUnavailable = errors.New("the requested hash function is unavailable")
)

// The errors that might occur when parsing and validating a token
const (
    ValidationErrorMalformed        uint32 = 1 << iota // Token is malformed
    ValidationErrorUnverifiable                        // Token could not be verified because of signing problems
    ValidationErrorSignatureInvalid                    // Signature validation failed

    // Standard Claim validation errors
    ValidationErrorAudience      // AUD validation failed
    ValidationErrorExpired       // EXP validation failed
    ValidationErrorIssuedAt      // IAT validation failed
    ValidationErrorIssuer        // ISS validation failed
    ValidationErrorNotValidYet   // NBF validation failed
    ValidationErrorId            // JTI validation failed
    ValidationErrorClaimsInvalid // Generic claims validation error
)

// Helper for constructing a ValidationError with a string error message
func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
    return &ValidationError{
        text:   errorText,
        Errors: errorFlags,
    }
}

// The error from Parse if token is not valid
type ValidationError struct {
    Inner  error  // stores the error returned by external dependencies, i.e.: KeyFunc
    Errors uint32 // bitfield. see ValidationError... constants
    text   string // errors that do not have a valid error just have text
}

// Validation error is an error type
func (e ValidationError) Error() string {
    if e.Inner != nil {
        return e.Inner.Error()
    } else if e.text != "" {
        return e.text
    } else {
        return "token is invalid"
    }
}

// No errors
func (e *ValidationError) valid() bool {
    return e.Errors == 0
}
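
As a usage note for the vendored file above: since `Errors` is a bitmask, callers typically test individual flags after a failed parse. A minimal sketch (not part of the diff; the helper function and its package name are hypothetical):

```go
package examples

import jwt "github.com/dgrijalva/jwt-go"

// explainParseError inspects the ValidationError bitmask after a failed parse.
func explainParseError(tokenString string, keyFunc jwt.Keyfunc) string {
    _, err := jwt.Parse(tokenString, keyFunc)
    if ve, ok := err.(*jwt.ValidationError); ok {
        switch {
        case ve.Errors&jwt.ValidationErrorMalformed != 0:
            return "not a JWT at all"
        case ve.Errors&jwt.ValidationErrorExpired != 0:
            return "token is expired"
        case ve.Errors&jwt.ValidationErrorSignatureInvalid != 0:
            return "signature is invalid"
        }
    }
    if err == nil {
        return "token is valid"
    }
    return err.Error()
}
```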
95 vendor/github.com/dgrijalva/jwt-go/hmac.go generated vendored Normal file
@@ -0,0 +1,95 @@
package jwt

import (
    "crypto"
    "crypto/hmac"
    "errors"
)

// Implements the HMAC-SHA family of signing methods
// Expects key type of []byte for both signing and validation
type SigningMethodHMAC struct {
    Name string
    Hash crypto.Hash
}

// Specific instances for HS256 and company
var (
    SigningMethodHS256  *SigningMethodHMAC
    SigningMethodHS384  *SigningMethodHMAC
    SigningMethodHS512  *SigningMethodHMAC
    ErrSignatureInvalid = errors.New("signature is invalid")
)

func init() {
    // HS256
    SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
    RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
        return SigningMethodHS256
    })

    // HS384
    SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
    RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
        return SigningMethodHS384
    })

    // HS512
    SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
    RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
        return SigningMethodHS512
    })
}

func (m *SigningMethodHMAC) Alg() string {
    return m.Name
}

// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
    // Verify the key is the right type
    keyBytes, ok := key.([]byte)
    if !ok {
        return ErrInvalidKeyType
    }

    // Decode signature, for comparison
    sig, err := DecodeSegment(signature)
    if err != nil {
        return err
    }

    // Can we use the specified hashing method?
    if !m.Hash.Available() {
        return ErrHashUnavailable
    }

    // This signing method is symmetric, so we validate the signature
    // by reproducing the signature from the signing string and key, then
    // comparing that against the provided signature.
    hasher := hmac.New(m.Hash.New, keyBytes)
    hasher.Write([]byte(signingString))
    if !hmac.Equal(sig, hasher.Sum(nil)) {
        return ErrSignatureInvalid
    }

    // No validation errors. Signature is good.
    return nil
}

// Implements the Sign method from SigningMethod for this signing method.
// Key must be []byte
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
    if keyBytes, ok := key.([]byte); ok {
        if !m.Hash.Available() {
            return "", ErrHashUnavailable
        }

        hasher := hmac.New(m.Hash.New, keyBytes)
        hasher.Write([]byte(signingString))

        return EncodeSegment(hasher.Sum(nil)), nil
    }

    return "", ErrInvalidKeyType
}

94 vendor/github.com/dgrijalva/jwt-go/map_claims.go generated vendored Normal file
@@ -0,0 +1,94 @@
package jwt

import (
    "encoding/json"
    "errors"
    // "fmt"
)

// Claims type that uses the map[string]interface{} for JSON decoding
// This is the default claims type if you don't supply one
type MapClaims map[string]interface{}

// Compares the aud claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
    aud, _ := m["aud"].(string)
    return verifyAud(aud, cmp, req)
}

// Compares the exp claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
    switch exp := m["exp"].(type) {
    case float64:
        return verifyExp(int64(exp), cmp, req)
    case json.Number:
        v, _ := exp.Int64()
        return verifyExp(v, cmp, req)
    }
    return req == false
}

// Compares the iat claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
    switch iat := m["iat"].(type) {
    case float64:
        return verifyIat(int64(iat), cmp, req)
    case json.Number:
        v, _ := iat.Int64()
        return verifyIat(v, cmp, req)
    }
    return req == false
}

// Compares the iss claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
    iss, _ := m["iss"].(string)
    return verifyIss(iss, cmp, req)
}

// Compares the nbf claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
    switch nbf := m["nbf"].(type) {
    case float64:
        return verifyNbf(int64(nbf), cmp, req)
    case json.Number:
        v, _ := nbf.Int64()
        return verifyNbf(v, cmp, req)
    }
    return req == false
}

// Validates time based claims "exp, iat, nbf".
// There is no accounting for clock skew.
// As well, if any of the above claims are not in the token, it will still
// be considered a valid claim.
func (m MapClaims) Valid() error {
    vErr := new(ValidationError)
    now := TimeFunc().Unix()

    if m.VerifyExpiresAt(now, false) == false {
        vErr.Inner = errors.New("Token is expired")
        vErr.Errors |= ValidationErrorExpired
    }

    if m.VerifyIssuedAt(now, false) == false {
        vErr.Inner = errors.New("Token used before issued")
        vErr.Errors |= ValidationErrorIssuedAt
    }

    if m.VerifyNotBefore(now, false) == false {
        vErr.Inner = errors.New("Token is not valid yet")
        vErr.Errors |= ValidationErrorNotValidYet
    }

    if vErr.valid() {
        return nil
    }

    return vErr
}

52 vendor/github.com/dgrijalva/jwt-go/none.go generated vendored Normal file
@@ -0,0 +1,52 @@
package jwt

// Implements the none signing method. This is required by the spec
// but you probably should never use it.
var SigningMethodNone *signingMethodNone

const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"

var NoneSignatureTypeDisallowedError error

type signingMethodNone struct{}
type unsafeNoneMagicConstant string

func init() {
    SigningMethodNone = &signingMethodNone{}
    NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)

    RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
        return SigningMethodNone
    })
}

func (m *signingMethodNone) Alg() string {
    return "none"
}

// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
    // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
    // accepting 'none' signing method
    if _, ok := key.(unsafeNoneMagicConstant); !ok {
        return NoneSignatureTypeDisallowedError
    }
    // If signing method is none, signature must be an empty string
    if signature != "" {
        return NewValidationError(
            "'none' signing method with non-empty signature",
            ValidationErrorSignatureInvalid,
        )
    }

    // Accept 'none' signing method.
    return nil
}

// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
    if _, ok := key.(unsafeNoneMagicConstant); ok {
        return "", nil
    }
    return "", NoneSignatureTypeDisallowedError
}

148 vendor/github.com/dgrijalva/jwt-go/parser.go generated vendored Normal file
@@ -0,0 +1,148 @@
package jwt

import (
    "bytes"
    "encoding/json"
    "fmt"
    "strings"
)

type Parser struct {
    ValidMethods         []string // If populated, only these methods will be considered valid
    UseJSONNumber        bool     // Use JSON Number format in JSON decoder
    SkipClaimsValidation bool     // Skip claims validation during token parsing
}

// Parse, validate, and return a token.
// keyFunc will receive the parsed token and should return the key for validating.
// If everything is kosher, err will be nil
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
    return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
}

func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
    token, parts, err := p.ParseUnverified(tokenString, claims)
    if err != nil {
        return token, err
    }

    // Verify signing method is in the required set
    if p.ValidMethods != nil {
        var signingMethodValid = false
        var alg = token.Method.Alg()
        for _, m := range p.ValidMethods {
            if m == alg {
                signingMethodValid = true
                break
            }
        }
        if !signingMethodValid {
            // signing method is not in the listed set
            return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
        }
    }

    // Lookup key
    var key interface{}
    if keyFunc == nil {
        // keyFunc was not provided. short circuiting validation
        return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
    }
    if key, err = keyFunc(token); err != nil {
        // keyFunc returned an error
        if ve, ok := err.(*ValidationError); ok {
            return token, ve
        }
        return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
    }

    vErr := &ValidationError{}

    // Validate Claims
    if !p.SkipClaimsValidation {
        if err := token.Claims.Valid(); err != nil {

            // If the Claims Valid returned an error, check if it is a validation error,
            // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
            if e, ok := err.(*ValidationError); !ok {
                vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
            } else {
                vErr = e
            }
        }
    }

    // Perform validation
    token.Signature = parts[2]
    if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
        vErr.Inner = err
        vErr.Errors |= ValidationErrorSignatureInvalid
    }

    if vErr.valid() {
        token.Valid = true
        return token, nil
    }

    return token, vErr
}

// WARNING: Don't use this method unless you know what you're doing
//
// This method parses the token but doesn't validate the signature. It's only
// ever useful in cases where you know the signature is valid (because it has
// been checked previously in the stack) and you want to extract values from
// it.
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
    parts = strings.Split(tokenString, ".")
    if len(parts) != 3 {
        return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
    }

    token = &Token{Raw: tokenString}

    // parse Header
    var headerBytes []byte
    if headerBytes, err = DecodeSegment(parts[0]); err != nil {
        if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
            return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
        }
        return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
    }
    if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
        return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
    }

    // parse Claims
    var claimBytes []byte
    token.Claims = claims

    if claimBytes, err = DecodeSegment(parts[1]); err != nil {
        return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
    }
    dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
    if p.UseJSONNumber {
        dec.UseNumber()
    }
    // JSON Decode. Special case for map type to avoid weird pointer behavior
    if c, ok := token.Claims.(MapClaims); ok {
        err = dec.Decode(&c)
    } else {
        err = dec.Decode(&claims)
    }
    // Handle decode error
    if err != nil {
        return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
    }

    // Lookup signature method
    if method, ok := token.Header["alg"].(string); ok {
        if token.Method = GetSigningMethod(method); token.Method == nil {
            return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
        }
    } else {
        return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
    }

    return token, parts, nil
}

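As a usage note for the vendored file above: a minimal sketch (not part of the diff; the package and function names are hypothetical) of restricting `ValidMethods` and of peeking at claims with `ParseUnverified`:

```go
package examples

import jwt "github.com/dgrijalva/jwt-go"

// parseStrict only accepts HS256 tokens, rejecting any other alg up front.
func parseStrict(tokenString string, keyFunc jwt.Keyfunc) (*jwt.Token, error) {
    parser := &jwt.Parser{ValidMethods: []string{"HS256"}}
    return parser.Parse(tokenString, keyFunc)
}

// peek reads claims WITHOUT verifying the signature. Only safe when the
// signature was already checked earlier in the stack.
func peek(tokenString string) (jwt.MapClaims, error) {
    token, _, err := new(jwt.Parser).ParseUnverified(tokenString, jwt.MapClaims{})
    if err != nil {
        return nil, err
    }
    return token.Claims.(jwt.MapClaims), nil
}
```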
101 vendor/github.com/dgrijalva/jwt-go/rsa.go generated vendored Normal file
@@ -0,0 +1,101 @@
package jwt

import (
    "crypto"
    "crypto/rand"
    "crypto/rsa"
)

// Implements the RSA family of signing methods
// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
type SigningMethodRSA struct {
    Name string
    Hash crypto.Hash
}

// Specific instances for RS256 and company
var (
    SigningMethodRS256 *SigningMethodRSA
    SigningMethodRS384 *SigningMethodRSA
    SigningMethodRS512 *SigningMethodRSA
)

func init() {
    // RS256
    SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
    RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
        return SigningMethodRS256
    })

    // RS384
    SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
    RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
        return SigningMethodRS384
    })

    // RS512
    SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
    RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
        return SigningMethodRS512
    })
}

func (m *SigningMethodRSA) Alg() string {
    return m.Name
}

// Implements the Verify method from SigningMethod
// For this signing method, key must be an *rsa.PublicKey structure.
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
    var err error

    // Decode the signature
    var sig []byte
    if sig, err = DecodeSegment(signature); err != nil {
        return err
    }

    var rsaKey *rsa.PublicKey
    var ok bool

    if rsaKey, ok = key.(*rsa.PublicKey); !ok {
        return ErrInvalidKeyType
    }

    // Create hasher
    if !m.Hash.Available() {
        return ErrHashUnavailable
    }
    hasher := m.Hash.New()
    hasher.Write([]byte(signingString))

    // Verify the signature
    return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
}

// Implements the Sign method from SigningMethod
// For this signing method, key must be an *rsa.PrivateKey structure.
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
    var rsaKey *rsa.PrivateKey
    var ok bool

    // Validate type of key
    if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
        return "", ErrInvalidKey
    }

    // Create the hasher
    if !m.Hash.Available() {
        return "", ErrHashUnavailable
    }

    hasher := m.Hash.New()
    hasher.Write([]byte(signingString))

    // Sign the string and return the encoded bytes
    if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
        return EncodeSegment(sigBytes), nil
    } else {
        return "", err
    }
}

126 vendor/github.com/dgrijalva/jwt-go/rsa_pss.go generated vendored Normal file
@@ -0,0 +1,126 @@
|
||||||
|
// +build go1.4

package jwt

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
)

// Implements the RSAPSS family of signing methods
type SigningMethodRSAPSS struct {
	*SigningMethodRSA
	Options *rsa.PSSOptions
}

// Specific instances for RS/PS and company
var (
	SigningMethodPS256 *SigningMethodRSAPSS
	SigningMethodPS384 *SigningMethodRSAPSS
	SigningMethodPS512 *SigningMethodRSAPSS
)

func init() {
	// PS256
	SigningMethodPS256 = &SigningMethodRSAPSS{
		&SigningMethodRSA{
			Name: "PS256",
			Hash: crypto.SHA256,
		},
		&rsa.PSSOptions{
			SaltLength: rsa.PSSSaltLengthAuto,
			Hash:       crypto.SHA256,
		},
	}
	RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
		return SigningMethodPS256
	})

	// PS384
	SigningMethodPS384 = &SigningMethodRSAPSS{
		&SigningMethodRSA{
			Name: "PS384",
			Hash: crypto.SHA384,
		},
		&rsa.PSSOptions{
			SaltLength: rsa.PSSSaltLengthAuto,
			Hash:       crypto.SHA384,
		},
	}
	RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
		return SigningMethodPS384
	})

	// PS512
	SigningMethodPS512 = &SigningMethodRSAPSS{
		&SigningMethodRSA{
			Name: "PS512",
			Hash: crypto.SHA512,
		},
		&rsa.PSSOptions{
			SaltLength: rsa.PSSSaltLengthAuto,
			Hash:       crypto.SHA512,
		},
	}
	RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
		return SigningMethodPS512
	})
}

// Implements the Verify method from SigningMethod
// For this verify method, key must be an *rsa.PublicKey struct
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
	var err error

	// Decode the signature
	var sig []byte
	if sig, err = DecodeSegment(signature); err != nil {
		return err
	}

	var rsaKey *rsa.PublicKey
	switch k := key.(type) {
	case *rsa.PublicKey:
		rsaKey = k
	default:
		return ErrInvalidKey
	}

	// Create hasher
	if !m.Hash.Available() {
		return ErrHashUnavailable
	}
	hasher := m.Hash.New()
	hasher.Write([]byte(signingString))

	return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
}

// Implements the Sign method from SigningMethod
// For this signing method, key must be an *rsa.PrivateKey struct
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
	var rsaKey *rsa.PrivateKey

	switch k := key.(type) {
	case *rsa.PrivateKey:
		rsaKey = k
	default:
		return "", ErrInvalidKeyType
	}

	// Create the hasher
	if !m.Hash.Available() {
		return "", ErrHashUnavailable
	}

	hasher := m.Hash.New()
	hasher.Write([]byte(signingString))

	// Sign the string and return the encoded bytes
	if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
		return EncodeSegment(sigBytes), nil
	} else {
		return "", err
	}
}
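The PSS variants share the Sign/Verify shape shown for the plain RSA methods; the practical difference is that PSS signing is salted. A small sketch under the same hypothetical-key assumption:

```
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048) // hypothetical throwaway key
	if err != nil {
		panic(err)
	}

	// PSS signatures are randomized: signing the same string twice yields
	// different encodings, yet both verify against the same public key.
	s1, _ := jwt.SigningMethodPS256.Sign("header.payload", key)
	s2, _ := jwt.SigningMethodPS256.Sign("header.payload", key)
	fmt.Println("signatures differ:", s1 != s2)
	fmt.Println("s1 verifies:", jwt.SigningMethodPS256.Verify("header.payload", s1, &key.PublicKey) == nil)
}
```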
101 vendor/github.com/dgrijalva/jwt-go/rsa_utils.go generated vendored Normal file
@@ -0,0 +1,101 @@
package jwt

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
)

var (
	ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
	ErrNotRSAPrivateKey    = errors.New("Key is not a valid RSA private key")
	ErrNotRSAPublicKey     = errors.New("Key is not a valid RSA public key")
)

// Parse PEM encoded PKCS1 or PKCS8 private key
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
	var err error

	// Parse PEM block
	var block *pem.Block
	if block, _ = pem.Decode(key); block == nil {
		return nil, ErrKeyMustBePEMEncoded
	}

	var parsedKey interface{}
	if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
		if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
			return nil, err
		}
	}

	var pkey *rsa.PrivateKey
	var ok bool
	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
		return nil, ErrNotRSAPrivateKey
	}

	return pkey, nil
}

// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
	var err error

	// Parse PEM block
	var block *pem.Block
	if block, _ = pem.Decode(key); block == nil {
		return nil, ErrKeyMustBePEMEncoded
	}

	var parsedKey interface{}

	var blockDecrypted []byte
	if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
		return nil, err
	}

	if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
		if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
			return nil, err
		}
	}

	var pkey *rsa.PrivateKey
	var ok bool
	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
		return nil, ErrNotRSAPrivateKey
	}

	return pkey, nil
}

// Parse PEM encoded public key (PKIX key or certificate)
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
	var err error

	// Parse PEM block
	var block *pem.Block
	if block, _ = pem.Decode(key); block == nil {
		return nil, ErrKeyMustBePEMEncoded
	}

	// Parse the key
	var parsedKey interface{}
	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
			parsedKey = cert.PublicKey
		} else {
			return nil, err
		}
	}

	var pkey *rsa.PublicKey
	var ok bool
	if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
		return nil, ErrNotRSAPublicKey
	}

	return pkey, nil
}
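A sketch of how these parsers are usually called; the "key.pem" path is a placeholder for any PKCS1 or PKCS8 private key on disk:

```
package main

import (
	"fmt"
	"io/ioutil"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Hypothetical path to a PEM-encoded private key.
	pemBytes, err := ioutil.ReadFile("key.pem")
	if err != nil {
		panic(err)
	}

	key, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		panic(err) // e.g. ErrKeyMustBePEMEncoded or ErrNotRSAPrivateKey
	}
	fmt.Println("modulus bits:", key.N.BitLen())
}
```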
35 vendor/github.com/dgrijalva/jwt-go/signing_method.go generated vendored Normal file
@@ -0,0 +1,35 @@
package jwt

import (
	"sync"
)

var signingMethods = map[string]func() SigningMethod{}
var signingMethodLock = new(sync.RWMutex)

// Implement SigningMethod to add new methods for signing or verifying tokens.
type SigningMethod interface {
	Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
	Sign(signingString string, key interface{}) (string, error)    // Returns encoded signature or error
	Alg() string                                                   // returns the alg identifier for this method (example: 'HS256')
}

// Register the "alg" name and a factory function for signing method.
// This is typically done during init() in the method's implementation
func RegisterSigningMethod(alg string, f func() SigningMethod) {
	signingMethodLock.Lock()
	defer signingMethodLock.Unlock()

	signingMethods[alg] = f
}

// Get a signing method from an "alg" string
func GetSigningMethod(alg string) (method SigningMethod) {
	signingMethodLock.RLock()
	defer signingMethodLock.RUnlock()

	if methodF, ok := signingMethods[alg]; ok {
		method = methodF()
	}
	return
}
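To illustrate the registry mechanics, a hedged sketch that registers a do-nothing method: the "X-NONE" alg name and the noneMethod type are invented for the example only, and such a method is of course not safe for real tokens:

```
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// noneMethod is a hypothetical no-op SigningMethod used only to show the
// registry; it accepts every signature and produces empty ones.
type noneMethod struct{}

func (m *noneMethod) Alg() string { return "X-NONE" }
func (m *noneMethod) Sign(signingString string, key interface{}) (string, error) {
	return "", nil
}
func (m *noneMethod) Verify(signingString, signature string, key interface{}) error {
	return nil
}

func main() {
	// Typically done in the implementation's init(), as the RSA files above do.
	jwt.RegisterSigningMethod("X-NONE", func() jwt.SigningMethod {
		return &noneMethod{}
	})

	// GetSigningMethod returns nil for algs that were never registered.
	fmt.Println(jwt.GetSigningMethod("X-NONE").Alg())
}
```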
108 vendor/github.com/dgrijalva/jwt-go/token.go generated vendored Normal file
@@ -0,0 +1,108 @@
package jwt

import (
	"encoding/base64"
	"encoding/json"
	"strings"
	"time"
)

// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
// You can override it to use another time value. This is useful for testing or if your
// server uses a different time zone than your tokens.
var TimeFunc = time.Now

// Parse methods use this callback function to supply
// the key for verification. The function receives the parsed,
// but unverified Token. This allows you to use properties in the
// Header of the token (such as `kid`) to identify which key to use.
type Keyfunc func(*Token) (interface{}, error)

// A JWT Token. Different fields will be used depending on whether you're
// creating or parsing/verifying a token.
type Token struct {
	Raw       string                 // The raw token. Populated when you Parse a token
	Method    SigningMethod          // The signing method used or to be used
	Header    map[string]interface{} // The first segment of the token
	Claims    Claims                 // The second segment of the token
	Signature string                 // The third segment of the token. Populated when you Parse a token
	Valid     bool                   // Is the token valid? Populated when you Parse/Verify a token
}

// Create a new Token. Takes a signing method
func New(method SigningMethod) *Token {
	return NewWithClaims(method, MapClaims{})
}

func NewWithClaims(method SigningMethod, claims Claims) *Token {
	return &Token{
		Header: map[string]interface{}{
			"typ": "JWT",
			"alg": method.Alg(),
		},
		Claims: claims,
		Method: method,
	}
}

// Get the complete, signed token
func (t *Token) SignedString(key interface{}) (string, error) {
	var sig, sstr string
	var err error
	if sstr, err = t.SigningString(); err != nil {
		return "", err
	}
	if sig, err = t.Method.Sign(sstr, key); err != nil {
		return "", err
	}
	return strings.Join([]string{sstr, sig}, "."), nil
}

// Generate the signing string. This is the
// most expensive part of the whole deal. Unless you
// need this for something special, just go straight for
// the SignedString.
func (t *Token) SigningString() (string, error) {
	var err error
	parts := make([]string, 2)
	for i := range parts {
		var jsonValue []byte
		if i == 0 {
			if jsonValue, err = json.Marshal(t.Header); err != nil {
				return "", err
			}
		} else {
			if jsonValue, err = json.Marshal(t.Claims); err != nil {
				return "", err
			}
		}

		parts[i] = EncodeSegment(jsonValue)
	}
	return strings.Join(parts, "."), nil
}

// Parse, validate, and return a token.
// keyFunc will receive the parsed token and should return the key for validating.
// If everything is kosher, err will be nil
func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
	return new(Parser).Parse(tokenString, keyFunc)
}

func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
	return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
}

// Encode JWT specific base64url encoding with padding stripped
func EncodeSegment(seg []byte) string {
	return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
}

// Decode JWT specific base64url encoding with padding stripped
func DecodeSegment(seg string) ([]byte, error) {
	if l := len(seg) % 4; l > 0 {
		seg += strings.Repeat("=", 4-l)
	}

	return base64.URLEncoding.DecodeString(seg)
}
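A quick round-trip through the two segment helpers shows the padding behavior described in their comments (the JSON header literal is only sample input):

```
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// EncodeSegment strips the base64url '=' padding...
	enc := jwt.EncodeSegment([]byte(`{"alg":"RS256","typ":"JWT"}`))
	fmt.Println(enc) // no trailing '=' characters

	// ...and DecodeSegment re-pads to a multiple of four before decoding.
	raw, err := jwt.DecodeSegment(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))
}
```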
24 vendor/github.com/dsoprea/go-exif/.travis.yml generated vendored Normal file
@@ -0,0 +1,24 @@
language: go
go:
- master
- stable
- "1.14"
- "1.13"
- "1.12"
env:
- GO111MODULE=on
install:
- go get -t ./...
script:
# v1
- go test -v .
- go test -v ./exif-read-tool
# v2
- cd v2
- go test -v ./...
- cd ..
# v3. Coverage reports come from this.
- cd v3
- go test -v ./... -coverprofile=coverage.txt -covermode=atomic
after_success:
- curl -s https://codecov.io/bash | bash
9 vendor/github.com/dsoprea/go-exif/LICENSE generated vendored Normal file
@@ -0,0 +1,9 @@
MIT LICENSE

Copyright 2019 Dustin Oprea

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
206 vendor/github.com/dsoprea/go-exif/README.md generated vendored Normal file
@@ -0,0 +1,206 @@
[![Build Status](https://travis-ci.org/dsoprea/go-exif.svg?branch=master)](https://travis-ci.org/dsoprea/go-exif)
[![codecov](https://codecov.io/gh/dsoprea/go-exif/branch/master/graph/badge.svg)](https://codecov.io/gh/dsoprea/go-exif)
[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-exif/v3)](https://goreportcard.com/report/github.com/dsoprea/go-exif/v3)
[![GoDoc](https://godoc.org/github.com/dsoprea/go-exif/v3?status.svg)](https://godoc.org/github.com/dsoprea/go-exif/v3)

# Overview

This package provides native Go functionality to parse an existing EXIF block, update an existing EXIF block, or add a new EXIF block.


# Getting

To get the project and dependencies:

```
$ go get -t github.com/dsoprea/go-exif/v3
```


# Scope

This project is concerned only with parsing and encoding raw EXIF data. It does
not understand specific file-formats. This package assumes you know how to
extract the raw EXIF data from a file, such as a JPEG, and, if you want to
update it, know how to write it back. File-specific formats are not the concern
of *go-exif*, though we provide
[exif.SearchAndExtractExif][search-and-extract-exif] and
[exif.SearchFileAndExtractExif][search-file-and-extract-exif] as brute-force
search mechanisms that will help you explore the EXIF information for newer
formats that you might not yet have any way to parse.

That said, the author also provides the following projects to support the
efficient processing of the corresponding image formats:

- [go-jpeg-image-structure](https://github.com/dsoprea/go-jpeg-image-structure)
- [go-png-image-structure](https://github.com/dsoprea/go-png-image-structure)
- [go-tiff-image-structure](https://github.com/dsoprea/go-tiff-image-structure)
- [go-heic-exif-extractor](https://github.com/dsoprea/go-heic-exif-extractor)

See the [SetExif example in go-jpeg-image-structure][jpeg-set-exif] for
practical information on getting started with JPEG files.


# Usage

The package provides a set of [working examples][examples] and is covered by
unit-tests. Please look to these to get familiar with how to read and write
EXIF.

Create an instance of the `Exif` type and call `Scan()` with a byte-slice, where
the first byte is the beginning of the raw EXIF data. You may pass a callback
that will be invoked for every tag, or `nil` if you do not want one. If no
callback is given, you are effectively just validating the structure or parsing
of the image.

Obviously, it is most efficient to properly parse the media file and then
provide the specific EXIF data to be parsed, but there is also a heuristic for
finding the EXIF data within the media blob directly. This means that, at least
for testing or curiosity, **you do not have to parse or even understand the
format of the image or audio file in order to find and decode the EXIF
information inside of it.** See the usage of the `SearchAndExtractExif` method
in the example.
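As a minimal sketch of that brute-force path, assuming the v1 import path vendored in this repository (the badges above reference v3) and a placeholder file name:

```
package main

import (
	"fmt"

	"github.com/dsoprea/go-exif"
)

func main() {
	// "image.jpg" is a placeholder; any EXIF-bearing media file works.
	rawExif, err := exif.SearchFileAndExtractExif("image.jpg")
	if err != nil {
		panic(err)
	}

	// The returned slice starts at the EXIF preamble, so the header
	// (byte-order and first-IFD offset) can be parsed straight away.
	eh, err := exif.ParseExifHeader(rawExif)
	if err != nil {
		panic(err)
	}

	fmt.Println(eh)
}
```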
The library often refers to an IFD with an "IFD path" (e.g. IFD/Exif,
IFD/GPSInfo). A "fully-qualified" IFD-path is one that includes an index
describing which specific sibling IFD is being referred to if not the first one
(e.g. IFD1, the IFD where the thumbnail is expressed per the TIFF standard).

There is an "IFD mapping" and a "tag index" that must be created and passed to
the library from the top. These contain all of the knowledge of the IFD
hierarchies and their tag-IDs (the IFD mapping) and the tags that they are
allowed to host (the tag index). There are convenience functions to load them
with the standard TIFF information, but you, alternatively, may choose
something totally different (to support parsing any kind of EXIF data that does
not follow or is not relevant to TIFF at all).


# Standards and Customization

This project is configuration driven. By default, it has no knowledge of tags
and IDs until you load them prior to using (which is incorporated in the
examples). You are just as easily able to add additional custom IFDs and custom
tags for them. If desired, you could completely ignore the standard information
and load *totally* non-standard IFDs and tags.

This would be useful for divergent implementations that add non-standard
information to images. It would also be useful if there is some need to just
store a flat list of tags in an image for simplified, proprietary usage.


# Reader Tool

There is a runnable reading/dumping tool included:

```
$ go get github.com/dsoprea/go-exif/v3/command/exif-read-tool
$ exif-read-tool --filepath "<media file-path>"
```

Example output:

```
IFD-PATH=[IFD] ID=(0x010f) NAME=[Make] COUNT=(6) TYPE=[ASCII] VALUE=[Canon]
IFD-PATH=[IFD] ID=(0x0110) NAME=[Model] COUNT=(22) TYPE=[ASCII] VALUE=[Canon EOS 5D Mark III]
IFD-PATH=[IFD] ID=(0x0112) NAME=[Orientation] COUNT=(1) TYPE=[SHORT] VALUE=[1]
IFD-PATH=[IFD] ID=(0x011a) NAME=[XResolution] COUNT=(1) TYPE=[RATIONAL] VALUE=[72/1]
IFD-PATH=[IFD] ID=(0x011b) NAME=[YResolution] COUNT=(1) TYPE=[RATIONAL] VALUE=[72/1]
IFD-PATH=[IFD] ID=(0x0128) NAME=[ResolutionUnit] COUNT=(1) TYPE=[SHORT] VALUE=[2]
IFD-PATH=[IFD] ID=(0x0132) NAME=[DateTime] COUNT=(20) TYPE=[ASCII] VALUE=[2017:12:02 08:18:50]
...
```

You can also print the raw, parsed data as JSON:

```
$ exif-read-tool --filepath "<media file-path>" -json
```

Example output:

```
[
    {
        "ifd_path": "IFD",
        "fq_ifd_path": "IFD",
        "ifd_index": 0,
        "tag_id": 271,
        "tag_name": "Make",
        "tag_type_id": 2,
        "tag_type_name": "ASCII",
        "unit_count": 6,
        "value": "Canon",
        "value_string": "Canon"
    },
    {
        "ifd_path": "IFD",
...
```


# Testing

The traditional method:

```
$ go test github.com/dsoprea/go-exif/v3/...
```


# Release Notes

## v3 Release

This release primarily introduces an interchangeable data-layer, where any
`io.ReadSeeker` can be used to read EXIF data rather than necessarily loading
the EXIF blob into memory first.

Several backwards-incompatible clean-ups were also included in this release. See
[releases][releases] for more information.

## v2 Release

Features a heavily reflowed interface that makes usage much simpler. The
undefined-type tag-processing (which affects most photographic images) has also
been overhauled and streamlined. It is now complete and stable. Adoption is
strongly encouraged.


# *Contributing*

EXIF has an excellently-documented structure but there are a lot of devices and
manufacturers out there. There are only so many files that we can personally
find to test against, and most of these are images that have been generated only
in the past few years. JPEG, being the largest implementor of EXIF, has been
around for even longer (but not much). Therefore, there is a lot of
compatibility to test for.

**If you are able to help by running the included reader-tool against all of the
EXIF-compatible files you have, it would be deeply appreciated. This is mostly
going to be JPEG files (but not all variations). If you are able to test a large
number of files (thousands or millions) then please post an issue mentioning how
many files you have processed. If you had failures, then please share them and
try to support efforts to understand them.**

If you are able to test 100K+ files, I will give you credit on the project. The
further back in time your images reach, the higher in the list your name/company
will go.


# Contributors/Testing

Thank you to the following users for solving non-trivial issues, supporting the
project with solving edge-case problems in specific images, or otherwise
providing their non-trivial time or image corpus to test go-exif:

- [philip-firstorder](https://github.com/philip-firstorder) (200K images)
- [matchstick](https://github.com/matchstick) (102K images)

In addition to these, it has been tested on my own collection, north of 478K
images.

[search-and-extract-exif]: https://godoc.org/github.com/dsoprea/go-exif/v3#SearchAndExtractExif
[search-file-and-extract-exif]: https://godoc.org/github.com/dsoprea/go-exif/v3#SearchFileAndExtractExif
[jpeg-set-exif]: https://godoc.org/github.com/dsoprea/go-jpeg-image-structure#example-SegmentList-SetExif
[examples]: https://godoc.org/github.com/dsoprea/go-exif/v3#pkg-examples
[releases]: https://github.com/dsoprea/go-exif/releases
10 vendor/github.com/dsoprea/go-exif/error.go generated vendored Normal file
@@ -0,0 +1,10 @@
package exif

import (
	"errors"
)

var (
	ErrTagNotFound    = errors.New("tag not found")
	ErrTagNotStandard = errors.New("tag not a standard tag")
)
247 vendor/github.com/dsoprea/go-exif/exif.go generated vendored Normal file
@@ -0,0 +1,247 @@
package exif

import (
	"bytes"
	"errors"
	"fmt"
	"os"

	"encoding/binary"
	"io/ioutil"

	"github.com/dsoprea/go-logging"
)

const (
	// ExifAddressableAreaStart is the absolute offset in the file that all
	// offsets are relative to.
	ExifAddressableAreaStart = uint32(0x0)

	// ExifDefaultFirstIfdOffset is essentially the number of bytes in addition
	// to `ExifAddressableAreaStart` that you have to move in order to escape
	// the rest of the header and get to the earliest point where we can put
	// stuff (which has to be the first IFD). This is the size of the header
	// sequence containing the two-character byte-order, two-character fixed-
	// bytes, and the four bytes describing the first-IFD offset.
	ExifDefaultFirstIfdOffset = uint32(2 + 2 + 4)
)

var (
	exifLogger = log.NewLogger("exif.exif")

	// EncodeDefaultByteOrder is the default byte-order for encoding operations.
	EncodeDefaultByteOrder = binary.BigEndian

	// Default byte order for tests.
	TestDefaultByteOrder = binary.BigEndian

	BigEndianBoBytes    = [2]byte{'M', 'M'}
	LittleEndianBoBytes = [2]byte{'I', 'I'}

	ByteOrderLookup = map[[2]byte]binary.ByteOrder{
		BigEndianBoBytes:    binary.BigEndian,
		LittleEndianBoBytes: binary.LittleEndian,
	}

	ByteOrderLookupR = map[binary.ByteOrder][2]byte{
		binary.BigEndian:    BigEndianBoBytes,
		binary.LittleEndian: LittleEndianBoBytes,
	}

	ExifFixedBytesLookup = map[binary.ByteOrder][2]byte{
		binary.LittleEndian: {0x2a, 0x00},
		binary.BigEndian:    {0x00, 0x2a},
	}
)

var (
	ErrNoExif          = errors.New("no exif data")
	ErrExifHeaderError = errors.New("exif header error")
)

// SearchAndExtractExif returns a slice from the beginning of the EXIF data to
// the end of the file (it's not practical to try and calculate where the data
// actually ends; it needs to be formally parsed).
func SearchAndExtractExif(data []byte) (rawExif []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err := log.Wrap(state.(error))
			log.Panic(err)
		}
	}()

	// Search for the beginning of the EXIF information. The EXIF is near the
	// beginning of most JPEGs, so this has a very low cost.

	foundAt := -1
	for i := 0; i < len(data); i++ {
		if _, err := ParseExifHeader(data[i:]); err == nil {
			foundAt = i
			break
		} else if log.Is(err, ErrNoExif) == false {
			return nil, err
		}
	}

	if foundAt == -1 {
		return nil, ErrNoExif
	}

	return data[foundAt:], nil
}

// SearchFileAndExtractExif returns a slice from the beginning of the EXIF data
// to the end of the file (it's not practical to try and calculate where the
// data actually ends).
func SearchFileAndExtractExif(filepath string) (rawExif []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err := log.Wrap(state.(error))
			log.Panic(err)
		}
	}()

	// Open the file.

	f, err := os.Open(filepath)
	log.PanicIf(err)

	defer f.Close()

	data, err := ioutil.ReadAll(f)
	log.PanicIf(err)

	rawExif, err = SearchAndExtractExif(data)
	log.PanicIf(err)

	return rawExif, nil
}

type ExifHeader struct {
	ByteOrder      binary.ByteOrder
	FirstIfdOffset uint32
}

func (eh ExifHeader) String() string {
	return fmt.Sprintf("ExifHeader<BYTE-ORDER=[%v] FIRST-IFD-OFFSET=(0x%02x)>", eh.ByteOrder, eh.FirstIfdOffset)
}

// ParseExifHeader parses the bytes at the very top of the header.
//
// This will panic with ErrNoExif on any data errors so that we can double as
// an EXIF-detection routine.
func ParseExifHeader(data []byte) (eh ExifHeader, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// Good reference:
	//
	//      CIPA DC-008-2016; JEITA CP-3451D
	//      -> http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf

	if len(data) < 2 {
		exifLogger.Warningf(nil, "Not enough data for EXIF header (1): (%d)", len(data))
		return eh, ErrNoExif
	}

	byteOrderBytes := [2]byte{data[0], data[1]}

	byteOrder, found := ByteOrderLookup[byteOrderBytes]
	if found == false {
		// exifLogger.Warningf(nil, "EXIF byte-order not recognized: [%v]", byteOrderBytes)
		return eh, ErrNoExif
	}

	if len(data) < 4 {
		exifLogger.Warningf(nil, "Not enough data for EXIF header (2): (%d)", len(data))
		return eh, ErrNoExif
	}

	fixedBytes := [2]byte{data[2], data[3]}
	expectedFixedBytes := ExifFixedBytesLookup[byteOrder]
	if fixedBytes != expectedFixedBytes {
		// exifLogger.Warningf(nil, "EXIF header fixed-bytes should be [%v] but are: [%v]", expectedFixedBytes, fixedBytes)
		return eh, ErrNoExif
	}

	// The next read spans data[4:8], so all eight header bytes must be
	// present (the original check here tested for only two bytes, which
	// could slice out of range).
	if len(data) < 8 {
		exifLogger.Warningf(nil, "Not enough data for EXIF header (3): (%d)", len(data))
		return eh, ErrNoExif
	}

	firstIfdOffset := byteOrder.Uint32(data[4:8])

	eh = ExifHeader{
		ByteOrder:      byteOrder,
		FirstIfdOffset: firstIfdOffset,
	}

	return eh, nil
}

// Visit recursively invokes a callback for every tag.
func Visit(rootIfdName string, ifdMapping *IfdMapping, tagIndex *TagIndex, exifData []byte, visitor RawTagVisitor) (eh ExifHeader, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	eh, err = ParseExifHeader(exifData)
	log.PanicIf(err)

	ie := NewIfdEnumerate(ifdMapping, tagIndex, exifData, eh.ByteOrder)

	err = ie.Scan(rootIfdName, eh.FirstIfdOffset, visitor, true)
	log.PanicIf(err)

	return eh, nil
}

// Collect recursively builds a static structure of all IFDs and tags.
func Collect(ifdMapping *IfdMapping, tagIndex *TagIndex, exifData []byte) (eh ExifHeader, index IfdIndex, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	eh, err = ParseExifHeader(exifData)
	log.PanicIf(err)

	ie := NewIfdEnumerate(ifdMapping, tagIndex, exifData, eh.ByteOrder)

	index, err = ie.Collect(eh.FirstIfdOffset, true)
	log.PanicIf(err)

	return eh, index, nil
}

// BuildExifHeader constructs the bytes that go in the very beginning.
func BuildExifHeader(byteOrder binary.ByteOrder, firstIfdOffset uint32) (headerBytes []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	b := new(bytes.Buffer)

	// This is the point in the data that all offsets are relative to.
	boBytes := ByteOrderLookupR[byteOrder]
	_, err = b.WriteString(string(boBytes[:]))
	log.PanicIf(err)

	fixedBytes := ExifFixedBytesLookup[byteOrder]

	_, err = b.Write(fixedBytes[:])
	log.PanicIf(err)

	err = binary.Write(b, byteOrder, firstIfdOffset)
	log.PanicIf(err)

	return b.Bytes(), nil
}
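A small round-trip sketch of the header helpers above, using only this file's own API (no extra assumptions):

```
package main

import (
	"fmt"

	"github.com/dsoprea/go-exif"
)

func main() {
	// Build the eight-byte preamble: byte-order, fixed-bytes, first-IFD offset.
	headerBytes, err := exif.BuildExifHeader(exif.EncodeDefaultByteOrder, exif.ExifDefaultFirstIfdOffset)
	if err != nil {
		panic(err)
	}

	// ...and parse it straight back.
	eh, err := exif.ParseExifHeader(headerBytes)
	if err != nil {
		panic(err)
	}

	// Prints something like: ExifHeader<BYTE-ORDER=[BigEndian] FIRST-IFD-OFFSET=(0x08)>
	fmt.Println(eh)
}
```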
11 vendor/github.com/dsoprea/go-exif/go.mod generated vendored Normal file
@@ -0,0 +1,11 @@
module github.com/dsoprea/go-exif

go 1.13

require (
	github.com/dsoprea/go-logging v0.0.0-20190624164917-c4f10aab7696
	github.com/go-errors/errors v1.0.1 // indirect
	github.com/golang/geo v0.0.0-20190916061304-5b978397cfec
	golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
	gopkg.in/yaml.v2 v2.2.7
)
14 vendor/github.com/dsoprea/go-exif/go.sum generated vendored Normal file
@@ -0,0 +1,14 @@
github.com/dsoprea/go-logging v0.0.0-20190624164917-c4f10aab7696 h1:VGFnZAcLwPpt1sHlAxml+pGLZz9A2s+K/s1YNhPC91Y=
github.com/dsoprea/go-logging v0.0.0-20190624164917-c4f10aab7696/go.mod h1:Nm/x2ZUNRW6Fe5C3LxdY1PyZY5wmDv/s5dkPJ/VB3iA=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
56 vendor/github.com/dsoprea/go-exif/gps.go generated vendored Normal file
@@ -0,0 +1,56 @@
package exif

import (
	"errors"
	"fmt"
	"time"

	"github.com/golang/geo/s2"
)

var (
	ErrGpsCoordinatesNotValid = errors.New("GPS coordinates not valid")
)

type GpsDegrees struct {
	Orientation               byte
	Degrees, Minutes, Seconds float64
}

func (d GpsDegrees) String() string {
	return fmt.Sprintf("Degrees<O=[%s] D=(%g) M=(%g) S=(%g)>", string([]byte{d.Orientation}), d.Degrees, d.Minutes, d.Seconds)
}

func (d GpsDegrees) Decimal() float64 {
	decimal := float64(d.Degrees) + float64(d.Minutes)/60.0 + float64(d.Seconds)/3600.0

	if d.Orientation == 'S' || d.Orientation == 'W' {
		return -decimal
	} else {
		return decimal
	}
}

type GpsInfo struct {
	Latitude, Longitude GpsDegrees
	Altitude            int
	Timestamp           time.Time
}

func (gi *GpsInfo) String() string {
	return fmt.Sprintf("GpsInfo<LAT=(%.05f) LON=(%.05f) ALT=(%d) TIME=[%s]>", gi.Latitude.Decimal(), gi.Longitude.Decimal(), gi.Altitude, gi.Timestamp)
}

func (gi *GpsInfo) S2CellId() s2.CellID {
	latitude := gi.Latitude.Decimal()
	longitude := gi.Longitude.Decimal()

	ll := s2.LatLngFromDegrees(latitude, longitude)
	cellId := s2.CellIDFromLatLng(ll)

	if cellId.IsValid() == false {
		panic(ErrGpsCoordinatesNotValid)
	}

	return cellId
}
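A worked example of Decimal(): 122°25'9.6"W converts as 122 + 25/60 + 9.6/3600 = 122.419333..., negated because of the westerly orientation:

```
package main

import (
	"fmt"

	"github.com/dsoprea/go-exif"
)

func main() {
	// 122°25'9.6" West; the 'W' orientation makes the decimal value negative.
	d := exif.GpsDegrees{
		Orientation: 'W',
		Degrees:     122,
		Minutes:     25,
		Seconds:     9.6,
	}

	fmt.Printf("%.6f\n", d.Decimal()) // -122.419333
}
```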
407 vendor/github.com/dsoprea/go-exif/ifd.go generated vendored Normal file
@@ -0,0 +1,407 @@
package exif

import (
	"errors"
	"fmt"
	"strings"

	"github.com/dsoprea/go-logging"
)

const (
	// IFD names. The paths that we refer to the IFDs with are composed of
	// these.

	IfdStandard = "IFD"
	IfdExif     = "Exif"
	IfdGps      = "GPSInfo"
	IfdIop      = "Iop"

	// Tag IDs for child IFDs.

	IfdExifId = 0x8769
	IfdGpsId  = 0x8825
	IfdIopId  = 0xA005

	// Just a placeholder.

	IfdRootId = 0x0000

	// The paths of the standard IFDs expressed in the standard IFD-mappings
	// and as the group-names in the tag data.

	IfdPathStandard        = "IFD"
	IfdPathStandardExif    = "IFD/Exif"
	IfdPathStandardExifIop = "IFD/Exif/Iop"
	IfdPathStandardGps     = "IFD/GPSInfo"
)

var (
	ifdLogger = log.NewLogger("exif.ifd")
)

var (
	ErrChildIfdNotMapped = errors.New("no child-IFD for that tag-ID under parent")
)

// type IfdIdentity struct {
// 	ParentIfdName string
// 	IfdName       string
// }

// func (ii IfdIdentity) String() string {
// 	return fmt.Sprintf("IfdIdentity<PARENT-NAME=[%s] NAME=[%s]>", ii.ParentIfdName, ii.IfdName)
// }

type MappedIfd struct {
	ParentTagId uint16
	Placement   []uint16
	Path        []string

	Name     string
	TagId    uint16
	Children map[uint16]*MappedIfd
}

func (mi *MappedIfd) String() string {
	pathPhrase := mi.PathPhrase()
	return fmt.Sprintf("MappedIfd<(0x%04X) [%s] PATH=[%s]>", mi.TagId, mi.Name, pathPhrase)
}

func (mi *MappedIfd) PathPhrase() string {
	return strings.Join(mi.Path, "/")
}

// IfdMapping describes all of the IFDs that we currently recognize.
type IfdMapping struct {
	rootNode *MappedIfd
}

func NewIfdMapping() (ifdMapping *IfdMapping) {
	rootNode := &MappedIfd{
		Path:     make([]string, 0),
		Children: make(map[uint16]*MappedIfd),
	}

	return &IfdMapping{
		rootNode: rootNode,
	}
}

func NewIfdMappingWithStandard() (ifdMapping *IfdMapping) {
	defer func() {
		if state := recover(); state != nil {
			err := log.Wrap(state.(error))
			log.Panic(err)
		}
	}()

	im := NewIfdMapping()

	err := LoadStandardIfds(im)
	log.PanicIf(err)

	return im
}

func (im *IfdMapping) Get(parentPlacement []uint16) (childIfd *MappedIfd, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ptr := im.rootNode
	for _, tagId := range parentPlacement {
		if descendantPtr, found := ptr.Children[tagId]; found == false {
			log.Panicf("ifd child with tag-ID (%04x) not registered: [%s]", tagId, ptr.PathPhrase())
		} else {
			ptr = descendantPtr
		}
	}

	return ptr, nil
}

func (im *IfdMapping) GetWithPath(pathPhrase string) (mi *MappedIfd, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	if pathPhrase == "" {
		log.Panicf("path-phrase is empty")
	}

	path := strings.Split(pathPhrase, "/")
	ptr := im.rootNode

	for _, name := range path {
		var hit *MappedIfd
		for _, mi := range ptr.Children {
			if mi.Name == name {
				hit = mi
				break
			}
		}

		if hit == nil {
			log.Panicf("ifd child with name [%s] not registered: [%s]", name, ptr.PathPhrase())
		}

		ptr = hit
	}

	return ptr, nil
}

// GetChild is a convenience function to get the child path for a given parent
// placement and child tag-ID.
func (im *IfdMapping) GetChild(parentPathPhrase string, tagId uint16) (mi *MappedIfd, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	mi, err = im.GetWithPath(parentPathPhrase)
	log.PanicIf(err)

	for _, childMi := range mi.Children {
		if childMi.TagId == tagId {
			return childMi, nil
		}
	}

	// Whether or not an IFD is defined in data, such an IFD is not registered
	// and would be unknown.
	log.Panic(ErrChildIfdNotMapped)
	return nil, nil
}

type IfdTagIdAndIndex struct {
	Name  string
	TagId uint16
	Index int
}

func (itii IfdTagIdAndIndex) String() string {
	return fmt.Sprintf("IfdTagIdAndIndex<NAME=[%s] ID=(%04x) INDEX=(%d)>", itii.Name, itii.TagId, itii.Index)
}

// ResolvePath takes a list of names, which can also be suffixed with indices
// (to identify the second, third, etc. sibling IFD), and returns a list of
// tag-IDs and those indices.
//
// Example:
//
// - IFD/Exif/Iop
// - IFD0/Exif/Iop
//
// This is the only call that supports adding the numeric indices.
func (im *IfdMapping) ResolvePath(pathPhrase string) (lineage []IfdTagIdAndIndex, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	pathPhrase = strings.TrimSpace(pathPhrase)

	if pathPhrase == "" {
		log.Panicf("can not resolve empty path-phrase")
	}

	path := strings.Split(pathPhrase, "/")
	lineage = make([]IfdTagIdAndIndex, len(path))

	ptr := im.rootNode
	empty := IfdTagIdAndIndex{}
	for i, name := range path {
		indexByte := name[len(name)-1]
		index := 0
		if indexByte >= '0' && indexByte <= '9' {
			index = int(indexByte - '0')
			name = name[:len(name)-1]
		}

		itii := IfdTagIdAndIndex{}
		for _, mi := range ptr.Children {
			if mi.Name != name {
				continue
			}

			itii.Name = name
			itii.TagId = mi.TagId
			itii.Index = index

			ptr = mi

			break
		}

		if itii == empty {
			log.Panicf("ifd child with name [%s] not registered: [%s]", name, pathPhrase)
		}

		lineage[i] = itii
	}

	return lineage, nil
}

func (im *IfdMapping) FqPathPhraseFromLineage(lineage []IfdTagIdAndIndex) (fqPathPhrase string) {
	fqPathParts := make([]string, len(lineage))
	for i, itii := range lineage {
		if itii.Index > 0 {
			fqPathParts[i] = fmt.Sprintf("%s%d", itii.Name, itii.Index)
		} else {
			fqPathParts[i] = itii.Name
		}
	}

	return strings.Join(fqPathParts, "/")
}

func (im *IfdMapping) PathPhraseFromLineage(lineage []IfdTagIdAndIndex) (pathPhrase string) {
	pathParts := make([]string, len(lineage))
	for i, itii := range lineage {
		pathParts[i] = itii.Name
	}

	return strings.Join(pathParts, "/")
}

// StripPathPhraseIndices returns a non-fully-qualified path-phrase (no
// indices).
func (im *IfdMapping) StripPathPhraseIndices(pathPhrase string) (strippedPathPhrase string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	lineage, err := im.ResolvePath(pathPhrase)
	log.PanicIf(err)

	strippedPathPhrase = im.PathPhraseFromLineage(lineage)
	return strippedPathPhrase, nil
}

// Add puts the given IFD at the given position of the tree. The position of the
// tree is referred to as the placement and is represented by a set of tag-IDs,
// where the leftmost is the root tag and the tags going to the right are
// progressive descendants.
func (im *IfdMapping) Add(parentPlacement []uint16, tagId uint16, name string) (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): !! It would be nicer to provide a list of names in the placement rather than tag-IDs.

	ptr, err := im.Get(parentPlacement)
	log.PanicIf(err)

	path := make([]string, len(parentPlacement)+1)
	if len(parentPlacement) > 0 {
		copy(path, ptr.Path)
	}

	path[len(path)-1] = name

	placement := make([]uint16, len(parentPlacement)+1)
	if len(placement) > 0 {
		copy(placement, ptr.Placement)
	}

	placement[len(placement)-1] = tagId

	childIfd := &MappedIfd{
		ParentTagId: ptr.TagId,
		Path:        path,
		Placement:   placement,
		Name:        name,
		TagId:       tagId,
		Children:    make(map[uint16]*MappedIfd),
	}

	if _, found := ptr.Children[tagId]; found == true {
		log.Panicf("child IFD with tag-ID (%04x) already registered under IFD [%s] with tag-ID (%04x)", tagId, ptr.Name, ptr.TagId)
	}

	ptr.Children[tagId] = childIfd

	return nil
}

func (im *IfdMapping) dumpLineages(stack []*MappedIfd, input []string) (output []string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	currentIfd := stack[len(stack)-1]

	output = input
	for _, childIfd := range currentIfd.Children {
		stackCopy := make([]*MappedIfd, len(stack)+1)

		copy(stackCopy, stack)
		stackCopy[len(stack)] = childIfd

		// Add to output, but don't include the obligatory root node.
		parts := make([]string, len(stackCopy)-1)
		for i, mi := range stackCopy[1:] {
			parts[i] = mi.Name
		}

		output = append(output, strings.Join(parts, "/"))

		output, err = im.dumpLineages(stackCopy, output)
		log.PanicIf(err)
	}

	return output, nil
}

func (im *IfdMapping) DumpLineages() (output []string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	stack := []*MappedIfd{im.rootNode}
	output = make([]string, 0)

	output, err = im.dumpLineages(stack, output)
	log.PanicIf(err)

	return output, nil
}

func LoadStandardIfds(im *IfdMapping) (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	err = im.Add([]uint16{}, IfdRootId, IfdStandard)
	log.PanicIf(err)

	err = im.Add([]uint16{IfdRootId}, IfdExifId, IfdExif)
	log.PanicIf(err)

	err = im.Add([]uint16{IfdRootId, IfdExifId}, IfdIopId, IfdIop)
	log.PanicIf(err)

	err = im.Add([]uint16{IfdRootId}, IfdGpsId, IfdGps)
	log.PanicIf(err)

	return nil
}
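A short sketch of the standard mapping and the index-suffixed path resolution described above:

```
package main

import (
	"fmt"

	"github.com/dsoprea/go-exif"
)

func main() {
	im := exif.NewIfdMappingWithStandard()

	// "IFD1" pins the second sibling root IFD (where a thumbnail lives);
	// the trailing digit is the optional index suffix.
	lineage, err := im.ResolvePath("IFD1/Exif")
	if err != nil {
		panic(err)
	}

	fmt.Println(im.FqPathPhraseFromLineage(lineage)) // IFD1/Exif
	fmt.Println(im.PathPhraseFromLineage(lineage))   // IFD/Exif
}
```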
1265 vendor/github.com/dsoprea/go-exif/ifd_builder.go generated vendored Normal file
File diff suppressed because it is too large
530 vendor/github.com/dsoprea/go-exif/ifd_builder_encode.go generated vendored Normal file
@@ -0,0 +1,530 @@
package exif

import (
	"bytes"
	"fmt"
	"strings"

	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

const (
	// Tag-ID + Tag-Type + Unit-Count + Value/Offset.
	IfdTagEntrySize = uint32(2 + 2 + 4 + 4)
)

type ByteWriter struct {
	b         *bytes.Buffer
	byteOrder binary.ByteOrder
}

func NewByteWriter(b *bytes.Buffer, byteOrder binary.ByteOrder) (bw *ByteWriter) {
	return &ByteWriter{
		b:         b,
		byteOrder: byteOrder,
	}
}

func (bw ByteWriter) writeAsBytes(value interface{}) (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	err = binary.Write(bw.b, bw.byteOrder, value)
	log.PanicIf(err)

	return nil
}

func (bw ByteWriter) WriteUint32(value uint32) (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	err = bw.writeAsBytes(value)
	log.PanicIf(err)

	return nil
}

func (bw ByteWriter) WriteUint16(value uint16) (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	err = bw.writeAsBytes(value)
	log.PanicIf(err)

	return nil
}

func (bw ByteWriter) WriteFourBytes(value []byte) (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	len_ := len(value)
	if len_ != 4 {
		log.Panicf("value is not four-bytes: (%d)", len_)
	}

	_, err = bw.b.Write(value)
	log.PanicIf(err)

	return nil
}

// ifdDataAllocator keeps track of where the next IFD should be written by
// keeping track of where the offsets start, the data that has been added, and
// bumping the offset *when* the data is added.
type ifdDataAllocator struct {
	offset uint32
	b      bytes.Buffer
}

func newIfdDataAllocator(ifdDataAddressableOffset uint32) *ifdDataAllocator {
	return &ifdDataAllocator{
		offset: ifdDataAddressableOffset,
	}
}

func (ida *ifdDataAllocator) Allocate(value []byte) (offset uint32, err error) {
	_, err = ida.b.Write(value)
	log.PanicIf(err)

	offset = ida.offset
	ida.offset += uint32(len(value))

	return offset, nil
}

func (ida *ifdDataAllocator) NextOffset() uint32 {
	return ida.offset
}

func (ida *ifdDataAllocator) Bytes() []byte {
	return ida.b.Bytes()
}

// IfdByteEncoder converts an IB to raw bytes (for writing) while also figuring
// out all of the allocations and indirection that is required for extended
// data.
type IfdByteEncoder struct {
	// journal holds a list of actions taken while encoding.
	journal [][3]string
}

func NewIfdByteEncoder() (ibe *IfdByteEncoder) {
	return &IfdByteEncoder{
		journal: make([][3]string, 0),
	}
}

func (ibe *IfdByteEncoder) Journal() [][3]string {
	return ibe.journal
}

func (ibe *IfdByteEncoder) TableSize(entryCount int) uint32 {
	// Tag-Count + (Entry-Size * Entry-Count) + Next-IFD-Offset.
	return uint32(2) + (IfdTagEntrySize * uint32(entryCount)) + uint32(4)
}

func (ibe *IfdByteEncoder) pushToJournal(where, direction, format string, args ...interface{}) {
	event := [3]string{
		direction,
		where,
		fmt.Sprintf(format, args...),
	}

	ibe.journal = append(ibe.journal, event)
}

// PrintJournal prints a hierarchical representation of the steps taken during
// encoding.
func (ibe *IfdByteEncoder) PrintJournal() {
	maxWhereLength := 0
	for _, event := range ibe.journal {
		where := event[1]

		len_ := len(where)
		if len_ > maxWhereLength {
			maxWhereLength = len_
		}
	}

	level := 0
	for i, event := range ibe.journal {
		direction := event[0]
		where := event[1]
		message := event[2]

		if direction != ">" && direction != "<" && direction != "-" {
			log.Panicf("journal operation not valid: [%s]", direction)
		}

		if direction == "<" {
			if level <= 0 {
				log.Panicf("journal operations unbalanced (too many closes)")
			}

			level--
		}

		indent := strings.Repeat("  ", level)

		fmt.Printf("%3d %s%s %s: %s\n", i, indent, direction, where, message)

		if direction == ">" {
			level++
		}
	}

	if level != 0 {
		log.Panicf("journal operations unbalanced (too many opens)")
	}
}

// encodeTagToBytes encodes the given tag to a byte stream. If
// `nextIfdOffsetToWrite` is more than (0), recurse into child IFDs
// (`nextIfdOffsetToWrite` is required in order for them to know where their
// IFD data will be written, in order for them to know the offset of where
// their allocated-data block will start, which follows right behind).
func (ibe *IfdByteEncoder) encodeTagToBytes(ib *IfdBuilder, bt *BuilderTag, bw *ByteWriter, ida *ifdDataAllocator, nextIfdOffsetToWrite uint32) (childIfdBlock []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Write tag-ID.
|
||||||
|
err = bw.WriteUint16(bt.tagId)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
// Works for both values and child IFDs (which have an official size of
|
||||||
|
// LONG).
|
||||||
|
err = bw.WriteUint16(uint16(bt.typeId))
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
// Write unit-count.
|
||||||
|
|
||||||
|
if bt.value.IsBytes() == true {
|
||||||
|
effectiveType := bt.typeId
|
||||||
|
if bt.typeId == TypeUndefined {
|
||||||
|
effectiveType = TypeByte
|
||||||
|
}
|
||||||
|
|
||||||
|
// It's a non-unknown value.Calculate the count of values of
|
||||||
|
// the type that we're writing and the raw bytes for the whole list.
|
||||||
|
|
||||||
|
typeSize := uint32(effectiveType.Size())
|
||||||
|
|
||||||
|
valueBytes := bt.value.Bytes()
|
||||||
|
|
||||||
|
len_ := len(valueBytes)
|
||||||
|
unitCount := uint32(len_) / typeSize
|
||||||
|
|
||||||
|
if _, found := tagsWithoutAlignment[bt.tagId]; found == false {
|
||||||
|
remainder := uint32(len_) % typeSize
|
||||||
|
|
||||||
|
if remainder > 0 {
|
||||||
|
log.Panicf("tag (0x%04x) value of (%d) bytes not evenly divisible by type-size (%d)", bt.tagId, len_, typeSize)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = bw.WriteUint32(unitCount)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
// Write four-byte value/offset.
|
||||||
|
|
||||||
|
if len_ > 4 {
|
||||||
|
offset, err := ida.Allocate(valueBytes)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
err = bw.WriteUint32(offset)
|
||||||
|
log.PanicIf(err)
|
||||||
|
} else {
|
||||||
|
fourBytes := make([]byte, 4)
|
||||||
|
copy(fourBytes, valueBytes)
|
||||||
|
|
||||||
|
err = bw.WriteFourBytes(fourBytes)
|
||||||
|
log.PanicIf(err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if bt.value.IsIb() == false {
|
||||||
|
log.Panicf("tag value is not a byte-slice but also not a child IB: %v", bt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write unit-count (one LONG representing one offset).
|
||||||
|
err = bw.WriteUint32(1)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
if nextIfdOffsetToWrite > 0 {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeTagToBytes", ">", "[%s]->[%s]", ib.ifdPath, bt.value.Ib().ifdPath)
|
||||||
|
|
||||||
|
// Create the block of IFD data and everything it requires.
|
||||||
|
childIfdBlock, err = ibe.encodeAndAttachIfd(bt.value.Ib(), nextIfdOffsetToWrite)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeTagToBytes", "<", "[%s]->[%s]", bt.value.Ib().ifdPath, ib.ifdPath)
|
||||||
|
|
||||||
|
// Use the next-IFD offset for it. The IFD will actually get
|
||||||
|
// attached after we return.
|
||||||
|
err = bw.WriteUint32(nextIfdOffsetToWrite)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// No child-IFDs are to be allocated. Finish the entry with a NULL
|
||||||
|
// pointer.
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeTagToBytes", "-", "*Not* descending to child: [%s]", bt.value.Ib().ifdPath)
|
||||||
|
|
||||||
|
err = bw.WriteUint32(0)
|
||||||
|
log.PanicIf(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return childIfdBlock, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeIfdToBytes encodes the given IB to a byte-slice. We are given the
|
||||||
|
// offset at which this IFD will be written. This method is used called both to
|
||||||
|
// pre-determine how big the table is going to be (so that we can calculate the
|
||||||
|
// address to allocate data at) as well as to write the final table.
|
||||||
|
//
|
||||||
|
// It is necessary to fully realize the table in order to predetermine its size
|
||||||
|
// because it is not enough to know the size of the table: If there are child
|
||||||
|
// IFDs, we will not be able to allocate them without first knowing how much
|
||||||
|
// data we need to allocate for the current IFD.
|
||||||
|
func (ibe *IfdByteEncoder) encodeIfdToBytes(ib *IfdBuilder, ifdAddressableOffset uint32, nextIfdOffsetToWrite uint32, setNextIb bool) (data []byte, tableSize uint32, dataSize uint32, childIfdSizes []uint32, err error) {
|
||||||
|
defer func() {
|
||||||
|
if state := recover(); state != nil {
|
||||||
|
err = log.Wrap(state.(error))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeIfdToBytes", ">", "%s", ib)
|
||||||
|
|
||||||
|
tableSize = ibe.TableSize(len(ib.tags))
|
||||||
|
|
||||||
|
b := new(bytes.Buffer)
|
||||||
|
bw := NewByteWriter(b, ib.byteOrder)
|
||||||
|
|
||||||
|
// Write tag count.
|
||||||
|
err = bw.WriteUint16(uint16(len(ib.tags)))
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
ida := newIfdDataAllocator(ifdAddressableOffset)
|
||||||
|
|
||||||
|
childIfdBlocks := make([][]byte, 0)
|
||||||
|
|
||||||
|
// Write raw bytes for each tag entry. Allocate larger data to be referred
|
||||||
|
// to in the follow-up data-block as required. Any "unknown"-byte tags that
|
||||||
|
// we can't parse will not be present here (using AddTagsFromExisting(), at
|
||||||
|
// least).
|
||||||
|
for _, bt := range ib.tags {
|
||||||
|
childIfdBlock, err := ibe.encodeTagToBytes(ib, bt, bw, ida, nextIfdOffsetToWrite)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
if childIfdBlock != nil {
|
||||||
|
// We aren't allowed to have non-nil child IFDs if we're just
|
||||||
|
// sizing things up.
|
||||||
|
if nextIfdOffsetToWrite == 0 {
|
||||||
|
log.Panicf("no IFD offset provided for child-IFDs; no new child-IFDs permitted")
|
||||||
|
}
|
||||||
|
|
||||||
|
nextIfdOffsetToWrite += uint32(len(childIfdBlock))
|
||||||
|
childIfdBlocks = append(childIfdBlocks, childIfdBlock)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dataBytes := ida.Bytes()
|
||||||
|
dataSize = uint32(len(dataBytes))
|
||||||
|
|
||||||
|
childIfdSizes = make([]uint32, len(childIfdBlocks))
|
||||||
|
childIfdsTotalSize := uint32(0)
|
||||||
|
for i, childIfdBlock := range childIfdBlocks {
|
||||||
|
len_ := uint32(len(childIfdBlock))
|
||||||
|
childIfdSizes[i] = len_
|
||||||
|
childIfdsTotalSize += len_
|
||||||
|
}
|
||||||
|
|
||||||
|
// N the link from this IFD to the next IFD that will be written in the
|
||||||
|
// next cycle.
|
||||||
|
if setNextIb == true {
|
||||||
|
// Write address of next IFD in chain. This will be the original
|
||||||
|
// allocation offset plus the size of everything we have allocated for
|
||||||
|
// this IFD and its child-IFDs.
|
||||||
|
//
|
||||||
|
// It is critical that this number is stepped properly. We experienced
|
||||||
|
// an issue whereby it first looked like we were duplicating the IFD and
|
||||||
|
// then that we were duplicating the tags in the wrong IFD, and then
|
||||||
|
// finally we determined that the next-IFD offset for the first IFD was
|
||||||
|
// accidentally pointing back to the EXIF IFD, so we were visiting it
|
||||||
|
// twice when visiting through the tags after decoding. It was an
|
||||||
|
// expensive bug to find.
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeIfdToBytes", "-", "Setting 'next' IFD to (0x%08x).", nextIfdOffsetToWrite)
|
||||||
|
|
||||||
|
err := bw.WriteUint32(nextIfdOffsetToWrite)
|
||||||
|
log.PanicIf(err)
|
||||||
|
} else {
|
||||||
|
err := bw.WriteUint32(0)
|
||||||
|
log.PanicIf(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = b.Write(dataBytes)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
// Append any child IFD blocks after our table and data blocks. These IFDs
|
||||||
|
// were equipped with the appropriate offset information so it's expected
|
||||||
|
// that all offsets referred to by these will be correct.
|
||||||
|
//
|
||||||
|
// Note that child-IFDs are append after the current IFD and before the
|
||||||
|
// next IFD, as opposed to the root IFDs, which are chained together but
|
||||||
|
// will be interrupted by these child-IFDs (which is expected, per the
|
||||||
|
// standard).
|
||||||
|
|
||||||
|
for _, childIfdBlock := range childIfdBlocks {
|
||||||
|
_, err = b.Write(childIfdBlock)
|
||||||
|
log.PanicIf(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeIfdToBytes", "<", "%s", ib)
|
||||||
|
|
||||||
|
return b.Bytes(), tableSize, dataSize, childIfdSizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeAndAttachIfd is a reentrant function that processes the IFD chain.
|
||||||
|
func (ibe *IfdByteEncoder) encodeAndAttachIfd(ib *IfdBuilder, ifdAddressableOffset uint32) (data []byte, err error) {
|
||||||
|
defer func() {
|
||||||
|
if state := recover(); state != nil {
|
||||||
|
err = log.Wrap(state.(error))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeAndAttachIfd", ">", "%s", ib)
|
||||||
|
|
||||||
|
b := new(bytes.Buffer)
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
|
||||||
|
for thisIb := ib; thisIb != nil; thisIb = thisIb.nextIb {
|
||||||
|
|
||||||
|
// Do a dry-run in order to pre-determine its size requirement.
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeAndAttachIfd", ">", "Beginning encoding process: (%d) [%s]", i, thisIb.ifdPath)
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeAndAttachIfd", ">", "Calculating size: (%d) [%s]", i, thisIb.ifdPath)
|
||||||
|
|
||||||
|
_, tableSize, allocatedDataSize, _, err := ibe.encodeIfdToBytes(thisIb, ifdAddressableOffset, 0, false)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeAndAttachIfd", "<", "Finished calculating size: (%d) [%s]", i, thisIb.ifdPath)
|
||||||
|
|
||||||
|
ifdAddressableOffset += tableSize
|
||||||
|
nextIfdOffsetToWrite := ifdAddressableOffset + allocatedDataSize
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeAndAttachIfd", ">", "Next IFD will be written at offset (0x%08x)", nextIfdOffsetToWrite)
|
||||||
|
|
||||||
|
// Write our IFD as well as any child-IFDs (now that we know the offset
|
||||||
|
// where new IFDs and their data will be allocated).
|
||||||
|
|
||||||
|
setNextIb := thisIb.nextIb != nil
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeAndAttachIfd", ">", "Encoding starting: (%d) [%s] NEXT-IFD-OFFSET-TO-WRITE=(0x%08x)", i, thisIb.ifdPath, nextIfdOffsetToWrite)
|
||||||
|
|
||||||
|
tableAndAllocated, effectiveTableSize, effectiveAllocatedDataSize, childIfdSizes, err :=
|
||||||
|
ibe.encodeIfdToBytes(thisIb, ifdAddressableOffset, nextIfdOffsetToWrite, setNextIb)
|
||||||
|
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
if effectiveTableSize != tableSize {
|
||||||
|
log.Panicf("written table size does not match the pre-calculated table size: (%d) != (%d) %s", effectiveTableSize, tableSize, ib)
|
||||||
|
} else if effectiveAllocatedDataSize != allocatedDataSize {
|
||||||
|
log.Panicf("written allocated-data size does not match the pre-calculated allocated-data size: (%d) != (%d) %s", effectiveAllocatedDataSize, allocatedDataSize, ib)
|
||||||
|
}
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeAndAttachIfd", "<", "Encoding done: (%d) [%s]", i, thisIb.ifdPath)
|
||||||
|
|
||||||
|
totalChildIfdSize := uint32(0)
|
||||||
|
for _, childIfdSize := range childIfdSizes {
|
||||||
|
totalChildIfdSize += childIfdSize
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(tableAndAllocated) != int(tableSize+allocatedDataSize+totalChildIfdSize) {
|
||||||
|
log.Panicf("IFD table and data is not a consistent size: (%d) != (%d)", len(tableAndAllocated), tableSize+allocatedDataSize+totalChildIfdSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(dustin): We might want to verify the original tableAndAllocated length, too.
|
||||||
|
|
||||||
|
_, err = b.Write(tableAndAllocated)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
// Advance past what we've allocated, thus far.
|
||||||
|
|
||||||
|
ifdAddressableOffset += allocatedDataSize + totalChildIfdSize
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeAndAttachIfd", "<", "Finishing encoding process: (%d) [%s] [FINAL:] NEXT-IFD-OFFSET-TO-WRITE=(0x%08x)", i, ib.ifdPath, nextIfdOffsetToWrite)
|
||||||
|
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
|
||||||
|
ibe.pushToJournal("encodeAndAttachIfd", "<", "%s", ib)
|
||||||
|
|
||||||
|
return b.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeToExifPayload is the base encoding step that transcribes the entire IB
|
||||||
|
// structure to its on-disk layout.
|
||||||
|
func (ibe *IfdByteEncoder) EncodeToExifPayload(ib *IfdBuilder) (data []byte, err error) {
|
||||||
|
defer func() {
|
||||||
|
if state := recover(); state != nil {
|
||||||
|
err = log.Wrap(state.(error))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
data, err = ibe.encodeAndAttachIfd(ib, ExifDefaultFirstIfdOffset)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeToExif calls EncodeToExifPayload and then packages the result into a
|
||||||
|
// complete EXIF block.
|
||||||
|
func (ibe *IfdByteEncoder) EncodeToExif(ib *IfdBuilder) (data []byte, err error) {
|
||||||
|
defer func() {
|
||||||
|
if state := recover(); state != nil {
|
||||||
|
err = log.Wrap(state.(error))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
encodedIfds, err := ibe.EncodeToExifPayload(ib)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
// Wrap the IFD in a formal EXIF block.
|
||||||
|
|
||||||
|
b := new(bytes.Buffer)
|
||||||
|
|
||||||
|
headerBytes, err := BuildExifHeader(ib.byteOrder, ExifDefaultFirstIfdOffset)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
_, err = b.Write(headerBytes)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
_, err = b.Write(encodedIfds)
|
||||||
|
log.PanicIf(err)
|
||||||
|
|
||||||
|
return b.Bytes(), nil
|
||||||
|
}
|
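As a quick orientation to the encoder above, here is a minimal usage sketch (not part of the diff); it assumes an *IfdBuilder `ib` already populated through the ifd_builder.go API whose diff is suppressed above:

	// Sketch only: `ib` is an assumed, pre-populated *IfdBuilder.
	ibe := NewIfdByteEncoder()

	exifData, err := ibe.EncodeToExif(ib)
	log.PanicIf(err)

	// The journal records every step taken while encoding and can be
	// printed to debug offset problems.
	ibe.PrintJournal()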
1356 vendor/github.com/dsoprea/go-exif/ifd_enumerate.go generated vendored Normal file
File diff suppressed because it is too large
233 vendor/github.com/dsoprea/go-exif/ifd_tag_entry.go generated vendored Normal file
@@ -0,0 +1,233 @@
package exif

import (
	"fmt"
	"reflect"

	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

var (
	iteLogger = log.NewLogger("exif.ifd_tag_entry")
)

type IfdTagEntry struct {
	TagId          uint16
	TagIndex       int
	TagType        TagTypePrimitive
	UnitCount      uint32
	ValueOffset    uint32
	RawValueOffset []byte

	// ChildIfdName is the right-most atom in the IFD-path. We need this to
	// construct the fully-qualified IFD-path.
	ChildIfdName string

	// ChildIfdPath is the IFD-path of the child if this tag represents a child
	// IFD.
	ChildIfdPath string

	// ChildFqIfdPath is the IFD-path of the child if this tag represents a
	// child IFD. Includes indices.
	ChildFqIfdPath string

	// TODO(dustin): !! IBs host the child-IBs directly in the tag, but that's not the case here. Refactor to accommodate it for a consistent experience.

	// IfdPath is the IFD that this tag belongs to.
	IfdPath string

	// TODO(dustin): !! We now parse and read the value immediately. Update the rest of the logic to use this and get rid of all of the staggered and different resolution mechanisms.
	value              []byte
	isUnhandledUnknown bool
}

func (ite *IfdTagEntry) String() string {
	return fmt.Sprintf("IfdTagEntry<TAG-IFD-PATH=[%s] TAG-ID=(0x%04x) TAG-TYPE=[%s] UNIT-COUNT=(%d)>", ite.IfdPath, ite.TagId, TypeNames[ite.TagType], ite.UnitCount)
}

// TODO(dustin): Stop exporting IfdPath and TagId.
//
// func (ite *IfdTagEntry) IfdPath() string {
//     return ite.IfdPath
// }

// TODO(dustin): Stop exporting IfdPath and TagId.
//
// func (ite *IfdTagEntry) TagId() uint16 {
//     return ite.TagId
// }

// ValueString renders a string from whatever the value in this tag is.
func (ite *IfdTagEntry) ValueString(addressableData []byte, byteOrder binary.ByteOrder) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	valueContext :=
		newValueContextFromTag(
			ite,
			addressableData,
			byteOrder)

	if ite.TagType == TypeUndefined {
		valueRaw, err := valueContext.Undefined()
		log.PanicIf(err)

		value = fmt.Sprintf("%v", valueRaw)
	} else {
		value, err = valueContext.Format()
		log.PanicIf(err)
	}

	return value, nil
}

// ValueBytes renders a specific list of bytes from the value in this tag.
func (ite *IfdTagEntry) ValueBytes(addressableData []byte, byteOrder binary.ByteOrder) (value []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// Return the exact bytes of the unknown-type value. Returning a string
	// (`ValueString`) is easy because we can just pass everything to
	// `Sprintf()`. Returning the raw, typed value (`Value`) is easy
	// (obviously). However, here, in order to produce the list of bytes, we
	// need to coerce whatever `Undefined()` returns.
	if ite.TagType == TypeUndefined {
		valueContext :=
			newValueContextFromTag(
				ite,
				addressableData,
				byteOrder)

		value, err := valueContext.Undefined()
		log.PanicIf(err)

		switch value.(type) {
		case []byte:
			return value.([]byte), nil
		case TagUnknownType_UnknownValue:
			b := []byte(value.(TagUnknownType_UnknownValue))
			return b, nil
		case string:
			return []byte(value.(string)), nil
		case UnknownTagValue:
			valueBytes, err := value.(UnknownTagValue).ValueBytes()
			log.PanicIf(err)

			return valueBytes, nil
		default:
			// TODO(dustin): !! Finish translating the rest of the types (make reusable and replace into other similar implementations?)
			log.Panicf("can not produce bytes for unknown-type tag (0x%04x) (2): [%s]", ite.TagId, reflect.TypeOf(value))
		}
	}

	originalType := NewTagType(ite.TagType, byteOrder)
	byteCount := uint32(originalType.Type().Size()) * ite.UnitCount

	tt := NewTagType(TypeByte, byteOrder)

	if tt.valueIsEmbedded(byteCount) == true {
		iteLogger.Debugf(nil, "Reading BYTE value (ITE; embedded).")

		// In this case, the bytes normally used for the offset are actually
		// data.
		value, err = tt.ParseBytes(ite.RawValueOffset, byteCount)
		log.PanicIf(err)
	} else {
		iteLogger.Debugf(nil, "Reading BYTE value (ITE; at offset).")

		value, err = tt.ParseBytes(addressableData[ite.ValueOffset:], byteCount)
		log.PanicIf(err)
	}

	return value, nil
}

// Value returns the specific, parsed, typed value from the tag.
func (ite *IfdTagEntry) Value(addressableData []byte, byteOrder binary.ByteOrder) (value interface{}, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	valueContext :=
		newValueContextFromTag(
			ite,
			addressableData,
			byteOrder)

	if ite.TagType == TypeUndefined {
		value, err = valueContext.Undefined()
		log.PanicIf(err)
	} else {
		tt := NewTagType(ite.TagType, byteOrder)

		value, err = tt.Resolve(valueContext)
		log.PanicIf(err)
	}

	return value, nil
}

// IfdTagEntryValueResolver instances know how to resolve the values for any
// tag for a particular EXIF block.
type IfdTagEntryValueResolver struct {
	addressableData []byte
	byteOrder       binary.ByteOrder
}

func NewIfdTagEntryValueResolver(exifData []byte, byteOrder binary.ByteOrder) (itevr *IfdTagEntryValueResolver) {
	return &IfdTagEntryValueResolver{
		addressableData: exifData[ExifAddressableAreaStart:],
		byteOrder:       byteOrder,
	}
}

// ValueBytes will resolve embedded or allocated data from the tag and return the raw bytes.
func (itevr *IfdTagEntryValueResolver) ValueBytes(ite *IfdTagEntry) (value []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// OBSOLETE(dustin): This is now redundant. Use `(*ValueContext).readRawEncoded()` instead of this method.

	valueContext := newValueContextFromTag(
		ite,
		itevr.addressableData,
		itevr.byteOrder)

	rawBytes, err := valueContext.readRawEncoded()
	log.PanicIf(err)

	return rawBytes, nil
}

func (itevr *IfdTagEntryValueResolver) Value(ite *IfdTagEntry) (value interface{}, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// OBSOLETE(dustin): This is now redundant. Use `(*ValueContext).Values()` instead of this method.

	valueContext := newValueContextFromTag(
		ite,
		itevr.addressableData,
		itevr.byteOrder)

	values, err := valueContext.Values()
	log.PanicIf(err)

	return values, nil
}
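A minimal sketch (not part of the diff) of resolving a tag's typed value; `exifData` is a raw EXIF block and `ite` an *IfdTagEntry assumed to come from the enumeration code in the suppressed ifd_enumerate.go:

	// Sketch only: `exifData` and `ite` are assumed inputs.
	itevr := NewIfdTagEntryValueResolver(exifData, binary.BigEndian)

	value, err := itevr.Value(ite)
	log.PanicIf(err)

	fmt.Printf("%s = %v\n", ite, value)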
4 vendor/github.com/dsoprea/go-exif/package.go generated vendored Normal file
@@ -0,0 +1,4 @@
// exif parses raw EXIF information given a block of raw EXIF data.
//
// v1 of go-exif is now deprecated. Please use v2.
package exif
190 vendor/github.com/dsoprea/go-exif/parser.go generated vendored Normal file
@@ -0,0 +1,190 @@
package exif

import (
	"bytes"

	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

type Parser struct {
}

func (p *Parser) ParseBytes(data []byte, unitCount uint32) (value []uint8, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	count := int(unitCount)

	if len(data) < (TypeByte.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	value = []uint8(data[:count])

	return value, nil
}

// ParseAscii returns a string and auto-strips the trailing NUL character.
func (p *Parser) ParseAscii(data []byte, unitCount uint32) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	count := int(unitCount)

	if len(data) < (TypeAscii.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	if len(data) == 0 || data[count-1] != 0 {
		s := string(data[:count])
		typeLogger.Warningf(nil, "ascii not terminated with nul as expected: [%v]", s)

		return s, nil
	} else {
		// Auto-strip the NUL from the end. It serves no purpose outside of
		// encoding semantics.

		return string(data[:count-1]), nil
	}
}

// ParseAsciiNoNul returns a string without any consideration for a trailing NUL
// character.
func (p *Parser) ParseAsciiNoNul(data []byte, unitCount uint32) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	count := int(unitCount)

	if len(data) < (TypeAscii.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	return string(data[:count]), nil
}

func (p *Parser) ParseShorts(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint16, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	count := int(unitCount)

	if len(data) < (TypeShort.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	value = make([]uint16, count)
	for i := 0; i < count; i++ {
		value[i] = byteOrder.Uint16(data[i*2:])
	}

	return value, nil
}

func (p *Parser) ParseLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	count := int(unitCount)

	if len(data) < (TypeLong.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	value = make([]uint32, count)
	for i := 0; i < count; i++ {
		value[i] = byteOrder.Uint32(data[i*4:])
	}

	return value, nil
}

func (p *Parser) ParseRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []Rational, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	count := int(unitCount)

	if len(data) < (TypeRational.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	value = make([]Rational, count)
	for i := 0; i < count; i++ {
		value[i].Numerator = byteOrder.Uint32(data[i*8:])
		value[i].Denominator = byteOrder.Uint32(data[i*8+4:])
	}

	return value, nil
}

func (p *Parser) ParseSignedLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []int32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	count := int(unitCount)

	if len(data) < (TypeSignedLong.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	b := bytes.NewBuffer(data)

	value = make([]int32, count)
	for i := 0; i < count; i++ {
		err := binary.Read(b, byteOrder, &value[i])
		log.PanicIf(err)
	}

	return value, nil
}

func (p *Parser) ParseSignedRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []SignedRational, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	count := int(unitCount)

	if len(data) < (TypeSignedRational.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	b := bytes.NewBuffer(data)

	value = make([]SignedRational, count)
	for i := 0; i < count; i++ {
		err = binary.Read(b, byteOrder, &value[i].Numerator)
		log.PanicIf(err)

		err = binary.Read(b, byteOrder, &value[i].Denominator)
		log.PanicIf(err)
	}

	return value, nil
}
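The Parser methods above are pure functions over raw bytes; a minimal sketch (not part of the diff):

	// Sketch only: decode two big-endian SHORT values.
	p := new(Parser)

	shorts, err := p.ParseShorts([]byte{0x00, 0x01, 0x00, 0x02}, 2, binary.BigEndian)
	log.PanicIf(err)

	// shorts == []uint16{1, 2}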
397 vendor/github.com/dsoprea/go-exif/tag_type.go generated vendored Normal file
@@ -0,0 +1,397 @@
package exif

// NOTE(dustin): Most of this file encapsulates deprecated functionality and awaits being dumped in a future release.

import (
	"fmt"

	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

type TagType struct {
	tagType   TagTypePrimitive
	name      string
	byteOrder binary.ByteOrder
}

func NewTagType(tagType TagTypePrimitive, byteOrder binary.ByteOrder) TagType {
	name, found := TypeNames[tagType]
	if found == false {
		log.Panicf("tag-type not valid: 0x%04x", tagType)
	}

	return TagType{
		tagType:   tagType,
		name:      name,
		byteOrder: byteOrder,
	}
}

func (tt TagType) String() string {
	return fmt.Sprintf("TagType<NAME=[%s]>", tt.name)
}

func (tt TagType) Name() string {
	return tt.name
}

func (tt TagType) Type() TagTypePrimitive {
	return tt.tagType
}

func (tt TagType) ByteOrder() binary.ByteOrder {
	return tt.byteOrder
}

func (tt TagType) Size() int {

	// DEPRECATED(dustin): `(TagTypePrimitive).Size()` should be used, directly.

	return tt.Type().Size()
}

// valueIsEmbedded will return a boolean indicating whether the value should be
// found directly within the IFD entry or an offset to somewhere else.
func (tt TagType) valueIsEmbedded(unitCount uint32) bool {
	return (tt.tagType.Size() * int(unitCount)) <= 4
}

func (tt TagType) readRawEncoded(valueContext ValueContext) (rawBytes []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	unitSizeRaw := uint32(tt.tagType.Size())

	if tt.valueIsEmbedded(valueContext.UnitCount()) == true {
		byteLength := unitSizeRaw * valueContext.UnitCount()
		return valueContext.RawValueOffset()[:byteLength], nil
	} else {
		return valueContext.AddressableData()[valueContext.ValueOffset() : valueContext.ValueOffset()+valueContext.UnitCount()*unitSizeRaw], nil
	}
}

func (tt TagType) ParseBytes(data []byte, unitCount uint32) (value []uint8, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(*Parser).ParseBytes()` should be used.

	value, err = parser.ParseBytes(data, unitCount)
	log.PanicIf(err)

	return value, nil
}

// ParseAscii returns a string and auto-strips the trailing NUL character.
func (tt TagType) ParseAscii(data []byte, unitCount uint32) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(*Parser).ParseAscii()` should be used.

	value, err = parser.ParseAscii(data, unitCount)
	log.PanicIf(err)

	return value, nil
}

// ParseAsciiNoNul returns a string without any consideration for a trailing NUL
// character.
func (tt TagType) ParseAsciiNoNul(data []byte, unitCount uint32) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(*Parser).ParseAsciiNoNul()` should be used.

	value, err = parser.ParseAsciiNoNul(data, unitCount)
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ParseShorts(data []byte, unitCount uint32) (value []uint16, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(*Parser).ParseShorts()` should be used.

	value, err = parser.ParseShorts(data, unitCount, tt.byteOrder)
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ParseLongs(data []byte, unitCount uint32) (value []uint32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(*Parser).ParseLongs()` should be used.

	value, err = parser.ParseLongs(data, unitCount, tt.byteOrder)
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ParseRationals(data []byte, unitCount uint32) (value []Rational, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(*Parser).ParseRationals()` should be used.

	value, err = parser.ParseRationals(data, unitCount, tt.byteOrder)
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ParseSignedLongs(data []byte, unitCount uint32) (value []int32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(*Parser).ParseSignedLongs()` should be used.

	value, err = parser.ParseSignedLongs(data, unitCount, tt.byteOrder)
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ParseSignedRationals(data []byte, unitCount uint32) (value []SignedRational, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(*Parser).ParseSignedRationals()` should be used.

	value, err = parser.ParseSignedRationals(data, unitCount, tt.byteOrder)
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ReadByteValues(valueContext ValueContext) (value []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(ValueContext).ReadBytes()` should be used.

	value, err = valueContext.ReadBytes()
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ReadAsciiValue(valueContext ValueContext) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(ValueContext).ReadAscii()` should be used.

	value, err = valueContext.ReadAscii()
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ReadAsciiNoNulValue(valueContext ValueContext) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(ValueContext).ReadAsciiNoNul()` should be used.

	value, err = valueContext.ReadAsciiNoNul()
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ReadShortValues(valueContext ValueContext) (value []uint16, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(ValueContext).ReadShorts()` should be used.

	value, err = valueContext.ReadShorts()
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ReadLongValues(valueContext ValueContext) (value []uint32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(ValueContext).ReadLongs()` should be used.

	value, err = valueContext.ReadLongs()
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ReadRationalValues(valueContext ValueContext) (value []Rational, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(ValueContext).ReadRationals()` should be used.

	value, err = valueContext.ReadRationals()
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ReadSignedLongValues(valueContext ValueContext) (value []int32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(ValueContext).ReadSignedLongs()` should be used.

	value, err = valueContext.ReadSignedLongs()
	log.PanicIf(err)

	return value, nil
}

func (tt TagType) ReadSignedRationalValues(valueContext ValueContext) (value []SignedRational, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(ValueContext).ReadSignedRationals()` should be used.

	value, err = valueContext.ReadSignedRationals()
	log.PanicIf(err)

	return value, nil
}

// ResolveAsString resolves the given value and returns a flat string.
//
// Where the type is not ASCII, `justFirst` indicates whether to just stringify
// the first item in the slice (or return an empty string if the slice is
// empty).
//
// Since this method lacks the information to process unknown-type tags (e.g.
// byte-order, tag-ID, IFD type), it will return an error if attempted. See
// `Undefined()`.
func (tt TagType) ResolveAsString(valueContext ValueContext, justFirst bool) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	if justFirst == true {
		value, err = valueContext.FormatFirst()
		log.PanicIf(err)
	} else {
		value, err = valueContext.Format()
		log.PanicIf(err)
	}

	return value, nil
}

// Resolve knows how to resolve the given value.
//
// Since this method lacks the information to process unknown-type tags (e.g.
// byte-order, tag-ID, IFD type), it will return an error if attempted. See
// `Undefined()`.
func (tt TagType) Resolve(valueContext *ValueContext) (values interface{}, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `(ValueContext).Values()` should be used.

	values, err = valueContext.Values()
	log.PanicIf(err)

	return values, nil
}

// Encode knows how to encode the given value to a byte slice.
func (tt TagType) Encode(value interface{}) (encoded []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ve := NewValueEncoder(tt.byteOrder)

	ed, err := ve.EncodeWithType(tt, value)
	log.PanicIf(err)

	return ed.Encoded, err
}

func (tt TagType) FromString(valueString string) (value interface{}, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// DEPRECATED(dustin): `EncodeStringToBytes()` should be used.

	value, err = EncodeStringToBytes(tt.tagType, valueString)
	log.PanicIf(err)

	return value, nil
}
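A minimal sketch (not part of the diff) of the deprecated TagType front-end, which simply forwards to the shared Parser:

	// Sketch only: parse a NUL-terminated ASCII value.
	tt := NewTagType(TypeAscii, binary.BigEndian)

	s, err := tt.ParseAscii([]byte("abc\000"), 4)
	log.PanicIf(err)

	// s == "abc"; the trailing NUL is stripped.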
229 vendor/github.com/dsoprea/go-exif/tags.go generated vendored Normal file
@@ -0,0 +1,229 @@
package exif

import (
	"fmt"

	"github.com/dsoprea/go-logging"
	"gopkg.in/yaml.v2"
)

const (
	// IFD1

	ThumbnailOffsetTagId = 0x0201
	ThumbnailSizeTagId   = 0x0202

	// Exif

	TagVersionId = 0x0000

	TagLatitudeId     = 0x0002
	TagLatitudeRefId  = 0x0001
	TagLongitudeId    = 0x0004
	TagLongitudeRefId = 0x0003

	TagTimestampId = 0x0007
	TagDatestampId = 0x001d

	TagAltitudeId    = 0x0006
	TagAltitudeRefId = 0x0005
)

var (
	// tagsWithoutAlignment is a tag-lookup for tags whose value size won't
	// necessarily be a multiple of its tag-type.
	tagsWithoutAlignment = map[uint16]struct{}{
		// The thumbnail offset is stored as a long, but its data is a binary
		// blob (not a slice of longs).
		ThumbnailOffsetTagId: {},
	}
)

var (
	tagsLogger = log.NewLogger("exif.tags")
)

// File structures.

type encodedTag struct {
	// id is signed, here, because YAML doesn't have enough information to
	// support unsigned.
	Id       int    `yaml:"id"`
	Name     string `yaml:"name"`
	TypeName string `yaml:"type_name"`
}

// Indexing structures.

type IndexedTag struct {
	Id      uint16
	Name    string
	IfdPath string
	Type    TagTypePrimitive
}

func (it *IndexedTag) String() string {
	return fmt.Sprintf("TAG<ID=(0x%04x) NAME=[%s] IFD=[%s]>", it.Id, it.Name, it.IfdPath)
}

func (it *IndexedTag) IsName(ifdPath, name string) bool {
	return it.Name == name && it.IfdPath == ifdPath
}

func (it *IndexedTag) Is(ifdPath string, id uint16) bool {
	return it.Id == id && it.IfdPath == ifdPath
}

type TagIndex struct {
	tagsByIfd  map[string]map[uint16]*IndexedTag
	tagsByIfdR map[string]map[string]*IndexedTag
}

func NewTagIndex() *TagIndex {
	ti := new(TagIndex)

	ti.tagsByIfd = make(map[string]map[uint16]*IndexedTag)
	ti.tagsByIfdR = make(map[string]map[string]*IndexedTag)

	return ti
}

func (ti *TagIndex) Add(it *IndexedTag) (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// Store by ID.

	family, found := ti.tagsByIfd[it.IfdPath]
	if found == false {
		family = make(map[uint16]*IndexedTag)
		ti.tagsByIfd[it.IfdPath] = family
	}

	if _, found := family[it.Id]; found == true {
		log.Panicf("tag-ID defined more than once for IFD [%s]: (%02x)", it.IfdPath, it.Id)
	}

	family[it.Id] = it

	// Store by name.

	familyR, found := ti.tagsByIfdR[it.IfdPath]
	if found == false {
		familyR = make(map[string]*IndexedTag)
		ti.tagsByIfdR[it.IfdPath] = familyR
	}

	if _, found := familyR[it.Name]; found == true {
		log.Panicf("tag-name defined more than once for IFD [%s]: (%s)", it.IfdPath, it.Name)
	}

	familyR[it.Name] = it

	return nil
}

// Get returns information about the non-IFD tag with the given ID.
func (ti *TagIndex) Get(ifdPath string, id uint16) (it *IndexedTag, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	if len(ti.tagsByIfd) == 0 {
		err := LoadStandardTags(ti)
		log.PanicIf(err)
	}

	family, found := ti.tagsByIfd[ifdPath]
	if found == false {
		log.Panic(ErrTagNotFound)
	}

	it, found = family[id]
	if found == false {
		log.Panic(ErrTagNotFound)
	}

	return it, nil
}

// GetWithName returns information about the non-IFD tag with the given name.
func (ti *TagIndex) GetWithName(ifdPath string, name string) (it *IndexedTag, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	if len(ti.tagsByIfdR) == 0 {
		err := LoadStandardTags(ti)
		log.PanicIf(err)
	}

	it, found := ti.tagsByIfdR[ifdPath][name]
	if found != true {
		log.Panic(ErrTagNotFound)
	}

	return it, nil
}

// LoadStandardTags registers the tags that all devices/applications should
// support.
func LoadStandardTags(ti *TagIndex) (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// Read static data.

	encodedIfds := make(map[string][]encodedTag)

	err = yaml.Unmarshal([]byte(tagsYaml), encodedIfds)
	log.PanicIf(err)

	// Load structure.

	count := 0
	for ifdPath, tags := range encodedIfds {
		for _, tagInfo := range tags {
			tagId := uint16(tagInfo.Id)
			tagName := tagInfo.Name
			tagTypeName := tagInfo.TypeName

			// TODO(dustin): !! Non-standard types, but found in real data. Ignore for right now.
			if tagTypeName == "SSHORT" || tagTypeName == "FLOAT" || tagTypeName == "DOUBLE" {
				continue
			}

			tagTypeId, found := TypeNamesR[tagTypeName]
			if found == false {
				log.Panicf("type [%s] for [%s] not valid", tagTypeName, tagName)
				continue
			}

			it := &IndexedTag{
				IfdPath: ifdPath,
				Id:      tagId,
				Name:    tagName,
				Type:    tagTypeId,
			}

			err = ti.Add(it)
			log.PanicIf(err)

			count++
		}
	}

	tagsLogger.Debugf(nil, "(%d) tags loaded.", count)

	return nil
}
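A minimal sketch (not part of the diff) of the index above; the standard tags are loaded lazily from the embedded YAML on first lookup:

	// Sketch only: look up ExposureTime (0x829a) in the Exif IFD.
	ti := NewTagIndex()

	it, err := ti.Get("IFD/Exif", 0x829a)
	log.PanicIf(err)

	fmt.Printf("%s\n", it)
	// TAG<ID=(0x829a) NAME=[ExposureTime] IFD=[IFD/Exif]>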
951 vendor/github.com/dsoprea/go-exif/tags_data.go generated vendored Normal file
@@ -0,0 +1,951 @@
package exif

var (
	// From assets/tags.yaml . Needs to be here so it's embedded in the binary.
	tagsYaml = `
# Notes:
#
# This file was produced from http://www.exiv2.org/tags.html, using the included
# tool, though that document appears to have some duplicates when all IDs are
# supposed to be unique (EXIF information only has IDs, not IFDs; IFDs are
# determined by our pre-existing knowledge of those tags).
#
# The webpage that we've produced this file from appears to indicate that
# ImageWidth is represented by both 0x0100 and 0x0001 depending on whether the
# encoding is RGB or YCbCr.
IFD/Exif:
- id: 0x829a
  name: ExposureTime
  type_name: RATIONAL
- id: 0x829d
  name: FNumber
  type_name: RATIONAL
- id: 0x8822
  name: ExposureProgram
  type_name: SHORT
- id: 0x8824
  name: SpectralSensitivity
  type_name: ASCII
- id: 0x8827
  name: ISOSpeedRatings
  type_name: SHORT
- id: 0x8828
  name: OECF
  type_name: UNDEFINED
- id: 0x8830
  name: SensitivityType
  type_name: SHORT
- id: 0x8831
  name: StandardOutputSensitivity
  type_name: LONG
- id: 0x8832
  name: RecommendedExposureIndex
  type_name: LONG
- id: 0x8833
  name: ISOSpeed
  type_name: LONG
- id: 0x8834
  name: ISOSpeedLatitudeyyy
  type_name: LONG
- id: 0x8835
  name: ISOSpeedLatitudezzz
  type_name: LONG
- id: 0x9000
  name: ExifVersion
  type_name: UNDEFINED
- id: 0x9003
  name: DateTimeOriginal
  type_name: ASCII
- id: 0x9004
  name: DateTimeDigitized
  type_name: ASCII
- id: 0x9101
  name: ComponentsConfiguration
  type_name: UNDEFINED
- id: 0x9102
  name: CompressedBitsPerPixel
  type_name: RATIONAL
- id: 0x9201
  name: ShutterSpeedValue
  type_name: SRATIONAL
- id: 0x9202
  name: ApertureValue
  type_name: RATIONAL
- id: 0x9203
  name: BrightnessValue
  type_name: SRATIONAL
- id: 0x9204
  name: ExposureBiasValue
  type_name: SRATIONAL
- id: 0x9205
  name: MaxApertureValue
  type_name: RATIONAL
- id: 0x9206
  name: SubjectDistance
  type_name: RATIONAL
- id: 0x9207
  name: MeteringMode
  type_name: SHORT
- id: 0x9208
  name: LightSource
  type_name: SHORT
- id: 0x9209
  name: Flash
  type_name: SHORT
- id: 0x920a
  name: FocalLength
  type_name: RATIONAL
- id: 0x9214
  name: SubjectArea
  type_name: SHORT
- id: 0x927c
  name: MakerNote
  type_name: UNDEFINED
- id: 0x9286
  name: UserComment
  type_name: UNDEFINED
- id: 0x9290
  name: SubSecTime
  type_name: ASCII
- id: 0x9291
  name: SubSecTimeOriginal
  type_name: ASCII
- id: 0x9292
  name: SubSecTimeDigitized
  type_name: ASCII
- id: 0xa000
  name: FlashpixVersion
  type_name: UNDEFINED
- id: 0xa001
  name: ColorSpace
  type_name: SHORT
- id: 0xa002
  name: PixelXDimension
  type_name: LONG
- id: 0xa003
  name: PixelYDimension
  type_name: LONG
- id: 0xa004
  name: RelatedSoundFile
  type_name: ASCII
- id: 0xa005
  name: InteroperabilityTag
  type_name: LONG
- id: 0xa20b
  name: FlashEnergy
  type_name: RATIONAL
- id: 0xa20c
  name: SpatialFrequencyResponse
  type_name: UNDEFINED
- id: 0xa20e
  name: FocalPlaneXResolution
  type_name: RATIONAL
- id: 0xa20f
  name: FocalPlaneYResolution
  type_name: RATIONAL
- id: 0xa210
  name: FocalPlaneResolutionUnit
  type_name: SHORT
- id: 0xa214
  name: SubjectLocation
  type_name: SHORT
- id: 0xa215
  name: ExposureIndex
  type_name: RATIONAL
- id: 0xa217
  name: SensingMethod
  type_name: SHORT
- id: 0xa300
  name: FileSource
  type_name: UNDEFINED
- id: 0xa301
  name: SceneType
  type_name: UNDEFINED
- id: 0xa302
  name: CFAPattern
  type_name: UNDEFINED
- id: 0xa401
  name: CustomRendered
  type_name: SHORT
- id: 0xa402
  name: ExposureMode
  type_name: SHORT
- id: 0xa403
  name: WhiteBalance
  type_name: SHORT
- id: 0xa404
  name: DigitalZoomRatio
  type_name: RATIONAL
- id: 0xa405
  name: FocalLengthIn35mmFilm
  type_name: SHORT
- id: 0xa406
  name: SceneCaptureType
  type_name: SHORT
- id: 0xa407
  name: GainControl
  type_name: SHORT
- id: 0xa408
  name: Contrast
  type_name: SHORT
- id: 0xa409
  name: Saturation
  type_name: SHORT
- id: 0xa40a
  name: Sharpness
  type_name: SHORT
- id: 0xa40b
  name: DeviceSettingDescription
  type_name: UNDEFINED
- id: 0xa40c
  name: SubjectDistanceRange
  type_name: SHORT
- id: 0xa420
  name: ImageUniqueID
  type_name: ASCII
- id: 0xa430
  name: CameraOwnerName
  type_name: ASCII
- id: 0xa431
  name: BodySerialNumber
  type_name: ASCII
- id: 0xa432
  name: LensSpecification
  type_name: RATIONAL
- id: 0xa433
  name: LensMake
  type_name: ASCII
- id: 0xa434
  name: LensModel
  type_name: ASCII
- id: 0xa435
  name: LensSerialNumber
  type_name: ASCII
IFD/GPSInfo:
- id: 0x0000
  name: GPSVersionID
  type_name: BYTE
- id: 0x0001
  name: GPSLatitudeRef
  type_name: ASCII
- id: 0x0002
  name: GPSLatitude
  type_name: RATIONAL
- id: 0x0003
  name: GPSLongitudeRef
  type_name: ASCII
- id: 0x0004
  name: GPSLongitude
  type_name: RATIONAL
- id: 0x0005
  name: GPSAltitudeRef
  type_name: BYTE
- id: 0x0006
  name: GPSAltitude
  type_name: RATIONAL
- id: 0x0007
  name: GPSTimeStamp
  type_name: RATIONAL
- id: 0x0008
  name: GPSSatellites
  type_name: ASCII
- id: 0x0009
  name: GPSStatus
  type_name: ASCII
- id: 0x000a
  name: GPSMeasureMode
  type_name: ASCII
- id: 0x000b
  name: GPSDOP
  type_name: RATIONAL
- id: 0x000c
  name: GPSSpeedRef
  type_name: ASCII
- id: 0x000d
  name: GPSSpeed
  type_name: RATIONAL
- id: 0x000e
  name: GPSTrackRef
  type_name: ASCII
- id: 0x000f
  name: GPSTrack
  type_name: RATIONAL
- id: 0x0010
  name: GPSImgDirectionRef
  type_name: ASCII
- id: 0x0011
  name: GPSImgDirection
  type_name: RATIONAL
- id: 0x0012
  name: GPSMapDatum
  type_name: ASCII
- id: 0x0013
  name: GPSDestLatitudeRef
  type_name: ASCII
- id: 0x0014
  name: GPSDestLatitude
  type_name: RATIONAL
- id: 0x0015
  name: GPSDestLongitudeRef
  type_name: ASCII
- id: 0x0016
  name: GPSDestLongitude
  type_name: RATIONAL
- id: 0x0017
  name: GPSDestBearingRef
  type_name: ASCII
- id: 0x0018
  name: GPSDestBearing
  type_name: RATIONAL
- id: 0x0019
  name: GPSDestDistanceRef
  type_name: ASCII
- id: 0x001a
  name: GPSDestDistance
  type_name: RATIONAL
- id: 0x001b
  name: GPSProcessingMethod
  type_name: UNDEFINED
- id: 0x001c
  name: GPSAreaInformation
  type_name: UNDEFINED
- id: 0x001d
  name: GPSDateStamp
  type_name: ASCII
- id: 0x001e
  name: GPSDifferential
  type_name: SHORT
IFD:
- id: 0x000b
  name: ProcessingSoftware
  type_name: ASCII
- id: 0x00fe
  name: NewSubfileType
  type_name: LONG
- id: 0x00ff
  name: SubfileType
  type_name: SHORT
- id: 0x0100
  name: ImageWidth
  type_name: LONG
- id: 0x0101
  name: ImageLength
  type_name: LONG
- id: 0x0102
  name: BitsPerSample
  type_name: SHORT
- id: 0x0103
  name: Compression
  type_name: SHORT
- id: 0x0106
  name: PhotometricInterpretation
  type_name: SHORT
- id: 0x0107
  name: Thresholding
  type_name: SHORT
- id: 0x0108
  name: CellWidth
  type_name: SHORT
- id: 0x0109
  name: CellLength
  type_name: SHORT
- id: 0x010a
  name: FillOrder
  type_name: SHORT
- id: 0x010d
  name: DocumentName
  type_name: ASCII
- id: 0x010e
  name: ImageDescription
  type_name: ASCII
- id: 0x010f
  name: Make
  type_name: ASCII
- id: 0x0110
  name: Model
  type_name: ASCII
- id: 0x0111
  name: StripOffsets
  type_name: LONG
- id: 0x0112
  name: Orientation
  type_name: SHORT
- id: 0x0115
  name: SamplesPerPixel
  type_name: SHORT
- id: 0x0116
  name: RowsPerStrip
  type_name: LONG
- id: 0x0117
  name: StripByteCounts
  type_name: LONG
- id: 0x011a
  name: XResolution
  type_name: RATIONAL
- id: 0x011b
  name: YResolution
  type_name: RATIONAL
- id: 0x011c
  name: PlanarConfiguration
  type_name: SHORT
- id: 0x0122
  name: GrayResponseUnit
  type_name: SHORT
- id: 0x0123
  name: GrayResponseCurve
  type_name: SHORT
- id: 0x0124
  name: T4Options
  type_name: LONG
- id: 0x0125
  name: T6Options
  type_name: LONG
- id: 0x0128
  name: ResolutionUnit
  type_name: SHORT
- id: 0x0129
  name: PageNumber
  type_name: SHORT
- id: 0x012d
  name: TransferFunction
  type_name: SHORT
- id: 0x0131
  name: Software
  type_name: ASCII
- id: 0x0132
  name: DateTime
  type_name: ASCII
- id: 0x013b
  name: Artist
  type_name: ASCII
- id: 0x013c
  name: HostComputer
  type_name: ASCII
- id: 0x013d
  name: Predictor
  type_name: SHORT
- id: 0x013e
  name: WhitePoint
  type_name: RATIONAL
- id: 0x013f
  name: PrimaryChromaticities
  type_name: RATIONAL
- id: 0x0140
  name: ColorMap
  type_name: SHORT
- id: 0x0141
  name: HalftoneHints
  type_name: SHORT
- id: 0x0142
  name: TileWidth
  type_name: SHORT
- id: 0x0143
  name: TileLength
  type_name: SHORT
- id: 0x0144
  name: TileOffsets
  type_name: SHORT
- id: 0x0145
  name: TileByteCounts
  type_name: SHORT
- id: 0x014a
  name: SubIFDs
  type_name: LONG
- id: 0x014c
  name: InkSet
  type_name: SHORT
- id: 0x014d
  name: InkNames
  type_name: ASCII
- id: 0x014e
  name: NumberOfInks
  type_name: SHORT
- id: 0x0150
  name: DotRange
  type_name: BYTE
- id: 0x0151
  name: TargetPrinter
  type_name: ASCII
- id: 0x0152
  name: ExtraSamples
  type_name: SHORT
- id: 0x0153
  name: SampleFormat
  type_name: SHORT
- id: 0x0154
  name: SMinSampleValue
  type_name: SHORT
- id: 0x0155
  name: SMaxSampleValue
  type_name: SHORT
- id: 0x0156
  name: TransferRange
  type_name: SHORT
- id: 0x0157
  name: ClipPath
  type_name: BYTE
- id: 0x0158
  name: XClipPathUnits
  type_name: SSHORT
- id: 0x0159
  name: YClipPathUnits
  type_name: SSHORT
- id: 0x015a
  name: Indexed
  type_name: SHORT
- id: 0x015b
  name: JPEGTables
  type_name: UNDEFINED
- id: 0x015f
  name: OPIProxy
  type_name: SHORT
- id: 0x0200
  name: JPEGProc
  type_name: LONG
- id: 0x0201
  name: JPEGInterchangeFormat
  type_name: LONG
- id: 0x0202
  name: JPEGInterchangeFormatLength
  type_name: LONG
- id: 0x0203
  name: JPEGRestartInterval
  type_name: SHORT
- id: 0x0205
  name: JPEGLosslessPredictors
  type_name: SHORT
- id: 0x0206
  name: JPEGPointTransforms
  type_name: SHORT
- id: 0x0207
  name: JPEGQTables
  type_name: LONG
- id: 0x0208
  name: JPEGDCTables
  type_name: LONG
- id: 0x0209
  name: JPEGACTables
  type_name: LONG
- id: 0x0211
  name: YCbCrCoefficients
  type_name: RATIONAL
- id: 0x0212
  name: YCbCrSubSampling
  type_name: SHORT
- id: 0x0213
  name: YCbCrPositioning
  type_name: SHORT
- id: 0x0214
  name: ReferenceBlackWhite
  type_name: RATIONAL
- id: 0x02bc
  name: XMLPacket
  type_name: BYTE
- id: 0x4746
  name: Rating
  type_name: SHORT
- id: 0x4749
  name: RatingPercent
  type_name: SHORT
- id: 0x800d
  name: ImageID
  type_name: ASCII
- id: 0x828d
  name: CFARepeatPatternDim
  type_name: SHORT
- id: 0x828e
  name: CFAPattern
  type_name: BYTE
- id: 0x828f
  name: BatteryLevel
  type_name: RATIONAL
- id: 0x8298
  name: Copyright
  type_name: ASCII
- id: 0x829a
  name: ExposureTime
  type_name: RATIONAL
- id: 0x829d
  name: FNumber
  type_name: RATIONAL
- id: 0x83bb
  name: IPTCNAA
  type_name: LONG
- id: 0x8649
  name: ImageResources
  type_name: BYTE
- id: 0x8769
  name: ExifTag
  type_name: LONG
- id: 0x8773
  name: InterColorProfile
  type_name: UNDEFINED
- id: 0x8822
  name: ExposureProgram
  type_name: SHORT
- id: 0x8824
  name: SpectralSensitivity
  type_name: ASCII
- id: 0x8825
  name: GPSTag
  type_name: LONG
- id: 0x8827
  name: ISOSpeedRatings
  type_name: SHORT
- id: 0x8828
  name: OECF
  type_name: UNDEFINED
- id: 0x8829
  name: Interlace
  type_name: SHORT
- id: 0x882a
  name: TimeZoneOffset
  type_name: SSHORT
- id: 0x882b
  name: SelfTimerMode
  type_name: SHORT
- id: 0x9003
  name: DateTimeOriginal
  type_name: ASCII
- id: 0x9102
  name: CompressedBitsPerPixel
  type_name: RATIONAL
- id: 0x9201
  name: ShutterSpeedValue
  type_name: SRATIONAL
- id: 0x9202
  name: ApertureValue
  type_name: RATIONAL
- id: 0x9203
  name: BrightnessValue
  type_name: SRATIONAL
- id: 0x9204
  name: ExposureBiasValue
  type_name: SRATIONAL
- id: 0x9205
  name: MaxApertureValue
  type_name: RATIONAL
- id: 0x9206
  name: SubjectDistance
  type_name: SRATIONAL
- id: 0x9207
  name: MeteringMode
  type_name: SHORT
- id: 0x9208
  name: LightSource
  type_name: SHORT
- id: 0x9209
  name: Flash
  type_name: SHORT
- id: 0x920a
  name: FocalLength
  type_name: RATIONAL
- id: 0x920b
  name: FlashEnergy
  type_name: RATIONAL
- id: 0x920c
  name: SpatialFrequencyResponse
  type_name: UNDEFINED
- id: 0x920d
  name: Noise
  type_name: UNDEFINED
- id: 0x920e
  name: FocalPlaneXResolution
  type_name: RATIONAL
- id: 0x920f
  name: FocalPlaneYResolution
  type_name: RATIONAL
- id: 0x9210
  name: FocalPlaneResolutionUnit
  type_name: SHORT
- id: 0x9211
  name: ImageNumber
  type_name: LONG
- id: 0x9212
  name: SecurityClassification
  type_name: ASCII
- id: 0x9213
  name: ImageHistory
  type_name: ASCII
- id: 0x9214
  name: SubjectLocation
  type_name: SHORT
- id: 0x9215
  name: ExposureIndex
  type_name: RATIONAL
- id: 0x9216
  name: TIFFEPStandardID
  type_name: BYTE
- id: 0x9217
  name: SensingMethod
  type_name: SHORT
- id: 0x9c9b
  name: XPTitle
  type_name: BYTE
- id: 0x9c9c
  name: XPComment
  type_name: BYTE
- id: 0x9c9d
  name: XPAuthor
  type_name: BYTE
- id: 0x9c9e
  name: XPKeywords
  type_name: BYTE
- id: 0x9c9f
  name: XPSubject
  type_name: BYTE
- id: 0xc4a5
  name: PrintImageMatching
  type_name: UNDEFINED
- id: 0xc612
  name: DNGVersion
  type_name: BYTE
- id: 0xc613
  name: DNGBackwardVersion
  type_name: BYTE
- id: 0xc614
  name: UniqueCameraModel
  type_name: ASCII
- id: 0xc615
  name: LocalizedCameraModel
  type_name: BYTE
- id: 0xc616
  name: CFAPlaneColor
  type_name: BYTE
- id: 0xc617
  name: CFALayout
  type_name: SHORT
- id: 0xc618
  name: LinearizationTable
  type_name: SHORT
- id: 0xc619
  name: BlackLevelRepeatDim
  type_name: SHORT
- id: 0xc61a
  name: BlackLevel
  type_name: RATIONAL
- id: 0xc61b
  name: BlackLevelDeltaH
  type_name: SRATIONAL
- id: 0xc61c
  name: BlackLevelDeltaV
  type_name: SRATIONAL
- id: 0xc61d
  name: WhiteLevel
  type_name: SHORT
- id: 0xc61e
  name: DefaultScale
  type_name: RATIONAL
- id: 0xc61f
  name: DefaultCropOrigin
  type_name: SHORT
- id: 0xc620
  name: DefaultCropSize
  type_name: SHORT
- id: 0xc621
  name: ColorMatrix1
  type_name: SRATIONAL
- id: 0xc622
  name: ColorMatrix2
  type_name: SRATIONAL
- id: 0xc623
  name: CameraCalibration1
  type_name: SRATIONAL
- id: 0xc624
  name: CameraCalibration2
  type_name: SRATIONAL
- id: 0xc625
  name: ReductionMatrix1
  type_name: SRATIONAL
- id: 0xc626
  name: ReductionMatrix2
  type_name: SRATIONAL
- id: 0xc627
  name: AnalogBalance
  type_name: RATIONAL
- id: 0xc628
  name: AsShotNeutral
  type_name: SHORT
- id: 0xc629
  name: AsShotWhiteXY
  type_name: RATIONAL
- id: 0xc62a
  name: BaselineExposure
  type_name: SRATIONAL
- id: 0xc62b
  name: BaselineNoise
  type_name: RATIONAL
- id: 0xc62c
  name: BaselineSharpness
  type_name: RATIONAL
- id: 0xc62d
  name: BayerGreenSplit
  type_name: LONG
- id: 0xc62e
  name: LinearResponseLimit
  type_name: RATIONAL
- id: 0xc62f
  name: CameraSerialNumber
  type_name: ASCII
- id: 0xc630
  name: LensInfo
  type_name: RATIONAL
- id: 0xc631
  name: ChromaBlurRadius
  type_name: RATIONAL
- id: 0xc632
  name: AntiAliasStrength
  type_name: RATIONAL
- id: 0xc633
  name: ShadowScale
  type_name: SRATIONAL
- id: 0xc634
  name: DNGPrivateData
  type_name: BYTE
- id: 0xc635
  name: MakerNoteSafety
  type_name: SHORT
- id: 0xc65a
  name: CalibrationIlluminant1
  type_name: SHORT
- id: 0xc65b
  name: CalibrationIlluminant2
  type_name: SHORT
- id: 0xc65c
  name: BestQualityScale
  type_name: RATIONAL
- id: 0xc65d
  name: RawDataUniqueID
  type_name: BYTE
- id: 0xc68b
  name: OriginalRawFileName
  type_name: BYTE
- id: 0xc68c
  name: OriginalRawFileData
  type_name: UNDEFINED
- id: 0xc68d
  name: ActiveArea
  type_name: SHORT
- id: 0xc68e
  name: MaskedAreas
  type_name: SHORT
- id: 0xc68f
  name: AsShotICCProfile
  type_name: UNDEFINED
- id: 0xc690
  name: AsShotPreProfileMatrix
  type_name: SRATIONAL
- id: 0xc691
  name: CurrentICCProfile
  type_name: UNDEFINED
- id: 0xc692
  name: CurrentPreProfileMatrix
  type_name: SRATIONAL
- id: 0xc6bf
  name: ColorimetricReference
  type_name: SHORT
- id: 0xc6f3
  name: CameraCalibrationSignature
  type_name: BYTE
- id: 0xc6f4
  name: ProfileCalibrationSignature
  type_name: BYTE
- id: 0xc6f6
  name: AsShotProfileName
  type_name: BYTE
- id: 0xc6f7
  name: NoiseReductionApplied
  type_name: RATIONAL
- id: 0xc6f8
  name: ProfileName
  type_name: BYTE
- id: 0xc6f9
  name: ProfileHueSatMapDims
  type_name: LONG
- id: 0xc6fa
  name: ProfileHueSatMapData1
  type_name: FLOAT
- id: 0xc6fb
  name: ProfileHueSatMapData2
  type_name: FLOAT
- id: 0xc6fc
  name: ProfileToneCurve
  type_name: FLOAT
- id: 0xc6fd
  name: ProfileEmbedPolicy
  type_name: LONG
- id: 0xc6fe
  name: ProfileCopyright
  type_name: BYTE
- id: 0xc714
  name: ForwardMatrix1
  type_name: SRATIONAL
- id: 0xc715
  name: ForwardMatrix2
  type_name: SRATIONAL
- id: 0xc716
  name: PreviewApplicationName
  type_name: BYTE
- id: 0xc717
  name: PreviewApplicationVersion
  type_name: BYTE
- id: 0xc718
  name: PreviewSettingsName
  type_name: BYTE
- id: 0xc719
  name: PreviewSettingsDigest
  type_name: BYTE
- id: 0xc71a
  name: PreviewColorSpace
  type_name: LONG
- id: 0xc71b
  name: PreviewDateTime
  type_name: ASCII
- id: 0xc71c
  name: RawImageDigest
  type_name: UNDEFINED
- id: 0xc71d
  name: OriginalRawFileDigest
  type_name: UNDEFINED
- id: 0xc71e
  name: SubTileBlockSize
  type_name: LONG
- id: 0xc71f
  name: RowInterleaveFactor
  type_name: LONG
- id: 0xc725
  name: ProfileLookTableDims
  type_name: LONG
- id: 0xc726
  name: ProfileLookTableData
  type_name: FLOAT
- id: 0xc740
  name: OpcodeList1
  type_name: UNDEFINED
- id: 0xc741
  name: OpcodeList2
  type_name: UNDEFINED
- id: 0xc74e
  name: OpcodeList3
  type_name: UNDEFINED
- id: 0xc761
  name: NoiseProfile
  type_name: DOUBLE
IFD/Exif/Iop:
- id: 0x0001
  name: InteroperabilityIndex
  type_name: ASCII
- id: 0x0002
  name: InteroperabilityVersion
  type_name: UNDEFINED
- id: 0x1000
  name: RelatedImageFileFormat
  type_name: ASCII
- id: 0x1001
  name: RelatedImageWidth
  type_name: LONG
- id: 0x1002
  name: RelatedImageLength
  type_name: LONG
`
)
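Note the duplicate-ID caveat from the header comments: an ID such as 0x829a (ExposureTime) is registered under both `IFD` and `IFD/Exif`, so lookups are keyed by IFD path as well as ID. A hedged sketch, assuming the `NewTagIndex`/`Get` accessors from this library's `tags.go` (which is only partially shown in this diff):

package main

import (
	"fmt"

	"github.com/dsoprea/go-exif"
)

func main() {
	ti := exif.NewTagIndex()

	// The IFD path is what disambiguates the shared ID, exactly as the
	// notes at the top of the registry say.
	it, err := ti.Get("IFD/Exif", 0x829a)
	if err != nil {
		panic(err)
	}

	fmt.Println(it.Name) // ExposureTime
}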
417 vendor/github.com/dsoprea/go-exif/tags_undefined.go generated vendored Normal file
@@ -0,0 +1,417 @@
package exif

import (
	"bytes"
	"fmt"
	"strings"

	"crypto/sha1"
	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

const (
	UnparseableUnknownTagValuePlaceholder = "!UNKNOWN"
)

// TODO(dustin): Rename "unknown" in symbol names to "undefined" in the next release.
//
// See https://github.com/dsoprea/go-exif/issues/27 .

const (
	TagUnknownType_9298_UserComment_Encoding_ASCII     = iota
	TagUnknownType_9298_UserComment_Encoding_JIS       = iota
	TagUnknownType_9298_UserComment_Encoding_UNICODE   = iota
	TagUnknownType_9298_UserComment_Encoding_UNDEFINED = iota
)

const (
	TagUnknownType_9101_ComponentsConfiguration_Channel_Y  = 0x1
	TagUnknownType_9101_ComponentsConfiguration_Channel_Cb = 0x2
	TagUnknownType_9101_ComponentsConfiguration_Channel_Cr = 0x3
	TagUnknownType_9101_ComponentsConfiguration_Channel_R  = 0x4
	TagUnknownType_9101_ComponentsConfiguration_Channel_G  = 0x5
	TagUnknownType_9101_ComponentsConfiguration_Channel_B  = 0x6
)

const (
	TagUnknownType_9101_ComponentsConfiguration_OTHER = iota
	TagUnknownType_9101_ComponentsConfiguration_RGB   = iota
	TagUnknownType_9101_ComponentsConfiguration_YCBCR = iota
)

var (
	TagUnknownType_9298_UserComment_Encoding_Names = map[int]string{
		TagUnknownType_9298_UserComment_Encoding_ASCII:     "ASCII",
		TagUnknownType_9298_UserComment_Encoding_JIS:       "JIS",
		TagUnknownType_9298_UserComment_Encoding_UNICODE:   "UNICODE",
		TagUnknownType_9298_UserComment_Encoding_UNDEFINED: "UNDEFINED",
	}

	TagUnknownType_9298_UserComment_Encodings = map[int][]byte{
		TagUnknownType_9298_UserComment_Encoding_ASCII:     {'A', 'S', 'C', 'I', 'I', 0, 0, 0},
		TagUnknownType_9298_UserComment_Encoding_JIS:       {'J', 'I', 'S', 0, 0, 0, 0, 0},
		TagUnknownType_9298_UserComment_Encoding_UNICODE:   {'U', 'n', 'i', 'c', 'o', 'd', 'e', 0},
		TagUnknownType_9298_UserComment_Encoding_UNDEFINED: {0, 0, 0, 0, 0, 0, 0, 0},
	}

	TagUnknownType_9101_ComponentsConfiguration_Names = map[int]string{
		TagUnknownType_9101_ComponentsConfiguration_OTHER: "OTHER",
		TagUnknownType_9101_ComponentsConfiguration_RGB:   "RGB",
		TagUnknownType_9101_ComponentsConfiguration_YCBCR: "YCBCR",
	}

	TagUnknownType_9101_ComponentsConfiguration_Configurations = map[int][]byte{
		TagUnknownType_9101_ComponentsConfiguration_RGB: {
			TagUnknownType_9101_ComponentsConfiguration_Channel_R,
			TagUnknownType_9101_ComponentsConfiguration_Channel_G,
			TagUnknownType_9101_ComponentsConfiguration_Channel_B,
			0,
		},

		TagUnknownType_9101_ComponentsConfiguration_YCBCR: {
			TagUnknownType_9101_ComponentsConfiguration_Channel_Y,
			TagUnknownType_9101_ComponentsConfiguration_Channel_Cb,
			TagUnknownType_9101_ComponentsConfiguration_Channel_Cr,
			0,
		},
	}
)

// TODO(dustin): Rename `UnknownTagValue` to `UndefinedTagValue`.

type UnknownTagValue interface {
	ValueBytes() ([]byte, error)
}

// TODO(dustin): Rename `TagUnknownType_GeneralString` to `TagUndefinedType_GeneralString`.

type TagUnknownType_GeneralString string

func (gs TagUnknownType_GeneralString) ValueBytes() (value []byte, err error) {
	return []byte(gs), nil
}

// TODO(dustin): Rename `TagUnknownType_9298_UserComment` to `TagUndefinedType_9298_UserComment`.

type TagUnknownType_9298_UserComment struct {
	EncodingType  int
	EncodingBytes []byte
}

func (uc TagUnknownType_9298_UserComment) String() string {
	var valuePhrase string

	if len(uc.EncodingBytes) <= 8 {
		valuePhrase = fmt.Sprintf("%v", uc.EncodingBytes)
	} else {
		valuePhrase = fmt.Sprintf("%v...", uc.EncodingBytes[:8])
	}

	return fmt.Sprintf("UserComment<SIZE=(%d) ENCODING=[%s] V=%v LEN=(%d)>", len(uc.EncodingBytes), TagUnknownType_9298_UserComment_Encoding_Names[uc.EncodingType], valuePhrase, len(uc.EncodingBytes))
}

func (uc TagUnknownType_9298_UserComment) ValueBytes() (value []byte, err error) {
	encodingTypeBytes, found := TagUnknownType_9298_UserComment_Encodings[uc.EncodingType]
	if !found {
		log.Panicf("encoding-type not valid for unknown-type tag 9298 (UserComment): (%d)", uc.EncodingType)
	}

	value = make([]byte, len(uc.EncodingBytes)+8)

	copy(value[:8], encodingTypeBytes)
	copy(value[8:], uc.EncodingBytes)

	return value, nil
}
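A hedged usage sketch of the UserComment wrapper above: `ValueBytes` emits the 8-byte encoding header followed by the comment payload.

package main

import (
	"fmt"

	"github.com/dsoprea/go-exif"
)

func main() {
	uc := exif.TagUnknownType_9298_UserComment{
		EncodingType:  exif.TagUnknownType_9298_UserComment_Encoding_ASCII,
		EncodingBytes: []byte("hello"),
	}

	raw, err := uc.ValueBytes()
	if err != nil {
		panic(err)
	}

	// raw[:8] is the "ASCII\0\0\0" encoding header; raw[8:] is the payload.
	fmt.Printf("% x\n", raw) // 13 bytes total
}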
// TODO(dustin): Rename `TagUnknownType_927C_MakerNote` to `TagUndefinedType_927C_MakerNote`.

type TagUnknownType_927C_MakerNote struct {
	MakerNoteType  []byte
	MakerNoteBytes []byte
}

func (mn TagUnknownType_927C_MakerNote) String() string {
	parts := make([]string, 20)
	for i, c := range mn.MakerNoteType {
		parts[i] = fmt.Sprintf("%02x", c)
	}

	h := sha1.New()

	_, err := h.Write(mn.MakerNoteBytes)
	log.PanicIf(err)

	digest := h.Sum(nil)

	return fmt.Sprintf("MakerNote<TYPE-ID=[%s] LEN=(%d) SHA1=[%020x]>", strings.Join(parts, " "), len(mn.MakerNoteBytes), digest)
}

func (uc TagUnknownType_927C_MakerNote) ValueBytes() (value []byte, err error) {
	return uc.MakerNoteBytes, nil
}

// TODO(dustin): Rename `TagUnknownType_9101_ComponentsConfiguration` to `TagUndefinedType_9101_ComponentsConfiguration`.

type TagUnknownType_9101_ComponentsConfiguration struct {
	ConfigurationId    int
	ConfigurationBytes []byte
}

func (cc TagUnknownType_9101_ComponentsConfiguration) String() string {
	return fmt.Sprintf("ComponentsConfiguration<ID=[%s] BYTES=%v>", TagUnknownType_9101_ComponentsConfiguration_Names[cc.ConfigurationId], cc.ConfigurationBytes)
}

func (uc TagUnknownType_9101_ComponentsConfiguration) ValueBytes() (value []byte, err error) {
	return uc.ConfigurationBytes, nil
}
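A hedged snippet (imports as in the examples above): recognizing the YCbCr layout bytes from the `_Configurations` map above.

cc := exif.TagUnknownType_9101_ComponentsConfiguration{
	ConfigurationId:    exif.TagUnknownType_9101_ComponentsConfiguration_YCBCR,
	ConfigurationBytes: []byte{0x1, 0x2, 0x3, 0x0},
}

fmt.Println(cc.String()) // ComponentsConfiguration<ID=[YCBCR] BYTES=[1 2 3 0]>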
// TODO(dustin): Rename `EncodeUnknown_9286` to `EncodeUndefined_9286`.

func EncodeUnknown_9286(uc TagUnknownType_9298_UserComment) (encoded []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	b := new(bytes.Buffer)

	encodingTypeBytes := TagUnknownType_9298_UserComment_Encodings[uc.EncodingType]

	_, err = b.Write(encodingTypeBytes)
	log.PanicIf(err)

	_, err = b.Write(uc.EncodingBytes)
	log.PanicIf(err)

	return b.Bytes(), nil
}

type EncodeableUndefinedValue struct {
	IfdPath    string
	TagId      uint16
	Parameters interface{}
}

func EncodeUndefined(ifdPath string, tagId uint16, value interface{}) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): !! Finish implementing these.
	if ifdPath == IfdPathStandardExif {
		if tagId == 0x9286 {
			encoded, err := EncodeUnknown_9286(value.(TagUnknownType_9298_UserComment))
			log.PanicIf(err)

			ed.Type = TypeUndefined
			ed.Encoded = encoded
			ed.UnitCount = uint32(len(encoded))

			return ed, nil
		}
	}

	log.Panicf("undefined value not encodable: %s (0x%02x)", ifdPath, tagId)

	// Never called.
	return EncodedData{}, nil
}
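A hedged snippet of the encode path above; per the TODO, `EncodeUndefined` currently only handles UserComment (0x9286) under the standard Exif IFD path, and anything else panics into the returned error.

ed, err := exif.EncodeUndefined(
	exif.IfdPathStandardExif,
	0x9286,
	exif.TagUnknownType_9298_UserComment{
		EncodingType:  exif.TagUnknownType_9298_UserComment_Encoding_ASCII,
		EncodingBytes: []byte("hello"),
	})
if err != nil {
	panic(err)
}

// 8-byte encoding header + 5 payload bytes.
fmt.Println(ed.Type, ed.UnitCount) // UNDEFINED 13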
// TODO(dustin): Rename `TagUnknownType_UnknownValue` to `TagUndefinedType_UnknownValue`.

type TagUnknownType_UnknownValue []byte

func (tutuv TagUnknownType_UnknownValue) String() string {
	parts := make([]string, len(tutuv))
	for i, c := range tutuv {
		parts[i] = fmt.Sprintf("%02x", c)
	}

	h := sha1.New()

	_, err := h.Write(tutuv)
	log.PanicIf(err)

	digest := h.Sum(nil)

	return fmt.Sprintf("Unknown<DATA=[%s] LEN=(%d) SHA1=[%020x]>", strings.Join(parts, " "), len(tutuv), digest)
}

// UndefinedValue knows how to resolve the value for most unknown-type tags.
func UndefinedValue(ifdPath string, tagId uint16, valueContext interface{}, byteOrder binary.ByteOrder) (value interface{}, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): Stop exporting this. Use `(*ValueContext).Undefined()`.

	var valueContextPtr *ValueContext

	if vc, ok := valueContext.(*ValueContext); ok {
		// Legacy usage.

		valueContextPtr = vc
	} else {
		// Standard usage.

		valueContextValue := valueContext.(ValueContext)
		valueContextPtr = &valueContextValue
	}

	typeLogger.Debugf(nil, "UndefinedValue: IFD-PATH=[%s] TAG-ID=(0x%02x)", ifdPath, tagId)

	if ifdPath == IfdPathStandardExif {
		if tagId == 0x9000 {
			// ExifVersion

			valueContextPtr.SetUnknownValueType(TypeAsciiNoNul)

			valueString, err := valueContextPtr.ReadAsciiNoNul()
			log.PanicIf(err)

			return TagUnknownType_GeneralString(valueString), nil
		} else if tagId == 0xa000 {
			// FlashpixVersion

			valueContextPtr.SetUnknownValueType(TypeAsciiNoNul)

			valueString, err := valueContextPtr.ReadAsciiNoNul()
			log.PanicIf(err)

			return TagUnknownType_GeneralString(valueString), nil
		} else if tagId == 0x9286 {
			// UserComment

			valueContextPtr.SetUnknownValueType(TypeByte)

			valueBytes, err := valueContextPtr.ReadBytes()
			log.PanicIf(err)

			unknownUc := TagUnknownType_9298_UserComment{
				EncodingType:  TagUnknownType_9298_UserComment_Encoding_UNDEFINED,
				EncodingBytes: []byte{},
			}

			encoding := valueBytes[:8]
			for encodingIndex, encodingBytes := range TagUnknownType_9298_UserComment_Encodings {
				if bytes.Equal(encoding, encodingBytes) {
					uc := TagUnknownType_9298_UserComment{
						EncodingType:  encodingIndex,
						EncodingBytes: valueBytes[8:],
					}

					return uc, nil
				}
			}

			typeLogger.Warningf(nil, "User-comment encoding not valid. Returning 'unknown' type (the default).")
			return unknownUc, nil
		} else if tagId == 0x927c {
			// MakerNote
			// TODO(dustin): !! This is the Wild Wild West. This very well might be a child IFD, but any and all OEMs define their own formats. If we're going to be writing changes and this is complete EXIF (which may not have the first eight bytes), it might be fine. However, if these are just IFDs relative to the main EXIF, a write will invalidate the MakerNote data for IFDs and any other implementations that use offsets, unless we can interpret them all. It would be best to return to this later and just exclude this from being written for now, though that means losing a wealth of image metadata.
			// -> We can also just blindly try to interpret it as an IFD and validate that it looks good (maybe it will even have a 'next IFD' pointer that we can validate is 0x0).

			valueContextPtr.SetUnknownValueType(TypeByte)

			valueBytes, err := valueContextPtr.ReadBytes()
			log.PanicIf(err)

			// TODO(dustin): Doesn't work, but here as an example.
			// ie := NewIfdEnumerate(valueBytes, byteOrder)

			// // TODO(dustin): !! Validate types (might have proprietary types, but it might be worth splitting the list between valid and not valid; maybe fail if a certain proportion are invalid, or if there are fewer than a certain small number of entries)?
			// ii, err := ie.Collect(0x0)

			// for _, entry := range ii.RootIfd.Entries {
			//     fmt.Printf("ENTRY: 0x%02x %d\n", entry.TagId, entry.TagType)
			// }

			mn := TagUnknownType_927C_MakerNote{
				MakerNoteType: valueBytes[:20],

				// MakerNoteBytes has the whole length of bytes. There's always
				// the chance that the first 20 bytes includes actual data.
				MakerNoteBytes: valueBytes,
			}

			return mn, nil
		} else if tagId == 0x9101 {
			// ComponentsConfiguration

			valueContextPtr.SetUnknownValueType(TypeByte)

			valueBytes, err := valueContextPtr.ReadBytes()
			log.PanicIf(err)

			for configurationId, configurationBytes := range TagUnknownType_9101_ComponentsConfiguration_Configurations {
				if bytes.Equal(valueBytes, configurationBytes) {
					cc := TagUnknownType_9101_ComponentsConfiguration{
						ConfigurationId:    configurationId,
						ConfigurationBytes: valueBytes,
					}

					return cc, nil
				}
			}

			cc := TagUnknownType_9101_ComponentsConfiguration{
				ConfigurationId:    TagUnknownType_9101_ComponentsConfiguration_OTHER,
				ConfigurationBytes: valueBytes,
			}

			return cc, nil
		}
	} else if ifdPath == IfdPathStandardGps {
		if tagId == 0x001c {
			// GPSAreaInformation

			valueContextPtr.SetUnknownValueType(TypeAsciiNoNul)

			valueString, err := valueContextPtr.ReadAsciiNoNul()
			log.PanicIf(err)

			return TagUnknownType_GeneralString(valueString), nil
		} else if tagId == 0x001b {
			// GPSProcessingMethod

			valueContextPtr.SetUnknownValueType(TypeAsciiNoNul)

			valueString, err := valueContextPtr.ReadAsciiNoNul()
			log.PanicIf(err)

			return TagUnknownType_GeneralString(valueString), nil
		}
	} else if ifdPath == IfdPathStandardExifIop {
		if tagId == 0x0002 {
			// InteropVersion

			valueContextPtr.SetUnknownValueType(TypeAsciiNoNul)

			valueString, err := valueContextPtr.ReadAsciiNoNul()
			log.PanicIf(err)

			return TagUnknownType_GeneralString(valueString), nil
		}
	}

	// TODO(dustin): !! Still need to do:
	//
	// complex: 0xa302, 0xa20c, 0x8828
	// long: 0xa301, 0xa300
	//
	// 0xa40b is device-specific and unhandled.
	//
	// See https://github.com/dsoprea/go-exif/issues/26.

	// We have no choice but to return the error. We have no way of knowing how
	// much data there is without already knowing what data-type this tag is.
	return nil, ErrUnhandledUnknownTypedTag
}
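A hedged snippet of the dispatch above; how the `ValueContext` is obtained (from an IFD tag entry elsewhere in this library) is outside this diff, so `valueContext` here is assumed.

value, err := exif.UndefinedValue(exif.IfdPathStandardExif, 0x9286, valueContext, binary.BigEndian)
if err != nil {
	// Tags with no known decoding surface ErrUnhandledUnknownTypedTag.
	panic(err)
}

if uc, ok := value.(exif.TagUnknownType_9298_UserComment); ok {
	fmt.Printf("comment: %s\n", uc.EncodingBytes)
}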
310 vendor/github.com/dsoprea/go-exif/type.go generated vendored Normal file
@@ -0,0 +1,310 @@
package exif

import (
	"errors"
	"fmt"
	"strconv"
	"strings"

	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

type TagTypePrimitive uint16

func (tagType TagTypePrimitive) String() string {
	return TypeNames[tagType]
}

func (tagType TagTypePrimitive) Size() int {
	if tagType == TypeByte {
		return 1
	} else if tagType == TypeAscii || tagType == TypeAsciiNoNul {
		return 1
	} else if tagType == TypeShort {
		return 2
	} else if tagType == TypeLong {
		return 4
	} else if tagType == TypeRational {
		return 8
	} else if tagType == TypeSignedLong {
		return 4
	} else if tagType == TypeSignedRational {
		return 8
	} else {
		log.Panicf("can not determine tag-value size for type (%d): [%s]", tagType, TypeNames[tagType])

		// Never called.
		return 0
	}
}

const (
	TypeByte           TagTypePrimitive = 1
	TypeAscii          TagTypePrimitive = 2
	TypeShort          TagTypePrimitive = 3
	TypeLong           TagTypePrimitive = 4
	TypeRational       TagTypePrimitive = 5
	TypeUndefined      TagTypePrimitive = 7
	TypeSignedLong     TagTypePrimitive = 9
	TypeSignedRational TagTypePrimitive = 10

	// TypeAsciiNoNul is just a pseudo-type, for our own purposes.
	TypeAsciiNoNul TagTypePrimitive = 0xf0
)
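The sizes above are the TIFF/EXIF unit widths; a quick hedged sanity check of the `Size` method (imports as in the examples above):

fmt.Println(exif.TypeByte.Size())       // 1
fmt.Println(exif.TypeShort.Size())      // 2
fmt.Println(exif.TypeLong.Size())       // 4
fmt.Println(exif.TypeRational.Size())   // 8 (two LONGs: numerator, denominator)
fmt.Println(exif.TypeAsciiNoNul.Size()) // 1 (pseudo-type, same width as ASCII)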
var (
	typeLogger = log.NewLogger("exif.type")
)

var (
	// TODO(dustin): Rename TypeNames to typeNames and add a getter.
	TypeNames = map[TagTypePrimitive]string{
		TypeByte:           "BYTE",
		TypeAscii:          "ASCII",
		TypeShort:          "SHORT",
		TypeLong:           "LONG",
		TypeRational:       "RATIONAL",
		TypeUndefined:      "UNDEFINED",
		TypeSignedLong:     "SLONG",
		TypeSignedRational: "SRATIONAL",

		TypeAsciiNoNul: "_ASCII_NO_NUL",
	}

	TypeNamesR = map[string]TagTypePrimitive{}
)

var (
	// ErrNotEnoughData is used when there isn't enough data to accommodate what
	// we're trying to parse (sizeof(type) * unit_count).
	ErrNotEnoughData = errors.New("not enough data for type")

	// ErrWrongType is used when we try to parse anything other than the
	// current type.
	ErrWrongType = errors.New("wrong type, can not parse")

	// ErrUnhandledUnknownTypedTag is used when we try to parse a tag that's
	// recorded as an "unknown" type but is not a documented tag (therefore
	// leaving us not knowing how to read it).
	ErrUnhandledUnknownTypedTag = errors.New("not a standard unknown-typed tag")
)

type Rational struct {
	Numerator   uint32
	Denominator uint32
}

type SignedRational struct {
	Numerator   int32
	Denominator int32
}

func TagTypeSize(tagType TagTypePrimitive) int {

	// DEPRECATED(dustin): `(TagTypePrimitive).Size()` should be used, directly.

	return tagType.Size()
}
// Format returns a stringified value for the given bytes. Automatically
// calculates count based on type size.
func Format(rawBytes []byte, tagType TagTypePrimitive, justFirst bool, byteOrder binary.ByteOrder) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): !! Add tests

	typeSize := tagType.Size()

	if len(rawBytes)%typeSize != 0 {
		log.Panicf("byte-count (%d) does not align for [%s] type with a size of (%d) bytes", len(rawBytes), TypeNames[tagType], typeSize)
	}

	// unitCount is the calculated unit-count. This should equal the original
	// value from the tag (pre-resolution).
	unitCount := uint32(len(rawBytes) / typeSize)

	// Truncate the items if it's not bytes or a string and we just want the first.

	valueSuffix := ""
	if justFirst && unitCount > 1 && tagType != TypeByte && tagType != TypeAscii && tagType != TypeAsciiNoNul {
		unitCount = 1
		valueSuffix = "..."
	}

	if tagType == TypeByte {
		items, err := parser.ParseBytes(rawBytes, unitCount)
		log.PanicIf(err)

		return DumpBytesToString(items), nil
	} else if tagType == TypeAscii {
		phrase, err := parser.ParseAscii(rawBytes, unitCount)
		log.PanicIf(err)

		return phrase, nil
	} else if tagType == TypeAsciiNoNul {
		phrase, err := parser.ParseAsciiNoNul(rawBytes, unitCount)
		log.PanicIf(err)

		return phrase, nil
	} else if tagType == TypeShort {
		items, err := parser.ParseShorts(rawBytes, unitCount, byteOrder)
		log.PanicIf(err)

		if len(items) > 0 {
			if justFirst {
				return fmt.Sprintf("%v%s", items[0], valueSuffix), nil
			} else {
				return fmt.Sprintf("%v", items), nil
			}
		} else {
			return "", nil
		}
	} else if tagType == TypeLong {
		items, err := parser.ParseLongs(rawBytes, unitCount, byteOrder)
		log.PanicIf(err)

		if len(items) > 0 {
			if justFirst {
				return fmt.Sprintf("%v%s", items[0], valueSuffix), nil
			} else {
				return fmt.Sprintf("%v", items), nil
			}
		} else {
			return "", nil
		}
	} else if tagType == TypeRational {
		items, err := parser.ParseRationals(rawBytes, unitCount, byteOrder)
		log.PanicIf(err)

		if len(items) > 0 {
			parts := make([]string, len(items))
			for i, r := range items {
				parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator)
			}

			if justFirst {
				return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil
			} else {
				return fmt.Sprintf("%v", parts), nil
			}
		} else {
			return "", nil
		}
	} else if tagType == TypeSignedLong {
		items, err := parser.ParseSignedLongs(rawBytes, unitCount, byteOrder)
		log.PanicIf(err)

		if len(items) > 0 {
			if justFirst {
				return fmt.Sprintf("%v%s", items[0], valueSuffix), nil
			} else {
				return fmt.Sprintf("%v", items), nil
			}
		} else {
			return "", nil
		}
	} else if tagType == TypeSignedRational {
		items, err := parser.ParseSignedRationals(rawBytes, unitCount, byteOrder)
		log.PanicIf(err)

		parts := make([]string, len(items))
		for i, r := range items {
			parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator)
		}

		if len(items) > 0 {
			if justFirst {
				return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil
			} else {
				return fmt.Sprintf("%v", parts), nil
			}
		} else {
			return "", nil
		}
	} else {
		// Affects only "unknown" values, in general.
		log.Panicf("value of type [%s] can not be formatted into string", tagType.String())

		// Never called.
		return "", nil
	}
}
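A hedged snippet of calling `Format` above (the package-internal `parser` it relies on is not shown in this diff); behavior here is read directly from the branches above:

// Two big-endian SHORT units.
raw := []byte{0x00, 0x01, 0x00, 0x02}

s, err := exif.Format(raw, exif.TypeShort, false, binary.BigEndian)
if err != nil {
	panic(err)
}
fmt.Println(s) // [1 2]

// justFirst=true keeps only the first unit and appends "...".
s, _ = exif.Format(raw, exif.TypeShort, true, binary.BigEndian)
fmt.Println(s) // 1...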
func EncodeStringToBytes(tagType TagTypePrimitive, valueString string) (value interface{}, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	if tagType == TypeUndefined {
		// TODO(dustin): Circle back to this.
		log.Panicf("undefined-type values are not supported")
	}

	if tagType == TypeByte {
		return []byte(valueString), nil
	} else if tagType == TypeAscii || tagType == TypeAsciiNoNul {
		// Whether or not we're putting a NUL on the end is only relevant for
		// byte-level encoding. This function really just supports a user
		// interface.

		return valueString, nil
	} else if tagType == TypeShort {
		n, err := strconv.ParseUint(valueString, 10, 16)
		log.PanicIf(err)

		return uint16(n), nil
	} else if tagType == TypeLong {
		n, err := strconv.ParseUint(valueString, 10, 32)
		log.PanicIf(err)

		return uint32(n), nil
	} else if tagType == TypeRational {
		parts := strings.SplitN(valueString, "/", 2)

		numerator, err := strconv.ParseUint(parts[0], 10, 32)
		log.PanicIf(err)

		denominator, err := strconv.ParseUint(parts[1], 10, 32)
		log.PanicIf(err)

		return Rational{
			Numerator:   uint32(numerator),
			Denominator: uint32(denominator),
		}, nil
	} else if tagType == TypeSignedLong {
		n, err := strconv.ParseInt(valueString, 10, 32)
		log.PanicIf(err)

		return int32(n), nil
	} else if tagType == TypeSignedRational {
		parts := strings.SplitN(valueString, "/", 2)

		numerator, err := strconv.ParseInt(parts[0], 10, 32)
		log.PanicIf(err)

		denominator, err := strconv.ParseInt(parts[1], 10, 32)
		log.PanicIf(err)

		return SignedRational{
			Numerator:   int32(numerator),
			Denominator: int32(denominator),
		}, nil
	}

	log.Panicf("from-string encoding for type not supported; this shouldn't happen: [%s]", tagType.String())
	return nil, nil
}

func init() {
	for typeId, typeName := range TypeNames {
		TypeNamesR[typeName] = typeId
	}
}
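A hedged snippet of `EncodeStringToBytes` above, which parses user-supplied strings into typed values ("n/d" for the rational types):

v, err := exif.EncodeStringToBytes(exif.TypeRational, "1/8")
if err != nil {
	panic(err)
}

r := v.(exif.Rational)
fmt.Println(r.Numerator, r.Denominator) // 1 8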
262 vendor/github.com/dsoprea/go-exif/type_encode.go generated vendored Normal file
@@ -0,0 +1,262 @@
package exif

import (
	"bytes"
	"reflect"

	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

var (
	typeEncodeLogger = log.NewLogger("exif.type_encode")
)

// EncodedData encapsulates the compound output of an encoding operation.
type EncodedData struct {
	Type    TagTypePrimitive
	Encoded []byte

	// TODO(dustin): Is this really necessary? We might have this just to correlate to the incoming stream format (raw bytes and a unit-count both for incoming and outgoing).
	UnitCount uint32
}

type ValueEncoder struct {
	byteOrder binary.ByteOrder
}

func NewValueEncoder(byteOrder binary.ByteOrder) *ValueEncoder {
	return &ValueEncoder{
		byteOrder: byteOrder,
	}
}
func (ve *ValueEncoder) encodeBytes(value []uint8) (ed EncodedData, err error) {
	ed.Type = TypeByte
	ed.Encoded = []byte(value)
	ed.UnitCount = uint32(len(value))

	return ed, nil
}

func (ve *ValueEncoder) encodeAscii(value string) (ed EncodedData, err error) {
	ed.Type = TypeAscii

	ed.Encoded = []byte(value)
	ed.Encoded = append(ed.Encoded, 0)

	ed.UnitCount = uint32(len(ed.Encoded))

	return ed, nil
}

// encodeAsciiNoNul returns a string encoded as a byte-string without a trailing
// NUL byte.
//
// Note that:
//
// 1. This type can not be automatically encoded using `Encode()`. The default
//    mode is to encode *with* a trailing NUL byte using `encodeAscii`. Only
//    certain undefined-type tags use an unterminated ASCII string, and these
//    are exceptional in nature.
//
// 2. The presence of this method allows us to completely test the complementary
//    no-nul parser.
//
func (ve *ValueEncoder) encodeAsciiNoNul(value string) (ed EncodedData, err error) {
	ed.Type = TypeAsciiNoNul
	ed.Encoded = []byte(value)
	ed.UnitCount = uint32(len(ed.Encoded))

	return ed, nil
}

func (ve *ValueEncoder) encodeShorts(value []uint16) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ed.UnitCount = uint32(len(value))
	ed.Encoded = make([]byte, ed.UnitCount*2)

	for i := uint32(0); i < ed.UnitCount; i++ {
		ve.byteOrder.PutUint16(ed.Encoded[i*2:(i+1)*2], value[i])
	}

	ed.Type = TypeShort

	return ed, nil
}

func (ve *ValueEncoder) encodeLongs(value []uint32) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ed.UnitCount = uint32(len(value))
	ed.Encoded = make([]byte, ed.UnitCount*4)

	for i := uint32(0); i < ed.UnitCount; i++ {
		ve.byteOrder.PutUint32(ed.Encoded[i*4:(i+1)*4], value[i])
	}

	ed.Type = TypeLong

	return ed, nil
}

func (ve *ValueEncoder) encodeRationals(value []Rational) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ed.UnitCount = uint32(len(value))
	ed.Encoded = make([]byte, ed.UnitCount*8)

	for i := uint32(0); i < ed.UnitCount; i++ {
		ve.byteOrder.PutUint32(ed.Encoded[i*8+0:i*8+4], value[i].Numerator)
		ve.byteOrder.PutUint32(ed.Encoded[i*8+4:i*8+8], value[i].Denominator)
	}

	ed.Type = TypeRational

	return ed, nil
}

func (ve *ValueEncoder) encodeSignedLongs(value []int32) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ed.UnitCount = uint32(len(value))

	b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount))

	for i := uint32(0); i < ed.UnitCount; i++ {
		err := binary.Write(b, ve.byteOrder, value[i])
		log.PanicIf(err)
	}

	ed.Type = TypeSignedLong
	ed.Encoded = b.Bytes()

	return ed, nil
}

func (ve *ValueEncoder) encodeSignedRationals(value []SignedRational) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ed.UnitCount = uint32(len(value))

	b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount))

	for i := uint32(0); i < ed.UnitCount; i++ {
		err := binary.Write(b, ve.byteOrder, value[i].Numerator)
		log.PanicIf(err)

		err = binary.Write(b, ve.byteOrder, value[i].Denominator)
		log.PanicIf(err)
	}

	ed.Type = TypeSignedRational
	ed.Encoded = b.Bytes()

	return ed, nil
}
// Encode returns bytes for the given value, infering type from the actual
|
||||||
|
// value. This does not support `TypeAsciiNoNull` (all strings are encoded as
|
||||||
|
// `TypeAscii`).
|
||||||
|
func (ve *ValueEncoder) Encode(value interface{}) (ed EncodedData, err error) {
|
||||||
|
defer func() {
|
||||||
|
if state := recover(); state != nil {
|
||||||
|
err = log.Wrap(state.(error))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// TODO(dustin): This is redundant with EncodeWithType. Refactor one to use the other.
|
||||||
|
|
||||||
|
switch value.(type) {
|
||||||
|
case []byte:
|
||||||
|
ed, err = ve.encodeBytes(value.([]byte))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case string:
|
||||||
|
ed, err = ve.encodeAscii(value.(string))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case []uint16:
|
||||||
|
ed, err = ve.encodeShorts(value.([]uint16))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case []uint32:
|
||||||
|
ed, err = ve.encodeLongs(value.([]uint32))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case []Rational:
|
||||||
|
ed, err = ve.encodeRationals(value.([]Rational))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case []int32:
|
||||||
|
ed, err = ve.encodeSignedLongs(value.([]int32))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case []SignedRational:
|
||||||
|
ed, err = ve.encodeSignedRationals(value.([]SignedRational))
|
||||||
|
log.PanicIf(err)
|
||||||
|
default:
|
||||||
|
log.Panicf("value not encodable: [%s] [%v]", reflect.TypeOf(value), value)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ed, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeWithType returns bytes for the given value, using the given `TagType`
|
||||||
|
// value to determine how to encode. This supports `TypeAsciiNoNul`.
|
||||||
|
func (ve *ValueEncoder) EncodeWithType(tt TagType, value interface{}) (ed EncodedData, err error) {
|
||||||
|
defer func() {
|
||||||
|
if state := recover(); state != nil {
|
||||||
|
err = log.Wrap(state.(error))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// TODO(dustin): This is redundant with Encode. Refactor one to use the other.
|
||||||
|
|
||||||
|
switch tt.Type() {
|
||||||
|
case TypeByte:
|
||||||
|
ed, err = ve.encodeBytes(value.([]byte))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case TypeAscii:
|
||||||
|
ed, err = ve.encodeAscii(value.(string))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case TypeAsciiNoNul:
|
||||||
|
ed, err = ve.encodeAsciiNoNul(value.(string))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case TypeShort:
|
||||||
|
ed, err = ve.encodeShorts(value.([]uint16))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case TypeLong:
|
||||||
|
ed, err = ve.encodeLongs(value.([]uint32))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case TypeRational:
|
||||||
|
ed, err = ve.encodeRationals(value.([]Rational))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case TypeSignedLong:
|
||||||
|
ed, err = ve.encodeSignedLongs(value.([]int32))
|
||||||
|
log.PanicIf(err)
|
||||||
|
case TypeSignedRational:
|
||||||
|
ed, err = ve.encodeSignedRationals(value.([]SignedRational))
|
||||||
|
log.PanicIf(err)
|
||||||
|
default:
|
||||||
|
log.Panicf("value not encodable (with type): %v [%v]", tt, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ed, nil
|
||||||
|
}
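For orientation (not part of the vendored file), a minimal sketch of driving the encoder above; the import path is the v1 `exif` package vendored here, and the output layout follows `encodeShorts`:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/dsoprea/go-exif"
)

func main() {
	ve := exif.NewValueEncoder(binary.BigEndian)

	// Encode infers TypeShort from the []uint16 Go type.
	ed, err := ve.Encode([]uint16{0x1122, 0x3344})
	if err != nil {
		panic(err)
	}

	// Two units of two bytes each, big-endian: 11 22 33 44.
	fmt.Printf("count=%d bytes=% x\n", ed.UnitCount, ed.Encoded)
}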
222
vendor/github.com/dsoprea/go-exif/utility.go
generated
vendored
Normal file
@@ -0,0 +1,222 @@
package exif

import (
	"bytes"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/dsoprea/go-logging"
)

func DumpBytes(data []byte) {
	fmt.Printf("DUMP: ")
	for _, x := range data {
		fmt.Printf("%02x ", x)
	}

	fmt.Printf("\n")
}

func DumpBytesClause(data []byte) {
	fmt.Printf("DUMP: ")

	fmt.Printf("[]byte { ")

	for i, x := range data {
		fmt.Printf("0x%02x", x)

		if i < len(data)-1 {
			fmt.Printf(", ")
		}
	}

	fmt.Printf(" }\n")
}

func DumpBytesToString(data []byte) string {
	b := new(bytes.Buffer)

	for i, x := range data {
		_, err := b.WriteString(fmt.Sprintf("%02x", x))
		log.PanicIf(err)

		if i < len(data)-1 {
			_, err := b.WriteRune(' ')
			log.PanicIf(err)
		}
	}

	return b.String()
}

func DumpBytesClauseToString(data []byte) string {
	b := new(bytes.Buffer)

	for i, x := range data {
		_, err := b.WriteString(fmt.Sprintf("0x%02x", x))
		log.PanicIf(err)

		if i < len(data)-1 {
			_, err := b.WriteString(", ")
			log.PanicIf(err)
		}
	}

	return b.String()
}
// ParseExifFullTimestamp parses dates like "2018:11:30 13:01:49" into a UTC
// `time.Time` struct.
func ParseExifFullTimestamp(fullTimestampPhrase string) (timestamp time.Time, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	parts := strings.Split(fullTimestampPhrase, " ")
	datestampValue, timestampValue := parts[0], parts[1]

	dateParts := strings.Split(datestampValue, ":")

	year, err := strconv.ParseUint(dateParts[0], 10, 16)
	if err != nil {
		log.Panicf("could not parse year")
	}

	month, err := strconv.ParseUint(dateParts[1], 10, 8)
	if err != nil {
		log.Panicf("could not parse month")
	}

	day, err := strconv.ParseUint(dateParts[2], 10, 8)
	if err != nil {
		log.Panicf("could not parse day")
	}

	timeParts := strings.Split(timestampValue, ":")

	hour, err := strconv.ParseUint(timeParts[0], 10, 8)
	if err != nil {
		log.Panicf("could not parse hour")
	}

	minute, err := strconv.ParseUint(timeParts[1], 10, 8)
	if err != nil {
		log.Panicf("could not parse minute")
	}

	second, err := strconv.ParseUint(timeParts[2], 10, 8)
	if err != nil {
		log.Panicf("could not parse second")
	}

	timestamp = time.Date(int(year), time.Month(month), int(day), int(hour), int(minute), int(second), 0, time.UTC)
	return timestamp, nil
}
// ExifFullTimestampString produces a string like "2018:11:30 13:01:49" from a
// `time.Time` struct. It will attempt to convert to UTC first.
func ExifFullTimestampString(t time.Time) (fullTimestampPhrase string) {
	t = t.UTC()

	return fmt.Sprintf("%04d:%02d:%02d %02d:%02d:%02d", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
}
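A quick round-trip sketch of the two timestamp helpers above (hypothetical `main` package, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/dsoprea/go-exif"
)

func main() {
	// EXIF timestamps use colons in the date part, unlike RFC 3339.
	t, err := exif.ParseExifFullTimestamp("2018:11:30 13:01:49")
	if err != nil {
		panic(err)
	}

	// Re-encoding the parsed UTC time reproduces the original phrase.
	fmt.Println(exif.ExifFullTimestampString(t)) // 2018:11:30 13:01:49
}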
// ExifTag is one simple representation of a tag in a flat list of all of them.
type ExifTag struct {
	IfdPath string `json:"ifd_path"`

	TagId   uint16 `json:"id"`
	TagName string `json:"name"`

	TagTypeId   TagTypePrimitive `json:"type_id"`
	TagTypeName string           `json:"type_name"`
	Value       interface{}      `json:"value"`
	ValueBytes  []byte           `json:"value_bytes"`

	ChildIfdPath string `json:"child_ifd_path"`
}

// String returns a string representation.
func (et ExifTag) String() string {
	return fmt.Sprintf("ExifTag<IFD-PATH=[%s] TAG-ID=(0x%02x) TAG-NAME=[%s] TAG-TYPE=[%s] VALUE=[%v] VALUE-BYTES=(%d) CHILD-IFD-PATH=[%s]>", et.IfdPath, et.TagId, et.TagName, et.TagTypeName, et.Value, len(et.ValueBytes), et.ChildIfdPath)
}
// GetFlatExifData returns a simple, flat representation of all tags.
func GetFlatExifData(exifData []byte) (exifTags []ExifTag, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	im := NewIfdMappingWithStandard()
	ti := NewTagIndex()

	_, index, err := Collect(im, ti, exifData)
	log.PanicIf(err)

	q := []*Ifd{index.RootIfd}

	exifTags = make([]ExifTag, 0)

	for len(q) > 0 {
		var ifd *Ifd
		ifd, q = q[0], q[1:]

		ti := NewTagIndex()
		for _, ite := range ifd.Entries {
			tagName := ""

			it, err := ti.Get(ifd.IfdPath, ite.TagId)
			if err != nil {
				// If it's a non-standard tag, just leave the name blank.
				if log.Is(err, ErrTagNotFound) != true {
					log.PanicIf(err)
				}
			} else {
				tagName = it.Name
			}

			value, err := ifd.TagValue(ite)
			if err != nil {
				if err == ErrUnhandledUnknownTypedTag {
					value = UnparseableUnknownTagValuePlaceholder
				} else {
					log.Panic(err)
				}
			}

			valueBytes, err := ifd.TagValueBytes(ite)
			if err != nil && err != ErrUnhandledUnknownTypedTag {
				log.Panic(err)
			}

			et := ExifTag{
				IfdPath:      ifd.IfdPath,
				TagId:        ite.TagId,
				TagName:      tagName,
				TagTypeId:    ite.TagType,
				TagTypeName:  TypeNames[ite.TagType],
				Value:        value,
				ValueBytes:   valueBytes,
				ChildIfdPath: ite.ChildIfdPath,
			}

			exifTags = append(exifTags, et)
		}

		for _, childIfd := range ifd.Children {
			q = append(q, childIfd)
		}

		if ifd.NextIfd != nil {
			q = append(q, ifd.NextIfd)
		}
	}

	return exifTags, nil
}
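A minimal sketch of the flat-tag helper above in use ("photo.jpg" is a hypothetical input file; `SearchFileAndExtractExif` is this package's own EXIF extractor):

package main

import (
	"fmt"

	"github.com/dsoprea/go-exif"
)

func main() {
	// Scan the file for the EXIF blob, then flatten every IFD entry.
	rawExif, err := exif.SearchFileAndExtractExif("photo.jpg")
	if err != nil {
		panic(err)
	}

	entries, err := exif.GetFlatExifData(rawExif)
	if err != nil {
		panic(err)
	}

	for _, entry := range entries {
		fmt.Printf("%s %s = %v\n", entry.IfdPath, entry.TagName, entry.Value)
	}
}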
0
vendor/github.com/dsoprea/go-exif/v2/.MODULE_ROOT
generated
vendored
Normal file
9
vendor/github.com/dsoprea/go-exif/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,9 @@
MIT LICENSE

Copyright 2019 Dustin Oprea

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
659
vendor/github.com/dsoprea/go-exif/v2/common/ifd.go
generated
vendored
Normal file
@@ -0,0 +1,659 @@
package exifcommon

import (
	"errors"
	"fmt"
	"strings"

	"github.com/dsoprea/go-logging"
)

var (
	ifdLogger = log.NewLogger("exifcommon.ifd")
)

var (
	ErrChildIfdNotMapped = errors.New("no child-IFD for that tag-ID under parent")
)

// MappedIfd is one node in the IFD-mapping.
type MappedIfd struct {
	ParentTagId uint16
	Placement   []uint16
	Path        []string

	Name     string
	TagId    uint16
	Children map[uint16]*MappedIfd
}

// String returns a descriptive string.
func (mi *MappedIfd) String() string {
	pathPhrase := mi.PathPhrase()
	return fmt.Sprintf("MappedIfd<(0x%04X) [%s] PATH=[%s]>", mi.TagId, mi.Name, pathPhrase)
}

// PathPhrase returns a non-fully-qualified IFD path.
func (mi *MappedIfd) PathPhrase() string {
	return strings.Join(mi.Path, "/")
}
// TODO(dustin): Refactor this to use IfdIdentity structs.

// IfdMapping describes all of the IFDs that we currently recognize.
type IfdMapping struct {
	rootNode *MappedIfd
}

// NewIfdMapping returns a new IfdMapping struct.
func NewIfdMapping() (ifdMapping *IfdMapping) {
	rootNode := &MappedIfd{
		Path:     make([]string, 0),
		Children: make(map[uint16]*MappedIfd),
	}

	return &IfdMapping{
		rootNode: rootNode,
	}
}

// NewIfdMappingWithStandard returns a new IfdMapping struct preloaded with the
// standard IFDs.
func NewIfdMappingWithStandard() (ifdMapping *IfdMapping) {
	defer func() {
		if state := recover(); state != nil {
			err := log.Wrap(state.(error))
			log.Panic(err)
		}
	}()

	im := NewIfdMapping()

	err := LoadStandardIfds(im)
	log.PanicIf(err)

	return im
}

// Get returns the node given the path slice.
func (im *IfdMapping) Get(parentPlacement []uint16) (childIfd *MappedIfd, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ptr := im.rootNode
	for _, tagId := range parentPlacement {
		if descendantPtr, found := ptr.Children[tagId]; found == false {
			log.Panicf("ifd child with tag-ID (%04x) not registered: [%s]", tagId, ptr.PathPhrase())
		} else {
			ptr = descendantPtr
		}
	}

	return ptr, nil
}

// GetWithPath returns the node given the path string.
func (im *IfdMapping) GetWithPath(pathPhrase string) (mi *MappedIfd, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	if pathPhrase == "" {
		log.Panicf("path-phrase is empty")
	}

	path := strings.Split(pathPhrase, "/")
	ptr := im.rootNode

	for _, name := range path {
		var hit *MappedIfd
		for _, mi := range ptr.Children {
			if mi.Name == name {
				hit = mi
				break
			}
		}

		if hit == nil {
			log.Panicf("ifd child with name [%s] not registered: [%s]", name, ptr.PathPhrase())
		}

		ptr = hit
	}

	return ptr, nil
}

// GetChild is a convenience function to get the child path for a given parent
// placement and child tag-ID.
func (im *IfdMapping) GetChild(parentPathPhrase string, tagId uint16) (mi *MappedIfd, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	mi, err = im.GetWithPath(parentPathPhrase)
	log.PanicIf(err)

	for _, childMi := range mi.Children {
		if childMi.TagId == tagId {
			return childMi, nil
		}
	}

	// Whether or not an IFD is defined in data, such an IFD is not registered
	// and would be unknown.
	log.Panic(ErrChildIfdNotMapped)
	return nil, nil
}
// IfdTagIdAndIndex represents a specific part of the IFD path.
//
// This is a legacy type.
type IfdTagIdAndIndex struct {
	Name  string
	TagId uint16
	Index int
}

// String returns a descriptive string.
func (itii IfdTagIdAndIndex) String() string {
	return fmt.Sprintf("IfdTagIdAndIndex<NAME=[%s] ID=(%04x) INDEX=(%d)>", itii.Name, itii.TagId, itii.Index)
}

// ResolvePath takes a list of names, which can also be suffixed with indices
// (to identify the second, third, etc.. sibling IFD) and returns a list of
// tag-IDs and those indices.
//
// Example:
//
// - IFD/Exif/Iop
// - IFD0/Exif/Iop
//
// This is the only call that supports adding the numeric indices.
func (im *IfdMapping) ResolvePath(pathPhrase string) (lineage []IfdTagIdAndIndex, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	pathPhrase = strings.TrimSpace(pathPhrase)

	if pathPhrase == "" {
		log.Panicf("can not resolve empty path-phrase")
	}

	path := strings.Split(pathPhrase, "/")
	lineage = make([]IfdTagIdAndIndex, len(path))

	ptr := im.rootNode
	empty := IfdTagIdAndIndex{}
	for i, name := range path {
		indexByte := name[len(name)-1]
		index := 0
		if indexByte >= '0' && indexByte <= '9' {
			index = int(indexByte - '0')
			name = name[:len(name)-1]
		}

		itii := IfdTagIdAndIndex{}
		for _, mi := range ptr.Children {
			if mi.Name != name {
				continue
			}

			itii.Name = name
			itii.TagId = mi.TagId
			itii.Index = index

			ptr = mi

			break
		}

		if itii == empty {
			log.Panicf("ifd child with name [%s] not registered: [%s]", name, pathPhrase)
		}

		lineage[i] = itii
	}

	return lineage, nil
}
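A small sketch of resolving an index-suffixed path with the method above (hypothetical `main` package; the standard IFDs are registered by `LoadStandardIfds`, defined later in this file):

package main

import (
	"fmt"

	exifcommon "github.com/dsoprea/go-exif/v2/common"
)

func main() {
	im := exifcommon.NewIfdMappingWithStandard()

	// "IFD1" parses as the root name "IFD" with sibling index 1.
	lineage, err := im.ResolvePath("IFD1/Exif")
	if err != nil {
		panic(err)
	}

	for _, itii := range lineage {
		fmt.Println(itii)
	}
}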
// FqPathPhraseFromLineage returns the fully-qualified IFD path from the slice.
func (im *IfdMapping) FqPathPhraseFromLineage(lineage []IfdTagIdAndIndex) (fqPathPhrase string) {
	fqPathParts := make([]string, len(lineage))
	for i, itii := range lineage {
		if itii.Index > 0 {
			fqPathParts[i] = fmt.Sprintf("%s%d", itii.Name, itii.Index)
		} else {
			fqPathParts[i] = itii.Name
		}
	}

	return strings.Join(fqPathParts, "/")
}

// PathPhraseFromLineage returns the non-fully-qualified IFD path from the
// slice.
func (im *IfdMapping) PathPhraseFromLineage(lineage []IfdTagIdAndIndex) (pathPhrase string) {
	pathParts := make([]string, len(lineage))
	for i, itii := range lineage {
		pathParts[i] = itii.Name
	}

	return strings.Join(pathParts, "/")
}

// StripPathPhraseIndices returns a non-fully-qualified path-phrase (no
// indices).
func (im *IfdMapping) StripPathPhraseIndices(pathPhrase string) (strippedPathPhrase string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	lineage, err := im.ResolvePath(pathPhrase)
	log.PanicIf(err)

	strippedPathPhrase = im.PathPhraseFromLineage(lineage)
	return strippedPathPhrase, nil
}

// Add puts the given IFD at the given position of the tree. The position of the
// tree is referred to as the placement and is represented by a set of tag-IDs,
// where the leftmost is the root tag and the tags going to the right are
// progressive descendants.
func (im *IfdMapping) Add(parentPlacement []uint16, tagId uint16, name string) (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): !! It would be nicer to provide a list of names in the placement rather than tag-IDs.

	ptr, err := im.Get(parentPlacement)
	log.PanicIf(err)

	path := make([]string, len(parentPlacement)+1)
	if len(parentPlacement) > 0 {
		copy(path, ptr.Path)
	}

	path[len(path)-1] = name

	placement := make([]uint16, len(parentPlacement)+1)
	if len(placement) > 0 {
		copy(placement, ptr.Placement)
	}

	placement[len(placement)-1] = tagId

	childIfd := &MappedIfd{
		ParentTagId: ptr.TagId,
		Path:        path,
		Placement:   placement,
		Name:        name,
		TagId:       tagId,
		Children:    make(map[uint16]*MappedIfd),
	}

	if _, found := ptr.Children[tagId]; found == true {
		log.Panicf("child IFD with tag-ID (%04x) already registered under IFD [%s] with tag-ID (%04x)", tagId, ptr.Name, ptr.TagId)
	}

	ptr.Children[tagId] = childIfd

	return nil
}

func (im *IfdMapping) dumpLineages(stack []*MappedIfd, input []string) (output []string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	currentIfd := stack[len(stack)-1]

	output = input
	for _, childIfd := range currentIfd.Children {
		stackCopy := make([]*MappedIfd, len(stack)+1)

		copy(stackCopy, stack)
		stackCopy[len(stack)] = childIfd

		// Add to output, but don't include the obligatory root node.
		parts := make([]string, len(stackCopy)-1)
		for i, mi := range stackCopy[1:] {
			parts[i] = mi.Name
		}

		output = append(output, strings.Join(parts, "/"))

		output, err = im.dumpLineages(stackCopy, output)
		log.PanicIf(err)
	}

	return output, nil
}

// DumpLineages returns a slice of strings representing all mappings.
func (im *IfdMapping) DumpLineages() (output []string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	stack := []*MappedIfd{im.rootNode}
	output = make([]string, 0)

	output, err = im.dumpLineages(stack, output)
	log.PanicIf(err)

	return output, nil
}

// LoadStandardIfds loads the standard IFDs into the mapping.
func LoadStandardIfds(im *IfdMapping) (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	err = im.Add(
		[]uint16{},
		IfdStandardIfdIdentity.TagId(), IfdStandardIfdIdentity.Name())

	log.PanicIf(err)

	err = im.Add(
		[]uint16{IfdStandardIfdIdentity.TagId()},
		IfdExifStandardIfdIdentity.TagId(), IfdExifStandardIfdIdentity.Name())

	log.PanicIf(err)

	err = im.Add(
		[]uint16{IfdStandardIfdIdentity.TagId(), IfdExifStandardIfdIdentity.TagId()},
		IfdExifIopStandardIfdIdentity.TagId(), IfdExifIopStandardIfdIdentity.Name())

	log.PanicIf(err)

	err = im.Add(
		[]uint16{IfdStandardIfdIdentity.TagId()},
		IfdGpsInfoStandardIfdIdentity.TagId(), IfdGpsInfoStandardIfdIdentity.Name())

	log.PanicIf(err)

	return nil
}
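The same `Add` call can extend the mapping with a non-standard IFD; the 0xC000 tag-ID and the name below are purely hypothetical:

package main

import (
	exifcommon "github.com/dsoprea/go-exif/v2/common"
)

func main() {
	im := exifcommon.NewIfdMappingWithStandard()

	// Register a hypothetical vendor IFD directly under the root IFD,
	// keyed by its (made-up) pointer tag-ID.
	err := im.Add(
		[]uint16{exifcommon.IfdStandardIfdIdentity.TagId()},
		0xc000, "SomeVendorIFD")
	if err != nil {
		panic(err)
	}
}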
// IfdTag describes a single IFD tag and its parent (if any).
type IfdTag struct {
	parentIfdTag *IfdTag
	tagId        uint16
	name         string
}

func NewIfdTag(parentIfdTag *IfdTag, tagId uint16, name string) IfdTag {
	return IfdTag{
		parentIfdTag: parentIfdTag,
		tagId:        tagId,
		name:         name,
	}
}

// ParentIfd returns the IfdTag of this IFD's parent.
func (it IfdTag) ParentIfd() *IfdTag {
	return it.parentIfdTag
}

// TagId returns the tag-ID of this IFD.
func (it IfdTag) TagId() uint16 {
	return it.tagId
}

// Name returns the simple name of this IFD.
func (it IfdTag) Name() string {
	return it.name
}

// String returns a descriptive string.
func (it IfdTag) String() string {
	parentIfdPhrase := ""
	if it.parentIfdTag != nil {
		parentIfdPhrase = fmt.Sprintf(" PARENT=(0x%04x)[%s]", it.parentIfdTag.tagId, it.parentIfdTag.name)
	}

	return fmt.Sprintf("IfdTag<TAG-ID=(0x%04x) NAME=[%s]%s>", it.tagId, it.name, parentIfdPhrase)
}

var (
	// rootStandardIfd is the standard root IFD.
	rootStandardIfd = NewIfdTag(nil, 0x0000, "IFD") // IFD

	// exifStandardIfd is the standard "Exif" IFD.
	exifStandardIfd = NewIfdTag(&rootStandardIfd, 0x8769, "Exif") // IFD/Exif

	// iopStandardIfd is the standard "Iop" IFD.
	iopStandardIfd = NewIfdTag(&exifStandardIfd, 0xA005, "Iop") // IFD/Exif/Iop

	// gpsInfoStandardIfd is the standard "GPS" IFD.
	gpsInfoStandardIfd = NewIfdTag(&rootStandardIfd, 0x8825, "GPSInfo") // IFD/GPSInfo
)
// IfdIdentityPart represents one component in an IFD path.
type IfdIdentityPart struct {
	Name  string
	Index int
}

// String returns a fully-qualified IFD path.
func (iip IfdIdentityPart) String() string {
	if iip.Index > 0 {
		return fmt.Sprintf("%s%d", iip.Name, iip.Index)
	} else {
		return iip.Name
	}
}

// UnindexedString returns a non-fully-qualified IFD path.
func (iip IfdIdentityPart) UnindexedString() string {
	return iip.Name
}

// IfdIdentity represents a single IFD path and provides access to various
// information and representations.
//
// Only global instances can be used for equality checks.
type IfdIdentity struct {
	ifdTag    IfdTag
	parts     []IfdIdentityPart
	ifdPath   string
	fqIfdPath string
}

// NewIfdIdentity returns a new IfdIdentity struct.
func NewIfdIdentity(ifdTag IfdTag, parts ...IfdIdentityPart) (ii *IfdIdentity) {
	ii = &IfdIdentity{
		ifdTag: ifdTag,
		parts:  parts,
	}

	ii.ifdPath = ii.getIfdPath()
	ii.fqIfdPath = ii.getFqIfdPath()

	return ii
}

// NewIfdIdentityFromString parses a string like "IFD/Exif" or "IFD1" or
// something more exotic with custom IFDs ("SomeIFD4/SomeChildIFD6"). Note that
// this will validate the unindexed IFD structure (because the standard tags from
// the specification are unindexed), but not, obviously, any indices (e.g.
// the numbers in "IFD0", "IFD1", "SomeIFD4/SomeChildIFD6"). It is
// required for the caller to check whether these specific instances
// were actually parsed out of the stream.
func NewIfdIdentityFromString(im *IfdMapping, fqIfdPath string) (ii *IfdIdentity, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	lineage, err := im.ResolvePath(fqIfdPath)
	log.PanicIf(err)

	var lastIt *IfdTag
	identityParts := make([]IfdIdentityPart, len(lineage))
	for i, itii := range lineage {
		// Build out the tag that will eventually point to the IFD represented
		// by the right-most part in the IFD path.

		it := &IfdTag{
			parentIfdTag: lastIt,
			tagId:        itii.TagId,
			name:         itii.Name,
		}

		lastIt = it

		// Create the next IfdIdentity part.

		iip := IfdIdentityPart{
			Name:  itii.Name,
			Index: itii.Index,
		}

		identityParts[i] = iip
	}

	ii = NewIfdIdentity(*lastIt, identityParts...)
	return ii, nil
}

func (ii *IfdIdentity) getFqIfdPath() string {
	partPhrases := make([]string, len(ii.parts))
	for i, iip := range ii.parts {
		partPhrases[i] = iip.String()
	}

	return strings.Join(partPhrases, "/")
}

func (ii *IfdIdentity) getIfdPath() string {
	partPhrases := make([]string, len(ii.parts))
	for i, iip := range ii.parts {
		partPhrases[i] = iip.UnindexedString()
	}

	return strings.Join(partPhrases, "/")
}

// String returns a fully-qualified IFD path.
func (ii *IfdIdentity) String() string {
	return ii.fqIfdPath
}

// UnindexedString returns a non-fully-qualified IFD path.
func (ii *IfdIdentity) UnindexedString() string {
	return ii.ifdPath
}

// IfdTag returns the tag struct behind this IFD.
func (ii *IfdIdentity) IfdTag() IfdTag {
	return ii.ifdTag
}

// TagId returns the tag-ID of the IFD.
func (ii *IfdIdentity) TagId() uint16 {
	return ii.ifdTag.TagId()
}

// LeafPathPart returns the last right-most path-part, which represents the
// current IFD.
func (ii *IfdIdentity) LeafPathPart() IfdIdentityPart {
	return ii.parts[len(ii.parts)-1]
}

// Name returns the simple name of this IFD.
func (ii *IfdIdentity) Name() string {
	return ii.LeafPathPart().Name
}

// Index returns the index of this IFD (more than one IFD under a parent IFD
// will be numbered [0..n]).
func (ii *IfdIdentity) Index() int {
	return ii.LeafPathPart().Index
}

// Equals returns true if the two IfdIdentity instances are effectively
// identical.
//
// Since there's no way to get a specific fully-qualified IFD path without a
// certain slice of parts and all other fields are also derived from this,
// checking that the fully-qualified IFD path is equal is sufficient.
func (ii *IfdIdentity) Equals(ii2 *IfdIdentity) bool {
	return ii.String() == ii2.String()
}

// NewChild creates an IfdIdentity for an IFD that is a child of the current
// IFD.
func (ii *IfdIdentity) NewChild(childIfdTag IfdTag, index int) (iiChild *IfdIdentity) {
	if *childIfdTag.parentIfdTag != ii.ifdTag {
		log.Panicf("can not add child; we are not the parent:\nUS=%v\nCHILD=%v", ii.ifdTag, childIfdTag)
	}

	childPart := IfdIdentityPart{childIfdTag.name, index}
	childParts := append(ii.parts, childPart)

	iiChild = NewIfdIdentity(childIfdTag, childParts...)
	return iiChild
}

// NewSibling creates an IfdIdentity for an IFD that is a sibling to the current
// one.
func (ii *IfdIdentity) NewSibling(index int) (iiSibling *IfdIdentity) {
	parts := make([]IfdIdentityPart, len(ii.parts))

	copy(parts, ii.parts)
	parts[len(parts)-1].Index = index

	iiSibling = NewIfdIdentity(ii.ifdTag, parts...)
	return iiSibling
}

var (
	// IfdStandardIfdIdentity represents the IFD path for IFD0.
	IfdStandardIfdIdentity = NewIfdIdentity(rootStandardIfd, IfdIdentityPart{"IFD", 0})

	// IfdExifStandardIfdIdentity represents the IFD path for IFD0/Exif0.
	IfdExifStandardIfdIdentity = IfdStandardIfdIdentity.NewChild(exifStandardIfd, 0)

	// IfdExifIopStandardIfdIdentity represents the IFD path for IFD0/Exif0/Iop0.
	IfdExifIopStandardIfdIdentity = IfdExifStandardIfdIdentity.NewChild(iopStandardIfd, 0)

	// IfdGpsInfoStandardIfdIdentity represents the IFD path for IFD0/GPSInfo0.
	IfdGpsInfoStandardIfdIdentity = IfdStandardIfdIdentity.NewChild(gpsInfoStandardIfd, 0)

	// Ifd1StandardIfdIdentity represents the IFD path for IFD1.
	Ifd1StandardIfdIdentity = NewIfdIdentity(rootStandardIfd, IfdIdentityPart{"IFD", 1})
)

var (
	IfdPathStandard        = IfdStandardIfdIdentity
	IfdPathStandardExif    = IfdExifStandardIfdIdentity
	IfdPathStandardExifIop = IfdExifIopStandardIfdIdentity
	IfdPathStandardGps     = IfdGpsInfoStandardIfdIdentity
)
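A short sketch of building an identity from a path string with the types above (hypothetical `main` package):

package main

import (
	"fmt"

	exifcommon "github.com/dsoprea/go-exif/v2/common"
)

func main() {
	im := exifcommon.NewIfdMappingWithStandard()

	ii, err := exifcommon.NewIfdIdentityFromString(im, "IFD0/Exif")
	if err != nil {
		panic(err)
	}

	// Index 0 is never rendered, so both forms read "IFD/Exif" here.
	fmt.Println(ii.String())
	fmt.Println(ii.UnindexedString())
}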
219
vendor/github.com/dsoprea/go-exif/v2/common/parser.go
generated
vendored
Normal file
@@ -0,0 +1,219 @@
package exifcommon

import (
	"bytes"

	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

var (
	parserLogger = log.NewLogger("exifcommon.parser")
)

// Parser knows how to parse all well-defined, encoded EXIF types.
type Parser struct {
}

// ParseBytes knows how to parse a byte-type value.
func (p *Parser) ParseBytes(data []byte, unitCount uint32) (value []uint8, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): Add test

	count := int(unitCount)

	if len(data) < (TypeByte.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	value = []uint8(data[:count])

	return value, nil
}

// ParseAscii returns a string and auto-strips the trailing NUL character that
// should be at the end of the encoding.
func (p *Parser) ParseAscii(data []byte, unitCount uint32) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): Add test

	count := int(unitCount)

	if len(data) < (TypeAscii.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	if len(data) == 0 || data[count-1] != 0 {
		s := string(data[:count])
		parserLogger.Warningf(nil, "ascii not terminated with nul as expected: [%v]", s)

		return s, nil
	}

	// Auto-strip the NUL from the end. It serves no purpose outside of
	// encoding semantics.

	return string(data[:count-1]), nil
}

// ParseAsciiNoNul returns a string without any consideration for a trailing NUL
// character.
func (p *Parser) ParseAsciiNoNul(data []byte, unitCount uint32) (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): Add test

	count := int(unitCount)

	if len(data) < (TypeAscii.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	return string(data[:count]), nil
}
// ParseShorts knows how to parse an encoded list of shorts.
func (p *Parser) ParseShorts(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint16, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): Add test

	count := int(unitCount)

	if len(data) < (TypeShort.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	value = make([]uint16, count)
	for i := 0; i < count; i++ {
		value[i] = byteOrder.Uint16(data[i*2:])
	}

	return value, nil
}

// ParseLongs knows how to parse an encoded list of unsigned longs.
func (p *Parser) ParseLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): Add test

	count := int(unitCount)

	if len(data) < (TypeLong.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	value = make([]uint32, count)
	for i := 0; i < count; i++ {
		value[i] = byteOrder.Uint32(data[i*4:])
	}

	return value, nil
}

// ParseRationals knows how to parse an encoded list of unsigned rationals.
func (p *Parser) ParseRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []Rational, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): Add test

	count := int(unitCount)

	if len(data) < (TypeRational.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	value = make([]Rational, count)
	for i := 0; i < count; i++ {
		value[i].Numerator = byteOrder.Uint32(data[i*8:])
		value[i].Denominator = byteOrder.Uint32(data[i*8+4:])
	}

	return value, nil
}

// ParseSignedLongs knows how to parse an encoded list of signed longs.
func (p *Parser) ParseSignedLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []int32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): Add test

	count := int(unitCount)

	if len(data) < (TypeSignedLong.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	b := bytes.NewBuffer(data)

	value = make([]int32, count)
	for i := 0; i < count; i++ {
		err := binary.Read(b, byteOrder, &value[i])
		log.PanicIf(err)
	}

	return value, nil
}

// ParseSignedRationals knows how to parse an encoded list of signed
// rationals.
func (p *Parser) ParseSignedRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []SignedRational, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): Add test

	count := int(unitCount)

	if len(data) < (TypeSignedRational.Size() * count) {
		log.Panic(ErrNotEnoughData)
	}

	b := bytes.NewBuffer(data)

	value = make([]SignedRational, count)
	for i := 0; i < count; i++ {
		err = binary.Read(b, byteOrder, &value[i].Numerator)
		log.PanicIf(err)

		err = binary.Read(b, byteOrder, &value[i].Denominator)
		log.PanicIf(err)
	}

	return value, nil
}
88
vendor/github.com/dsoprea/go-exif/v2/common/testing_common.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
package exifcommon

import (
	"os"
	"path"

	"encoding/binary"
	"io/ioutil"

	"github.com/dsoprea/go-logging"
)

var (
	moduleRootPath = ""

	testExifData []byte = nil

	// EncodeDefaultByteOrder is the default byte-order for encoding operations.
	EncodeDefaultByteOrder = binary.BigEndian

	// Default byte order for tests.
	TestDefaultByteOrder = binary.BigEndian
)

func GetModuleRootPath() string {
	if moduleRootPath == "" {
		moduleRootPath = os.Getenv("EXIF_MODULE_ROOT_PATH")
		if moduleRootPath != "" {
			return moduleRootPath
		}

		currentWd, err := os.Getwd()
		log.PanicIf(err)

		currentPath := currentWd

		visited := make([]string, 0)

		for {
			tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT")

			_, err := os.Stat(tryStampFilepath)
			if err != nil && os.IsNotExist(err) != true {
				log.Panic(err)
			} else if err == nil {
				break
			}

			visited = append(visited, tryStampFilepath)

			currentPath = path.Dir(currentPath)
			if currentPath == "/" {
				log.Panicf("could not find module-root: %v", visited)
			}
		}

		moduleRootPath = currentPath
	}

	return moduleRootPath
}

func GetTestAssetsPath() string {
	moduleRootPath := GetModuleRootPath()
	assetsPath := path.Join(moduleRootPath, "assets")

	return assetsPath
}

func getTestImageFilepath() string {
	assetsPath := GetTestAssetsPath()
	testImageFilepath := path.Join(assetsPath, "NDM_8901.jpg")
	return testImageFilepath
}

func getTestExifData() []byte {
	if testExifData == nil {
		assetsPath := GetTestAssetsPath()
		filepath := path.Join(assetsPath, "NDM_8901.jpg.exif")

		var err error

		testExifData, err = ioutil.ReadFile(filepath)
		log.PanicIf(err)
	}

	return testExifData
}
452
vendor/github.com/dsoprea/go-exif/v2/common/type.go
generated
vendored
Normal file
@@ -0,0 +1,452 @@
package exifcommon

import (
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"strings"

	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

var (
	typeLogger = log.NewLogger("exif.type")
)

var (
	// ErrNotEnoughData is used when there isn't enough data to accommodate what
	// we're trying to parse (sizeof(type) * unit_count).
	ErrNotEnoughData = errors.New("not enough data for type")

	// ErrWrongType is used when we try to parse anything other than the
	// current type.
	ErrWrongType = errors.New("wrong type, can not parse")

	// ErrUnhandledUndefinedTypedTag is used when we try to parse a tag that's
	// recorded as an "unknown" type but not a documented tag (therefore
	// leaving us not knowing how to read it).
	ErrUnhandledUndefinedTypedTag = errors.New("not a standard unknown-typed tag")
)

// TagTypePrimitive is a type-alias that lets us easily look up type properties.
type TagTypePrimitive uint16

const (
	// TypeByte describes an encoded list of bytes.
	TypeByte TagTypePrimitive = 1

	// TypeAscii describes an encoded list of characters that is terminated
	// with a NUL in its encoded form.
	TypeAscii TagTypePrimitive = 2

	// TypeShort describes an encoded list of shorts.
	TypeShort TagTypePrimitive = 3

	// TypeLong describes an encoded list of longs.
	TypeLong TagTypePrimitive = 4

	// TypeRational describes an encoded list of rationals.
	TypeRational TagTypePrimitive = 5

	// TypeUndefined describes an encoded value that has a complex/non-clearcut
	// interpretation.
	TypeUndefined TagTypePrimitive = 7

	// We've seen type-8, but have no documentation on it.

	// TypeSignedLong describes an encoded list of signed longs.
	TypeSignedLong TagTypePrimitive = 9

	// TypeSignedRational describes an encoded list of signed rationals.
	TypeSignedRational TagTypePrimitive = 10

	// TypeAsciiNoNul is just a pseudo-type, for our own purposes.
	TypeAsciiNoNul TagTypePrimitive = 0xf0
)

// String returns the name of the type.
func (typeType TagTypePrimitive) String() string {
	return TypeNames[typeType]
}

// Size returns the size of one atomic unit of the type.
func (tagType TagTypePrimitive) Size() int {
	if tagType == TypeByte {
		return 1
	} else if tagType == TypeAscii || tagType == TypeAsciiNoNul {
		return 1
	} else if tagType == TypeShort {
		return 2
	} else if tagType == TypeLong {
		return 4
	} else if tagType == TypeRational {
		return 8
	} else if tagType == TypeSignedLong {
		return 4
	} else if tagType == TypeSignedRational {
		return 8
	} else {
		log.Panicf("can not determine tag-value size for type (%d): [%s]", tagType, TypeNames[tagType])

		// Never called.
		return 0
	}
}

// IsValid returns true if tagType is a valid type.
func (tagType TagTypePrimitive) IsValid() bool {

	// TODO(dustin): Add test

	return tagType == TypeByte ||
		tagType == TypeAscii ||
		tagType == TypeAsciiNoNul ||
		tagType == TypeShort ||
		tagType == TypeLong ||
		tagType == TypeRational ||
		tagType == TypeSignedLong ||
		tagType == TypeSignedRational ||
		tagType == TypeUndefined
}
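For reference, a tiny sketch of the unit sizes that `Size` reports for a few standard types (hypothetical `main` package):

package main

import (
	"fmt"

	exifcommon "github.com/dsoprea/go-exif/v2/common"
)

func main() {
	for _, tt := range []exifcommon.TagTypePrimitive{
		exifcommon.TypeByte,     // 1 byte
		exifcommon.TypeShort,    // 2 bytes
		exifcommon.TypeLong,     // 4 bytes
		exifcommon.TypeRational, // 8 bytes (two longs)
	} {
		fmt.Printf("%s: %d byte(s)\n", tt, tt.Size())
	}
}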
var (
	// TODO(dustin): Rename TypeNames() to typeNames() and add getter.
	TypeNames = map[TagTypePrimitive]string{
		TypeByte:           "BYTE",
		TypeAscii:          "ASCII",
		TypeShort:          "SHORT",
		TypeLong:           "LONG",
		TypeRational:       "RATIONAL",
		TypeUndefined:      "UNDEFINED",
		TypeSignedLong:     "SLONG",
		TypeSignedRational: "SRATIONAL",

		TypeAsciiNoNul: "_ASCII_NO_NUL",
	}

	typeNamesR = map[string]TagTypePrimitive{}
)

// Rational describes an unsigned rational value.
type Rational struct {
	// Numerator is the numerator of the rational value.
	Numerator uint32

	// Denominator is the denominator of the rational value.
	Denominator uint32
}

// SignedRational describes a signed rational value.
type SignedRational struct {
	// Numerator is the numerator of the rational value.
	Numerator int32

	// Denominator is the denominator of the rational value.
	Denominator int32
}
// FormatFromType returns a stringified value for the given encoding. Automatically
// parses. Automatically calculates count based on type size. This function
// also supports undefined-type values (the ones that we support, anyway) by
// way of the String() method that they all require. We can't be more specific
// because we're a base package and we can't refer to it.
func FormatFromType(value interface{}, justFirst bool) (phrase string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): !! Add test

	switch t := value.(type) {
	case []byte:
		return DumpBytesToString(t), nil
	case string:
		return t, nil
	case []uint16:
		if len(t) == 0 {
			return "", nil
		}

		if justFirst == true {
			var valueSuffix string
			if len(t) > 1 {
				valueSuffix = "..."
			}

			return fmt.Sprintf("%v%s", t[0], valueSuffix), nil
		}

		return fmt.Sprintf("%v", t), nil
	case []uint32:
		if len(t) == 0 {
			return "", nil
		}

		if justFirst == true {
			var valueSuffix string
			if len(t) > 1 {
				valueSuffix = "..."
			}

			return fmt.Sprintf("%v%s", t[0], valueSuffix), nil
		}

		return fmt.Sprintf("%v", t), nil
	case []Rational:
		if len(t) == 0 {
			return "", nil
		}

		parts := make([]string, len(t))
		for i, r := range t {
			parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator)

			if justFirst == true {
				break
			}
		}

		if justFirst == true {
			var valueSuffix string
			if len(t) > 1 {
				valueSuffix = "..."
			}

			return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil
		}

		return fmt.Sprintf("%v", parts), nil
	case []int32:
		if len(t) == 0 {
			return "", nil
		}

		if justFirst == true {
			var valueSuffix string
			if len(t) > 1 {
				valueSuffix = "..."
			}

			return fmt.Sprintf("%v%s", t[0], valueSuffix), nil
		}

		return fmt.Sprintf("%v", t), nil
	case []SignedRational:
		if len(t) == 0 {
			return "", nil
		}

		parts := make([]string, len(t))
		for i, r := range t {
			parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator)

			if justFirst == true {
				break
			}
		}

		if justFirst == true {
			var valueSuffix string
			if len(t) > 1 {
				valueSuffix = "..."
			}

			return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil
		}

		return fmt.Sprintf("%v", parts), nil
	case fmt.Stringer:
		// An undefined value that is documented (or that we otherwise support).
		return t.String(), nil
	default:
		// Affects only "unknown" values, in general.
		log.Panicf("type can not be formatted into string: %v", reflect.TypeOf(value).Name())

		// Never called.
		return "", nil
	}
}
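A short sketch of the formatter above with a rational list; with `justFirst` set, only the first item is rendered, suffixed with an ellipsis (hypothetical `main` package):

package main

import (
	"fmt"

	exifcommon "github.com/dsoprea/go-exif/v2/common"
)

func main() {
	value := []exifcommon.Rational{
		{Numerator: 1, Denominator: 50},
		{Numerator: 1, Denominator: 100},
	}

	phrase, err := exifcommon.FormatFromType(value, true)
	if err != nil {
		panic(err)
	}

	fmt.Println(phrase) // 1/50...
}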

// FormatFromBytes returns a stringified value for the given encoding. It
// parses the raw bytes automatically and calculates the unit-count from the
// type size.
func FormatFromBytes(rawBytes []byte, tagType TagTypePrimitive, justFirst bool, byteOrder binary.ByteOrder) (phrase string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// TODO(dustin): !! Add test

	typeSize := tagType.Size()

	if len(rawBytes)%typeSize != 0 {
		log.Panicf("byte-count (%d) does not align for [%s] type with a size of (%d) bytes", len(rawBytes), TypeNames[tagType], typeSize)
	}

	// unitCount is the calculated unit-count. This should equal the original
	// value from the tag (pre-resolution).
	unitCount := uint32(len(rawBytes) / typeSize)

	// Truncate the items if it's not bytes or a string and we just want the
	// first.

	var value interface{}

	switch tagType {
	case TypeByte:
		var err error

		value, err = parser.ParseBytes(rawBytes, unitCount)
		log.PanicIf(err)
	case TypeAscii:
		var err error

		value, err = parser.ParseAscii(rawBytes, unitCount)
		log.PanicIf(err)
	case TypeAsciiNoNul:
		var err error

		value, err = parser.ParseAsciiNoNul(rawBytes, unitCount)
		log.PanicIf(err)
	case TypeShort:
		var err error

		value, err = parser.ParseShorts(rawBytes, unitCount, byteOrder)
		log.PanicIf(err)
	case TypeLong:
		var err error

		value, err = parser.ParseLongs(rawBytes, unitCount, byteOrder)
		log.PanicIf(err)
	case TypeRational:
		var err error

		value, err = parser.ParseRationals(rawBytes, unitCount, byteOrder)
		log.PanicIf(err)
	case TypeSignedLong:
		var err error

		value, err = parser.ParseSignedLongs(rawBytes, unitCount, byteOrder)
		log.PanicIf(err)
	case TypeSignedRational:
		var err error

		value, err = parser.ParseSignedRationals(rawBytes, unitCount, byteOrder)
		log.PanicIf(err)
	default:
		// Affects only "unknown" values, in general.
		log.Panicf("value of type [%s] can not be formatted into string", tagType.String())

		// Never called.
		return "", nil
	}

	phrase, err = FormatFromType(value, justFirst)
	log.PanicIf(err)

	return phrase, nil
}
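
// A rough usage sketch; the raw bytes are invented for illustration and
// decode as two big-endian SHORTs:
//
//	raw := []byte{0x00, 0x01, 0x00, 0x02}
//	phrase, _ := FormatFromBytes(raw, TypeShort, false, binary.BigEndian)
//	// phrase == "[1 2]"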

// TranslateStringToType converts user-provided strings to properly-typed
// values. If a string, returns a string. Else, assumes that it's a single
// number. If a list needs to be processed, it is the caller's responsibility
// to split it (according to whichever convention has been established).
func TranslateStringToType(tagType TagTypePrimitive, valueString string) (value interface{}, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	if tagType == TypeUndefined {
		// The caller should just call String() on the decoded type.
		log.Panicf("undefined-type values are not supported")
	}

	if tagType == TypeByte {
		wide, err := strconv.ParseInt(valueString, 16, 8)
		log.PanicIf(err)

		return byte(wide), nil
	} else if tagType == TypeAscii || tagType == TypeAsciiNoNul {
		// Whether or not we're putting a NUL on the end is only relevant for
		// byte-level encoding. This function really just supports a user
		// interface.

		return valueString, nil
	} else if tagType == TypeShort {
		n, err := strconv.ParseUint(valueString, 10, 16)
		log.PanicIf(err)

		return uint16(n), nil
	} else if tagType == TypeLong {
		n, err := strconv.ParseUint(valueString, 10, 32)
		log.PanicIf(err)

		return uint32(n), nil
	} else if tagType == TypeRational {
		parts := strings.SplitN(valueString, "/", 2)

		numerator, err := strconv.ParseUint(parts[0], 10, 32)
		log.PanicIf(err)

		denominator, err := strconv.ParseUint(parts[1], 10, 32)
		log.PanicIf(err)

		return Rational{
			Numerator:   uint32(numerator),
			Denominator: uint32(denominator),
		}, nil
	} else if tagType == TypeSignedLong {
		n, err := strconv.ParseInt(valueString, 10, 32)
		log.PanicIf(err)

		return int32(n), nil
	} else if tagType == TypeSignedRational {
		parts := strings.SplitN(valueString, "/", 2)

		numerator, err := strconv.ParseInt(parts[0], 10, 32)
		log.PanicIf(err)

		denominator, err := strconv.ParseInt(parts[1], 10, 32)
		log.PanicIf(err)

		return SignedRational{
			Numerator:   int32(numerator),
			Denominator: int32(denominator),
		}, nil
	}

	log.Panicf("from-string encoding for type not supported; this shouldn't happen: [%s]", tagType.String())
	return nil, nil
}
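
// A minimal sketch; the "2/3" input follows the SplitN-on-"/" convention used
// above for rationals:
//
//	v, _ := TranslateStringToType(TypeRational, "2/3")
//	// v == Rational{Numerator: 2, Denominator: 3}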

// GetTypeByName returns the `TagTypePrimitive` for the given type name.
// Returns (0) if not valid.
func GetTypeByName(typeName string) (tagType TagTypePrimitive, found bool) {
	tagType, found = typeNamesR[typeName]
	return tagType, found
}
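
// For instance, assuming "SHORT" is among the names registered in TypeNames
// (which the init() below inverts into typeNamesR):
//
//	tagType, found := GetTypeByName("SHORT")
//	// found == true for any name present in TypeNames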

// BasicTag describes a single tag for any purpose.
type BasicTag struct {
	// FqIfdPath is the fully-qualified IFD-path.
	FqIfdPath string

	// IfdPath is the unindexed IFD-path.
	IfdPath string

	// TagId is the tag-ID.
	TagId uint16
}

func init() {
	for typeId, typeName := range TypeNames {
		typeNamesR[typeName] = typeId
	}
}

79 vendor/github.com/dsoprea/go-exif/v2/common/utility.go (generated, vendored, new file)
@@ -0,0 +1,79 @@
package exifcommon

import (
	"bytes"
	"fmt"
	"time"

	"github.com/dsoprea/go-logging"
)

// DumpBytes prints a list of hex-encoded bytes.
func DumpBytes(data []byte) {
	fmt.Printf("DUMP: ")
	for _, x := range data {
		fmt.Printf("%02x ", x)
	}

	fmt.Printf("\n")
}

// DumpBytesClause prints a list like DumpBytes(), but encapsulated in
// "[]byte { ... }".
func DumpBytesClause(data []byte) {
	fmt.Printf("DUMP: ")

	fmt.Printf("[]byte { ")

	for i, x := range data {
		fmt.Printf("0x%02x", x)

		if i < len(data)-1 {
			fmt.Printf(", ")
		}
	}

	fmt.Printf(" }\n")
}

// DumpBytesToString returns a stringified list of hex-encoded bytes.
func DumpBytesToString(data []byte) string {
	b := new(bytes.Buffer)

	for i, x := range data {
		_, err := b.WriteString(fmt.Sprintf("%02x", x))
		log.PanicIf(err)

		if i < len(data)-1 {
			_, err := b.WriteRune(' ')
			log.PanicIf(err)
		}
	}

	return b.String()
}

// DumpBytesClauseToString returns a comma-separated list of hex-encoded bytes.
func DumpBytesClauseToString(data []byte) string {
	b := new(bytes.Buffer)

	for i, x := range data {
		_, err := b.WriteString(fmt.Sprintf("0x%02x", x))
		log.PanicIf(err)

		if i < len(data)-1 {
			_, err := b.WriteString(", ")
			log.PanicIf(err)
		}
	}

	return b.String()
}

// ExifFullTimestampString produces a string like "2018:11:30 13:01:49" from a
// `time.Time` struct. It will attempt to convert to UTC first.
func ExifFullTimestampString(t time.Time) (fullTimestampPhrase string) {
	t = t.UTC()

	return fmt.Sprintf("%04d:%02d:%02d %02d:%02d:%02d", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
}
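
A quick sketch of the helpers above (the outputs follow directly from the
format strings; the timestamp arguments are invented for illustration):

	s := DumpBytesToString([]byte{0xde, 0xad})       // "de ad"
	c := DumpBytesClauseToString([]byte{0xde, 0xad}) // "0xde, 0xad"
	ts := ExifFullTimestampString(time.Date(2018, 11, 30, 13, 1, 49, 0, time.UTC))
	// ts == "2018:11:30 13:01:49"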

412 vendor/github.com/dsoprea/go-exif/v2/common/value_context.go (generated, vendored, new file)
@@ -0,0 +1,412 @@
package exifcommon

import (
	"errors"

	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

var (
	parser *Parser
)

var (
	// ErrNotFarValue indicates that an offset-based lookup was attempted for a
	// non-offset-based (embedded) value.
	ErrNotFarValue = errors.New("not a far value")
)

// ValueContext embeds all of the parameters required to find and extract the
// actual tag value.
type ValueContext struct {
	unitCount       uint32
	valueOffset     uint32
	rawValueOffset  []byte
	addressableData []byte

	tagType   TagTypePrimitive
	byteOrder binary.ByteOrder

	// undefinedValueTagType is the effective type to use if this is an
	// "undefined" value.
	undefinedValueTagType TagTypePrimitive

	ifdPath string
	tagId   uint16
}

// TODO(dustin): We can update newValueContext() to derive `valueOffset` itself (from `rawValueOffset`).

// NewValueContext returns a new ValueContext struct.
func NewValueContext(ifdPath string, tagId uint16, unitCount, valueOffset uint32, rawValueOffset, addressableData []byte, tagType TagTypePrimitive, byteOrder binary.ByteOrder) *ValueContext {
	return &ValueContext{
		unitCount:       unitCount,
		valueOffset:     valueOffset,
		rawValueOffset:  rawValueOffset,
		addressableData: addressableData,

		tagType:   tagType,
		byteOrder: byteOrder,

		ifdPath: ifdPath,
		tagId:   tagId,
	}
}

// SetUndefinedValueType sets the effective type if this is an unknown-type tag.
func (vc *ValueContext) SetUndefinedValueType(tagType TagTypePrimitive) {
	if vc.tagType != TypeUndefined {
		log.Panicf("can not set effective type for unknown-type tag because this is *not* an unknown-type tag")
	}

	vc.undefinedValueTagType = tagType
}

// UnitCount returns the embedded unit-count.
func (vc *ValueContext) UnitCount() uint32 {
	return vc.unitCount
}

// ValueOffset returns the value-offset decoded as a `uint32`.
func (vc *ValueContext) ValueOffset() uint32 {
	return vc.valueOffset
}

// RawValueOffset returns the uninterpreted value-offset. This is used for
// embedded values (values small enough to fit within the offset bytes rather
// than needing to be stored elsewhere and referred to by an actual offset).
func (vc *ValueContext) RawValueOffset() []byte {
	return vc.rawValueOffset
}

// AddressableData returns the block of data that we can dereference into.
func (vc *ValueContext) AddressableData() []byte {
	return vc.addressableData
}

// ByteOrder returns the byte-order of numbers.
func (vc *ValueContext) ByteOrder() binary.ByteOrder {
	return vc.byteOrder
}

// IfdPath returns the path of the IFD containing this tag.
func (vc *ValueContext) IfdPath() string {
	return vc.ifdPath
}

// TagId returns the ID of the tag that we represent.
func (vc *ValueContext) TagId() uint16 {
	return vc.tagId
}

// isEmbedded returns whether the value is embedded or a reference. This can't
// be precalculated since the size is not defined for all types (namely the
// "undefined" types).
func (vc *ValueContext) isEmbedded() bool {
	tagType := vc.effectiveValueType()

	return (tagType.Size() * int(vc.unitCount)) <= 4
}
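
// For example: two SHORTs (two bytes each) total exactly four bytes and fit
// in the value-offset field itself, while three SHORTs (six bytes) must be
// stored in the addressable data and referenced by offset.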

// SizeInBytes returns the number of bytes that this value requires. The
// underlying call will panic if the type is UNDEFINED. It is the
// responsibility of the caller to preemptively check that.
func (vc *ValueContext) SizeInBytes() int {
	tagType := vc.effectiveValueType()

	return tagType.Size() * int(vc.unitCount)
}

// effectiveValueType returns the effective type of the unknown-type tag or, if
// not unknown, the actual type.
func (vc *ValueContext) effectiveValueType() (tagType TagTypePrimitive) {
	if vc.tagType == TypeUndefined {
		tagType = vc.undefinedValueTagType

		if tagType == 0 {
			log.Panicf("undefined-value type not set")
		}
	} else {
		tagType = vc.tagType
	}

	return tagType
}

// readRawEncoded returns the encoded bytes for the value that we represent.
func (vc *ValueContext) readRawEncoded() (rawBytes []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	tagType := vc.effectiveValueType()

	unitSizeRaw := uint32(tagType.Size())

	if vc.isEmbedded() {
		byteLength := unitSizeRaw * vc.unitCount
		return vc.rawValueOffset[:byteLength], nil
	}

	return vc.addressableData[vc.valueOffset : vc.valueOffset+vc.unitCount*unitSizeRaw], nil
}

// GetFarOffset returns the offset if the value is not embedded [within the
// pointer itself] or an error if an embedded value.
func (vc *ValueContext) GetFarOffset() (offset uint32, err error) {
	if vc.isEmbedded() {
		return 0, ErrNotFarValue
	}

	return vc.valueOffset, nil
}

// ReadRawEncoded returns the encoded bytes for the value that we represent.
func (vc *ValueContext) ReadRawEncoded() (rawBytes []byte, err error) {

	// TODO(dustin): Remove this method and rename readRawEncoded in its place.

	return vc.readRawEncoded()
}

// Format returns a string representation for the value.
//
// Where the type is not ASCII, `justFirst` indicates whether to just stringify
// the first item in the slice (or return an empty string if the slice is
// empty).
//
// Since this method lacks the information to process undefined-type tags (e.g.
// byte-order, tag-ID, IFD type), it will return an error if attempted. See
// `Undefined()`.
func (vc *ValueContext) Format() (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	rawBytes, err := vc.readRawEncoded()
	log.PanicIf(err)

	phrase, err := FormatFromBytes(rawBytes, vc.effectiveValueType(), false, vc.byteOrder)
	log.PanicIf(err)

	return phrase, nil
}

// FormatFirst is similar to `Format` but only gets and stringifies the first
// item.
func (vc *ValueContext) FormatFirst() (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	rawBytes, err := vc.readRawEncoded()
	log.PanicIf(err)

	phrase, err := FormatFromBytes(rawBytes, vc.tagType, true, vc.byteOrder)
	log.PanicIf(err)

	return phrase, nil
}

// ReadBytes parses the encoded byte-array from the value-context.
func (vc *ValueContext) ReadBytes() (value []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	rawValue, err := vc.readRawEncoded()
	log.PanicIf(err)

	value, err = parser.ParseBytes(rawValue, vc.unitCount)
	log.PanicIf(err)

	return value, nil
}

// ReadAscii parses the encoded NUL-terminated ASCII string from the
// value-context.
func (vc *ValueContext) ReadAscii() (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	rawValue, err := vc.readRawEncoded()
	log.PanicIf(err)

	value, err = parser.ParseAscii(rawValue, vc.unitCount)
	log.PanicIf(err)

	return value, nil
}

// ReadAsciiNoNul parses the non-NUL-terminated encoded ASCII string from the
// value-context.
func (vc *ValueContext) ReadAsciiNoNul() (value string, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	rawValue, err := vc.readRawEncoded()
	log.PanicIf(err)

	value, err = parser.ParseAsciiNoNul(rawValue, vc.unitCount)
	log.PanicIf(err)

	return value, nil
}

// ReadShorts parses the list of encoded shorts from the value-context.
func (vc *ValueContext) ReadShorts() (value []uint16, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	rawValue, err := vc.readRawEncoded()
	log.PanicIf(err)

	value, err = parser.ParseShorts(rawValue, vc.unitCount, vc.byteOrder)
	log.PanicIf(err)

	return value, nil
}

// ReadLongs parses the list of encoded, unsigned longs from the value-context.
func (vc *ValueContext) ReadLongs() (value []uint32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	rawValue, err := vc.readRawEncoded()
	log.PanicIf(err)

	value, err = parser.ParseLongs(rawValue, vc.unitCount, vc.byteOrder)
	log.PanicIf(err)

	return value, nil
}

// ReadRationals parses the list of encoded, unsigned rationals from the
// value-context.
func (vc *ValueContext) ReadRationals() (value []Rational, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	rawValue, err := vc.readRawEncoded()
	log.PanicIf(err)

	value, err = parser.ParseRationals(rawValue, vc.unitCount, vc.byteOrder)
	log.PanicIf(err)

	return value, nil
}

// ReadSignedLongs parses the list of encoded, signed longs from the
// value-context.
func (vc *ValueContext) ReadSignedLongs() (value []int32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	rawValue, err := vc.readRawEncoded()
	log.PanicIf(err)

	value, err = parser.ParseSignedLongs(rawValue, vc.unitCount, vc.byteOrder)
	log.PanicIf(err)

	return value, nil
}

// ReadSignedRationals parses the list of encoded, signed rationals from the
// value-context.
func (vc *ValueContext) ReadSignedRationals() (value []SignedRational, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	rawValue, err := vc.readRawEncoded()
	log.PanicIf(err)

	value, err = parser.ParseSignedRationals(rawValue, vc.unitCount, vc.byteOrder)
	log.PanicIf(err)

	return value, nil
}

// Values knows how to resolve the given value. This value is always a list
// (undefined-values aside), so we're named accordingly.
//
// Since this method lacks the information to process unknown-type tags (e.g.
// byte-order, tag-ID, IFD type), it will return an error if attempted. See
// `Undefined()`.
func (vc *ValueContext) Values() (values interface{}, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	if vc.tagType == TypeByte {
		values, err = vc.ReadBytes()
		log.PanicIf(err)
	} else if vc.tagType == TypeAscii {
		values, err = vc.ReadAscii()
		log.PanicIf(err)
	} else if vc.tagType == TypeAsciiNoNul {
		values, err = vc.ReadAsciiNoNul()
		log.PanicIf(err)
	} else if vc.tagType == TypeShort {
		values, err = vc.ReadShorts()
		log.PanicIf(err)
	} else if vc.tagType == TypeLong {
		values, err = vc.ReadLongs()
		log.PanicIf(err)
	} else if vc.tagType == TypeRational {
		values, err = vc.ReadRationals()
		log.PanicIf(err)
	} else if vc.tagType == TypeSignedLong {
		values, err = vc.ReadSignedLongs()
		log.PanicIf(err)
	} else if vc.tagType == TypeSignedRational {
		values, err = vc.ReadSignedRationals()
		log.PanicIf(err)
	} else if vc.tagType == TypeUndefined {
		log.Panicf("will not parse undefined-type value")

		// Never called.
		return nil, nil
	} else {
		log.Panicf("value of type [%s] is unparseable", vc.tagType)

		// Never called.
		return nil, nil
	}

	return values, nil
}

func init() {
	parser = new(Parser)
}
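
An end-to-end sketch of the embedded-value path (the IFD path and tag-ID here
are invented; two big-endian SHORTs occupy exactly four bytes, so they are
read from the raw value-offset field rather than from the addressable data):

	rawValueOffset := []byte{0x00, 0x01, 0x00, 0x02}

	vc := NewValueContext("IFD", 0x010f, 2, 0, rawValueOffset, nil,
		TypeShort, binary.BigEndian)

	values, _ := vc.ReadShorts() // []uint16{1, 2}
	phrase, _ := vc.Format()     // "[1 2]"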

229 vendor/github.com/dsoprea/go-exif/v2/common/value_encoder.go (generated, vendored, new file)
@@ -0,0 +1,229 @@
package exifcommon

import (
	"bytes"
	"reflect"
	"time"

	"encoding/binary"

	"github.com/dsoprea/go-logging"
)

var (
	typeEncodeLogger = log.NewLogger("exif.type_encode")
)

// EncodedData encapsulates the compound output of an encoding operation.
type EncodedData struct {
	Type    TagTypePrimitive
	Encoded []byte

	// TODO(dustin): Is this really necessary? We might have this just to correlate to the incoming stream format (raw bytes and a unit-count both for incoming and outgoing).
	UnitCount uint32
}

// ValueEncoder knows how to encode values of every type to bytes.
type ValueEncoder struct {
	byteOrder binary.ByteOrder
}

// NewValueEncoder returns a new ValueEncoder.
func NewValueEncoder(byteOrder binary.ByteOrder) *ValueEncoder {
	return &ValueEncoder{
		byteOrder: byteOrder,
	}
}

func (ve *ValueEncoder) encodeBytes(value []uint8) (ed EncodedData, err error) {
	ed.Type = TypeByte
	ed.Encoded = []byte(value)
	ed.UnitCount = uint32(len(value))

	return ed, nil
}

func (ve *ValueEncoder) encodeAscii(value string) (ed EncodedData, err error) {
	ed.Type = TypeAscii

	ed.Encoded = []byte(value)
	ed.Encoded = append(ed.Encoded, 0)

	ed.UnitCount = uint32(len(ed.Encoded))

	return ed, nil
}

// encodeAsciiNoNul returns a string encoded as a byte-string without a
// trailing NUL byte.
//
// Note that:
//
// 1. This type can not be automatically encoded using `Encode()`. The default
//    mode is to encode *with* a trailing NUL byte using `encodeAscii`. Only
//    certain undefined-type tags use an unterminated ASCII string, and these
//    are exceptional in nature.
//
// 2. The presence of this method allows us to completely test the
//    complementary no-nul parser.
func (ve *ValueEncoder) encodeAsciiNoNul(value string) (ed EncodedData, err error) {
	ed.Type = TypeAsciiNoNul
	ed.Encoded = []byte(value)
	ed.UnitCount = uint32(len(ed.Encoded))

	return ed, nil
}

func (ve *ValueEncoder) encodeShorts(value []uint16) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ed.UnitCount = uint32(len(value))
	ed.Encoded = make([]byte, ed.UnitCount*2)

	for i := uint32(0); i < ed.UnitCount; i++ {
		ve.byteOrder.PutUint16(ed.Encoded[i*2:(i+1)*2], value[i])
	}

	ed.Type = TypeShort

	return ed, nil
}

func (ve *ValueEncoder) encodeLongs(value []uint32) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ed.UnitCount = uint32(len(value))
	ed.Encoded = make([]byte, ed.UnitCount*4)

	for i := uint32(0); i < ed.UnitCount; i++ {
		ve.byteOrder.PutUint32(ed.Encoded[i*4:(i+1)*4], value[i])
	}

	ed.Type = TypeLong

	return ed, nil
}

func (ve *ValueEncoder) encodeRationals(value []Rational) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ed.UnitCount = uint32(len(value))
	ed.Encoded = make([]byte, ed.UnitCount*8)

	for i := uint32(0); i < ed.UnitCount; i++ {
		ve.byteOrder.PutUint32(ed.Encoded[i*8+0:i*8+4], value[i].Numerator)
		ve.byteOrder.PutUint32(ed.Encoded[i*8+4:i*8+8], value[i].Denominator)
	}

	ed.Type = TypeRational

	return ed, nil
}

func (ve *ValueEncoder) encodeSignedLongs(value []int32) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ed.UnitCount = uint32(len(value))

	b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount))

	for i := uint32(0); i < ed.UnitCount; i++ {
		err := binary.Write(b, ve.byteOrder, value[i])
		log.PanicIf(err)
	}

	ed.Type = TypeSignedLong
	ed.Encoded = b.Bytes()

	return ed, nil
}

func (ve *ValueEncoder) encodeSignedRationals(value []SignedRational) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	ed.UnitCount = uint32(len(value))

	b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount))

	for i := uint32(0); i < ed.UnitCount; i++ {
		err := binary.Write(b, ve.byteOrder, value[i].Numerator)
		log.PanicIf(err)

		err = binary.Write(b, ve.byteOrder, value[i].Denominator)
		log.PanicIf(err)
	}

	ed.Type = TypeSignedRational
	ed.Encoded = b.Bytes()

	return ed, nil
}

// Encode returns bytes for the given value, inferring type from the actual
// value. This does not support `TypeAsciiNoNul` (all strings are encoded as
// `TypeAscii`).
func (ve *ValueEncoder) Encode(value interface{}) (ed EncodedData, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	switch t := value.(type) {
	case []byte:
		ed, err = ve.encodeBytes(t)
		log.PanicIf(err)
	case string:
		ed, err = ve.encodeAscii(t)
		log.PanicIf(err)
	case []uint16:
		ed, err = ve.encodeShorts(t)
		log.PanicIf(err)
	case []uint32:
		ed, err = ve.encodeLongs(t)
		log.PanicIf(err)
	case []Rational:
		ed, err = ve.encodeRationals(t)
		log.PanicIf(err)
	case []int32:
		ed, err = ve.encodeSignedLongs(t)
		log.PanicIf(err)
	case []SignedRational:
		ed, err = ve.encodeSignedRationals(t)
		log.PanicIf(err)
	case time.Time:
		// For convenience, if the user doesn't want to deal with translation
		// semantics with timestamps.

		s := ExifFullTimestampString(t)

		ed, err = ve.encodeAscii(s)
		log.PanicIf(err)
	default:
		log.Panicf("value not encodable: [%s] [%v]", reflect.TypeOf(value), value)
	}

	return ed, nil
}
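
A short sketch of the type-driven dispatch in Encode (the values and byte
order are chosen for illustration):

	ve := NewValueEncoder(binary.BigEndian)

	ed, _ := ve.Encode([]uint16{1, 2})
	// ed.Type == TypeShort, ed.UnitCount == 2,
	// ed.Encoded == []byte{0x00, 0x01, 0x00, 0x02}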

14 vendor/github.com/dsoprea/go-exif/v2/error.go (generated, vendored, new file)
@@ -0,0 +1,14 @@
package exif

import (
	"errors"
)

var (
	// ErrTagNotFound indicates that the tag was not found.
	ErrTagNotFound = errors.New("tag not found")

	// ErrTagNotKnown indicates that the tag is not registered with us as a
	// known tag.
	ErrTagNotKnown = errors.New("tag is not known")
)

258 vendor/github.com/dsoprea/go-exif/v2/exif.go (generated, vendored, new file)
@@ -0,0 +1,258 @@
package exif

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"

	"encoding/binary"
	"io/ioutil"

	"github.com/dsoprea/go-logging"

	"github.com/dsoprea/go-exif/v2/common"
)

const (
	// ExifAddressableAreaStart is the absolute offset in the file that all
	// offsets are relative to.
	ExifAddressableAreaStart = uint32(0x0)

	// ExifDefaultFirstIfdOffset is essentially the number of bytes in addition
	// to `ExifAddressableAreaStart` that you have to move in order to escape
	// the rest of the header and get to the earliest point where we can put
	// stuff (which has to be the first IFD). This is the size of the header
	// sequence containing the two-character byte-order, two-character fixed-
	// bytes, and the four bytes describing the first-IFD offset.
	ExifDefaultFirstIfdOffset = uint32(2 + 2 + 4)
)

const (
	// ExifSignatureLength is the number of bytes in the EXIF signature (which
	// customarily includes the first IFD offset).
	ExifSignatureLength = 8
)

var (
	exifLogger = log.NewLogger("exif.exif")

	ExifBigEndianSignature    = [4]byte{'M', 'M', 0x00, 0x2a}
	ExifLittleEndianSignature = [4]byte{'I', 'I', 0x2a, 0x00}
)

var (
	ErrNoExif          = errors.New("no exif data")
	ErrExifHeaderError = errors.New("exif header error")
)

// SearchAndExtractExif searches for an EXIF blob in the byte-slice.
func SearchAndExtractExif(data []byte) (rawExif []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	b := bytes.NewBuffer(data)

	rawExif, err = SearchAndExtractExifWithReader(b)
	if err != nil {
		if err == ErrNoExif {
			return nil, err
		}

		log.Panic(err)
	}

	return rawExif, nil
}

// SearchAndExtractExifWithReader searches for an EXIF blob using an
// `io.Reader`. We can't know how long the EXIF data is without parsing it, so
// this will likely grab a lot of the image-data, too.
func SearchAndExtractExifWithReader(r io.Reader) (rawExif []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// Search for the beginning of the EXIF information. The EXIF is near the
	// beginning of most JPEGs, so this likely doesn't have a high cost (at
	// least, again, with JPEGs).

	br := bufio.NewReader(r)
	discarded := 0

	for {
		window, err := br.Peek(ExifSignatureLength)
		if err != nil {
			if err == io.EOF {
				return nil, ErrNoExif
			}

			log.Panic(err)
		}

		_, err = ParseExifHeader(window)
		if err != nil {
			if log.Is(err, ErrNoExif) {
				// No EXIF. Move forward by one byte.

				_, err := br.Discard(1)
				log.PanicIf(err)

				discarded++

				continue
			}

			// Some other error.
			log.Panic(err)
		}

		break
	}

	exifLogger.Debugf(nil, "Found EXIF blob (%d) bytes from initial position.", discarded)

	rawExif, err = ioutil.ReadAll(br)
	log.PanicIf(err)

	return rawExif, nil
}

// SearchFileAndExtractExif returns a slice from the beginning of the EXIF data
// to the end of the file (it's not practical to try and calculate where the
// data actually ends).
func SearchFileAndExtractExif(filepath string) (rawExif []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// Open the file.

	f, err := os.Open(filepath)
	log.PanicIf(err)

	defer f.Close()

	rawExif, err = SearchAndExtractExifWithReader(f)
	log.PanicIf(err)

	return rawExif, nil
}
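
// Typical use, with a hypothetical path:
//
//	rawExif, err := SearchFileAndExtractExif("/path/to/image.jpg")
//	// rawExif runs from the EXIF signature to the end of the file.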

type ExifHeader struct {
	ByteOrder      binary.ByteOrder
	FirstIfdOffset uint32
}

func (eh ExifHeader) String() string {
	return fmt.Sprintf("ExifHeader<BYTE-ORDER=[%v] FIRST-IFD-OFFSET=(0x%02x)>", eh.ByteOrder, eh.FirstIfdOffset)
}

// ParseExifHeader parses the bytes at the very top of the header.
//
// This will panic with ErrNoExif on any data errors so that we can double as
// an EXIF-detection routine.
func ParseExifHeader(data []byte) (eh ExifHeader, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	// Good reference:
	//
	//      CIPA DC-008-2016; JEITA CP-3451D
	//      -> http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf

	if len(data) < ExifSignatureLength {
		exifLogger.Warningf(nil, "Not enough data for EXIF header: (%d)", len(data))
		return eh, ErrNoExif
	}

	if bytes.Equal(data[:4], ExifBigEndianSignature[:]) {
		eh.ByteOrder = binary.BigEndian
	} else if bytes.Equal(data[:4], ExifLittleEndianSignature[:]) {
		eh.ByteOrder = binary.LittleEndian
	} else {
		return eh, ErrNoExif
	}

	eh.FirstIfdOffset = eh.ByteOrder.Uint32(data[4:8])

	return eh, nil
}
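
// A small check of the logic above, using the little-endian signature:
//
//	data := []byte{'I', 'I', 0x2a, 0x00, 0x08, 0x00, 0x00, 0x00}
//	eh, _ := ParseExifHeader(data)
//	// eh.ByteOrder == binary.LittleEndian, eh.FirstIfdOffset == 8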

// Visit recursively invokes a callback for every tag.
func Visit(rootIfdIdentity *exifcommon.IfdIdentity, ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, exifData []byte, visitor TagVisitorFn) (eh ExifHeader, furthestOffset uint32, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	eh, err = ParseExifHeader(exifData)
	log.PanicIf(err)

	ie := NewIfdEnumerate(ifdMapping, tagIndex, exifData, eh.ByteOrder)

	_, err = ie.Scan(rootIfdIdentity, eh.FirstIfdOffset, visitor)
	log.PanicIf(err)

	furthestOffset = ie.FurthestOffset()

	return eh, furthestOffset, nil
}

// Collect recursively builds a static structure of all IFDs and tags.
func Collect(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, exifData []byte) (eh ExifHeader, index IfdIndex, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	eh, err = ParseExifHeader(exifData)
	log.PanicIf(err)

	ie := NewIfdEnumerate(ifdMapping, tagIndex, exifData, eh.ByteOrder)

	index, err = ie.Collect(eh.FirstIfdOffset)
	log.PanicIf(err)

	return eh, index, nil
}

// BuildExifHeader constructs the bytes that go at the front of the stream.
func BuildExifHeader(byteOrder binary.ByteOrder, firstIfdOffset uint32) (headerBytes []byte, err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()

	b := new(bytes.Buffer)

	var signatureBytes []byte
	if byteOrder == binary.BigEndian {
		signatureBytes = ExifBigEndianSignature[:]
	} else {
		signatureBytes = ExifLittleEndianSignature[:]
	}

	_, err = b.Write(signatureBytes)
	log.PanicIf(err)

	err = binary.Write(b, byteOrder, firstIfdOffset)
	log.PanicIf(err)

	return b.Bytes(), nil
}
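
The header built here round-trips through ParseExifHeader above; for example:

	headerBytes, _ := BuildExifHeader(binary.BigEndian, ExifDefaultFirstIfdOffset)
	// headerBytes == []byte{'M', 'M', 0x00, 0x2a, 0x00, 0x00, 0x00, 0x08}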

15 vendor/github.com/dsoprea/go-exif/v2/go.mod (generated, vendored, new file)
@@ -0,0 +1,15 @@
module github.com/dsoprea/go-exif/v2

go 1.13

// Development only
// replace github.com/dsoprea/go-logging => ../../go-logging

require (
	github.com/dsoprea/go-logging v0.0.0-20200517223158-a10564966e9d
	github.com/dsoprea/go-utility v0.0.0-20200711062821-fab8125e9bdf // indirect
	github.com/golang/geo v0.0.0-20200319012246-673a6f80352d
	github.com/jessevdk/go-flags v1.4.0
	golang.org/x/net v0.0.0-20200513185701-a91f0712d120 // indirect
	gopkg.in/yaml.v2 v2.3.0
)

Some files were not shown because too many files have changed in this diff.