From 09f0c74df484cf4e71dc4aad0203857851013fb5 Mon Sep 17 00:00:00 2001 From: CrazyMax <1951866+crazy-max@users.noreply.github.com> Date: Thu, 12 Mar 2026 08:19:35 +0100 Subject: [PATCH 1/4] use distribution reference for image parsing and lookups --- internal/grpc/image.go | 2 +- pkg/registry/image.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/grpc/image.go b/internal/grpc/image.go index f36d464c9..3678800cf 100644 --- a/internal/grpc/image.go +++ b/internal/grpc/image.go @@ -6,8 +6,8 @@ import ( "github.com/crazy-max/diun/v4/pb" "github.com/crazy-max/diun/v4/pkg/registry" + "github.com/distribution/reference" "github.com/pkg/errors" - "go.podman.io/image/v5/docker/reference" "google.golang.org/protobuf/types/known/timestamppb" ) diff --git a/pkg/registry/image.go b/pkg/registry/image.go index bc2a81523..058d8daae 100644 --- a/pkg/registry/image.go +++ b/pkg/registry/image.go @@ -7,9 +7,9 @@ import ( "strings" "text/template" + "github.com/distribution/reference" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "go.podman.io/image/v5/docker/reference" ) // Image holds information about an image. From 0e6eec337437a4350fd0776f0654c41ccf3bf84e Mon Sep 17 00:00:00 2001 From: CrazyMax <1951866+crazy-max@users.noreply.github.com> Date: Thu, 12 Mar 2026 08:42:01 +0100 Subject: [PATCH 2/4] registry: use docker cli auth config --- go.mod | 1 + internal/app/job.go | 9 +- pkg/registry/auth.go | 52 + pkg/registry/auth_test.go | 46 + pkg/registry/registry.go | 14 +- pkg/registry/registry_test.go | 28 + vendor/github.com/docker/cli/AUTHORS | 945 ++++++++++++++++++ vendor/github.com/docker/cli/LICENSE | 191 ++++ vendor/github.com/docker/cli/NOTICE | 19 + .../docker/cli/cli/config/config.go | 176 ++++ .../docker/cli/cli/config/configfile/file.go | 441 ++++++++ .../cli/cli/config/configfile/file_unix.go | 35 + .../cli/cli/config/configfile/file_windows.go | 5 + .../cli/cli/config/credentials/credentials.go | 17 + .../cli/config/credentials/default_store.go | 22 + .../credentials/default_store_darwin.go | 5 + .../config/credentials/default_store_linux.go | 13 + .../credentials/default_store_unsupported.go | 7 + .../credentials/default_store_windows.go | 5 + .../cli/cli/config/credentials/file_store.go | 118 +++ .../cli/config/credentials/native_store.go | 147 +++ .../cli/cli/config/memorystore/store.go | 131 +++ .../docker/cli/cli/config/types/authconfig.go | 17 + vendor/modules.txt | 7 + 24 files changed, 2444 insertions(+), 7 deletions(-) create mode 100644 pkg/registry/auth.go create mode 100644 pkg/registry/auth_test.go create mode 100644 vendor/github.com/docker/cli/AUTHORS create mode 100644 vendor/github.com/docker/cli/LICENSE create mode 100644 vendor/github.com/docker/cli/NOTICE create mode 100644 vendor/github.com/docker/cli/cli/config/config.go create mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file.go create mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file_unix.go create mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file_windows.go create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/credentials.go create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store.go create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go 
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/file_store.go create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/native_store.go create mode 100644 vendor/github.com/docker/cli/cli/config/memorystore/store.go create mode 100644 vendor/github.com/docker/cli/cli/config/types/authconfig.go diff --git a/go.mod b/go.mod index cd1073375..4c70fd24a 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/crazy-max/gohealthchecks v0.6.0 github.com/crazy-max/gonfig v0.7.1 github.com/distribution/reference v0.6.0 + github.com/docker/cli v29.1.4+incompatible github.com/docker/docker v28.5.2+incompatible github.com/docker/go-connections v0.6.0 github.com/docker/go-units v0.5.0 diff --git a/internal/app/job.go b/internal/app/job.go index 5271d8210..78bcab370 100644 --- a/internal/app/job.go +++ b/internal/app/job.go @@ -8,9 +8,8 @@ import ( "github.com/crazy-max/diun/v4/internal/model" "github.com/crazy-max/diun/v4/pkg/registry" "github.com/crazy-max/diun/v4/pkg/utl" + dockerregistry "github.com/docker/docker/api/types/registry" "github.com/rs/zerolog/log" - "go.podman.io/image/v5/pkg/docker/config" - "go.podman.io/image/v5/types" ) func (di *Diun) createJob(job model.Job) { @@ -84,14 +83,14 @@ func (di *Diun) createJob(job model.Job) { } } - var auth types.DockerAuthConfig + var auth dockerregistry.AuthConfig if len(regUser) > 0 { - auth = types.DockerAuthConfig{ + auth = dockerregistry.AuthConfig{ Username: regUser, Password: regPassword, } } else { - auth, err = config.GetCredentials(nil, job.RegImage.Domain) + auth, err = registry.LookupAuth(job.RegImage.Domain) if err != nil { sublog.Warn().Err(err).Msg("Error seeking Docker credentials") } diff --git a/pkg/registry/auth.go b/pkg/registry/auth.go new file mode 100644 index 000000000..7706145ea --- /dev/null +++ b/pkg/registry/auth.go @@ -0,0 +1,52 @@ +package registry + +import ( + "io" + + dockerconfig "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + dockerregistry "github.com/docker/docker/api/types/registry" +) + +const dockerHubConfigKey = "https://index.docker.io/v1/" + +// LookupAuth returns Docker registry credentials for the given registry domain. +// If no credentials are configured, an empty AuthConfig is returned. 
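+// +// The domain is first normalized to the key format used in ~/.docker/config.json: +// "docker.io" and "index.docker.io" map to the canonical Docker Hub key +// "https://index.docker.io/v1/"; any other domain is looked up as-is. For +// example, LookupAuth("ghcr.io") consults the default Docker CLI config file +// (or the credential helper it configures) for a "ghcr.io" entry.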
+func LookupAuth(domain string) (dockerregistry.AuthConfig, error) { + return lookupAuth("", domain) +} + +func lookupAuth(configDir, domain string) (dockerregistry.AuthConfig, error) { + cfg, err := loadDockerConfig(configDir) + if err != nil { + return dockerregistry.AuthConfig{}, err + } + + auth, err := cfg.GetAuthConfig(dockerConfigKey(domain)) + if err != nil { + return dockerregistry.AuthConfig{}, err + } + + return dockerregistry.AuthConfig{ + Username: auth.Username, + Password: auth.Password, + Auth: auth.Auth, + ServerAddress: auth.ServerAddress, + IdentityToken: auth.IdentityToken, + RegistryToken: auth.RegistryToken, + }, nil +} + +func loadDockerConfig(configDir string) (*configfile.ConfigFile, error) { + if configDir == "" { + return dockerconfig.LoadDefaultConfigFile(io.Discard), nil + } + return dockerconfig.Load(configDir) +} + +func dockerConfigKey(domain string) string { + if domain == "docker.io" || domain == "index.docker.io" { + return dockerHubConfigKey + } + return domain +} diff --git a/pkg/registry/auth_test.go b/pkg/registry/auth_test.go new file mode 100644 index 000000000..a1331e9ae --- /dev/null +++ b/pkg/registry/auth_test.go @@ -0,0 +1,46 @@ +package registry + +import ( + "encoding/base64" + "os" + "path/filepath" + "testing" + + dockerregistry "github.com/docker/docker/api/types/registry" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLookupAuthDockerHub(t *testing.T) { + t.Parallel() + + configDir := t.TempDir() + configPath := filepath.Join(configDir, "config.json") + auth := base64.StdEncoding.EncodeToString([]byte("janedoe:s3cr3t")) + require.NoError(t, os.WriteFile(configPath, []byte(`{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "`+auth+`" + } + } + }`), 0o600)) + + got, err := lookupAuth(configDir, "docker.io") + require.NoError(t, err) + assert.Equal(t, dockerregistry.AuthConfig{ + Username: "janedoe", + Password: "s3cr3t", + ServerAddress: dockerHubConfigKey, + }, got) +} + +func TestLookupAuthNotFound(t *testing.T) { + t.Parallel() + + configDir := t.TempDir() + require.NoError(t, os.WriteFile(filepath.Join(configDir, "config.json"), []byte(`{"auths":{}}`), 0o600)) + + got, err := lookupAuth(configDir, "ghcr.io") + require.NoError(t, err) + assert.Equal(t, dockerregistry.AuthConfig{}, got) +} diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index e56719cdf..006223661 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -4,6 +4,7 @@ import ( "context" "time" + dockerregistry "github.com/docker/docker/api/types/registry" "github.com/pkg/errors" "go.podman.io/image/v5/types" ) @@ -16,7 +17,7 @@ type Client struct { // Options holds docker registry object options type Options struct { - Auth types.DockerAuthConfig + Auth dockerregistry.AuthConfig InsecureTLS bool Timeout time.Duration UserAgent string @@ -28,10 +29,19 @@ type Options struct { // New creates new docker registry client instance func New(opts Options) (*Client, error) { + var auth *types.DockerAuthConfig + if opts.Auth != (dockerregistry.AuthConfig{}) { + auth = &types.DockerAuthConfig{ + Username: opts.Auth.Username, + Password: opts.Auth.Password, + IdentityToken: opts.Auth.IdentityToken, + } + } + return &Client{ opts: opts, sysCtx: &types.SystemContext{ - DockerAuthConfig: &opts.Auth, + DockerAuthConfig: auth, DockerDaemonInsecureSkipTLSVerify: opts.InsecureTLS, DockerInsecureSkipTLSVerify: types.NewOptionalBool(opts.InsecureTLS), DockerRegistryUserAgent: opts.UserAgent, diff --git 
a/pkg/registry/registry_test.go b/pkg/registry/registry_test.go index e5ebb6e16..ad4312b49 100644 --- a/pkg/registry/registry_test.go +++ b/pkg/registry/registry_test.go @@ -4,7 +4,9 @@ import ( "os" "testing" + dockerregistry "github.com/docker/docker/api/types/registry" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -28,3 +30,29 @@ func TestMain(m *testing.M) { func TestNew(t *testing.T) { assert.NotNil(t, rc) } + +func TestNewMapsDockerRegistryAuth(t *testing.T) { + t.Parallel() + + rc, err := New(Options{ + Auth: dockerregistry.AuthConfig{ + Username: "janedoe", + Password: "s3cr3t", + IdentityToken: "token", + }, + InsecureTLS: true, + UserAgent: "diun/test", + ImageOs: "linux", + ImageArch: "amd64", + }) + require.NoError(t, err) + require.NotNil(t, rc.sysCtx) + require.NotNil(t, rc.sysCtx.DockerAuthConfig) + + assert.Equal(t, "janedoe", rc.sysCtx.DockerAuthConfig.Username) + assert.Equal(t, "s3cr3t", rc.sysCtx.DockerAuthConfig.Password) + assert.Equal(t, "token", rc.sysCtx.DockerAuthConfig.IdentityToken) + assert.Equal(t, "diun/test", rc.sysCtx.DockerRegistryUserAgent) + assert.Equal(t, "linux", rc.sysCtx.OSChoice) + assert.Equal(t, "amd64", rc.sysCtx.ArchitectureChoice) +} diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS new file mode 100644 index 000000000..57af08b20 --- /dev/null +++ b/vendor/github.com/docker/cli/AUTHORS @@ -0,0 +1,945 @@ +# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See scripts/docs/generate-authors.sh to make modifications. + +A. Lester Buck III +Aanand Prasad +Aaron L. Xu +Aaron Lehmann +Aaron.L.Xu +Abdur Rehman +Abhinandan Prativadi +Abin Shahab +Abreto FU +Ace Tang +Addam Hardy +Adolfo Ochagavía +Adrian Plata +Adrien Duermael +Adrien Folie +Adyanth Hosavalike +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Akhil Mohan +Akihiro Suda +Akim Demaille +Alan Thompson +Alano Terblanche +Albert Callarisa +Alberto Roura +Albin Kerouanton +Aleksa Sarai +Aleksander Piotrowski +Alessandro Boch +Alex Couture-Beil +Alex Mavrogiannis +Alex Mayer +Alexander Boyd +Alexander Chneerov +Alexander Larsson +Alexander Morozov +Alexander Ryabov +Alexandre González +Alexey Igrychev +Alexis Couvreur +Alfred Landrum +Ali Rostami +Alicia Lauerman +Allen Sun +Allie Sadler +Alvin Deng +Amen Belayneh +Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> +Amir Goldstein +Amit Krishnan +Amit Shukla +Amy Lindburg +Anca Iordache +Anda Xu +Andrea Luzzardi +Andreas Köhler +Andres G. 
Aragoneses +Andres Leon Rangel +Andrew France +Andrew He +Andrew Hsu +Andrew Macpherson +Andrew McDonnell +Andrew Po +Andrew-Zipperer +Andrey Petrov +Andrii Berehuliak +André Martins +Andy Goldstein +Andy Rothfusz +Anil Madhavapeddy +Ankush Agarwal +Anne Henmi +Anton Polonskiy +Antonio Murdaca +Antonis Kalipetis +Anusha Ragunathan +Ao Li +Arash Deshmeh +Archimedes Trajano +Arko Dasgupta +Arnaud Porterie +Arnaud Rebillout +Arthur Flageul +Arthur Peka +Ashly Mathew +Ashwini Oruganti +Aslam Ahemad +Austin Vazquez +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Bastiaan Bakker +BastianHofmann +Ben Bodenmiller +Ben Bonnefoy +Ben Creasy +Ben Firshman +Benjamin Boudreau +Benjamin Böhmke +Benjamin Nater +Benoit Sigoure +Bhumika Bayani +Bill Wang +Bin Liu +Bingshen Wang +Bishal Das +Bjorn Neergaard +Boaz Shuster +Boban Acimovic +Bogdan Anton +Boris Pruessmann +Brad Baker +Bradley Cicenas +Brandon Mitchell +Brandon Philips +Brent Salisbury +Bret Fisher +Brian (bex) Exelbierd +Brian Goff +Brian Tracy +Brian Wieder +Bruno Sousa +Bryan Bess +Bryan Boreham +Bryan Murphy +bryfry +Calvin Liu +Cameron Spear +Cao Weiwei +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +carsontham +Carston Schilds +Casey Korver +Ce Gao +Cedric Davies +Cesar Talledo +Cezar Sa Espinola +Chad Faragher +Chao Wang +Charles Chan +Charles Law +Charles Smith +Charlie Drage +Charlotte Mach +ChaYoung You +Chee Hau Lim +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +Chen Qiu +Chris Chinchilla +Chris Couzens +Chris Gavin +Chris Gibson +Chris McKinnel +Chris Snow +Chris Vermilion +Chris Weyl +Christian Persson +Christian Stefanescu +Christophe Robin +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Jones +Christopher Petito <47751006+krissetto@users.noreply.github.com> +Christopher Petito +Christopher Svensson +Christy Norman +Chun Chen +Clinton Kitson +Coenraad Loubser +Colin Hebert +Collin Guarino +Colm Hally +Comical Derskeal <27731088+derskeal@users.noreply.github.com> +Conner Crosby +Corey Farrell +Corey Quon +Cory Bennet +Cory Snider +Craig Osterhout +Craig Wilhite +Cristian Staretu +Daehyeok Mun +Dafydd Crosby +Daisuke Ito +dalanlan +Damien Nadé +Dan Cotora +Dan Wallis +Danial Gharib +Daniel Artine +Daniel Cassidy +Daniel Dao +Daniel Farrell +Daniel Gasienica +Daniel Goosen +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Nephin +Daniel Norberg +Daniel Watkins +Daniel Zhang +Daniil Nikolenko +Danny Berger +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David Alvarez +David Beitey +David Calavera +David Cramer +David Dooling +David Gageot +David Karlsson +David le Blanc +David Lechner +David Scott +David Sheets +David Williamson +David Xia +David Young +Deng Guangxing +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Docter +dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> +Derek McGowan +Des Preston +Deshi Xiao +Dharmit Shah +Dhawal Yogesh Bhanushali +Dieter Reuter +Dilep Dev <34891655+DilepDev@users.noreply.github.com> +Dima Stopel +Dimitry Andric +Ding Fei +Diogo Monica +Djordje Lukic +Dmitriy Fishman +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Dominik Braun +Don Kjer +Dong Chen +DongGeon Lee +Doug Davis +Drew Erny +Ed Costello +Ed Morley <501702+edmorley@users.noreply.github.com> +Elango Sivanandam +Eli Uriegas +Eli Uriegas +Elias Faxö +Elliot Luo <956941328@qq.com> +Eng Zer Jun +Eric Bode +Eric Curtin +Eric Engestrom +Eric G. 
Noriega +Eric Rosenberg +Eric Sage +Eric-Olivier Lamey +Erica Windisch +Erik Hollensbe +Erik Humphrey +Erik St. Martin +Essam A. Hassan +Ethan Haynes +Euan Kemp +Eugene Yakubovich +Evan Allrich +Evan Hazlett +Evan Krall +Evan Lezar +Evelyn Xu +Everett Toews +Fabio Falci +Fabrizio Soppelsa +Felix Geyer +Felix Hupfeld +Felix Rabe +fezzik1620 +Filip Jareš +Flavio Crisciani +Florian Klein +Forest Johnson +Foysal Iqbal +François Scala +Fred Lifton +Frederic Hemberger +Frederick F. Kautz IV +Frederik Nordahl Jul Sabroe +Frieder Bluemle +Gabriel Gore +Gabriel Nicolas Avellaneda +Gabriela Georgieva +Gaetan de Villele +Gang Qiao +Gary Schaetz +Genki Takiuchi +George MacRorie +George Margaritis +George Xie +Gianluca Borello +Giau. Tran Minh +Giedrius Jonikas +Gildas Cuisinier +Gio d'Amelio +Gleb Stsenov +Goksu Toprak +Gou Rao +Govind Rai +Grace Choi +Graeme Wiebe +Grant Reaber +Greg Pflaum +Gsealy +Guilhem Lettron +Guillaume J. Charmes +Guillaume Le Floch +Guillaume Tardif +gwx296173 +Günther Jungbluth +Hakan Özler +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harold Cooper +Harry Zhang +He Simei +Hector S +Helen Xie +Henning Sprang +Henry N +Hernan Garcia +Hongbin Lu +Hossein Abbasi <16090309+hsnabszhdn@users.noreply.github.com> +Hu Keping +Huayi Zhang +Hugo Chastel +Hugo Gabriel Eyherabide +huqun +Huu Nguyen +Hyzhou Zhy +Iain MacDonald +Iain Samuel McLean Elder +Ian Campbell +Ian Philpot +Ignacio Capurro +Ilya Dmitrichenko +Ilya Khlopotov +Ilya Sotkov +Ioan Eugen Stan +Isabel Jimenez +Ivan Grcic +Ivan Grund +Ivan Markin +Jacob Atzen +Jacob Tomlinson +Jacopo Rigoli +Jaivish Kothari +Jake Lambert +Jake Sanders +Jake Stokes +Jakub Panek +James Nesbitt +James Turnbull +Jamie Hannaford +Jan Koprowski +Jan Pazdziora +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Hocutt +Jasmine Hegman +Jason Hall +Jason Heiss +Jason Plum +Jay Kamat +Jean Lecordier +Jean Rouge +Jean-Christophe Sirot +Jean-Pierre Huynh +Jeff Lindsay +Jeff Nickoloff +Jeff Silberman +Jennings Zhang +Jeremy Chambers +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jesse Adametz +Jessica Frazelle +Jezeniel Zapanta +Jian Zhang +Jianyong Wu +Jie Luo +Jilles Oldenbeuving +Jim Chen +Jim Galasyn +Jim Lin +Jimmy Leger +Jimmy Song +jimmyxian +Jintao Zhang +Joao Fernandes +Joe Abbey +Joe Doliner +Joe Gordon +Joel Handwell +Joey Geiger +Joffrey F +Johan Euphrosine +Johannes 'fish' Ziemke +John Feminella +John Harris +John Howard +John Howard +John Laswell +John Maguire +John Mulhausen +John Starks +John Stephens +John Tims +John V. Martinez +John Willis +Jon Johnson +Jon Zeolla +Jonatas Baldin +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Lee +Jonathan Lomas +Jonathan McCrohan +Jonathan Warriss-Simmons +Jonh Wendell +Jordan Jennings +Jorge Vallecillo +Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com> +Joseph Kern +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Soref +Julian +Julien Barbier +Julien Kassar +Julien Maitrehenry +Julio Cesar Garcia +Justas Brazauskas +Justin Chadwell +Justin Cormack +Justin Simonelis +Justyn Temme +Jyrki Puttonen +Jérémie Drouet +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu (Kennan) +Kara Alexandra +Kareem Khazem +Karthik Nayak +Kat Samperi +Kathryn Spiers +Katie McLaughlin +Ke Xu +Kei Ohmura +Keith Hudgins +Kelton Bassingthwaite +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kevin Alvarez +Kevin Burke +Kevin Feyrer +Kevin Kern +Kevin Kirsche +Kevin Meredith +Kevin Richardson +Kevin Woblick +khaled souf +Kim Eik +Kir Kolyshkin +Kirill A. 
Korinsky +Kotaro Yoshimatsu +Krasi Georgiev +Kris-Mikael Krister +Kun Zhang +Kunal Kushwaha +Kyle Mitofsky +Lachlan Cooper +Lai Jiangshan +Lajos Papp +Lars Kellogg-Stedman +Laura Brehm +Laura Frank +Laurent Erignoux +Laurent Goderre +Lee Gaines +Lei Jitang +Lennie +lentil32 +Leo Gallucci +Leonid Skorospelov +Lewis Daly +Li Fu Bang +Li Yi +Li Zeghong +Liang-Chi Hsieh +Lihua Tang +Lily Guo +Lin Lu +Linus Heckemann +Liping Xue +Liron Levin +liwenqi +lixiaobing10051267 +Lloyd Dewolf +Lorenzo Fontana +Louis Opter +Lovekesh Kumar +Luca Favatella +Luca Marturana +Lucas Chan +Luis Henrique Mulinari +Luka Hartwig +Lukas Heeren +Lukasz Zajaczkowski +Lydell Manganti +Lénaïc Huard +Ma Shimiao +Mabin +Maciej Kalisz +Madhav Puri +Madhu Venugopal +Madhur Batra +Malte Janduda +Manjunath A Kumatagi +Mansi Nahar +mapk0y +Marc Bihlmaier +Marc Cornellà +Marco Mariani +Marco Spiess +Marco Vedovati +Marcus Martins +Marianna Tessel +Marius Ileana +Marius Meschter +Marius Sturm +Mark Oates +Marsh Macy +Martin Mosegaard Amdisen +Mary Anthony +Mason Fish +Mason Malone +Mateusz Major +Mathias Duedahl <64321057+Lussebullen@users.noreply.github.com> +Mathieu Champlon +Mathieu Rollet +Matt Gucci +Matt Robenolt +Matteo Orefice +Matthew Heon +Matthieu Hauglustaine +Matthieu MOREL +Mauro Porras P +Max Shytikov +Max-Julian Pogner +Maxime Petazzoni +Maximillian Fan Xavier +Mei ChunTao +Melroy van den Berg +Mert Şişmanoğlu +Metal <2466052+tedhexaflow@users.noreply.github.com> +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Crosby +Michael Friis +Michael Irwin +Michael Käufl +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Steinert +Michael Tews +Michael West +Michal Minář +Michał Czeraszkiewicz +Miguel Angel Alvarez Cabrerizo +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Dalton +Mike Danese +Mike Dillon +Mike Goelzer +Mike MacCana +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milind Chawre +Mindaugas Rukas +Miroslav Gula +Misty Stanley-Jones +Mohammad Banikazemi +Mohammad Hossein +Mohammed Aaqib Ansari +Mohammed Aminu Futa +Mohini Anne Dsouza +Moorthy RS +Morgan Bauer +Morten Hekkvang +Morten Linderud +Moysés Borges +Mozi <29089388+pzhlkj6612@users.noreply.github.com> +Mrunal Patel +muicoder +Murukesh Mohanan +Muthukumar R +Máximo Cuadros +Mårten Cassel +Nace Oroz +Nahum Shalman +Nalin Dahyabhai +Nao YONASHIRO +Nassim 'Nass' Eddequiouaq +Natalie Parker +Nate Brennand +Nathan Hsieh +Nathan LeClaire +Nathan McCauley +Neil Peterson +Nick Adcock +Nick Santos +Nick Sieger +Nico Stapelbroek +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nikhil Chawla +Nikolas Garofil +Nikolay Milovanov +NinaLua +Nir Soffer +Nishant Totla +NIWA Hideyuki +Noah Silas +Noah Treuhaft +O.S. Tezer +Oded Arbel +Odin Ugedal +ohmystack +OKA Naoya +Oliver Pomeroy +Olle Jonsson +Olli Janatuinen +Oscar Wieman +Otto Kekäläinen +Ovidio Mallo +Pascal Borreli +Patrick Böänziger +Patrick Daigle <114765035+pdaig@users.noreply.github.com> +Patrick Hemmer +Patrick Lang +Patrick St. 
laurent +Paul +Paul Kehrer +Paul Lietar +Paul Mulders +Paul Rogalski +Paul Seyfert +Paul Weaver +Pavel Pospisil +Paweł Gronowski +Paweł Pokrywka +Paweł Szczekutowicz +Peeyush Gupta +Per Lundberg +Peter Dave Hello +Peter Edge +Peter Hsu +Peter Jaffe +Peter Kehl +Peter Nagy +Peter Salvatore +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Philipp Schmied +Phong Tran +Pieter E Smit +pixelistik +Pratik Karki +Prayag Verma +Preston Cowley +Pure White +Qiang Huang +Qinglan Peng +QQ喵 +qudongfang +Raghavendra K T +Rahul Kadyan +Rahul Zoldyck +Ravi Shekhar Jethani +Ray Tsang +Reficul +Remy Suen +Renaud Gaubert +Ricardo N Feliciano +Rich Moyse +Richard Chen Zheng <58443436+rchenzheng@users.noreply.github.com> +Richard Mathie +Richard Scothern +Rick Wieman +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Gulewich +Rob Murray +Robert Wallis +Robin Naundorf +Robin Speekenbrink +Roch Feuillade +Rodolfo Ortiz +Rogelio Canedo +Rohan Verma +Roland Kammerer +Roman Dudin +Rory Hunter +Ross Boucher +Rubens Figueiredo +Rui Cao +Rui JingAn +Ryan Belgrave +Ryan Detzel +Ryan Stelly +Ryan Wilson-Perkin +Ryan Zhang +Sainath Grandhi +Sakeven Jiang +Sally O'Malley +Sam Neirinck +Sam Thibault +Samarth Shah +Sambuddha Basu +Sami Tabet +Samuel Cochran +Samuel Karp +Sandro Jäckel +Santhosh Manohar +Sarah Sanders +Sargun Dhillon +Saswat Bhattacharya +Saurabh Kumar +Scott Brenner +Scott Collier +Sean Christopherson +Sean Rodman +Sebastiaan van Stijn +Sergey Tryuber +Serhat Gülçiçek +Sevki Hasirci +Shaun Kaasten +Sheng Yang +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shukui Yang +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silvin Lubecki +Simei He +Simon Ferquel +Simon Heimberg +Sindhu S +Slava Semushin +Solomon Hykes +Song Gao +Spencer Brown +Spring Lee +squeegels +Srini Brahmaroutu +Stavros Panakakis +Stefan S. +Stefan Scherer +Stefan Weil +Stephane Jeandeaux +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Richards +Steven Burgess +Stoica-Marcu Floris-Andrei +Stuart Williams +Subhajit Ghosh +Sun Jianbo +Sune Keller +Sungwon Han +Sunny Gogoi +Sven Dowideit +Sylvain Baubeau +Sébastien HOUZÉ +T K Sourabh +TAGOMORI Satoshi +taiji-tech +Takeshi Koenuma +Takuya Noguchi +Taylor Jones +Teiva Harsanyi +Tejaswini Duggaraju +Tengfei Wang +Teppei Fukuda +Thatcher Peskens +Thibault Coupin +Thomas Gazagnaire +Thomas Krzero +Thomas Leonard +Thomas Léveil +Thomas Riccardi +Thomas Swift +Tianon Gravi +Tianyi Wang +Tibor Vass +Tim Dettrick +Tim Hockin +Tim Sampson +Tim Smith +Tim Waugh +Tim Welsh +Tim Wraight +timfeirg +Timothy Hobbs +Tobias Bradtke +Tobias Gesellchen +Todd Whiteman +Tom Denham +Tom Fotherby +Tom Klingenberg +Tom Milligan +Tom X. 
Tobin +Tomas Bäckman +Tomas Tomecek +Tomasz Kopczynski +Tomáš Hrčka +Tony Abboud +Tõnis Tiigi +Trapier Marshall +Travis Cline +Tristan Carel +Tycho Andersen +Tycho Andersen +uhayate +Ulrich Bareth +Ulysses Souza +Umesh Yadav +Vaclav Struhar +Valentin Lorentz +Vardan Pogosian +Venkateswara Reddy Bukkasamudram +Veres Lajos +Victor Vieux +Victoria Bialas +Viktor Stanchev +Ville Skyttä +Vimal Raghubir +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Woo +Vishnu Kannan +Vivek Goyal +Wang Jie +Wang Lei +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +Wataru Ishida +Wayne Song +Wen Cheng Ma +Wenlong Zhang +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Will Wang +William Henry +Xianglin Gao +Xiaodong Liu +Xiaodong Zhang +Xiaoxi He +Xinbo Weng +Xuecong Liao +Yan Feng +Yanqiang Miao +Yassine Tijani +Yi EungJun +Ying Li +Yong Tang +Yosef Fertel +Yu Peng +Yuan Sun +Yucheng Wu +Yue Zhang +Yunxiang Huang +Zachary Romero +Zander Mackie +zebrilee +Zeel B Patel +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhenghenghuo +Zhiwei Liang +Zhou Hao +Zhoulin Xie +Zhu Guihua +Zhuo Zhi +Álex González +Álvaro Lázaro +Átila Camurça Alves +Александр Менщиков <__Singleton__@hackerdom.ru> +徐俊杰 +林博仁 Buo-ren Lin diff --git a/vendor/github.com/docker/cli/LICENSE b/vendor/github.com/docker/cli/LICENSE new file mode 100644 index 000000000..9c8e20ab8 --- /dev/null +++ b/vendor/github.com/docker/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE new file mode 100644 index 000000000..1c40faaec --- /dev/null +++ b/vendor/github.com/docker/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go new file mode 100644 index 000000000..5a6378050 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -0,0 +1,176 @@ +package config + +import ( + "fmt" + "io" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "sync" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" +) + +const ( + // EnvOverrideConfigDir is the name of the environment variable that can be + // used to override the location of the client configuration files (~/.docker). 
+ // + // It takes priority over the default, but can be overridden by the "--config" + // command line option. + EnvOverrideConfigDir = "DOCKER_CONFIG" + + // ConfigFileName is the name of the client configuration file inside the + // config-directory. + ConfigFileName = "config.json" + configFileDir = ".docker" + contextsDir = "contexts" +) + +var ( + initConfigDir = new(sync.Once) + configDir string +) + +// resetConfigDir is used in testing to reset the "configDir" package variable +// and its sync.Once to force re-lookup between tests. +func resetConfigDir() { + configDir = "" + initConfigDir = new(sync.Once) +} + +// getHomeDir returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// On non-Windows platforms, it falls back to nss lookups, if the home +// directory cannot be obtained from environment-variables. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +// +// getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker +// as dependency for consumers that only need to read the config-file. +// +// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v28.0.3+incompatible/pkg/homedir#Get +func getHomeDir() string { + home, _ := os.UserHomeDir() + if home == "" && runtime.GOOS != "windows" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// Provider defines an interface for providing the CLI config. +type Provider interface { + ConfigFile() *configfile.ConfigFile +} + +// Dir returns the directory the configuration file is stored in +func Dir() string { + initConfigDir.Do(func() { + configDir = os.Getenv(EnvOverrideConfigDir) + if configDir == "" { + configDir = filepath.Join(getHomeDir(), configFileDir) + } + }) + return configDir +} + +// ContextStoreDir returns the directory the docker contexts are stored in +func ContextStoreDir() string { + return filepath.Join(Dir(), contextsDir) +} + +// SetDir sets the directory the configuration file is stored in +func SetDir(dir string) { + // trigger the sync.Once to synchronise with Dir() + initConfigDir.Do(func() {}) + configDir = filepath.Clean(dir) +} + +// Path returns the path to a file relative to the config dir +func Path(p ...string) (string, error) { + path := filepath.Join(append([]string{Dir()}, p...)...) + if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { + return "", fmt.Errorf("path %q is outside of root config directory %q", path, Dir()) + } + return path, nil +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader. It returns an error if configData is malformed. +func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + +// Load reads the configuration file ([ConfigFileName]) from the given directory. +// If no directory is given, it uses the default [Dir]. A [*configfile.ConfigFile] +// is returned containing the contents of the configuration file, or a default +// struct if no configfile exists in the given location. 
+// +// Load returns an error if a configuration file exists in the given location, +// but cannot be read, or is malformed. Consumers must handle errors to prevent +// overwriting an existing configuration file. +func Load(configDir string) (*configfile.ConfigFile, error) { + if configDir == "" { + configDir = Dir() + } + return load(configDir) +} + +func load(configDir string) (*configfile.ConfigFile, error) { + filename := filepath.Join(configDir, ConfigFileName) + configFile := configfile.New(filename) + + file, err := os.Open(filename) + if err != nil { + if os.IsNotExist(err) { + // It is OK for no configuration file to be present, in which + // case we return a default struct. + return configFile, nil + } + // Any other error happening when failing to read the file must be returned. + return configFile, fmt.Errorf("loading config file: %w", err) + } + defer func() { _ = file.Close() }() + err = configFile.LoadFromReader(file) + if err != nil { + err = fmt.Errorf("parsing config file (%s): %w", filename, err) + } + return configFile, err +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// a reference to the ConfigFile struct. If none is found or when failing to load +// the configuration file, it initializes a default ConfigFile struct. If no +// credentials-store is set in the configuration file, it attempts to discover +// the default store to use for the current platform. +// +// Important: LoadDefaultConfigFile prints a warning to stderr when failing to +// load the configuration file, but otherwise ignores errors. Consumers should +// consider using [Load] (and [credentials.DetectDefaultStore]) to detect errors +// when updating the configuration file, to prevent discarding a (malformed) +// configuration file. 
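+// +// A typical lookup through the default config file is: +// +//	cfg := LoadDefaultConfigFile(os.Stderr) +//	creds, err := cfg.GetAuthConfig("https://index.docker.io/v1/")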
+func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { + configFile, err := load(Dir()) + if err != nil { + // FIXME(thaJeztah): we should not proceed here to prevent overwriting existing (but malformed) config files; see https://github.com/docker/cli/issues/5075 + _, _ = fmt.Fprintln(stderr, "WARNING: Error", err) + } + if !configFile.ContainsAuth() { + configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) + } + return configFile +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go new file mode 100644 index 000000000..fab3ed4cb --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -0,0 +1,441 @@ +package configfile + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/memorystore" + "github.com/docker/cli/cli/config/types" + "github.com/sirupsen/logrus" +) + +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + PluginsFormat string `json:"pluginsFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` + ServicesFormat string `json:"servicesFormat,omitempty"` + TasksFormat string `json:"tasksFormat,omitempty"` + SecretFormat string `json:"secretFormat,omitempty"` + ConfigFormat string `json:"configFormat,omitempty"` + NodesFormat string `json:"nodesFormat,omitempty"` + PruneFilters []string `json:"pruneFilters,omitempty"` + Proxies map[string]ProxyConfig `json:"proxies,omitempty"` + CurrentContext string `json:"currentContext,omitempty"` + CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` + Plugins map[string]map[string]string `json:"plugins,omitempty"` + Aliases map[string]string `json:"aliases,omitempty"` + Features map[string]string `json:"features,omitempty"` +} + +type configEnvAuth struct { + Auth string `json:"auth"` +} + +type configEnv struct { + AuthConfigs map[string]configEnvAuth `json:"auths"` +} + +// DockerEnvConfigKey is an environment variable that contains a JSON encoded +// credential config. It only supports storing the credentials as a base64 +// encoded string in the format base64("username:pat"). +// +// Adding additional fields will produce a parsing error. 
+// +// Example: +// +// { +// "auths": { +// "example.test": { +// "auth": base64-encoded-username-pat +// } +// } +// } +const DockerEnvConfigKey = "DOCKER_AUTH_CONFIG" + +// ProxyConfig contains proxy configuration settings +type ProxyConfig struct { + HTTPProxy string `json:"httpProxy,omitempty"` + HTTPSProxy string `json:"httpsProxy,omitempty"` + NoProxy string `json:"noProxy,omitempty"` + FTPProxy string `json:"ftpProxy,omitempty"` + AllProxy string `json:"allProxy,omitempty"` +} + +// New initializes an empty configuration file for the given filename 'fn' +func New(fn string) *ConfigFile { + return &ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + HTTPHeaders: make(map[string]string), + Filename: fn, + Plugins: make(map[string]map[string]string), + Aliases: make(map[string]string), + } +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + if ac.Auth != "" { + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return err + } + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return nil +} + +// ContainsAuth returns whether there is authentication configured +// in this file or not. +func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + len(configFile.CredentialHelpers) > 0 || + len(configFile.AuthConfigs) > 0 +} + +// GetAuthConfigs returns the mapping of repo to auth configuration +func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig { + if configFile.AuthConfigs == nil { + configFile.AuthConfigs = make(map[string]types.AuthConfig) + } + return configFile.AuthConfigs +} + +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + // User-Agent header is automatically set, and should not be stored in the configuration + for v := range configFile.HTTPHeaders { + if strings.EqualFold(v, "User-Agent") { + delete(configFile.HTTPHeaders, v) + } + } + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() (retErr error) { + if configFile.Filename == "" { + return errors.New("can't save config with empty filename") + } + + dir := filepath.Dir(configFile.Filename) + if err := os.MkdirAll(dir, 0o700); err != nil { + return err + } + temp, err := os.CreateTemp(dir, filepath.Base(configFile.Filename)) + if err != nil { + 
return err + } + defer func() { + // ignore error as the file may already be closed when we reach this. + _ = temp.Close() + if retErr != nil { + if err := os.Remove(temp.Name()); err != nil { + logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") + } + } + }() + + err = configFile.SaveToWriter(temp) + if err != nil { + return err + } + + if err := temp.Close(); err != nil { + return fmt.Errorf("error closing temp file: %w", err) + } + + // Handle situation where the configfile is a symlink, and allow for dangling symlinks + cfgFile := configFile.Filename + if f, err := filepath.EvalSymlinks(cfgFile); err == nil { + cfgFile = f + } else if os.IsNotExist(err) { + // extract the path from the error if the configfile does not exist or is a dangling symlink + var pathError *os.PathError + if errors.As(err, &pathError) { + cfgFile = pathError.Path + } + } + + // Try copying the current config file (if any) ownership and permissions + copyFilePermissions(cfgFile, temp.Name()) + return os.Rename(temp.Name(), cfgFile) +} + +// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and +// then checking this against any environment variables provided to the container +func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { + var cfgKey string + + if _, ok := configFile.Proxies[host]; !ok { + cfgKey = "default" + } else { + cfgKey = host + } + + config := configFile.Proxies[cfgKey] + permitted := map[string]*string{ + "HTTP_PROXY": &config.HTTPProxy, + "HTTPS_PROXY": &config.HTTPSProxy, + "NO_PROXY": &config.NoProxy, + "FTP_PROXY": &config.FTPProxy, + "ALL_PROXY": &config.AllProxy, + } + m := runOpts + if m == nil { + m = make(map[string]*string) + } + for k := range permitted { + if *permitted[k] == "" { + continue + } + if _, ok := m[k]; !ok { + m[k] = permitted[k] + } + if _, ok := m[strings.ToLower(k)]; !ok { + m[strings.ToLower(k)] = permitted[k] + } + } + return m +} + +// encodeAuth creates a base64 encoded string containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", errors.New("something went wrong decoding auth config") + } + userName, password, ok := strings.Cut(string(decoded), ":") + if !ok || userName == "" { + return "", "", errors.New("invalid auth configuration file") + } + return userName, strings.Trim(password, "\x00"), nil +} + +// GetCredentialsStore returns a new credentials store from the settings in the +// configuration file +func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { + store := credentials.NewFileStore(configFile) + + if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { + store =
newNativeStore(configFile, helper) + } + + envConfig := os.Getenv(DockerEnvConfigKey) + if envConfig == "" { + return store + } + + authConfig, err := parseEnvConfig(envConfig) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err) + return store + } + + // use DOCKER_AUTH_CONFIG if set + // it uses the native or file store as a fallback to fetch and store credentials + envStore, err := memorystore.New( + memorystore.WithAuthConfig(authConfig), + memorystore.WithFallbackStore(store), + ) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err) + return store + } + + return envStore +} + +func parseEnvConfig(v string) (map[string]types.AuthConfig, error) { + envConfig := &configEnv{} + decoder := json.NewDecoder(strings.NewReader(v)) + decoder.DisallowUnknownFields() + if err := decoder.Decode(envConfig); err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + if decoder.More() { + return nil, errors.New("DOCKER_AUTH_CONFIG does not support more than one JSON object") + } + + authConfigs := make(map[string]types.AuthConfig) + for addr, envAuth := range envConfig.AuthConfigs { + if envAuth.Auth == "" { + return nil, fmt.Errorf("DOCKER_AUTH_CONFIG environment variable is missing key `auth` for %s", addr) + } + username, password, err := decodeAuth(envAuth.Auth) + if err != nil { + return nil, err + } + authConfigs[addr] = types.AuthConfig{ + Username: username, + Password: password, + ServerAddress: addr, + } + } + return authConfigs, nil +} + +// var for unit testing. +var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { + return credentials.NewNativeStore(configFile, helperSuffix) +} + +// GetAuthConfig for a repository from the credential store +func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { + return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { + if c.CredentialHelpers != nil && registryHostname != "" { + if helper, exists := c.CredentialHelpers[registryHostname]; exists { + return helper + } + } + return c.CredentialsStore +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. +func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + addAll := func(from map[string]types.AuthConfig) { + for reg, ac := range from { + auths[reg] = ac + } + } + + defaultStore := configFile.GetCredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(newAuths) + + // Auth configs from a registry-specific helper should override those from the default store. + for registryHostname := range configFile.CredentialHelpers { + newAuth, err := configFile.GetAuthConfig(registryHostname) + if err != nil { + // TODO(thaJeztah): use context-logger, so that this output can be suppressed (in tests). 
+ logrus.WithError(err).Warnf("Failed to get credentials for registry: %s", registryHostname) + continue + } + auths[registryHostname] = newAuth + } + return auths, nil +} + +// GetFilename returns the file name that this config file is based on. +func (configFile *ConfigFile) GetFilename() string { + return configFile.Filename +} + +// PluginConfig retrieves the requested option for the given plugin. +func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { + if configFile.Plugins == nil { + return "", false + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + return "", false + } + value, ok := pluginConfig[option] + return value, ok +} + +// SetPluginConfig sets the option to the given value for the given +// plugin. Passing a value of "" will remove the option. If removing +// the final config item for a given plugin then also cleans up the +// overall plugin entry. +func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { + if configFile.Plugins == nil { + configFile.Plugins = make(map[string]map[string]string) + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + pluginConfig = make(map[string]string) + configFile.Plugins[pluginname] = pluginConfig + } + if value != "" { + pluginConfig[option] = value + } else { + delete(pluginConfig, option) + } + if len(pluginConfig) == 0 { + delete(configFile.Plugins, pluginname) + } +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go new file mode 100644 index 000000000..06b811e7d --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go @@ -0,0 +1,35 @@ +//go:build !windows + +package configfile + +import ( + "os" + "syscall" +) + +// copyFilePermissions copies file ownership and permissions from "src" to "dst", +// ignoring any error during the process. +func copyFilePermissions(src, dst string) { + var ( + mode os.FileMode = 0o600 + uid, gid int + ) + + fi, err := os.Stat(src) + if err != nil { + return + } + if fi.Mode().IsRegular() { + mode = fi.Mode() + } + if err := os.Chmod(dst, mode); err != nil { + return + } + + uid = int(fi.Sys().(*syscall.Stat_t).Uid) + gid = int(fi.Sys().(*syscall.Stat_t).Gid) + + if uid > 0 && gid > 0 { + _ = os.Chown(dst, uid, gid) + } +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go new file mode 100644 index 000000000..42fffc39a --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go @@ -0,0 +1,5 @@ +package configfile + +func copyFilePermissions(src, dst string) { + // TODO implement for Windows +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go new file mode 100644 index 000000000..28d58ec48 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. 
+	GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go new file mode 100644 index 000000000..a36afc41f --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go @@ -0,0 +1,22 @@ +package credentials + +import "os/exec" + +// DetectDefaultStore returns the default credentials store for the platform if +// no user-defined store is passed, and the store executable is available. +func DetectDefaultStore(store string) string { + if store != "" { + // use user-defined + return store + } + + platformDefault := defaultCredentialsStore() + if platformDefault == "" { + return "" + } + + if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err != nil { + return "" + } + return platformDefault +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go new file mode 100644 index 000000000..5d42dec62 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "osxkeychain" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go new file mode 100644 index 000000000..a9012c6d4 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go @@ -0,0 +1,13 @@ +package credentials + +import ( + "os/exec" +) + +func defaultCredentialsStore() string { + if _, err := exec.LookPath("pass"); err == nil { + return "pass" + } + + return "secretservice" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go new file mode 100644 index 000000000..40c16eb83 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go @@ -0,0 +1,7 @@ +//go:build !windows && !darwin && !linux + +package credentials + +func defaultCredentialsStore() string { + return "" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go new file mode 100644 index 000000000..bb799ca61 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "wincred" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go new file mode 100644 index 000000000..c69312b01 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -0,0 +1,118 @@ +package credentials + +import ( + "fmt" + "net" + "net/url" + "os" + "strings" + "sync/atomic" + + "github.com/docker/cli/cli/config/types" +) + +type store interface { + Save() error + GetAuthConfigs() map[string]types.AuthConfig + GetFilename() string +} + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text.
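+// +// On disk this is the familiar config.json "auths" map, where the "auth" +// value holds base64(username:password); illustrative values only: +// +//	{"auths": {"registry.example.com": {"auth": "dXNlcjpwYXNz"}}}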
+type fileStore struct { + file store +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file store) Store { + return &fileStore{file: file} +} + +// Erase removes the given credentials from the file store. This function is +// idempotent and does not update the file if credentials did not change. +func (c *fileStore) Erase(serverAddress string) error { + if _, exists := c.file.GetAuthConfigs()[serverAddress]; !exists { + // nothing to do; no credentials found for the given serverAddress + return nil + } + delete(c.file.GetAuthConfigs(), serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. +func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.GetAuthConfigs()[serverAddress] + if !ok { + // Maybe they have a legacy config file; we will iterate the keys, converting + // them to the new format and testing + for r, ac := range c.file.GetAuthConfigs() { + if serverAddress == ConvertToHostname(r) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.GetAuthConfigs(), nil +} + +// unencryptedWarning warns the user when using insecure credential storage. +// After a deprecation period, the user will get prompted if stdin and stderr are a terminal. +// Otherwise, we'll assume they want it (sadly), because people may have been scripting +// insecure logins and we don't want to break them. Maybe they'll see the warning in their +// logs and fix things. +const unencryptedWarning = ` +WARNING! Your credentials are stored unencrypted in '%s'. +Configure a credential helper to remove this warning. See +https://docs.docker.com/go/credential-store/ +` + +// alreadyPrinted ensures that we only print the unencryptedWarning once per +// CLI invocation (no need to warn the user multiple times per command). +var alreadyPrinted atomic.Bool + +// Store saves the given credentials in the file store. This function is +// idempotent and does not update the file if credentials did not change. +func (c *fileStore) Store(authConfig types.AuthConfig) error { + authConfigs := c.file.GetAuthConfigs() + if oldAuthConfig, ok := authConfigs[authConfig.ServerAddress]; ok && oldAuthConfig == authConfig { + // Credentials didn't change, so skip updating the configuration file. + return nil + } + authConfigs[authConfig.ServerAddress] = authConfig + if err := c.file.Save(); err != nil { + return err + } + + if !alreadyPrinted.Load() && authConfig.Password != "" { + // Display a warning if we're storing the user's password (not a token). + // + // FIXME(thaJeztah): make output configurable instead of hardcoding to os.Stderr + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf(unencryptedWarning, c.file.GetFilename())) + alreadyPrinted.Store(true) + } + + return nil +} + +// ConvertToHostname converts a registry URL which has http|https prepended +// to just a hostname. +// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies.
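+// +// For example (illustrative inputs): +// +//	ConvertToHostname("https://index.docker.io/v1/") == "index.docker.io" +//	ConvertToHostname("registry.example.com:5000/repo") == "registry.example.com:5000"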
+func ConvertToHostname(maybeURL string) string { + stripped := maybeURL + if strings.Contains(stripped, "://") { + u, err := url.Parse(stripped) + if err == nil && u.Hostname() != "" { + if u.Port() == "" { + return u.Hostname() + } + return net.JoinHostPort(u.Hostname(), u.Port()) + } + } + hostName, _, _ := strings.Cut(stripped, "/") + return hostName +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go new file mode 100644 index 000000000..b9af145b9 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go @@ -0,0 +1,147 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" //nolint:gosec // ignore G101: Potential hardcoded credentials + tokenUsername = "" +) + +// nativeStore implements a credentials store +// using native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + programFunc client.ProgramFunc + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file store, helperSuffix string) Store { + name := remoteCredentialsPrefix + helperSuffix + return &nativeStore{ + programFunc: client.NewShellProgramFunc(name), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. +func (c *nativeStore) Erase(serverAddress string) error { + if err := client.Erase(c.programFunc, serverAddress); err != nil { + return err + } + + // Fall back to the plain-text store to remove the email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load the user email if it exists, or an empty auth config. + auth, _ := c.fileStore.Get(serverAddress) + + creds, err := c.getCredentialsFromStore(serverAddress) + if err != nil { + return auth, err + } + auth.Username = creds.Username + auth.IdentityToken = creds.IdentityToken + auth.Password = creds.Password + auth.ServerAddress = creds.ServerAddress + + return auth, nil +} + +// GetAll retrieves all the credentials from the native store. +func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { + auths, err := c.listCredentialsInStore() + if err != nil { + return nil, err + } + + // Emails are only stored in the file store. + // This call can be safely eliminated when emails are removed. + fileConfigs, _ := c.fileStore.GetAll() + + authConfigs := make(map[string]types.AuthConfig) + for registry := range auths { + creds, err := c.getCredentialsFromStore(registry) + if err != nil { + return nil, err + } + ac := fileConfigs[registry] // might contain Email + ac.Username = creds.Username + ac.Password = creds.Password + ac.IdentityToken = creds.IdentityToken + if ac.ServerAddress == "" { + ac.ServerAddress = creds.ServerAddress + } + authConfigs[registry] = ac + } + + return authConfigs, nil +} + +// Store saves the given credentials in the native store; only the email is +// kept in the file store.
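+// +// Under the hood the configured helper binary receives the credentials as +// JSON on stdin, roughly equivalent to (helper name for illustration): +// +//	echo '{"ServerURL":"registry.example.com","Username":"user","Secret":"pass"}' | docker-credential-pass store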
+func (c *nativeStore) Store(authConfig types.AuthConfig) error { + if err := c.storeCredentialsInStore(authConfig); err != nil { + return err + } + authConfig.Username = "" + authConfig.Password = "" + authConfig.IdentityToken = "" + + // Fall back to the plain-text file store to save only the email + return c.fileStore.Store(authConfig) +} + +// storeCredentialsInStore executes the command to store the credentials in the native store. +func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { + creds := &credentials.Credentials{ + ServerURL: config.ServerAddress, + Username: config.Username, + Secret: config.Password, + } + + if config.IdentityToken != "" { + creds.Username = tokenUsername + creds.Secret = config.IdentityToken + } + + return client.Store(c.programFunc, creds) +} + +// getCredentialsFromStore executes the command to get the credentials from the native store. +func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { + var ret types.AuthConfig + + creds, err := client.Get(c.programFunc, serverAddress) + if err != nil { + if credentials.IsErrCredentialsNotFound(err) { + // do not return an error if the credentials are not + // in the keychain. Let docker ask for new credentials. + return ret, nil + } + return ret, err + } + + if creds.Username == tokenUsername { + ret.IdentityToken = creds.Secret + } else { + ret.Password = creds.Secret + ret.Username = creds.Username + } + + ret.ServerAddress = serverAddress + return ret, nil +} + +// listCredentialsInStore returns a listing of stored credentials as a map of +// URL -> username. +func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { + return client.List(c.programFunc) +} diff --git a/vendor/github.com/docker/cli/cli/config/memorystore/store.go b/vendor/github.com/docker/cli/cli/config/memorystore/store.go new file mode 100644 index 000000000..f8ec62b95 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/memorystore/store.go @@ -0,0 +1,131 @@ +// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: +//go:build go1.24 + +package memorystore + +import ( + "fmt" + "maps" + "os" + "sync" + + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" +) + +// notFoundErr is the error returned when a value could not be found.
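+// +// It carries a NotFound() marker method so callers can distinguish a missing +// value from a genuine failure, e.g. (illustrative): +// +//	if _, ok := err.(interface{ NotFound() }); ok { /* no stored credential */ }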
+type notFoundErr string + +func (notFoundErr) NotFound() {} + +func (e notFoundErr) Error() string { + return string(e) +} + +var errValueNotFound notFoundErr = "value not found" + +type Config struct { + lock sync.RWMutex + memoryCredentials map[string]types.AuthConfig + fallbackStore credentials.Store +} + +func (e *Config) Erase(serverAddress string) error { + e.lock.Lock() + defer e.lock.Unlock() + delete(e.memoryCredentials, serverAddress) + + if e.fallbackStore != nil { + err := e.fallbackStore.Erase(serverAddress) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err) + } + } + + return nil +} + +func (e *Config) Get(serverAddress string) (types.AuthConfig, error) { + e.lock.RLock() + defer e.lock.RUnlock() + authConfig, ok := e.memoryCredentials[serverAddress] + if !ok { + if e.fallbackStore != nil { + return e.fallbackStore.Get(serverAddress) + } + return types.AuthConfig{}, errValueNotFound + } + return authConfig, nil +} + +func (e *Config) GetAll() (map[string]types.AuthConfig, error) { + e.lock.RLock() + defer e.lock.RUnlock() + creds := make(map[string]types.AuthConfig) + + if e.fallbackStore != nil { + fileCredentials, err := e.fallbackStore.GetAll() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err) + } else { + creds = fileCredentials + } + } + + maps.Copy(creds, e.memoryCredentials) + return creds, nil +} + +func (e *Config) Store(authConfig types.AuthConfig) error { + e.lock.Lock() + defer e.lock.Unlock() + e.memoryCredentials[authConfig.ServerAddress] = authConfig + + if e.fallbackStore != nil { + return e.fallbackStore.Store(authConfig) + } + return nil +} + +// WithFallbackStore sets a fallback store. +// +// Write operations will be performed on both the memory store and the +// fallback store. +// +// Read operations will first check the memory store, and if the credential +// is not found, it will then check the fallback store. +// +// Retrieving all credentials will return credentials from both the memory +// store and the fallback store, merging the results from both stores into a +// single map. +// +// Data stored in the memory store will take precedence over data in the +// fallback store. +func WithFallbackStore(store credentials.Store) Options { + return func(s *Config) error { + s.fallbackStore = store + return nil + } +} + +// WithAuthConfig sets the initial credentials in the memory store.
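+// +// Typical use seeds the store from DOCKER_AUTH_CONFIG while keeping the +// regular store as a fallback, mirroring GetCredentialsStore above +// (illustrative): +// +//	envStore, err := memorystore.New( +//		memorystore.WithAuthConfig(authConfig), +//		memorystore.WithFallbackStore(store), +//	)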
+func WithAuthConfig(config map[string]types.AuthConfig) Options { + return func(s *Config) error { + s.memoryCredentials = config + return nil + } +} + +type Options func(*Config) error + +// New creates a new in memory credential store +func New(opts ...Options) (credentials.Store, error) { + m := &Config{ + memoryCredentials: make(map[string]types.AuthConfig), + } + for _, opt := range opts { + if err := opt(m); err != nil { + return nil, err + } + } + return m, nil +} diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go new file mode 100644 index 000000000..9fe90003b --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -0,0 +1,17 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/modules.txt b/vendor/modules.txt index d169f2f14..a05794326 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -96,6 +96,13 @@ github.com/davecgh/go-spew/spew # github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference +# github.com/docker/cli v29.1.4+incompatible +## explicit +github.com/docker/cli/cli/config +github.com/docker/cli/cli/config/configfile +github.com/docker/cli/cli/config/credentials +github.com/docker/cli/cli/config/memorystore +github.com/docker/cli/cli/config/types # github.com/docker/distribution v2.8.3+incompatible ## explicit github.com/docker/distribution/registry/api/errcode From 527f13162e5dd7c8fd4f02129431af7630fa76fc Mon Sep 17 00:00:00 2001 From: CrazyMax <1951866+crazy-max@users.noreply.github.com> Date: Mon, 23 Mar 2026 23:25:13 +0100 Subject: [PATCH 3/4] registry: replace go.podman.io/image/v5 with regclient --- .github/workflows/e2e.yml | 2 + go.mod | 26 +- go.sum | 65 +- internal/app/job.go | 42 +- internal/grpc/notif.go | 11 +- internal/logging/ants.go | 13 + internal/logging/logger.go | 37 - internal/logging/regclient.go | 223 + internal/logging/regclient_test.go | 81 + internal/provider/file/image.go | 14 +- pkg/registry/auth.go | 52 - pkg/registry/auth_test.go | 46 - pkg/registry/image.go | 20 +- pkg/registry/image_test.go | 77 +- pkg/registry/manifest.go | 186 +- pkg/registry/manifest_test.go | 507 +- pkg/registry/platform.go | 24 + pkg/registry/platform_test.go | 29 + pkg/registry/ref.go | 66 - pkg/registry/ref_test.go | 68 - pkg/registry/registry.go | 58 +- pkg/registry/registry_test.go | 81 +- pkg/registry/tags.go | 28 +- pkg/registry/tags_test.go | 198 +- .../containers/libtrust/CODE-OF-CONDUCT.md | 3 - .../containers/libtrust/SECURITY.md | 3 - .../containers/libtrust/ec_key_no_openssl.go | 23 - .../containers/libtrust/ec_key_openssl.go | 24 - vendor/github.com/containers/ocicrypt/LICENSE | 189 - .../containers/ocicrypt/spec/spec.go | 20 - vendor/github.com/docker/cli/AUTHORS | 945 ---- vendor/github.com/docker/cli/LICENSE | 191 - vendor/github.com/docker/cli/NOTICE | 19 - .../docker/cli/cli/config/config.go | 176 - .../docker/cli/cli/config/configfile/file.go | 
441 -- .../cli/cli/config/configfile/file_unix.go | 35 - .../cli/cli/config/configfile/file_windows.go | 5 - .../cli/cli/config/credentials/credentials.go | 17 - .../cli/config/credentials/default_store.go | 22 - .../credentials/default_store_darwin.go | 5 - .../config/credentials/default_store_linux.go | 13 - .../credentials/default_store_unsupported.go | 7 - .../credentials/default_store_windows.go | 5 - .../cli/cli/config/credentials/file_store.go | 118 - .../cli/config/credentials/native_store.go | 147 - .../cli/cli/config/memorystore/store.go | 131 - .../docker/cli/cli/config/types/authconfig.go | 17 - vendor/github.com/docker/distribution/LICENSE | 202 - .../registry/api/errcode/errors.go | 267 -- .../registry/api/errcode/handler.go | 40 - .../registry/api/errcode/register.go | 138 - .../registry/api/v2/descriptors.go | 1613 ------- .../distribution/registry/api/v2/doc.go | 9 - .../distribution/registry/api/v2/errors.go | 145 - .../registry/api/v2/headerparser.go | 161 - .../distribution/registry/api/v2/routes.go | 40 - .../distribution/registry/api/v2/urls.go | 254 - .../client/client.go | 114 - .../client/command.go | 57 - .../credentials/credentials.go | 209 - .../credentials/error.go | 124 - .../credentials/helper.go | 14 - .../credentials/version.go | 16 - .../libtrust/CONTRIBUTING.md | 0 .../{containers => docker}/libtrust/LICENSE | 0 .../libtrust/MAINTAINERS | 0 .../{containers => docker}/libtrust/README.md | 0 .../libtrust/certificates.go | 0 .../{containers => docker}/libtrust/doc.go | 0 .../{containers => docker}/libtrust/ec_key.go | 10 +- .../{containers => docker}/libtrust/filter.go | 0 .../{containers => docker}/libtrust/hash.go | 0 .../libtrust/jsonsign.go | 0 .../{containers => docker}/libtrust/key.go | 0 .../libtrust/key_files.go | 0 .../libtrust/key_manager.go | 0 .../libtrust/rsa_key.go | 0 .../{containers => docker}/libtrust/util.go | 0 vendor/github.com/gorilla/mux/.editorconfig | 20 - vendor/github.com/gorilla/mux/.gitignore | 1 - vendor/github.com/gorilla/mux/Makefile | 34 - vendor/github.com/gorilla/mux/README.md | 812 ---- vendor/github.com/gorilla/mux/doc.go | 305 -- vendor/github.com/gorilla/mux/middleware.go | 74 - vendor/github.com/gorilla/mux/mux.go | 608 --- vendor/github.com/gorilla/mux/regexp.go | 388 -- vendor/github.com/gorilla/mux/route.go | 765 --- vendor/github.com/gorilla/mux/test_helpers.go | 19 - .../klauspost/compress/.gitattributes | 2 + .../github.com/klauspost/compress/.gitignore | 32 + .../klauspost/compress/.goreleaser.yml | 123 + .../sys/user => klauspost/compress}/LICENSE | 104 +- .../github.com/klauspost/compress/README.md | 693 +++ .../github.com/klauspost/compress/SECURITY.md | 25 + .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 167 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 683 +++ .../klauspost/compress/fse/decompress.go | 376 ++ .../github.com/klauspost/compress/fse/fse.go | 144 + vendor/github.com/klauspost/compress/gen.sh | 4 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 224 + .../klauspost/compress/huff0/bitwriter.go | 102 + .../klauspost/compress/huff0/compress.go | 742 +++ .../klauspost/compress/huff0/decompress.go | 1161 +++++ .../compress/huff0/decompress_amd64.go | 223 + .../compress/huff0/decompress_amd64.s | 830 ++++ 
.../compress/huff0/decompress_generic.go | 299 ++ .../klauspost/compress/huff0/huff0.go | 337 ++ .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../klauspost/compress/internal/le/le.go | 5 + .../compress/internal/le/unsafe_disabled.go | 42 + .../compress/internal/le/unsafe_enabled.go | 52 + .../compress/internal/snapref}/LICENSE | 8 +- .../compress/internal/snapref/decode.go | 264 ++ .../compress/internal/snapref/decode_other.go | 113 + .../compress/internal/snapref/encode.go | 291 ++ .../compress/internal/snapref/encode_other.go | 250 + .../compress/internal/snapref/snappy.go | 98 + vendor/github.com/klauspost/compress/s2sx.mod | 3 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 ++ .../klauspost/compress/zstd/bitreader.go | 135 + .../klauspost/compress/zstd/bitwriter.go | 112 + .../klauspost/compress/zstd/blockdec.go | 712 +++ .../klauspost/compress/zstd/blockenc.go | 892 ++++ .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 131 + .../klauspost/compress/zstd/bytereader.go | 82 + .../klauspost/compress/zstd/decodeheader.go | 261 ++ .../klauspost/compress/zstd/decoder.go | 957 ++++ .../compress/zstd/decoder_options.go | 213 + .../klauspost/compress/zstd/dict.go | 559 +++ .../klauspost/compress/zstd/enc_base.go | 171 + .../klauspost/compress/zstd/enc_best.go | 549 +++ .../klauspost/compress/zstd/enc_better.go | 1234 +++++ .../klauspost/compress/zstd/enc_dfast.go | 1105 +++++ .../klauspost/compress/zstd/enc_fast.go | 873 ++++ .../klauspost/compress/zstd/encoder.go | 658 +++ .../compress/zstd/encoder_options.go | 377 ++ .../klauspost/compress/zstd/framedec.go | 412 ++ .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 307 ++ .../compress/zstd/fse_decoder_amd64.go | 65 + .../compress/zstd/fse_decoder_amd64.s | 126 + .../compress/zstd/fse_decoder_generic.go | 73 + .../klauspost/compress/zstd/fse_encoder.go | 701 +++ .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 35 + .../klauspost/compress/zstd/history.go | 116 + .../zstd/internal/xxhash/LICENSE.txt} | 14 +- .../compress/zstd/internal/xxhash/README.md | 71 + .../compress/zstd/internal/xxhash/xxhash.go | 230 + .../zstd/internal/xxhash/xxhash_amd64.s | 210 + .../zstd/internal/xxhash/xxhash_arm64.s | 184 + .../zstd/internal/xxhash/xxhash_asm.go | 16 + .../zstd/internal/xxhash/xxhash_other.go | 76 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/matchlen_amd64.go | 16 + .../klauspost/compress/zstd/matchlen_amd64.s | 66 + .../compress/zstd/matchlen_generic.go | 38 + .../klauspost/compress/zstd/seqdec.go | 500 ++ .../klauspost/compress/zstd/seqdec_amd64.go | 388 ++ .../klauspost/compress/zstd/seqdec_amd64.s | 4151 +++++++++++++++++ .../klauspost/compress/zstd/seqdec_generic.go | 237 + .../klauspost/compress/zstd/seqenc.go | 112 + .../klauspost/compress/zstd/simple_go124.go | 56 + .../klauspost/compress/zstd/snappy.go | 434 ++ .../github.com/klauspost/compress/zstd/zip.go | 141 + .../klauspost/compress/zstd/zstd.go | 126 + .../moby/sys/capability/CHANGELOG.md | 124 - vendor/github.com/moby/sys/capability/LICENSE | 25 - .../github.com/moby/sys/capability/README.md | 13 - .../moby/sys/capability/capability.go | 176 - .../moby/sys/capability/capability_linux.go | 591 --- .../moby/sys/capability/capability_noop.go | 46 - 
vendor/github.com/moby/sys/capability/enum.go | 330 -- .../moby/sys/capability/enum_gen.go | 137 - .../moby/sys/capability/syscall_linux.go | 161 - vendor/github.com/moby/sys/mountinfo/LICENSE | 202 - vendor/github.com/moby/sys/mountinfo/doc.go | 44 - .../moby/sys/mountinfo/mounted_linux.go | 101 - .../moby/sys/mountinfo/mounted_unix.go | 53 - .../moby/sys/mountinfo/mountinfo.go | 67 - .../moby/sys/mountinfo/mountinfo_bsd.go | 56 - .../moby/sys/mountinfo/mountinfo_filters.go | 63 - .../sys/mountinfo/mountinfo_freebsdlike.go | 14 - .../moby/sys/mountinfo/mountinfo_linux.go | 250 - .../moby/sys/mountinfo/mountinfo_openbsd.go | 11 - .../sys/mountinfo/mountinfo_unsupported.go | 19 - .../moby/sys/mountinfo/mountinfo_windows.go | 10 - vendor/github.com/moby/sys/user/idtools.go | 141 - .../github.com/moby/sys/user/idtools_unix.go | 143 - .../moby/sys/user/idtools_windows.go | 13 - .../github.com/moby/sys/user/lookup_unix.go | 157 - vendor/github.com/moby/sys/user/user.go | 604 --- .../github.com/moby/sys/user/user_fuzzer.go | 43 - .../runtime-spec/specs-go/config.go | 1067 ----- .../runtime-spec/specs-go/state.go | 56 - .../runtime-spec/specs-go/version.go | 18 - .../regclient/regclient/.dockerignore | 15 + .../github.com/regclient/regclient/.gitignore | 5 + .../regclient/regclient/.markdownlint.yml | 19 + .../regclient/regclient/.osv-scanner.toml | 1 + .../regclient/regclient/.version-bump.lock | 53 + .../regclient/regclient/.version-bump.yml | 346 ++ .../regclient/regclient/CODE_OF_CONDUCT.md | 134 + .../regclient/regclient/CONTRIBUTING.md | 80 + .../regclient}/LICENSE | 2 +- .../github.com/regclient/regclient/Makefile | 285 ++ .../github.com/regclient/regclient/README.md | 116 + .../regclient/regclient/SECURITY.md | 5 + vendor/github.com/regclient/regclient/blob.go | 283 ++ .../regclient/regclient/config/credhelper.go | 100 + .../regclient/regclient/config/docker.go | 210 + .../regclient/regclient/config/host.go | 521 +++ .../github.com/regclient/regclient/image.go | 1905 ++++++++ .../regclient/regclient/internal/auth/auth.go | 916 ++++ .../regclient/internal/auth/error.go | 48 + .../regclient/internal/cache/cache.go | 181 + .../regclient/internal/conffile/conffile.go | 188 + .../internal/conffile/conffile_unix.go | 37 + .../internal/conffile/conffile_windows.go | 31 + .../regclient/internal/httplink/httplink.go | 198 + .../regclient/internal/limitread/limitread.go | 29 + .../regclient/internal/pqueue/pqueue.go | 257 + .../regclient/internal/reghttp/http.go | 975 ++++ .../regclient/internal/reqmeta/data.go | 88 + .../regclient/internal/sloghandle/logrus.go | 125 + .../regclient/internal/strparse/strparse.go | 91 + .../regclient/internal/timejson/timejson.go | 41 + .../regclient/internal/units/size.go | 59 + .../regclient/internal/version/version.go | 32 + .../internal/version/version_buildinfo.go | 74 + .../regclient/internal/version/version_old.go | 37 + .../regclient/regclient/manifest.go | 206 + vendor/github.com/regclient/regclient/ping.go | 18 + .../regclient/pkg/archive/archive.go | 2 + .../regclient/pkg/archive/compress.go | 160 + .../regclient/regclient/pkg/archive/errors.go | 13 + .../regclient/regclient/pkg/archive/tar.go | 170 + .../regclient/regclient/referrer.go | 57 + .../regclient/regclient/regclient.go | 274 ++ .../regclient/regclient/regclient_nowasm.go | 19 + .../github.com/regclient/regclient/release.md | 36 + vendor/github.com/regclient/regclient/repo.go | 33 + .../github.com/regclient/regclient/scheme.go | 33 + .../regclient/regclient/scheme/ocidir/blob.go | 160 + 
.../regclient/scheme/ocidir/close.go | 117 + .../regclient/scheme/ocidir/manifest.go | 305 ++ .../regclient/scheme/ocidir/ocidir.go | 408 ++ .../regclient/scheme/ocidir/ocidir_nowasm.go | 19 + .../regclient/regclient/scheme/ocidir/ping.go | 29 + .../regclient/scheme/ocidir/referrer.go | 160 + .../regclient/regclient/scheme/ocidir/tag.go | 89 + .../regclient/regclient/scheme/reg/blob.go | 671 +++ .../regclient/scheme/reg/manifest.go | 295 ++ .../regclient/regclient/scheme/reg/ping.go | 39 + .../regclient/scheme/reg/referrer.go | 366 ++ .../regclient/regclient/scheme/reg/reg.go | 272 ++ .../regclient/scheme/reg/reg_nowasm.go | 20 + .../regclient/regclient/scheme/reg/repo.go | 79 + .../regclient/regclient/scheme/reg/tag.go | 343 ++ .../regclient/regclient/scheme/scheme.go | 227 + vendor/github.com/regclient/regclient/tag.go | 40 + .../regclient/regclient/types/annotations.go | 72 + .../regclient/regclient/types/blob/blob.go | 103 + .../regclient/regclient/types/blob/common.go | 68 + .../regclient/types/blob/ociconfig.go | 127 + .../regclient/regclient/types/blob/reader.go | 210 + .../regclient/regclient/types/blob/tar.go | 191 + .../regclient/regclient/types/callback.go | 29 + .../regclient/regclient/types/descriptor.go | 40 + .../regclient/types/descriptor/descriptor.go | 349 ++ .../regclient/regclient/types/doc.go | 3 + .../types/docker/schema1/manifest.go | 135 + .../regclient/types/docker/schema2/doc.go | 2 + .../types/docker/schema2/manifest.go | 29 + .../types/docker/schema2/manifestlist.go | 25 + .../regclient/types/docker/versioned.go | 10 + .../regclient/regclient/types/error.go | 186 + .../regclient/regclient/types/errs/error.go | 91 + .../regclient/types/manifest/common.go | 127 + .../regclient/types/manifest/docker1.go | 246 + .../regclient/types/manifest/docker2.go | 364 ++ .../regclient/types/manifest/manifest.go | 594 +++ .../regclient/types/manifest/oci1.go | 611 +++ .../regclient/regclient/types/mediatype.go | 110 + .../regclient/types/mediatype/mediatype.go | 65 + .../regclient/regclient/types/oci/doc.go | 18 + .../regclient/types/oci/v1/artifact.go | 21 + .../regclient/types/oci/v1/config.go | 141 + .../regclient/regclient/types/oci/v1/doc.go | 18 + .../regclient/regclient/types/oci/v1/index.go | 32 + .../regclient/types/oci/v1/layout.go | 6 + .../regclient/types/oci/v1/manifest.go | 35 + .../regclient/regclient/types/oci/version.go | 8 + .../regclient/regclient/types/ping/ping.go | 13 + .../regclient/types/platform/compare.go | 213 + .../regclient/types/platform/cpuinfo.go | 30 + .../regclient/types/platform/cpuinfo_armx.go | 82 + .../regclient/types/platform/cpuinfo_other.go | 7 + .../regclient/types/platform/cpuinfo_x86.go | 100 + .../regclient/types/platform/cpuinfo_x86.s | 26 + .../regclient/types/platform/os_darwin.go | 7 + .../regclient/types/platform/os_other.go | 7 + .../regclient/types/platform/platform.go | 183 + .../types/platform/platform_other.go | 16 + .../types/platform/platform_windows.go | 23 + .../regclient/regclient/types/ratelimit.go | 8 + .../regclient/regclient/types/ref/ref.go | 316 ++ .../regclient/types/referrer/referrer.go | 160 + .../regclient/types/repo/repolist.go | 136 + .../regclient/regclient/types/slog.go | 8 + .../regclient/regclient/types/tag/gcrlist.go | 98 + .../regclient/regclient/types/tag/tag.go | 2 + .../regclient/regclient/types/tag/taglist.go | 254 + .../regclient/types/warning/warning.go | 77 + vendor/github.com/spf13/pflag/flag.go | 14 +- vendor/github.com/ulikunitz/xz/.gitignore | 28 + vendor/github.com/ulikunitz/xz/LICENSE | 
26 + vendor/github.com/ulikunitz/xz/README.md | 88 + vendor/github.com/ulikunitz/xz/SECURITY.md | 19 + vendor/github.com/ulikunitz/xz/TODO.md | 386 ++ vendor/github.com/ulikunitz/xz/bits.go | 79 + vendor/github.com/ulikunitz/xz/crc.go | 54 + vendor/github.com/ulikunitz/xz/format.go | 721 +++ .../github.com/ulikunitz/xz/fox-check-none.xz | Bin 0 -> 96 bytes vendor/github.com/ulikunitz/xz/fox.xz | Bin 0 -> 104 bytes .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 + .../ulikunitz/xz/internal/hash/doc.go | 14 + .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 + .../ulikunitz/xz/internal/hash/roller.go | 29 + .../ulikunitz/xz/internal/xlog/xlog.go | 456 ++ .../github.com/ulikunitz/xz/lzma/bintree.go | 522 +++ vendor/github.com/ulikunitz/xz/lzma/bitops.go | 47 + .../github.com/ulikunitz/xz/lzma/breader.go | 39 + vendor/github.com/ulikunitz/xz/lzma/buffer.go | 171 + .../ulikunitz/xz/lzma/bytewriter.go | 37 + .../github.com/ulikunitz/xz/lzma/decoder.go | 277 ++ .../ulikunitz/xz/lzma/decoderdict.go | 128 + .../ulikunitz/xz/lzma/directcodec.go | 38 + .../github.com/ulikunitz/xz/lzma/distcodec.go | 140 + .../github.com/ulikunitz/xz/lzma/encoder.go | 268 ++ .../ulikunitz/xz/lzma/encoderdict.go | 149 + vendor/github.com/ulikunitz/xz/lzma/fox.lzma | Bin 0 -> 67 bytes .../github.com/ulikunitz/xz/lzma/hashtable.go | 309 ++ vendor/github.com/ulikunitz/xz/lzma/header.go | 170 + .../github.com/ulikunitz/xz/lzma/header2.go | 398 ++ .../ulikunitz/xz/lzma/lengthcodec.go | 115 + .../ulikunitz/xz/lzma/literalcodec.go | 125 + .../ulikunitz/xz/lzma/matchalgorithm.go | 52 + .../github.com/ulikunitz/xz/lzma/operation.go | 55 + vendor/github.com/ulikunitz/xz/lzma/prob.go | 53 + .../ulikunitz/xz/lzma/properties.go | 69 + .../ulikunitz/xz/lzma/rangecodec.go | 222 + vendor/github.com/ulikunitz/xz/lzma/reader.go | 193 + .../github.com/ulikunitz/xz/lzma/reader2.go | 231 + vendor/github.com/ulikunitz/xz/lzma/state.go | 145 + .../ulikunitz/xz/lzma/treecodecs.go | 133 + vendor/github.com/ulikunitz/xz/lzma/writer.go | 209 + .../github.com/ulikunitz/xz/lzma/writer2.go | 305 ++ vendor/github.com/ulikunitz/xz/lzmafilter.go | 117 + vendor/github.com/ulikunitz/xz/make-docs | 5 + vendor/github.com/ulikunitz/xz/none-check.go | 23 + vendor/github.com/ulikunitz/xz/reader.go | 359 ++ vendor/github.com/ulikunitz/xz/writer.go | 399 ++ vendor/go.podman.io/image/v5/LICENSE | 189 - .../image/v5/docker/body_reader.go | 253 - vendor/go.podman.io/image/v5/docker/cache.go | 23 - .../image/v5/docker/distribution_error.go | 161 - .../image/v5/docker/docker_client.go | 1221 ----- .../image/v5/docker/docker_image.go | 186 - .../image/v5/docker/docker_image_dest.go | 937 ---- .../image/v5/docker/docker_image_src.go | 863 ---- .../image/v5/docker/docker_transport.go | 211 - vendor/go.podman.io/image/v5/docker/errors.go | 102 - .../image/v5/docker/paths_common.go | 5 - .../image/v5/docker/paths_freebsd.go | 5 - .../v5/docker/policyconfiguration/naming.go | 78 - .../image/v5/docker/reference/README.md | 2 - .../image/v5/docker/reference/helpers.go | 42 - .../image/v5/docker/reference/normalize.go | 181 - .../image/v5/docker/reference/reference.go | 433 -- .../v5/docker/reference/regexp-additions.go | 6 - .../image/v5/docker/reference/regexp.go | 156 - .../image/v5/docker/registries_d.go | 303 -- .../image/v5/docker/wwwauthenticate.go | 175 - .../internal/blobinfocache/blobinfocache.go | 55 - .../image/v5/internal/blobinfocache/types.go | 81 - .../image/v5/internal/image/docker_list.go | 34 - .../image/v5/internal/image/docker_schema1.go | 257 - 
.../image/v5/internal/image/docker_schema2.go | 415 -- .../image/v5/internal/image/manifest.go | 121 - .../image/v5/internal/image/memory.go | 64 - .../image/v5/internal/image/oci.go | 336 -- .../image/v5/internal/image/oci_index.go | 34 - .../image/v5/internal/image/sourced.go | 134 - .../image/v5/internal/image/unparsed.go | 125 - .../internal/imagedestination/impl/compat.go | 114 - .../internal/imagedestination/impl/helpers.go | 15 - .../imagedestination/impl/properties.go | 72 - .../stubs/original_oci_config.go | 16 - .../stubs/put_blob_partial.go | 52 - .../imagedestination/stubs/signatures.go | 50 - .../internal/imagedestination/stubs/stubs.go | 27 - .../v5/internal/imagesource/impl/compat.go | 55 - .../internal/imagesource/impl/layer_infos.go | 23 - .../internal/imagesource/impl/properties.go | 27 - .../internal/imagesource/impl/signatures.go | 19 - .../internal/imagesource/stubs/get_blob_at.go | 54 - .../v5/internal/imagesource/stubs/stubs.go | 28 - .../image/v5/internal/imagesource/wrapper.go | 56 - .../image/v5/internal/iolimits/iolimits.go | 58 - .../image/v5/internal/manifest/common.go | 72 - .../v5/internal/manifest/docker_schema2.go | 15 - .../internal/manifest/docker_schema2_list.go | 311 -- .../image/v5/internal/manifest/errors.go | 56 - .../image/v5/internal/manifest/list.go | 133 - .../image/v5/internal/manifest/manifest.go | 228 - .../image/v5/internal/manifest/oci_index.go | 466 -- .../image/v5/internal/multierr/multierr.go | 34 - .../internal/pkg/platform/platform_matcher.go | 223 - .../image/v5/internal/private/private.go | 239 - .../internal/putblobdigest/put_blob_digest.go | 57 - .../image/v5/internal/rootless/rootless.go | 25 - .../go.podman.io/image/v5/internal/set/set.go | 55 - .../image/v5/internal/signature/signature.go | 102 - .../image/v5/internal/signature/sigstore.go | 86 - .../image/v5/internal/signature/simple.go | 29 - .../v5/internal/streamdigest/stream_digest.go | 40 - .../image/v5/internal/tmpdir/tmpdir.go | 44 - .../v5/internal/uploadreader/upload_reader.go | 61 - .../image/v5/internal/useragent/useragent.go | 6 - .../go.podman.io/image/v5/manifest/common.go | 152 - .../image/v5/manifest/docker_schema1.go | 346 -- .../image/v5/manifest/docker_schema2.go | 307 -- .../image/v5/manifest/docker_schema2_list.go | 32 - vendor/go.podman.io/image/v5/manifest/list.go | 35 - .../image/v5/manifest/manifest.go | 173 - vendor/go.podman.io/image/v5/manifest/oci.go | 276 -- .../image/v5/manifest/oci_index.go | 27 - .../image/v5/pkg/blobinfocache/none/none.go | 63 - .../v5/pkg/compression/internal/types.go | 80 - .../image/v5/pkg/compression/types/types.go | 41 - .../image/v5/pkg/docker/config/config.go | 950 ---- .../image/v5/pkg/strslice/README.md | 1 - .../image/v5/pkg/strslice/strslice.go | 30 - .../v5/pkg/sysregistriesv2/paths_common.go | 11 - .../v5/pkg/sysregistriesv2/paths_freebsd.go | 11 - .../v5/pkg/sysregistriesv2/shortnames.go | 353 -- .../sysregistriesv2/system_registries_v2.go | 1083 ----- .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 101 - .../go.podman.io/image/v5/transports/stub.go | 36 - .../image/v5/transports/transports.go | 90 - vendor/go.podman.io/image/v5/types/types.go | 735 --- .../go.podman.io/image/v5/version/version.go | 18 - vendor/go.podman.io/storage/AUTHORS | 1523 ------ vendor/go.podman.io/storage/LICENSE | 191 - vendor/go.podman.io/storage/NOTICE | 19 - .../internal/rawfilelock/rawfilelock.go | 64 - .../internal/rawfilelock/rawfilelock_unix.go | 49 - .../rawfilelock/rawfilelock_windows.go | 48 - 
.../storage/pkg/fileutils/exists_freebsd.go | 38 - .../storage/pkg/fileutils/exists_unix.go | 33 - .../storage/pkg/fileutils/exists_windows.go | 18 - .../storage/pkg/fileutils/fileutils.go | 369 -- .../storage/pkg/fileutils/fileutils_darwin.go | 27 - .../pkg/fileutils/fileutils_solaris.go | 7 - .../storage/pkg/fileutils/fileutils_unix.go | 21 - .../pkg/fileutils/fileutils_windows.go | 7 - .../storage/pkg/fileutils/reflink_linux.go | 20 - .../pkg/fileutils/reflink_unsupported.go | 15 - .../storage/pkg/homedir/homedir.go | 37 - .../storage/pkg/homedir/homedir_unix.go | 182 - .../storage/pkg/homedir/homedir_windows.go | 61 - .../storage/pkg/idtools/idtools.go | 620 --- .../storage/pkg/idtools/idtools_supported.go | 97 - .../storage/pkg/idtools/idtools_unix.go | 212 - .../pkg/idtools/idtools_unsupported.go | 11 - .../storage/pkg/idtools/idtools_windows.go | 23 - .../storage/pkg/idtools/parser.go | 59 - .../storage/pkg/idtools/usergroupadd_linux.go | 158 - .../pkg/idtools/usergroupadd_unsupported.go | 12 - .../storage/pkg/idtools/utils_unix.go | 31 - .../storage/pkg/ioutils/buffer.go | 51 - .../storage/pkg/ioutils/bytespipe.go | 184 - .../storage/pkg/ioutils/fswriters.go | 284 -- .../storage/pkg/ioutils/fswriters_linux.go | 23 - .../storage/pkg/ioutils/fswriters_other.go | 25 - .../storage/pkg/ioutils/readers.go | 170 - .../storage/pkg/ioutils/temp_unix.go | 10 - .../storage/pkg/ioutils/temp_windows.go | 18 - .../storage/pkg/ioutils/writeflusher.go | 92 - .../storage/pkg/ioutils/writers.go | 66 - .../storage/pkg/lockfile/lastwrite.go | 82 - .../storage/pkg/lockfile/lockfile.go | 450 -- .../storage/pkg/lockfile/lockfile_unix.go | 66 - .../storage/pkg/lockfile/lockfile_windows.go | 73 - .../storage/pkg/longpath/longpath.go | 26 - .../go.podman.io/storage/pkg/mount/flags.go | 149 - .../storage/pkg/mount/flags_freebsd.go | 48 - .../storage/pkg/mount/flags_linux.go | 87 - .../storage/pkg/mount/flags_unsupported.go | 31 - .../go.podman.io/storage/pkg/mount/mount.go | 110 - .../storage/pkg/mount/mounter_freebsd.go | 62 - .../storage/pkg/mount/mounter_linux.go | 74 - .../storage/pkg/mount/mounter_unsupported.go | 7 - .../storage/pkg/mount/mountinfo.go | 13 - .../storage/pkg/mount/mountinfo_linux.go | 18 - .../storage/pkg/mount/sharedsubtree_linux.go | 64 - .../storage/pkg/mount/unmount_unix.go | 34 - .../storage/pkg/mount/unmount_unsupported.go | 7 - .../go.podman.io/storage/pkg/reexec/README.md | 5 - .../storage/pkg/reexec/command_freebsd.go | 37 - .../storage/pkg/reexec/command_linux.go | 34 - .../storage/pkg/reexec/command_unix.go | 32 - .../storage/pkg/reexec/command_unsupported.go | 20 - .../storage/pkg/reexec/command_windows.go | 34 - .../go.podman.io/storage/pkg/reexec/reexec.go | 66 - .../go.podman.io/storage/pkg/regexp/regexp.go | 234 - .../pkg/regexp/regexp_dontprecompile.go | 5 - .../storage/pkg/regexp/regexp_precompile.go | 5 - .../go.podman.io/storage/pkg/system/chmod.go | 17 - .../storage/pkg/system/chtimes.go | 35 - .../storage/pkg/system/chtimes_unix.go | 14 - .../storage/pkg/system/chtimes_windows.go | 28 - .../go.podman.io/storage/pkg/system/errors.go | 8 - .../storage/pkg/system/exitcode.go | 33 - .../storage/pkg/system/extattr_freebsd.go | 93 - .../storage/pkg/system/extattr_unsupported.go | 24 - .../go.podman.io/storage/pkg/system/init.go | 22 - .../storage/pkg/system/init_windows.go | 16 - .../storage/pkg/system/lchflags_bsd.go | 55 - .../go.podman.io/storage/pkg/system/lchown.go | 20 - .../storage/pkg/system/lcow_unix.go | 8 - .../storage/pkg/system/lcow_windows.go | 6 - 
.../storage/pkg/system/lstat_unix.go | 20 - .../storage/pkg/system/lstat_windows.go | 14 - .../storage/pkg/system/meminfo.go | 17 - .../storage/pkg/system/meminfo_freebsd.go | 85 - .../storage/pkg/system/meminfo_linux.go | 65 - .../storage/pkg/system/meminfo_solaris.go | 129 - .../storage/pkg/system/meminfo_unsupported.go | 8 - .../storage/pkg/system/meminfo_windows.go | 46 - .../go.podman.io/storage/pkg/system/mknod.go | 22 - .../storage/pkg/system/mknod_freebsd.go | 22 - .../storage/pkg/system/mknod_windows.go | 13 - .../go.podman.io/storage/pkg/system/path.go | 20 - .../storage/pkg/system/path_unix.go | 9 - .../storage/pkg/system/path_windows.go | 33 - .../storage/pkg/system/process_unix.go | 24 - vendor/go.podman.io/storage/pkg/system/rm.go | 99 - .../storage/pkg/system/rm_common.go | 9 - .../storage/pkg/system/rm_freebsd.go | 17 - .../storage/pkg/system/stat_common.go | 11 - .../storage/pkg/system/stat_darwin.go | 15 - .../storage/pkg/system/stat_freebsd.go | 28 - .../storage/pkg/system/stat_linux.go | 22 - .../storage/pkg/system/stat_netbsd.go | 15 - .../storage/pkg/system/stat_openbsd.go | 15 - .../storage/pkg/system/stat_solaris.go | 15 - .../storage/pkg/system/stat_unix.go | 87 - .../storage/pkg/system/stat_windows.go | 74 - .../storage/pkg/system/syscall_unix.go | 26 - .../storage/pkg/system/syscall_windows.go | 127 - .../go.podman.io/storage/pkg/system/umask.go | 13 - .../storage/pkg/system/umask_windows.go | 9 - .../storage/pkg/system/utimes_freebsd.go | 25 - .../storage/pkg/system/utimes_linux.go | 25 - .../storage/pkg/system/utimes_unsupported.go | 10 - .../storage/pkg/system/xattrs_darwin.go | 84 - .../storage/pkg/system/xattrs_freebsd.go | 83 - .../storage/pkg/system/xattrs_linux.go | 87 - .../storage/pkg/system/xattrs_unsupported.go | 31 - .../storage/pkg/unshare/getenv_linux_cgo.go | 22 - .../storage/pkg/unshare/getenv_linux_nocgo.go | 11 - .../storage/pkg/unshare/unshare.c | 379 -- .../storage/pkg/unshare/unshare.go | 32 - .../storage/pkg/unshare/unshare_cgo.go | 10 - .../storage/pkg/unshare/unshare_darwin.go | 58 - .../storage/pkg/unshare/unshare_freebsd.c | 76 - .../storage/pkg/unshare/unshare_freebsd.go | 178 - .../storage/pkg/unshare/unshare_gccgo.go | 25 - .../storage/pkg/unshare/unshare_linux.go | 755 --- .../pkg/unshare/unshare_unsupported.go | 55 - .../pkg/unshare/unshare_unsupported_cgo.go | 10 - vendor/modules.txt | 155 +- 594 files changed, 53610 insertions(+), 39806 deletions(-) create mode 100644 internal/logging/ants.go create mode 100644 internal/logging/regclient.go create mode 100644 internal/logging/regclient_test.go delete mode 100644 pkg/registry/auth.go delete mode 100644 pkg/registry/auth_test.go create mode 100644 pkg/registry/platform.go create mode 100644 pkg/registry/platform_test.go delete mode 100644 pkg/registry/ref.go delete mode 100644 pkg/registry/ref_test.go delete mode 100644 vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md delete mode 100644 vendor/github.com/containers/libtrust/SECURITY.md delete mode 100644 vendor/github.com/containers/libtrust/ec_key_no_openssl.go delete mode 100644 vendor/github.com/containers/libtrust/ec_key_openssl.go delete mode 100644 vendor/github.com/containers/ocicrypt/LICENSE delete mode 100644 vendor/github.com/containers/ocicrypt/spec/spec.go delete mode 100644 vendor/github.com/docker/cli/AUTHORS delete mode 100644 vendor/github.com/docker/cli/LICENSE delete mode 100644 vendor/github.com/docker/cli/NOTICE delete mode 100644 vendor/github.com/docker/cli/cli/config/config.go delete mode 100644 
vendor/github.com/docker/cli/cli/config/configfile/file.go delete mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file_unix.go delete mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file_windows.go delete mode 100644 vendor/github.com/docker/cli/cli/config/credentials/credentials.go delete mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store.go delete mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go delete mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go delete mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go delete mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go delete mode 100644 vendor/github.com/docker/cli/cli/config/credentials/file_store.go delete mode 100644 vendor/github.com/docker/cli/cli/config/credentials/native_store.go delete mode 100644 vendor/github.com/docker/cli/cli/config/memorystore/store.go delete mode 100644 vendor/github.com/docker/cli/cli/config/types/authconfig.go delete mode 100644 vendor/github.com/docker/distribution/LICENSE delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/errors.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/handler.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/register.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/descriptors.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/doc.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/errors.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/headerparser.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/routes.go delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/urls.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/client/client.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/client/command.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/error.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/helper.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/version.go rename vendor/github.com/{containers => docker}/libtrust/CONTRIBUTING.md (100%) rename vendor/github.com/{containers => docker}/libtrust/LICENSE (100%) rename vendor/github.com/{containers => docker}/libtrust/MAINTAINERS (100%) rename vendor/github.com/{containers => docker}/libtrust/README.md (100%) rename vendor/github.com/{containers => docker}/libtrust/certificates.go (100%) rename vendor/github.com/{containers => docker}/libtrust/doc.go (100%) rename vendor/github.com/{containers => docker}/libtrust/ec_key.go (98%) rename vendor/github.com/{containers => docker}/libtrust/filter.go (100%) rename vendor/github.com/{containers => docker}/libtrust/hash.go (100%) rename vendor/github.com/{containers => docker}/libtrust/jsonsign.go (100%) rename vendor/github.com/{containers => docker}/libtrust/key.go (100%) rename vendor/github.com/{containers => docker}/libtrust/key_files.go (100%) rename vendor/github.com/{containers => docker}/libtrust/key_manager.go (100%) rename vendor/github.com/{containers => docker}/libtrust/rsa_key.go (100%) 
rename vendor/github.com/{containers => docker}/libtrust/util.go (100%) delete mode 100644 vendor/github.com/gorilla/mux/.editorconfig delete mode 100644 vendor/github.com/gorilla/mux/.gitignore delete mode 100644 vendor/github.com/gorilla/mux/Makefile delete mode 100644 vendor/github.com/gorilla/mux/README.md delete mode 100644 vendor/github.com/gorilla/mux/doc.go delete mode 100644 vendor/github.com/gorilla/mux/middleware.go delete mode 100644 vendor/github.com/gorilla/mux/mux.go delete mode 100644 vendor/github.com/gorilla/mux/regexp.go delete mode 100644 vendor/github.com/gorilla/mux/route.go delete mode 100644 vendor/github.com/gorilla/mux/test_helpers.go create mode 100644 vendor/github.com/klauspost/compress/.gitattributes create mode 100644 vendor/github.com/klauspost/compress/.gitignore create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml rename vendor/github.com/{moby/sys/user => klauspost/compress}/LICENSE (67%) create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s create mode 100644 vendor/github.com/klauspost/compress/internal/le/le.go create mode 100644 vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go create mode 100644 vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go rename vendor/github.com/{gorilla/mux => klauspost/compress/internal/snapref}/LICENSE (83%) create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go create mode 100644 
vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go rename vendor/github.com/{docker/docker-credential-helpers/LICENSE => klauspost/compress/zstd/internal/xxhash/LICENSE.txt} (64%) create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 
vendor/github.com/klauspost/compress/zstd/seqdec_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/simple_go124.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go delete mode 100644 vendor/github.com/moby/sys/capability/CHANGELOG.md delete mode 100644 vendor/github.com/moby/sys/capability/LICENSE delete mode 100644 vendor/github.com/moby/sys/capability/README.md delete mode 100644 vendor/github.com/moby/sys/capability/capability.go delete mode 100644 vendor/github.com/moby/sys/capability/capability_linux.go delete mode 100644 vendor/github.com/moby/sys/capability/capability_noop.go delete mode 100644 vendor/github.com/moby/sys/capability/enum.go delete mode 100644 vendor/github.com/moby/sys/capability/enum_gen.go delete mode 100644 vendor/github.com/moby/sys/capability/syscall_linux.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/LICENSE delete mode 100644 vendor/github.com/moby/sys/mountinfo/doc.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_linux.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_unix.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go delete mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go delete mode 100644 vendor/github.com/moby/sys/user/idtools.go delete mode 100644 vendor/github.com/moby/sys/user/idtools_unix.go delete mode 100644 vendor/github.com/moby/sys/user/idtools_windows.go delete mode 100644 vendor/github.com/moby/sys/user/lookup_unix.go delete mode 100644 vendor/github.com/moby/sys/user/user.go delete mode 100644 vendor/github.com/moby/sys/user/user_fuzzer.go delete mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/config.go delete mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/state.go delete mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/version.go create mode 100644 vendor/github.com/regclient/regclient/.dockerignore create mode 100644 vendor/github.com/regclient/regclient/.gitignore create mode 100644 vendor/github.com/regclient/regclient/.markdownlint.yml create mode 100644 vendor/github.com/regclient/regclient/.osv-scanner.toml create mode 100644 vendor/github.com/regclient/regclient/.version-bump.lock create mode 100644 vendor/github.com/regclient/regclient/.version-bump.yml create mode 100644 vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/regclient/regclient/CONTRIBUTING.md rename vendor/github.com/{opencontainers/runtime-spec => regclient/regclient}/LICENSE (99%) create mode 100644 vendor/github.com/regclient/regclient/Makefile create mode 100644 vendor/github.com/regclient/regclient/README.md create mode 100644 vendor/github.com/regclient/regclient/SECURITY.md create mode 100644 vendor/github.com/regclient/regclient/blob.go create mode 100644 
vendor/github.com/regclient/regclient/config/credhelper.go create mode 100644 vendor/github.com/regclient/regclient/config/docker.go create mode 100644 vendor/github.com/regclient/regclient/config/host.go create mode 100644 vendor/github.com/regclient/regclient/image.go create mode 100644 vendor/github.com/regclient/regclient/internal/auth/auth.go create mode 100644 vendor/github.com/regclient/regclient/internal/auth/error.go create mode 100644 vendor/github.com/regclient/regclient/internal/cache/cache.go create mode 100644 vendor/github.com/regclient/regclient/internal/conffile/conffile.go create mode 100644 vendor/github.com/regclient/regclient/internal/conffile/conffile_unix.go create mode 100644 vendor/github.com/regclient/regclient/internal/conffile/conffile_windows.go create mode 100644 vendor/github.com/regclient/regclient/internal/httplink/httplink.go create mode 100644 vendor/github.com/regclient/regclient/internal/limitread/limitread.go create mode 100644 vendor/github.com/regclient/regclient/internal/pqueue/pqueue.go create mode 100644 vendor/github.com/regclient/regclient/internal/reghttp/http.go create mode 100644 vendor/github.com/regclient/regclient/internal/reqmeta/data.go create mode 100644 vendor/github.com/regclient/regclient/internal/sloghandle/logrus.go create mode 100644 vendor/github.com/regclient/regclient/internal/strparse/strparse.go create mode 100644 vendor/github.com/regclient/regclient/internal/timejson/timejson.go create mode 100644 vendor/github.com/regclient/regclient/internal/units/size.go create mode 100644 vendor/github.com/regclient/regclient/internal/version/version.go create mode 100644 vendor/github.com/regclient/regclient/internal/version/version_buildinfo.go create mode 100644 vendor/github.com/regclient/regclient/internal/version/version_old.go create mode 100644 vendor/github.com/regclient/regclient/manifest.go create mode 100644 vendor/github.com/regclient/regclient/ping.go create mode 100644 vendor/github.com/regclient/regclient/pkg/archive/archive.go create mode 100644 vendor/github.com/regclient/regclient/pkg/archive/compress.go create mode 100644 vendor/github.com/regclient/regclient/pkg/archive/errors.go create mode 100644 vendor/github.com/regclient/regclient/pkg/archive/tar.go create mode 100644 vendor/github.com/regclient/regclient/referrer.go create mode 100644 vendor/github.com/regclient/regclient/regclient.go create mode 100644 vendor/github.com/regclient/regclient/regclient_nowasm.go create mode 100644 vendor/github.com/regclient/regclient/release.md create mode 100644 vendor/github.com/regclient/regclient/repo.go create mode 100644 vendor/github.com/regclient/regclient/scheme.go create mode 100644 vendor/github.com/regclient/regclient/scheme/ocidir/blob.go create mode 100644 vendor/github.com/regclient/regclient/scheme/ocidir/close.go create mode 100644 vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go create mode 100644 vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go create mode 100644 vendor/github.com/regclient/regclient/scheme/ocidir/ocidir_nowasm.go create mode 100644 vendor/github.com/regclient/regclient/scheme/ocidir/ping.go create mode 100644 vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go create mode 100644 vendor/github.com/regclient/regclient/scheme/ocidir/tag.go create mode 100644 vendor/github.com/regclient/regclient/scheme/reg/blob.go create mode 100644 vendor/github.com/regclient/regclient/scheme/reg/manifest.go create mode 100644 
vendor/github.com/regclient/regclient/scheme/reg/ping.go create mode 100644 vendor/github.com/regclient/regclient/scheme/reg/referrer.go create mode 100644 vendor/github.com/regclient/regclient/scheme/reg/reg.go create mode 100644 vendor/github.com/regclient/regclient/scheme/reg/reg_nowasm.go create mode 100644 vendor/github.com/regclient/regclient/scheme/reg/repo.go create mode 100644 vendor/github.com/regclient/regclient/scheme/reg/tag.go create mode 100644 vendor/github.com/regclient/regclient/scheme/scheme.go create mode 100644 vendor/github.com/regclient/regclient/tag.go create mode 100644 vendor/github.com/regclient/regclient/types/annotations.go create mode 100644 vendor/github.com/regclient/regclient/types/blob/blob.go create mode 100644 vendor/github.com/regclient/regclient/types/blob/common.go create mode 100644 vendor/github.com/regclient/regclient/types/blob/ociconfig.go create mode 100644 vendor/github.com/regclient/regclient/types/blob/reader.go create mode 100644 vendor/github.com/regclient/regclient/types/blob/tar.go create mode 100644 vendor/github.com/regclient/regclient/types/callback.go create mode 100644 vendor/github.com/regclient/regclient/types/descriptor.go create mode 100644 vendor/github.com/regclient/regclient/types/descriptor/descriptor.go create mode 100644 vendor/github.com/regclient/regclient/types/doc.go create mode 100644 vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go create mode 100644 vendor/github.com/regclient/regclient/types/docker/schema2/doc.go create mode 100644 vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go create mode 100644 vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go create mode 100644 vendor/github.com/regclient/regclient/types/docker/versioned.go create mode 100644 vendor/github.com/regclient/regclient/types/error.go create mode 100644 vendor/github.com/regclient/regclient/types/errs/error.go create mode 100644 vendor/github.com/regclient/regclient/types/manifest/common.go create mode 100644 vendor/github.com/regclient/regclient/types/manifest/docker1.go create mode 100644 vendor/github.com/regclient/regclient/types/manifest/docker2.go create mode 100644 vendor/github.com/regclient/regclient/types/manifest/manifest.go create mode 100644 vendor/github.com/regclient/regclient/types/manifest/oci1.go create mode 100644 vendor/github.com/regclient/regclient/types/mediatype.go create mode 100644 vendor/github.com/regclient/regclient/types/mediatype/mediatype.go create mode 100644 vendor/github.com/regclient/regclient/types/oci/doc.go create mode 100644 vendor/github.com/regclient/regclient/types/oci/v1/artifact.go create mode 100644 vendor/github.com/regclient/regclient/types/oci/v1/config.go create mode 100644 vendor/github.com/regclient/regclient/types/oci/v1/doc.go create mode 100644 vendor/github.com/regclient/regclient/types/oci/v1/index.go create mode 100644 vendor/github.com/regclient/regclient/types/oci/v1/layout.go create mode 100644 vendor/github.com/regclient/regclient/types/oci/v1/manifest.go create mode 100644 vendor/github.com/regclient/regclient/types/oci/version.go create mode 100644 vendor/github.com/regclient/regclient/types/ping/ping.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/compare.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/cpuinfo.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go create mode 100644 
vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s create mode 100644 vendor/github.com/regclient/regclient/types/platform/os_darwin.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/os_other.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/platform.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/platform_other.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/platform_windows.go create mode 100644 vendor/github.com/regclient/regclient/types/ratelimit.go create mode 100644 vendor/github.com/regclient/regclient/types/ref/ref.go create mode 100644 vendor/github.com/regclient/regclient/types/referrer/referrer.go create mode 100644 vendor/github.com/regclient/regclient/types/repo/repolist.go create mode 100644 vendor/github.com/regclient/regclient/types/slog.go create mode 100644 vendor/github.com/regclient/regclient/types/tag/gcrlist.go create mode 100644 vendor/github.com/regclient/regclient/types/tag/tag.go create mode 100644 vendor/github.com/regclient/regclient/types/tag/taglist.go create mode 100644 vendor/github.com/regclient/regclient/types/warning/warning.go create mode 100644 vendor/github.com/ulikunitz/xz/.gitignore create mode 100644 vendor/github.com/ulikunitz/xz/LICENSE create mode 100644 vendor/github.com/ulikunitz/xz/README.md create mode 100644 vendor/github.com/ulikunitz/xz/SECURITY.md create mode 100644 vendor/github.com/ulikunitz/xz/TODO.md create mode 100644 vendor/github.com/ulikunitz/xz/bits.go create mode 100644 vendor/github.com/ulikunitz/xz/crc.go create mode 100644 vendor/github.com/ulikunitz/xz/format.go create mode 100644 vendor/github.com/ulikunitz/xz/fox-check-none.xz create mode 100644 vendor/github.com/ulikunitz/xz/fox.xz create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/doc.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/roller.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bintree.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bitops.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/breader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/buffer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bytewriter.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/directcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/distcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/fox.lzma create mode 100644 vendor/github.com/ulikunitz/xz/lzma/hashtable.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/literalcodec.go create mode 100644 
vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/operation.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/prob.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/properties.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/rangecodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/state.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/treecodecs.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzmafilter.go create mode 100644 vendor/github.com/ulikunitz/xz/make-docs create mode 100644 vendor/github.com/ulikunitz/xz/none-check.go create mode 100644 vendor/github.com/ulikunitz/xz/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/writer.go delete mode 100644 vendor/go.podman.io/image/v5/LICENSE delete mode 100644 vendor/go.podman.io/image/v5/docker/body_reader.go delete mode 100644 vendor/go.podman.io/image/v5/docker/cache.go delete mode 100644 vendor/go.podman.io/image/v5/docker/distribution_error.go delete mode 100644 vendor/go.podman.io/image/v5/docker/docker_client.go delete mode 100644 vendor/go.podman.io/image/v5/docker/docker_image.go delete mode 100644 vendor/go.podman.io/image/v5/docker/docker_image_dest.go delete mode 100644 vendor/go.podman.io/image/v5/docker/docker_image_src.go delete mode 100644 vendor/go.podman.io/image/v5/docker/docker_transport.go delete mode 100644 vendor/go.podman.io/image/v5/docker/errors.go delete mode 100644 vendor/go.podman.io/image/v5/docker/paths_common.go delete mode 100644 vendor/go.podman.io/image/v5/docker/paths_freebsd.go delete mode 100644 vendor/go.podman.io/image/v5/docker/policyconfiguration/naming.go delete mode 100644 vendor/go.podman.io/image/v5/docker/reference/README.md delete mode 100644 vendor/go.podman.io/image/v5/docker/reference/helpers.go delete mode 100644 vendor/go.podman.io/image/v5/docker/reference/normalize.go delete mode 100644 vendor/go.podman.io/image/v5/docker/reference/reference.go delete mode 100644 vendor/go.podman.io/image/v5/docker/reference/regexp-additions.go delete mode 100644 vendor/go.podman.io/image/v5/docker/reference/regexp.go delete mode 100644 vendor/go.podman.io/image/v5/docker/registries_d.go delete mode 100644 vendor/go.podman.io/image/v5/docker/wwwauthenticate.go delete mode 100644 vendor/go.podman.io/image/v5/internal/blobinfocache/blobinfocache.go delete mode 100644 vendor/go.podman.io/image/v5/internal/blobinfocache/types.go delete mode 100644 vendor/go.podman.io/image/v5/internal/image/docker_list.go delete mode 100644 vendor/go.podman.io/image/v5/internal/image/docker_schema1.go delete mode 100644 vendor/go.podman.io/image/v5/internal/image/docker_schema2.go delete mode 100644 vendor/go.podman.io/image/v5/internal/image/manifest.go delete mode 100644 vendor/go.podman.io/image/v5/internal/image/memory.go delete mode 100644 vendor/go.podman.io/image/v5/internal/image/oci.go delete mode 100644 vendor/go.podman.io/image/v5/internal/image/oci_index.go delete mode 100644 vendor/go.podman.io/image/v5/internal/image/sourced.go delete mode 100644 vendor/go.podman.io/image/v5/internal/image/unparsed.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagedestination/impl/compat.go delete mode 100644 
vendor/go.podman.io/image/v5/internal/imagedestination/impl/helpers.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagedestination/impl/properties.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagedestination/stubs/original_oci_config.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagedestination/stubs/put_blob_partial.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagedestination/stubs/signatures.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagedestination/stubs/stubs.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagesource/impl/compat.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagesource/impl/layer_infos.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagesource/impl/properties.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagesource/impl/signatures.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagesource/stubs/get_blob_at.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagesource/stubs/stubs.go delete mode 100644 vendor/go.podman.io/image/v5/internal/imagesource/wrapper.go delete mode 100644 vendor/go.podman.io/image/v5/internal/iolimits/iolimits.go delete mode 100644 vendor/go.podman.io/image/v5/internal/manifest/common.go delete mode 100644 vendor/go.podman.io/image/v5/internal/manifest/docker_schema2.go delete mode 100644 vendor/go.podman.io/image/v5/internal/manifest/docker_schema2_list.go delete mode 100644 vendor/go.podman.io/image/v5/internal/manifest/errors.go delete mode 100644 vendor/go.podman.io/image/v5/internal/manifest/list.go delete mode 100644 vendor/go.podman.io/image/v5/internal/manifest/manifest.go delete mode 100644 vendor/go.podman.io/image/v5/internal/manifest/oci_index.go delete mode 100644 vendor/go.podman.io/image/v5/internal/multierr/multierr.go delete mode 100644 vendor/go.podman.io/image/v5/internal/pkg/platform/platform_matcher.go delete mode 100644 vendor/go.podman.io/image/v5/internal/private/private.go delete mode 100644 vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go delete mode 100644 vendor/go.podman.io/image/v5/internal/rootless/rootless.go delete mode 100644 vendor/go.podman.io/image/v5/internal/set/set.go delete mode 100644 vendor/go.podman.io/image/v5/internal/signature/signature.go delete mode 100644 vendor/go.podman.io/image/v5/internal/signature/sigstore.go delete mode 100644 vendor/go.podman.io/image/v5/internal/signature/simple.go delete mode 100644 vendor/go.podman.io/image/v5/internal/streamdigest/stream_digest.go delete mode 100644 vendor/go.podman.io/image/v5/internal/tmpdir/tmpdir.go delete mode 100644 vendor/go.podman.io/image/v5/internal/uploadreader/upload_reader.go delete mode 100644 vendor/go.podman.io/image/v5/internal/useragent/useragent.go delete mode 100644 vendor/go.podman.io/image/v5/manifest/common.go delete mode 100644 vendor/go.podman.io/image/v5/manifest/docker_schema1.go delete mode 100644 vendor/go.podman.io/image/v5/manifest/docker_schema2.go delete mode 100644 vendor/go.podman.io/image/v5/manifest/docker_schema2_list.go delete mode 100644 vendor/go.podman.io/image/v5/manifest/list.go delete mode 100644 vendor/go.podman.io/image/v5/manifest/manifest.go delete mode 100644 vendor/go.podman.io/image/v5/manifest/oci.go delete mode 100644 vendor/go.podman.io/image/v5/manifest/oci_index.go delete mode 100644 vendor/go.podman.io/image/v5/pkg/blobinfocache/none/none.go delete mode 100644 
vendor/go.podman.io/image/v5/pkg/compression/internal/types.go delete mode 100644 vendor/go.podman.io/image/v5/pkg/compression/types/types.go delete mode 100644 vendor/go.podman.io/image/v5/pkg/docker/config/config.go delete mode 100644 vendor/go.podman.io/image/v5/pkg/strslice/README.md delete mode 100644 vendor/go.podman.io/image/v5/pkg/strslice/strslice.go delete mode 100644 vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_common.go delete mode 100644 vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_freebsd.go delete mode 100644 vendor/go.podman.io/image/v5/pkg/sysregistriesv2/shortnames.go delete mode 100644 vendor/go.podman.io/image/v5/pkg/sysregistriesv2/system_registries_v2.go delete mode 100644 vendor/go.podman.io/image/v5/pkg/tlsclientconfig/tlsclientconfig.go delete mode 100644 vendor/go.podman.io/image/v5/transports/stub.go delete mode 100644 vendor/go.podman.io/image/v5/transports/transports.go delete mode 100644 vendor/go.podman.io/image/v5/types/types.go delete mode 100644 vendor/go.podman.io/image/v5/version/version.go delete mode 100644 vendor/go.podman.io/storage/AUTHORS delete mode 100644 vendor/go.podman.io/storage/LICENSE delete mode 100644 vendor/go.podman.io/storage/NOTICE delete mode 100644 vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock.go delete mode 100644 vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_unix.go delete mode 100644 vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/fileutils/exists_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/fileutils/exists_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/fileutils/exists_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/fileutils/fileutils.go delete mode 100644 vendor/go.podman.io/storage/pkg/fileutils/fileutils_darwin.go delete mode 100644 vendor/go.podman.io/storage/pkg/fileutils/fileutils_solaris.go delete mode 100644 vendor/go.podman.io/storage/pkg/fileutils/fileutils_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/fileutils/fileutils_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/fileutils/reflink_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/fileutils/reflink_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/homedir/homedir.go delete mode 100644 vendor/go.podman.io/storage/pkg/homedir/homedir_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/homedir/homedir_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/idtools/idtools.go delete mode 100644 vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go delete mode 100644 vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/idtools/idtools_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/idtools/idtools_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/idtools/parser.go delete mode 100644 vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/idtools/usergroupadd_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/idtools/utils_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/ioutils/buffer.go delete mode 100644 vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go delete mode 100644 vendor/go.podman.io/storage/pkg/ioutils/fswriters.go delete mode 100644 vendor/go.podman.io/storage/pkg/ioutils/fswriters_linux.go delete mode 100644 
vendor/go.podman.io/storage/pkg/ioutils/fswriters_other.go delete mode 100644 vendor/go.podman.io/storage/pkg/ioutils/readers.go delete mode 100644 vendor/go.podman.io/storage/pkg/ioutils/temp_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/ioutils/temp_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/ioutils/writeflusher.go delete mode 100644 vendor/go.podman.io/storage/pkg/ioutils/writers.go delete mode 100644 vendor/go.podman.io/storage/pkg/lockfile/lastwrite.go delete mode 100644 vendor/go.podman.io/storage/pkg/lockfile/lockfile.go delete mode 100644 vendor/go.podman.io/storage/pkg/lockfile/lockfile_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/lockfile/lockfile_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/longpath/longpath.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/flags.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/flags_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/flags_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/flags_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/mount.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/mounter_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/mounter_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/mounter_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/mountinfo.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/mountinfo_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/sharedsubtree_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/unmount_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/mount/unmount_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/reexec/README.md delete mode 100644 vendor/go.podman.io/storage/pkg/reexec/command_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/reexec/command_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/reexec/command_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/reexec/command_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/reexec/command_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/reexec/reexec.go delete mode 100644 vendor/go.podman.io/storage/pkg/regexp/regexp.go delete mode 100644 vendor/go.podman.io/storage/pkg/regexp/regexp_dontprecompile.go delete mode 100644 vendor/go.podman.io/storage/pkg/regexp/regexp_precompile.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/chmod.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/chtimes.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/chtimes_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/chtimes_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/errors.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/exitcode.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/extattr_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/extattr_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/init.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/init_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/lchflags_bsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/lchown.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/lcow_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/lcow_windows.go delete mode 100644 
vendor/go.podman.io/storage/pkg/system/lstat_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/lstat_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/meminfo.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/meminfo_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/meminfo_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/meminfo_solaris.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/meminfo_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/meminfo_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/mknod.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/mknod_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/mknod_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/path.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/path_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/path_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/process_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/rm.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/rm_common.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/rm_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/stat_common.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/stat_darwin.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/stat_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/stat_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/stat_netbsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/stat_openbsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/stat_solaris.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/stat_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/stat_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/syscall_unix.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/syscall_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/umask.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/umask_windows.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/utimes_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/utimes_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/utimes_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/xattrs_darwin.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/xattrs_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/xattrs_linux.go delete mode 100644 vendor/go.podman.io/storage/pkg/system/xattrs_unsupported.go delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/getenv_linux_cgo.go delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/getenv_linux_nocgo.go delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/unshare.c delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/unshare.go delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/unshare_cgo.go delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/unshare_darwin.go delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.c delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.go delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/unshare_gccgo.go delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/unshare_linux.go delete mode 100644 
vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported.go
delete mode 100644 vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported_cgo.go
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index 1d27fb224..38fde9668 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -37,6 +37,8 @@ jobs:
       fail-fast: false
       matrix:
         include:
+          - folder: docker1
+            loglevel: trace
           - folder: docker1
             loglevel: debug
           - folder: docker2
diff --git a/go.mod b/go.mod
index 4c70fd24a..6475d14f2 100644
--- a/go.mod
+++ b/go.mod
@@ -13,7 +13,6 @@ require (
 	github.com/crazy-max/gohealthchecks v0.6.0
 	github.com/crazy-max/gonfig v0.7.1
 	github.com/distribution/reference v0.6.0
-	github.com/docker/cli v29.1.4+incompatible
 	github.com/docker/docker v28.5.2+incompatible
 	github.com/docker/go-connections v0.6.0
 	github.com/docker/go-units v0.5.0
@@ -33,13 +32,12 @@
 	github.com/pkg/errors v0.9.1
 	github.com/pkg/profile v1.7.0
 	github.com/rabbitmq/amqp091-go v1.10.0
+	github.com/regclient/regclient v0.11.2
 	github.com/rs/zerolog v1.34.0
 	github.com/russross/blackfriday/v2 v2.1.0
-	github.com/sirupsen/logrus v1.9.4
 	github.com/stretchr/testify v1.11.1
 	github.com/tidwall/pretty v1.2.1
 	go.etcd.io/bbolt v1.4.3
-	go.podman.io/image/v5 v5.39.1
 	golang.org/x/mod v0.33.0
 	golang.org/x/sys v0.42.0
 	google.golang.org/grpc v1.78.0
@@ -53,7 +51,6 @@
 
 require (
 	filippo.io/edwards25519 v1.1.1 // indirect
-	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/BurntSushi/toml v1.5.0 // indirect
 	github.com/Masterminds/semver v1.5.0 // indirect
 	github.com/Masterminds/sprig v2.16.0+incompatible // indirect
@@ -67,11 +64,9 @@ require (
 	github.com/containerd/errdefs/pkg v0.3.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/typeurl/v2 v2.2.3 // indirect
-	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/ocicrypt v1.2.1 // indirect
+	github.com/creack/pty v1.1.18 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
-	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.9.5 // indirect
+	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
 	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
 	github.com/felixge/fgprof v0.9.5 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -100,7 +95,6 @@ require (
 	github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/css v1.0.1 // indirect
-	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
 	github.com/hashicorp/cronexpr v1.1.3 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -112,6 +106,7 @@ require (
 	github.com/jaytaylor/html2text v0.0.0-20180606194806-57d518f124b0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
+	github.com/klauspost/compress v1.18.4 // indirect
 	github.com/leodido/go-urn v1.4.0 // indirect
 	github.com/mattn/go-colorable v0.1.14 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
@@ -121,23 +116,22 @@ require (
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/sys/atomicwriter v0.1.0 // indirect
-	github.com/moby/sys/capability v0.4.0 // indirect
-	github.com/moby/sys/mountinfo v0.7.2 // indirect
-	github.com/moby/sys/user v0.4.0 // indirect
+	github.com/moby/term v0.5.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/olekukonko/tablewriter v0.0.1 // indirect
-	github.com/opencontainers/runtime-spec v1.3.0 // indirect
 	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
-	github.com/spf13/pflag v1.0.9 // indirect
+	github.com/sirupsen/logrus v1.9.4 // indirect
+	github.com/spf13/pflag v1.0.10 // indirect
 	github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf // indirect
 	github.com/tidwall/gjson v1.18.0 // indirect
 	github.com/tidwall/match v1.2.0 // indirect
 	github.com/tidwall/sjson v1.2.5 // indirect
 	github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 // indirect
+	github.com/ulikunitz/xz v0.5.15 // indirect
 	github.com/vanng822/css v0.0.0-20190504095207-a21e860bcd04 // indirect
 	github.com/vanng822/go-premailer v0.0.0-20191214114701-be27abe028fe // indirect
 	github.com/x448/float16 v0.8.4 // indirect
@@ -147,7 +141,6 @@ require (
 	go.opentelemetry.io/otel v1.38.0 // indirect
 	go.opentelemetry.io/otel/metric v1.38.0 // indirect
 	go.opentelemetry.io/otel/trace v1.38.0 // indirect
-	go.podman.io/storage v1.62.0 // indirect
 	go.yaml.in/yaml/v2 v2.4.3 // indirect
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/crypto v0.48.0 // indirect
@@ -165,6 +158,7 @@ require (
 	gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
+	gotest.tools/v3 v3.5.2 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
 	k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
@@ -178,5 +172,3 @@ tool (
 	google.golang.org/grpc/cmd/protoc-gen-go-grpc
 	google.golang.org/protobuf/cmd/protoc-gen-go
 )
-
-replace google.golang.org/genproto => google.golang.org/genproto v0.0.0-20250324211829-b45e905df463 // https://github.com/crazy-max/diun/pull/1445#issuecomment-3148492660
diff --git a/go.sum b/go.sum
index 714755944..ac7991022 100644
--- a/go.sum
+++ b/go.sum
@@ -4,8 +4,8 @@ filippo.io/edwards25519 v1.1.1 h1:YpjwWWlNmGIDyXOn8zLzqiD+9TyIlPhGFG96P39uBpw=
 filippo.io/edwards25519 v1.1.1/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
 github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
 github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
 github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
@@ -40,14 +40,10 @@ github.com/aokoli/goutils v1.0.1
h1:7fpzNGoJ3VA8qcrm++XEE1QUe0mIwNeLa02Nwq7RDkg= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= @@ -67,10 +63,6 @@ github.com/containerd/platforms v1.0.0-rc.2 h1:0SPgaNZPVWGEi4grZdV8VRYQn78y+nm6a github.com/containerd/platforms v1.0.0-rc.2/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= -github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= -github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= -github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/crazy-max/cron/v3 v3.1.1 h1:8tsAXjV522daYSaN6/Mb/Nh8X/Ez+nedU0KuPV98rNU= github.com/crazy-max/cron/v3 v3.1.1/go.mod h1:yexi3kKoh3GcnmRCppyJKsvYIBWzDVaym0dwNWo+zdg= @@ -78,28 +70,23 @@ github.com/crazy-max/gohealthchecks v0.6.0 h1:mlYlrYLmwFJJh4Lebw7QXWHo//xgYkw+/X github.com/crazy-max/gohealthchecks v0.6.0/go.mod h1:LlA3nnu+LmJLKG868RcQePFcdsZsSZ2Y/3YlcWUSxGc= github.com/crazy-max/gonfig v0.7.1 h1:cT+Wj7syVnsxmjl+u+Fs/cwZEcorHdGdHgcp3UZNWDE= github.com/crazy-max/gonfig v0.7.1/go.mod h1:csPFrGh/m0nIamCJbah1ZN2/+5s510nQQ7szHsk8HZ0= -github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 
h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v29.1.4+incompatible h1:AI8fwZhqsAsrqZnVv9h6lbexeW/LzNTasf6A4vcNN8M= -github.com/docker/cli v29.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= -github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.9.5 h1:EFNN8DHvaiK8zVqFA2DT6BjXE0GzfLOZ38ggPTKePkY= -github.com/docker/docker-credential-helpers v0.9.5/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= -github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= -github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/dromara/carbon/v2 v2.6.16 h1:AbxrnW1kJhR3KHdS8G96NFmxDwPFyre+t+xSiJIUD1I= github.com/dromara/carbon/v2 v2.6.16/go.mod h1:NGo3reeV5vhWCYWcSqbJRZm46MEwyfYI5EJRdVFoLJo= github.com/eclipse/paho.mqtt.golang v1.5.1 h1:/VSOv3oDLlpqR2Epjn1Q7b2bSTplJIeV2ISgCl2W7nE= @@ -189,8 +176,6 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= @@ -230,10 +215,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw= -github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= -github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= -github.com/klauspost/pgzip v1.2.6/go.mod 
h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= +github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -273,14 +256,8 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= -github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk= -github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= -github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= -github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= -github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= -github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -295,6 +272,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nlopes/slack v0.6.0 h1:jt0jxVQGhssx1Ib7naAOZEZcGdtIhTzkP0nopK0AsRA= github.com/nlopes/slack v0.6.0/go.mod h1:JzQ9m3PMAqcpeCam7UaHSuBuupz7CmpjehYMayT6YOk= +github.com/olareg/olareg v0.1.2 h1:75G8X6E9FUlzL/CSjgFcYfMgNzlc7CxULpUUNsZBIvI= +github.com/olareg/olareg v0.1.2/go.mod h1:TWs+N6pO1S4bdB6eerzUm/ITRQ6kw91mVf9ZYeGtw+Y= github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= @@ -305,8 +284,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/opencontainers/runtime-spec v1.3.0 h1:YZupQUdctfhpZy3TM39nN9Ika5CBWT5diQ8ibYCRkxg= -github.com/opencontainers/runtime-spec v1.3.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/panjf2000/ants/v2 v2.11.5 h1:a7LMnMEeux/ebqTux140tRiaqcFTV0q2bEHF03nl6Rg= github.com/panjf2000/ants/v2 v2.11.5/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek= @@ 
-320,16 +297,10 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= +github.com/regclient/regclient v0.11.2 h1:BMBxbXpJkia8CPnGTbJoQnt980NDh9dKNFxX57ah1/Q= +github.com/regclient/regclient v0.11.2/go.mod h1:AWbO1F0DJGP7MNlwmDHjYbgOEftZsTB0N0AXT6pN2C4= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -346,8 +317,8 @@ github.com/shoenig/test v1.12.2/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczs github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= -github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= -github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf h1:pvbZ0lM0XWPBqUKqFU8cmavspvIl9nulOYwdy6IFRRo= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -380,8 +351,6 @@ github.com/vanng822/css v0.0.0-20190504095207-a21e860bcd04 h1:L0rPdfzq43+NV8rfIx github.com/vanng822/css v0.0.0-20190504095207-a21e860bcd04/go.mod h1:tcnB1voG49QhCrwq1W0w5hhGasvOg+VQp9i9H1rCM1w= github.com/vanng822/go-premailer v0.0.0-20191214114701-be27abe028fe h1:9YnI5plmy+ad6BM+JCLJb2ZV7/TNiE5l7SNKfumYKgc= github.com/vanng822/go-premailer v0.0.0-20191214114701-be27abe028fe/go.mod h1:JTFJA/t820uFDoyPpErFQ3rb3amdZoPtxcKervG0OE4= -github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= -github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod 
h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -411,10 +380,6 @@ go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJr go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= -go.podman.io/image/v5 v5.39.1 h1:loIw4qHzZzBlUguYZau40u8HbR5MrTPQhwT4Hy6sCm0= -go.podman.io/image/v5 v5.39.1/go.mod h1:SlaR6Pra1ATIx4BcuZ16oafb3QcCHISaKcJbtlN/G/0= -go.podman.io/storage v1.62.0 h1:0QjX1XlzVmbiaulb+aR/CG6p9+pzaqwIeZPe3tEjHbY= -go.podman.io/storage v1.62.0/go.mod h1:A3UBK0XypjNZ6pghRhuxg62+2NIm5lcUGv/7XyMhMUI= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= @@ -467,7 +432,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -510,7 +474,6 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/genproto v0.0.0-20250324211829-b45e905df463 h1:qEFnJI6AnfZk0NNe8YTyXQh5i//Zxi4gBHwRgp76qpw= google.golang.org/genproto/googleapis/api v0.0.0-20251103181224-f26f9409b101 h1:vk5TfqZHNn0obhPIYeS+cxIFKFQgser/M2jnI+9c6MM= google.golang.org/genproto/googleapis/api v0.0.0-20251103181224-f26f9409b101/go.mod h1:E17fc4PDhkr22dE3RgnH2hEubUaky6ZwW4VhANxyspg= google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= diff --git a/internal/app/job.go b/internal/app/job.go index 78bcab370..5dd030f81 100644 --- a/internal/app/job.go +++ b/internal/app/job.go @@ -5,10 +5,11 @@ import ( "regexp" "dario.cat/mergo" + "github.com/crazy-max/diun/v4/internal/logging" "github.com/crazy-max/diun/v4/internal/model" "github.com/crazy-max/diun/v4/pkg/registry" "github.com/crazy-max/diun/v4/pkg/utl" - dockerregistry "github.com/docker/docker/api/types/registry" + regconfig "github.com/regclient/regclient/config" "github.com/rs/zerolog/log" ) @@ -83,33 +84,32 @@ func (di *Diun) createJob(job model.Job) { } } - var auth dockerregistry.AuthConfig + host := regconfig.HostNewName(job.RegImage.Domain) + if host.Name == regconfig.DockerRegistry { + host.ReqConcurrent = 1 + } + if *reg.InsecureTLS { 
+ host.TLS = regconfig.TLSInsecure + } if len(regUser) > 0 { - auth = dockerregistry.AuthConfig{ - Username: regUser, - Password: regPassword, - } - } else { - auth, err = registry.LookupAuth(job.RegImage.Domain) - if err != nil { - sublog.Warn().Err(err).Msg("Error seeking Docker credentials") - } + host.User = regUser + host.Pass = regPassword } - job.Registry, err = registry.New(registry.Options{ - Auth: auth, + platform, err := registry.TargetPlatform(job.Image.Platform.OS, job.Image.Platform.Arch, job.Image.Platform.Variant) + if err != nil { + sublog.Error().Err(err).Msg("Cannot parse image platform") + return + } + + job.Registry = registry.New(registry.Options{ + Host: host, + Platform: platform, + Logger: logging.NewRegclientLogger(sublog), Timeout: *reg.Timeout, - InsecureTLS: *reg.InsecureTLS, UserAgent: di.meta.UserAgent, CompareDigest: *di.cfg.Watch.CompareDigest, - ImageOs: job.Image.Platform.OS, - ImageArch: job.Image.Platform.Arch, - ImageVariant: job.Image.Platform.Variant, }) - if err != nil { - sublog.Error().Err(err).Msg("Cannot create registry client") - return - } di.wg.Add(1) err = di.pool.Invoke(job) diff --git a/internal/grpc/notif.go b/internal/grpc/notif.go index bdef1df7a..ef20c1bdf 100644 --- a/internal/grpc/notif.go +++ b/internal/grpc/notif.go @@ -23,12 +23,11 @@ func (c *Client) NotifTest(_ context.Context, _ *pb.NotifTestRequest) (*pb.Notif Provider: "file", Image: image, Manifest: registry.Manifest{ - Name: "diun/testnotif", - Tag: "latest", - MIMEType: "application/vnd.docker.distribution.manifest.list.v2+json", - Digest: "sha256:216e3ae7de4ca8b553eb11ef7abda00651e79e537e85c46108284e5e91673e01", - Created: &createdAt, - DockerVersion: "", + Name: "diun/testnotif", + Tag: "latest", + MIMEType: "application/vnd.docker.distribution.manifest.list.v2+json", + Digest: "sha256:216e3ae7de4ca8b553eb11ef7abda00651e79e537e85c46108284e5e91673e01", + Created: &createdAt, Labels: map[string]string{ "maintainer": "CrazyMax", "org.label-schema.build-date": "2020-03-26T12:23:56Z", diff --git a/internal/logging/ants.go b/internal/logging/ants.go new file mode 100644 index 000000000..ae1760684 --- /dev/null +++ b/internal/logging/ants.go @@ -0,0 +1,13 @@ +package logging + +import ( + "github.com/rs/zerolog/log" +) + +// AntsLogger is a logger for ants module +type AntsLogger struct{} + +// Printf must have the same semantics as log.Printf +func (w *AntsLogger) Printf(format string, args ...interface{}) { + log.Debug().Msgf(format, args...) 
+}
diff --git a/internal/logging/logger.go b/internal/logging/logger.go
index 686e43e05..c109ff57e 100644
--- a/internal/logging/logger.go
+++ b/internal/logging/logger.go
@@ -1,14 +1,12 @@
 package logging
 
 import (
-	"fmt"
 	"io"
 	"os"
 	"time"
 
 	"github.com/rs/zerolog"
 	"github.com/rs/zerolog/log"
-	"github.com/sirupsen/logrus"
 )
 
 type Options struct {
@@ -49,39 +47,4 @@ func Configure(opts Options) {
 	} else {
 		zerolog.SetGlobalLevel(logLevel)
 	}
-
-	logrusLevel, err := logrus.ParseLevel(opts.LogLevel)
-	if err != nil {
-		log.Fatal().Err(err).Msgf("Unknown log level")
-	} else {
-		logrus.SetLevel(logrusLevel)
-	}
-	logrus.SetFormatter(new(LogrusFormatter))
-}
-
-// LogrusFormatter is a logrus formatter
-type LogrusFormatter struct{}
-
-// Format renders a single log entry from logrus entry to zerolog
-func (f *LogrusFormatter) Format(entry *logrus.Entry) ([]byte, error) {
-	message := fmt.Sprintf("[containers/image] %s", entry.Message)
-	switch entry.Level {
-	case logrus.ErrorLevel:
-		log.Error().Fields(entry.Data).Msg(message)
-	case logrus.WarnLevel:
-		log.Warn().Fields(entry.Data).Msg(message)
-	case logrus.DebugLevel:
-		log.Debug().Fields(entry.Data).Msg(message)
-	default:
-		log.Info().Fields(entry.Data).Msg(message)
-	}
-	return nil, nil
-}
-
-// AntsLogger is a logger for ants module
-type AntsLogger struct{}
-
-// Printf must have the same semantics as log.Printf
-func (w *AntsLogger) Printf(format string, args ...interface{}) {
-	log.Debug().Msgf(format, args...)
 }
diff --git a/internal/logging/regclient.go b/internal/logging/regclient.go
new file mode 100644
index 000000000..83fcd6079
--- /dev/null
+++ b/internal/logging/regclient.go
@@ -0,0 +1,223 @@
+package logging
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+
+	regtypes "github.com/regclient/regclient/types"
+	"github.com/rs/zerolog"
+)
+
+func NewRegclientLogger(logger zerolog.Logger) *slog.Logger {
+	return slog.New(&regclientHandler{logger: logger})
+}
+
+type regclientHandler struct {
+	logger zerolog.Logger
+	attrs  []slog.Attr
+	group  string
+}
+
+func (h *regclientHandler) Enabled(_ context.Context, level slog.Level) bool {
+	current := h.effectiveLevel()
+	return regclientLevel(current, level) >= current
+}
+
+func (h *regclientHandler) Handle(_ context.Context, record slog.Record) error {
+	if h.skipMessage(record.Message) {
+		return nil
+	}
+	eventLogger, current := h.eventLogger()
+	level := regclientLevel(current, record.Level)
+	event := eventLogger.WithLevel(level)
+	if event == nil {
+		return nil
+	}
+	if !record.Time.IsZero() {
+		event.Time(zerolog.TimestampFieldName, record.Time)
+	}
+
+	fields := h.fields(record)
+	message := "[regclient] " + record.Message
+	if record.Message == "reg http request" {
+		message = h.httpRequestMessage(fields)
+	}
+
+	includeHeaders := current <= zerolog.TraceLevel
+	for key, value := range fields {
+		if !includeHeaders && (key == "req-headers" || key == "resp-headers") {
+			continue
+		}
+		event.Interface(key, value)
+	}
+	event.Msg(message)
+	return nil
+}
+
+func (h *regclientHandler) skipMessage(message string) bool {
+	switch message {
+	case "regclient initialized", "Auth request parsed", "Sleeping for backoff":
+		return true
+	default:
+		return false
+	}
+}
+
+func (h *regclientHandler) effectiveLevel() zerolog.Level {
+	level := h.logger.GetLevel()
+	if level == zerolog.NoLevel || zerolog.GlobalLevel() > level {
+		return zerolog.GlobalLevel()
+	}
+	return level
+}
+
+func (h *regclientHandler) eventLogger() (zerolog.Logger, zerolog.Level) {
+	level := h.effectiveLevel()
+	if h.logger.GetLevel() != level {
+		return h.logger.Level(level), level
+	}
+	return h.logger, level
+}
+
+func (h *regclientHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	ret := h.clone()
+	prefix := ret.group
+	if prefix != "" {
+		prefix += ":"
+	}
+	for _, attr := range attrs {
+		if attr.Key == "" {
+			continue
+		}
+		ret.attrs = append(ret.attrs, slog.Attr{
+			Key:   prefix + attr.Key,
+			Value: attr.Value,
+		})
+	}
+	return ret
+}
+
+func (h *regclientHandler) WithGroup(name string) slog.Handler {
+	if name == "" {
+		return h
+	}
+	ret := h.clone()
+	if ret.group == "" {
+		ret.group = name
+	} else {
+		ret.group += ":" + name
+	}
+	return ret
+}
+
+func (h *regclientHandler) clone() *regclientHandler {
+	attrs := make([]slog.Attr, len(h.attrs))
+	copy(attrs, h.attrs)
+	return &regclientHandler{
+		logger: h.logger,
+		attrs:  attrs,
+		group:  h.group,
+	}
+}
+
+func (h *regclientHandler) fields(record slog.Record) map[string]any {
+	fields := make(map[string]any, len(h.attrs))
+	for _, attr := range h.attrs {
+		appendAttr(fields, attr, "")
+	}
+	record.Attrs(func(attr slog.Attr) bool {
+		appendAttr(fields, attr, "")
+		return true
+	})
+	return fields
+}
+
+func appendAttr(fields map[string]any, attr slog.Attr, prefix string) {
+	if attr.Key == "" {
+		return
+	}
+	key := prefix + attr.Key
+	if attr.Value.Kind() == slog.KindGroup {
+		nextPrefix := key + ":"
+		for _, groupAttr := range attr.Value.Group() {
+			appendAttr(fields, groupAttr, nextPrefix)
+		}
+		return
+	}
+	fields[key] = attrValue(attr.Value)
+}
+
+func attrValue(value slog.Value) any {
+	switch value.Kind() {
+	case slog.KindString:
+		return value.String()
+	case slog.KindInt64:
+		return value.Int64()
+	case slog.KindUint64:
+		return value.Uint64()
+	case slog.KindFloat64:
+		return value.Float64()
+	case slog.KindBool:
+		return value.Bool()
+	case slog.KindDuration:
+		return value.Duration()
+	case slog.KindTime:
+		return value.Time()
+	case slog.KindAny:
+		return value.Any()
+	default:
+		return value.Any()
+	}
+}
+
+func (h *regclientHandler) httpRequestMessage(fields map[string]any) string {
+	method, _ := fields["req-method"].(string)
+	url, _ := fields["req-url"].(string)
+	status, hasStatus := fields["resp-status"]
+	errValue, hasErr := fields["err"]
+
+	delete(fields, "req-method")
+	delete(fields, "req-url")
+	delete(fields, "resp-status")
+	delete(fields, "err")
+
+	parts := []string{"[regclient]"}
+	if method != "" {
+		parts = append(parts, method)
+	}
+	if url != "" {
+		parts = append(parts, url)
+	}
+	if hasStatus {
+		parts = append(parts, fmt.Sprintf("status=%v", status))
+	}
+	if hasErr {
+		parts = append(parts, fmt.Sprintf("err=%v", errValue))
+	}
+	message := parts[0]
+	for _, part := range parts[1:] {
+		message += " " + part
+	}
+	return message
+}
+
+func regclientLevel(current zerolog.Level, level slog.Level) zerolog.Level {
+	switch {
+	case level <= regtypes.LevelTrace:
+		if current <= zerolog.TraceLevel {
+			return zerolog.TraceLevel
+		}
+		return zerolog.DebugLevel
+	case level <= slog.LevelDebug:
+		return zerolog.DebugLevel
+	case level <= slog.LevelInfo:
+		return zerolog.InfoLevel
+	case level <= slog.LevelWarn:
+		return zerolog.WarnLevel
+	default:
+		return zerolog.ErrorLevel
+	}
+}
+
+var _ slog.Handler = (*regclientHandler)(nil)
diff --git a/internal/logging/regclient_test.go b/internal/logging/regclient_test.go
new file mode 100644
index 000000000..c0fb168f1
--- /dev/null
+++ b/internal/logging/regclient_test.go
@@ -0,0 +1,81 @@
+package logging
+
+import (
+	"bytes"
+	"context"
+	"log/slog"
+	"net/http"
+	"strings"
+	"testing"
+
+	regtypes "github.com/regclient/regclient/types"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+)
+
+func TestRegclientLoggerFormatsHTTPRequests(t *testing.T) {
+	var buf bytes.Buffer
+	logger := zerolog.New(&buf).Level(zerolog.DebugLevel)
+	reglogger := NewRegclientLogger(logger)
+
+	reglogger.LogAttrs(context.Background(), regtypes.LevelTrace, "reg http request",
+		slog.String("req-method", "GET"),
+		slog.String("req-url", "https://registry-1.docker.io/v2/"),
+		slog.String("resp-status", "401 Unauthorized"),
+		slog.Any("req-headers", http.Header{"Authorization": []string{"[censored]"}}),
+	)
+
+	output := buf.String()
+	require.Contains(t, output, "[regclient] GET https://registry-1.docker.io/v2/ status=401 Unauthorized")
+	require.NotContains(t, output, "Authorization")
+}
+
+func TestRegclientLoggerIncludesHeadersAtTrace(t *testing.T) {
+	var buf bytes.Buffer
+	logger := zerolog.New(&buf).Level(zerolog.TraceLevel)
+	reglogger := NewRegclientLogger(logger)
+
+	reglogger.LogAttrs(context.Background(), regtypes.LevelTrace, "reg http request",
+		slog.String("req-method", "GET"),
+		slog.String("req-url", "https://registry-1.docker.io/v2/"),
+		slog.String("resp-status", "401 Unauthorized"),
+		slog.Any("req-headers", http.Header{"Authorization": []string{"[censored]"}}),
+	)
+
+	output := buf.String()
+	require.Contains(t, output, "[regclient] GET https://registry-1.docker.io/v2/ status=401 Unauthorized")
+	require.True(t, strings.Contains(output, "Authorization") || strings.Contains(output, "req-headers"))
+}
+
+func TestRegclientLoggerSkipsLowValueNoise(t *testing.T) {
+	var buf bytes.Buffer
+	logger := zerolog.New(&buf).Level(zerolog.DebugLevel)
+	reglogger := NewRegclientLogger(logger)
+
+	reglogger.Debug("regclient initialized")
+	reglogger.Debug("Auth request parsed")
+	reglogger.Debug("Sleeping for backoff")
+
+	require.Empty(t, buf.String())
+}
+
+func TestRegclientLoggerUsesInheritedGlobalLevel(t *testing.T) {
+	prev := zerolog.GlobalLevel()
+	zerolog.SetGlobalLevel(zerolog.DebugLevel)
+	defer zerolog.SetGlobalLevel(prev)
+
+	var buf bytes.Buffer
+	logger := zerolog.New(&buf)
+	reglogger := NewRegclientLogger(logger)
+
+	reglogger.LogAttrs(context.Background(), regtypes.LevelTrace, "reg http request",
+		slog.String("req-method", "HEAD"),
+		slog.String("req-url", "https://registry-1.docker.io/v2/test/app/manifests/latest"),
+		slog.String("resp-status", "200 OK"),
+		slog.Any("req-headers", http.Header{"Authorization": []string{"[censored]"}}),
+	)
+
+	output := buf.String()
+	require.Contains(t, output, "[regclient] HEAD https://registry-1.docker.io/v2/test/app/manifests/latest status=200 OK")
+	require.NotContains(t, output, "Authorization")
+}
diff --git a/internal/provider/file/image.go b/internal/provider/file/image.go
index 836e63fcb..3659c1ecc 100644
--- a/internal/provider/file/image.go
+++ b/internal/provider/file/image.go
@@ -63,20 +63,16 @@ func (c *Client) listFileImage() []model.Image {
 		// Check Platform
 		if item.Platform != (model.ImagePlatform{}) {
-			_, err = platforms.Parse(platforms.Format(ocispecs.Platform{
+			platform := platforms.Format(ocispecs.Platform{
 				OS:           item.Platform.OS,
 				Architecture: item.Platform.Arch,
 				Variant:      item.Platform.Variant,
-			}))
-			if err != nil {
-				c.logger.Error().
+			})
+			if _, err = platforms.Parse(platform); err != nil {
+				c.logger.Error().Err(err).
 					Str("file", file).
 					Str("img_name", item.Name).
- Msgf("cannot parse %s platform", platforms.Format(ocispecs.Platform{ - OS: item.Platform.OS, - Architecture: item.Platform.Arch, - Variant: item.Platform.Variant, - })) + Msgf("cannot parse %s platform", platform) } } diff --git a/pkg/registry/auth.go b/pkg/registry/auth.go deleted file mode 100644 index 7706145ea..000000000 --- a/pkg/registry/auth.go +++ /dev/null @@ -1,52 +0,0 @@ -package registry - -import ( - "io" - - dockerconfig "github.com/docker/cli/cli/config" - "github.com/docker/cli/cli/config/configfile" - dockerregistry "github.com/docker/docker/api/types/registry" -) - -const dockerHubConfigKey = "https://index.docker.io/v1/" - -// LookupAuth returns Docker registry credentials for the given registry domain. -// If no credentials are configured, an empty AuthConfig is returned. -func LookupAuth(domain string) (dockerregistry.AuthConfig, error) { - return lookupAuth("", domain) -} - -func lookupAuth(configDir, domain string) (dockerregistry.AuthConfig, error) { - cfg, err := loadDockerConfig(configDir) - if err != nil { - return dockerregistry.AuthConfig{}, err - } - - auth, err := cfg.GetAuthConfig(dockerConfigKey(domain)) - if err != nil { - return dockerregistry.AuthConfig{}, err - } - - return dockerregistry.AuthConfig{ - Username: auth.Username, - Password: auth.Password, - Auth: auth.Auth, - ServerAddress: auth.ServerAddress, - IdentityToken: auth.IdentityToken, - RegistryToken: auth.RegistryToken, - }, nil -} - -func loadDockerConfig(configDir string) (*configfile.ConfigFile, error) { - if configDir == "" { - return dockerconfig.LoadDefaultConfigFile(io.Discard), nil - } - return dockerconfig.Load(configDir) -} - -func dockerConfigKey(domain string) string { - if domain == "docker.io" || domain == "index.docker.io" { - return dockerHubConfigKey - } - return domain -} diff --git a/pkg/registry/auth_test.go b/pkg/registry/auth_test.go deleted file mode 100644 index a1331e9ae..000000000 --- a/pkg/registry/auth_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package registry - -import ( - "encoding/base64" - "os" - "path/filepath" - "testing" - - dockerregistry "github.com/docker/docker/api/types/registry" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestLookupAuthDockerHub(t *testing.T) { - t.Parallel() - - configDir := t.TempDir() - configPath := filepath.Join(configDir, "config.json") - auth := base64.StdEncoding.EncodeToString([]byte("janedoe:s3cr3t")) - require.NoError(t, os.WriteFile(configPath, []byte(`{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "`+auth+`" - } - } - }`), 0o600)) - - got, err := lookupAuth(configDir, "docker.io") - require.NoError(t, err) - assert.Equal(t, dockerregistry.AuthConfig{ - Username: "janedoe", - Password: "s3cr3t", - ServerAddress: dockerHubConfigKey, - }, got) -} - -func TestLookupAuthNotFound(t *testing.T) { - t.Parallel() - - configDir := t.TempDir() - require.NoError(t, os.WriteFile(filepath.Join(configDir, "config.json"), []byte(`{"auths":{}}`), 0o600)) - - got, err := lookupAuth(configDir, "ghcr.io") - require.NoError(t, err) - assert.Equal(t, dockerregistry.AuthConfig{}, got) -} diff --git a/pkg/registry/image.go b/pkg/registry/image.go index 058d8daae..ea4455a67 100644 --- a/pkg/registry/image.go +++ b/pkg/registry/image.go @@ -10,9 +10,9 @@ import ( "github.com/distribution/reference" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + regref "github.com/regclient/regclient/types/ref" ) -// Image holds information about an image. 
type Image struct { Domain string Path string @@ -24,19 +24,18 @@ type Image struct { opts ParseImageOptions } -// ParseImageOptions holds image options for parsing. type ParseImageOptions struct { Name string HubTpl string } -// ParseImage returns an Image struct with all the values filled in for a given image. func ParseImage(parseOpts ParseImageOptions) (Image, error) { // Parse the image name and tag. named, err := reference.ParseNormalizedNamed(parseOpts.Name) if err != nil { return Image{}, errors.Wrapf(err, "parsing image %s failed", parseOpts.Name) } + // Add the latest lag if they did not provide one. named = reference.TagNameOnly(named) @@ -66,25 +65,32 @@ func ParseImage(parseOpts ParseImageOptions) (Image, error) { return i, nil } -// Name returns the full name representation of an image. func (i Image) Name() string { return i.named.Name() } -// String returns the string representation of an image. func (i Image) String() string { return i.named.String() } -// Reference returns either the digest if it is non-empty or the tag for the image. func (i Image) Reference() string { if len(i.Digest.String()) > 1 { return i.Digest.String() } - return i.Tag } +func (i Image) regRef() (regref.Ref, error) { + ref, err := regref.New(i.Name()) + if err != nil { + return regref.Ref{}, err + } + if i.Tag != "" { + ref = ref.SetTag(i.Tag) + } + return ref, nil +} + func (i Image) hubLink() (string, error) { if i.opts.HubTpl != "" { var out bytes.Buffer diff --git a/pkg/registry/image_test.go b/pkg/registry/image_test.go index 3b0618b8b..22a168fd0 100644 --- a/pkg/registry/image_test.go +++ b/pkg/registry/image_test.go @@ -3,9 +3,11 @@ package registry import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +const sha256digest = "@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + func TestParseImage(t *testing.T) { testCases := []struct { desc string @@ -135,17 +137,13 @@ func TestParseImage(t *testing.T) { }, }, } - for _, tt := range testCases { - tt := tt t.Run(tt.desc, func(t *testing.T) { img, err := ParseImage(tt.parseOpts) - if err != nil { - t.Error(err) - } - assert.Equal(t, tt.expected.Domain, img.Domain) - assert.Equal(t, tt.expected.Path, img.Path) - assert.Equal(t, tt.expected.Tag, img.Tag) + require.NoError(t, err) + require.Equal(t, tt.expected.Domain, img.Domain) + require.Equal(t, tt.expected.Path, img.Path) + require.Equal(t, tt.expected.Tag, img.Tag) }) } } @@ -249,15 +247,66 @@ func TestHubLink(t *testing.T) { expected: "https://myregistry.example.com/ui/repos/an/image", }, } - for _, tt := range testCases { - tt := tt t.Run(tt.desc, func(t *testing.T) { img, err := ParseImage(tt.parseOpts) - if err != nil { - t.Error(err) + require.NoError(t, err) + require.Equal(t, tt.expected, img.HubLink) + }) + } +} + +func TestImageRegRef(t *testing.T) { + testCases := []struct { + input string + expected string + wantErr bool + }{ + { + input: "busybox", + expected: "docker.io/library/busybox:latest", + }, + { + input: "docker.io/library/busybox", + expected: "docker.io/library/busybox:latest", + }, + { + input: "docker.io/library/busybox:latest", + expected: "docker.io/library/busybox:latest", + }, + { + input: "busybox:notlatest", + expected: "docker.io/library/busybox:notlatest", + }, + { + input: "busybox" + sha256digest, + expected: "docker.io/library/busybox:latest", + }, + { + input: "busybox:latest" + sha256digest, + expected: "docker.io/library/busybox:latest", + }, + { + input: "busybox:v1.0.0" + 
sha256digest, + expected: "docker.io/library/busybox:v1.0.0", + }, + { + input: "UPPERCASEISINVALID", + wantErr: true, + }, + } + for _, tt := range testCases { + t.Run(tt.input, func(t *testing.T) { + image, err := ParseImage(ParseImageOptions{Name: tt.input}) + if tt.wantErr { + require.Error(t, err) + return } - assert.Equal(t, tt.expected, img.HubLink) + require.NoError(t, err) + + regRef, err := image.regRef() + require.NoError(t, err) + require.Equal(t, tt.expected, regRef.CommonName()) }) } } diff --git a/pkg/registry/manifest.go b/pkg/registry/manifest.go index 4cf64713a..8ba0d5da4 100644 --- a/pkg/registry/manifest.go +++ b/pkg/registry/manifest.go @@ -1,116 +1,174 @@ package registry import ( - "fmt" "time" "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "go.podman.io/image/v5/docker" - "go.podman.io/image/v5/manifest" + "github.com/regclient/regclient" + regdescriptor "github.com/regclient/regclient/types/descriptor" + regmanifest "github.com/regclient/regclient/types/manifest" + regplatform "github.com/regclient/regclient/types/platform" ) -// Manifest is the Docker image manifest information type Manifest struct { - Name string - Tag string - MIMEType string - Digest digest.Digest - Created *time.Time - DockerVersion string - Labels map[string]string - Layers []string - Platform string - Raw []byte + Name string + Tag string + MIMEType string + Digest digest.Digest + Created *time.Time + Labels map[string]string + Layers []string + Platform string + Raw []byte } -// Manifest returns the manifest for a specific image func (c *Client) Manifest(image Image, dbManifest Manifest) (Manifest, bool, error) { ctx, cancel := c.timeoutContext() defer cancel() - rmRef, err := ImageReference(image.String()) + regRef, err := image.regRef() if err != nil { - return Manifest{}, false, errors.Wrap(err, "cannot parse reference") + return Manifest{}, false, errors.Wrap(err, "cannot create regclient reference") } - // Retrieve remote digest through HEAD request - rmDigest, err := docker.GetDigest(ctx, c.sysCtx, rmRef) + headManifest, err := c.regctl.ManifestHead(ctx, regRef) if err != nil { - return Manifest{}, false, errors.Wrap(err, "cannot get image digest from HEAD request") + return Manifest{}, false, errors.Wrap(err, "cannot get manifest digest from HEAD request") + } else if headManifest == nil || headManifest.GetDescriptor().Digest == "" { + return Manifest{}, false, errors.New("manifest HEAD request returned no manifest or empty digest") } - // Digest match, returns db manifest - if c.opts.CompareDigest && len(dbManifest.Digest) > 0 && dbManifest.Digest == rmDigest { + remoteDigest := headManifest.GetDescriptor().Digest + if c.opts.CompareDigest && len(dbManifest.Digest) > 0 && dbManifest.Digest == remoteDigest { return dbManifest, false, nil } - rmCloser, err := rmRef.NewImage(ctx, c.sysCtx) + topManifest, err := c.regctl.ManifestGet(ctx, regRef) if err != nil { - return Manifest{}, false, errors.Wrap(err, "cannot create image closer") + return Manifest{}, false, errors.Wrap(err, "cannot get manifest") } - defer rmCloser.Close() + remoteDigest = topManifest.GetDescriptor().Digest - rmRawManifest, rmManifestMimeType, err := rmCloser.Manifest(ctx) + remoteRawManifest, err := topManifest.RawBody() if err != nil { - return Manifest{}, false, errors.Wrap(err, "cannot get raw manifest") + return Manifest{}, false, errors.Wrap(err, "cannot read raw manifest") } + remoteManifestMediaType := 
topManifest.GetDescriptor().MediaType - // For manifests list compare also digest matching the platform - updated := dbManifest.Digest != rmDigest - if c.opts.CompareDigest && len(dbManifest.Raw) > 0 && dbManifest.isManifestList() && isManifestList(rmManifestMimeType) { - dbManifestList, err := manifest.ListFromBlob(dbManifest.Raw, dbManifest.MIMEType) + updated := dbManifest.Digest != remoteDigest + if c.opts.CompareDigest && len(dbManifest.Raw) > 0 && topManifest.IsList() { + dbManifestValue, err := parseManifest(dbManifest.Raw, dbManifest.MIMEType) if err != nil { - return Manifest{}, false, errors.Wrap(err, "cannot parse manifest list") + return Manifest{}, false, errors.Wrap(err, "cannot parse stored manifest") } - dbManifestPlatformDigest, err := dbManifestList.ChooseInstance(c.sysCtx) - if err != nil { - return Manifest{}, false, errors.Wrapf(err, "error choosing image instance") + if dbManifestValue.IsList() { + dbManifestPlatformDigest, err := manifestPlatformDigest(dbManifestValue, c.opts.Platform) + if err != nil { + return Manifest{}, false, errors.Wrap(err, "cannot choose platform digest from stored manifest list") + } + remoteManifestPlatformDigest, err := manifestPlatformDigest(topManifest, c.opts.Platform) + if err != nil { + return Manifest{}, false, errors.Wrap(err, "cannot choose platform digest from remote manifest list") + } + updated = dbManifestPlatformDigest != remoteManifestPlatformDigest } - rmManifestList, err := manifest.ListFromBlob(rmRawManifest, rmManifestMimeType) + } + + selectedManifest := topManifest + platform := c.opts.Platform + + if topManifest.IsList() { + desc, err := manifestPlatformDescriptor(topManifest, platform) if err != nil { - return Manifest{}, false, errors.Wrap(err, "cannot parse manifest list") + return Manifest{}, false, errors.Wrap(err, "error choosing image instance") } - rmManifestPlatformDigest, err := rmManifestList.ChooseInstance(c.sysCtx) + selectedManifest, err = c.regctl.ManifestGet(ctx, regRef, regclient.WithManifestDesc(desc)) if err != nil { - return Manifest{}, false, errors.Wrapf(err, "error choosing image instance") + return Manifest{}, false, errors.Wrap(err, "cannot get selected platform manifest") } - updated = dbManifestPlatformDigest != rmManifestPlatformDigest + if desc.Platform != nil { + platform = *desc.Platform + } + } + + imageManifest, ok := selectedManifest.(regmanifest.Imager) + if !ok { + return Manifest{}, false, errors.Errorf("manifest media type %q is not an image manifest", selectedManifest.GetDescriptor().MediaType) } - // Metadata describing the Docker image - rmInspect, err := rmCloser.Inspect(ctx) + layersDesc, err := imageManifest.GetLayers() if err != nil { - return Manifest{}, false, errors.Wrap(err, "cannot inspect") + return Manifest{}, false, errors.Wrap(err, "cannot get image layers") } - rmTag := rmInspect.Tag - if len(rmTag) == 0 { - rmTag = image.Tag + + configDesc, err := imageManifest.GetConfig() + if err != nil { + return Manifest{}, false, errors.Wrap(err, "cannot get image config descriptor") } - rmPlatform := fmt.Sprintf("%s/%s", rmInspect.Os, rmInspect.Architecture) - if rmInspect.Variant != "" { - rmPlatform = fmt.Sprintf("%s/%s", rmPlatform, rmInspect.Variant) + + configData, err := c.regctl.BlobGetOCIConfig(ctx, regRef, configDesc) + if err != nil { + return Manifest{}, false, errors.Wrap(err, "cannot get image config") + } + + layers := make([]string, 0, len(layersDesc)) + for _, layer := range layersDesc { + layers = append(layers, layer.Digest.String()) + } + + imageConfig := 
configData.GetConfig() + + remotePlatform := platform.String() + if imageConfig.OS != "" && imageConfig.Architecture != "" { + remotePlatform = regplatform.Platform{ + OS: imageConfig.OS, + Architecture: imageConfig.Architecture, + Variant: imageConfig.Variant, + }.String() } return Manifest{ - Name: rmCloser.Reference().DockerReference().Name(), - Tag: rmTag, - MIMEType: rmManifestMimeType, - Digest: rmDigest, - Created: rmInspect.Created, - DockerVersion: rmInspect.DockerVersion, - Labels: rmInspect.Labels, - Layers: rmInspect.Layers, - Platform: rmPlatform, - Raw: rmRawManifest, + Name: image.Name(), + Tag: image.Tag, + MIMEType: remoteManifestMediaType, + Digest: remoteDigest, + Created: imageConfig.Created, + Labels: imageConfig.Config.Labels, + Layers: layers, + Platform: remotePlatform, + Raw: remoteRawManifest, }, updated, nil } -func (m Manifest) isManifestList() bool { - return isManifestList(m.MIMEType) +func platformDigestFromManifest(raw []byte, mimeType string, platform regplatform.Platform) (digest.Digest, error) { + manifest, err := parseManifest(raw, mimeType) + if err != nil { + return "", err + } + return manifestPlatformDigest(manifest, platform) } -func isManifestList(mimeType string) bool { - return mimeType == manifest.DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex +func manifestPlatformDigest(manifest regmanifest.Manifest, platform regplatform.Platform) (digest.Digest, error) { + desc, err := manifestPlatformDescriptor(manifest, platform) + if err != nil { + return "", err + } + return desc.Digest, nil +} + +func manifestPlatformDescriptor(manifest regmanifest.Manifest, platform regplatform.Platform) (regdescriptor.Descriptor, error) { + desc, err := regmanifest.GetPlatformDesc(manifest, &platform) + if err != nil { + return regdescriptor.Descriptor{}, errors.Wrap(err, "cannot select platform descriptor") + } + return *desc, nil +} + +func parseManifest(raw []byte, mimeType string) (regmanifest.Manifest, error) { + opts := []regmanifest.Opts{regmanifest.WithRaw(raw)} + if mimeType != "" { + opts = append(opts, regmanifest.WithDesc(regdescriptor.Descriptor{MediaType: mimeType})) + } + return regmanifest.New(opts...) 
} diff --git a/pkg/registry/manifest_test.go b/pkg/registry/manifest_test.go index 7274ee83a..433d5d79c 100644 --- a/pkg/registry/manifest_test.go +++ b/pkg/registry/manifest_test.go @@ -1,36 +1,327 @@ package registry import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/opencontainers/go-digest" + regconfig "github.com/regclient/regclient/config" + regplatform "github.com/regclient/regclient/types/platform" "github.com/stretchr/testify/require" ) -func TestCompareDigest(t *testing.T) { - t.Parallel() - rc, err := New(Options{ +func TestPlatformDigestFromManifest(t *testing.T) { + raw := []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.index.v1+json", + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:1111111111111111111111111111111111111111111111111111111111111111", + "size": 866, + "platform": { + "architecture": "arm64", + "os": "linux" + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:2222222222222222222222222222222222222222222222222222222222222222", + "size": 866, + "platform": { + "architecture": "amd64", + "os": "linux" + } + } + ] + }`) + got, err := platformDigestFromManifest(raw, "application/vnd.oci.image.index.v1+json", regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }) + require.NoError(t, err) + require.Equal(t, digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222"), got) +} + +func TestManifestFromRegistryHTTP(t *testing.T) { + var manifestGetCount int + + configBlob := `{ + "created": "2024-01-02T03:04:05Z", + "architecture": "amd64", + "os": "linux", + "variant": "", + "docker_version": "27.0.0", + "config": { + "Labels": { + "org.opencontainers.image.url": "https://example.test/app" + } + } + }` + configDigest := digest.FromString(configBlob).String() + + childManifest := fmt.Sprintf(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "%s", + "size": %d + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "size": 123 + } + ] + }`, configDigest, len(configBlob)) + childDigest := digest.FromString(childManifest).String() + + topManifest := fmt.Sprintf(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.index.v1+json", + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "%s", + "size": %d, + "platform": { + "architecture": "amd64", + "os": "linux" + } + } + ] + }`, childDigest, len(childManifest)) + topDigest := digest.FromString(topManifest).String() + + server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + switch { + case req.URL.Path == "/v2/": + rw.WriteHeader(http.StatusOK) + case req.Method == http.MethodHead && req.URL.Path == "/v2/test/app/manifests/latest": + rw.Header().Set("Content-Type", "application/vnd.oci.image.index.v1+json") + rw.Header().Set("Docker-Content-Digest", topDigest) + rw.WriteHeader(http.StatusOK) + case req.Method == http.MethodGet && req.URL.Path == "/v2/test/app/manifests/latest": + manifestGetCount++ + rw.Header().Set("Content-Type", "application/vnd.oci.image.index.v1+json") + rw.Header().Set("Docker-Content-Digest", topDigest) + _, _ = 
rw.Write([]byte(topManifest)) + case req.Method == http.MethodGet && req.URL.Path == "/v2/test/app/manifests/"+childDigest: + manifestGetCount++ + rw.Header().Set("Content-Type", "application/vnd.oci.image.manifest.v1+json") + rw.Header().Set("Docker-Content-Digest", childDigest) + _, _ = rw.Write([]byte(childManifest)) + case req.Method == http.MethodGet && req.URL.Path == "/v2/test/app/blobs/"+configDigest: + rw.Header().Set("Content-Type", "application/vnd.oci.image.config.v1+json") + _, _ = rw.Write([]byte(configBlob)) + default: + rw.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + img, err := ParseImage(ParseImageOptions{Name: strings.TrimPrefix(server.URL, "https://") + "/test/app:latest"}) + require.NoError(t, err) + + client := New(Options{ + Host: ®config.Host{ + Name: img.Domain, + TLS: regconfig.TLSInsecure, + }, + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, + }) + + manifest, updated, err := client.Manifest(img, Manifest{}) + require.NoError(t, err) + require.True(t, updated) + require.Equal(t, strings.TrimPrefix(server.URL, "https://")+"/test/app", manifest.Name) + require.Equal(t, "latest", manifest.Tag) + require.Equal(t, "application/vnd.oci.image.index.v1+json", manifest.MIMEType) + require.Equal(t, digest.Digest(topDigest), manifest.Digest) + require.Equal(t, "linux/amd64", manifest.Platform) + require.Equal(t, []string{"sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"}, manifest.Layers) + require.Equal(t, "https://example.test/app", manifest.Labels["org.opencontainers.image.url"]) + require.Equal(t, 2, manifestGetCount) +} + +func TestManifestCompareDigestSkipsGet(t *testing.T) { + var getCalled bool + server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + switch { + case req.URL.Path == "/v2/": + rw.WriteHeader(http.StatusOK) + case req.Method == http.MethodHead && req.URL.Path == "/v2/test/app/manifests/latest": + rw.Header().Set("Content-Type", "application/vnd.oci.image.manifest.v1+json") + rw.Header().Set("Docker-Content-Digest", "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + rw.WriteHeader(http.StatusOK) + case req.Method == http.MethodGet && req.URL.Path == "/v2/test/app/manifests/latest": + getCalled = true + rw.WriteHeader(http.StatusInternalServerError) + default: + rw.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + img, err := ParseImage(ParseImageOptions{Name: strings.TrimPrefix(server.URL, "https://") + "/test/app:latest"}) + require.NoError(t, err) + + client := New(Options{ + Host: ®config.Host{ + Name: img.Domain, + TLS: regconfig.TLSInsecure, + }, CompareDigest: true, - ImageOs: "linux", - ImageArch: "amd64", + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, }) - if err != nil { - t.Error(err) + + dbManifest := Manifest{ + Name: strings.TrimPrefix(server.URL, "https://") + "/test/app", + Tag: "latest", + Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", } + manifest, updated, err := client.Manifest(img, dbManifest) + require.NoError(t, err) + require.False(t, getCalled) + require.False(t, updated) + require.Equal(t, dbManifest, manifest) +} + +func TestManifestCompareDigestHeadFailureReturnsError(t *testing.T) { + var getCalled bool + server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + switch { + case req.URL.Path == "/v2/": + rw.WriteHeader(http.StatusOK) + case 
req.Method == http.MethodHead && req.URL.Path == "/v2/test/app/manifests/latest": + rw.WriteHeader(http.StatusInternalServerError) + case req.Method == http.MethodGet && req.URL.Path == "/v2/test/app/manifests/latest": + getCalled = true + rw.Header().Set("Content-Type", "application/vnd.oci.image.manifest.v1+json") + rw.WriteHeader(http.StatusOK) + default: + rw.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + img, err := ParseImage(ParseImageOptions{Name: strings.TrimPrefix(server.URL, "https://") + "/test/app:latest"}) + require.NoError(t, err) + + client := New(Options{ + Host: ®config.Host{ + Name: img.Domain, + TLS: regconfig.TLSInsecure, + }, + CompareDigest: true, + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, + }) + + _, _, err = client.Manifest(img, Manifest{ + Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + }) + require.Error(t, err) + require.False(t, getCalled) +} + +func TestManifestMissingPlatformReturnsError(t *testing.T) { + var childManifestGetCount int + + topManifest := `{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.index.v1+json", + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:1111111111111111111111111111111111111111111111111111111111111111", + "size": 866, + "platform": { + "architecture": "arm64", + "os": "linux" + } + } + ] + }` + topDigest := digest.FromString(topManifest).String() + + server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + switch { + case req.URL.Path == "/v2/": + rw.WriteHeader(http.StatusOK) + case req.Method == http.MethodHead && req.URL.Path == "/v2/test/app/manifests/latest": + rw.Header().Set("Content-Type", "application/vnd.oci.image.index.v1+json") + rw.Header().Set("Docker-Content-Digest", topDigest) + rw.WriteHeader(http.StatusOK) + case req.Method == http.MethodGet && req.URL.Path == "/v2/test/app/manifests/latest": + rw.Header().Set("Content-Type", "application/vnd.oci.image.index.v1+json") + rw.Header().Set("Docker-Content-Digest", topDigest) + _, _ = rw.Write([]byte(topManifest)) + case req.Method == http.MethodGet && strings.HasPrefix(req.URL.Path, "/v2/test/app/manifests/sha256:"): + childManifestGetCount++ + rw.WriteHeader(http.StatusInternalServerError) + default: + rw.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + img, err := ParseImage(ParseImageOptions{Name: strings.TrimPrefix(server.URL, "https://") + "/test/app:latest"}) + require.NoError(t, err) + + client := New(Options{ + Host: ®config.Host{ + Name: img.Domain, + TLS: regconfig.TLSInsecure, + }, + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, + }) + + _, _, err = client.Manifest(img, Manifest{}) + require.Error(t, err) + require.ErrorContains(t, err, "error choosing image instance") + require.ErrorContains(t, err, "cannot select platform descriptor") + require.Equal(t, 0, childManifestGetCount) +} + +func TestCompareDigest(t *testing.T) { + client := New(Options{ + Host: regconfig.HostNewName("docker.io"), + CompareDigest: true, + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, + }) + img, err := ParseImage(ParseImageOptions{ Name: "crazymax/diun:2.5.0", }) - if err != nil { - t.Error(err) - } + require.NoError(t, err) // download manifest - _, _, err = rc.Manifest(img, Manifest{}) + _, _, err = client.Manifest(img, Manifest{}) require.NoError(t, err) // check manifest - manifest, _, 
err := rc.Manifest(img, Manifest{ + manifest, _, err := client.Manifest(img, Manifest{ Name: "docker.io/crazymax/diun", Tag: "2.5.0", MIMEType: "application/vnd.docker.distribution.manifest.list.v2+json", @@ -38,37 +329,33 @@ func TestCompareDigest(t *testing.T) { Platform: "linux/amd64", }) require.NoError(t, err) - assert.Equal(t, "docker.io/crazymax/diun", manifest.Name) - assert.Equal(t, "2.5.0", manifest.Tag) - assert.Equal(t, "application/vnd.docker.distribution.manifest.list.v2+json", manifest.MIMEType) - assert.Equal(t, "linux/amd64", manifest.Platform) - assert.Empty(t, manifest.DockerVersion) + require.Equal(t, "docker.io/crazymax/diun", manifest.Name) + require.Equal(t, "2.5.0", manifest.Tag) + require.Equal(t, "application/vnd.docker.distribution.manifest.list.v2+json", manifest.MIMEType) + require.Equal(t, "linux/amd64", manifest.Platform) } func TestManifest(t *testing.T) { - t.Parallel() - rc, err := New(Options{ + client := New(Options{ + Host: regconfig.HostNewName("docker.io"), CompareDigest: true, - ImageOs: "linux", - ImageArch: "amd64", + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, }) - if err != nil { - t.Error(err) - } img, err := ParseImage(ParseImageOptions{ Name: "portainer/portainer-ce:linux-amd64-2.5.1", }) - if err != nil { - t.Error(err) - } + require.NoError(t, err) // download manifest - _, _, err = rc.Manifest(img, Manifest{}) + _, _, err = client.Manifest(img, Manifest{}) require.NoError(t, err) // check manifest - manifest, updated, err := rc.Manifest(img, Manifest{ + manifest, updated, err := client.Manifest(img, Manifest{ Name: "docker.io/portainer/portainer-ce", Tag: "linux-amd64-2.5.1", MIMEType: "application/vnd.docker.distribution.manifest.v2+json", @@ -103,38 +390,35 @@ func TestManifest(t *testing.T) { }) require.NoError(t, err) - assert.Equal(t, false, updated) - assert.Equal(t, "docker.io/portainer/portainer-ce", manifest.Name) - assert.Equal(t, "linux-amd64-2.5.1", manifest.Tag) - assert.Equal(t, "application/vnd.docker.distribution.manifest.v2+json", manifest.MIMEType) - assert.Equal(t, "sha256:653057af0d2d961f436c75deda1ca7fe3defc89664bed6bd3da8c91c88c1ce05", manifest.Digest.String()) - assert.Equal(t, "linux/amd64", manifest.Platform) + require.Equal(t, false, updated) + require.Equal(t, "docker.io/portainer/portainer-ce", manifest.Name) + require.Equal(t, "linux-amd64-2.5.1", manifest.Tag) + require.Equal(t, "application/vnd.docker.distribution.manifest.v2+json", manifest.MIMEType) + require.Equal(t, "sha256:653057af0d2d961f436c75deda1ca7fe3defc89664bed6bd3da8c91c88c1ce05", manifest.Digest.String()) + require.Equal(t, "linux/amd64", manifest.Platform) } func TestManifestMultiUpdatedPlatform(t *testing.T) { - t.Parallel() - rc, err := New(Options{ + client := New(Options{ + Host: regconfig.HostNewName("docker.io"), CompareDigest: true, - ImageOs: "linux", - ImageArch: "amd64", + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, }) - if err != nil { - t.Error(err) - } img, err := ParseImage(ParseImageOptions{ Name: "mongo:3.6.21", }) - if err != nil { - t.Error(err) - } + require.NoError(t, err) // download manifest - _, _, err = rc.Manifest(img, Manifest{}) + _, _, err = client.Manifest(img, Manifest{}) require.NoError(t, err) // check manifest - manifest, updated, err := rc.Manifest(img, Manifest{ + manifest, updated, err := client.Manifest(img, Manifest{ Name: "docker.io/library/mongo", Tag: "3.6.21", MIMEType: "application/vnd.docker.distribution.manifest.list.v2+json", @@ 
-188,38 +472,35 @@ func TestManifestMultiUpdatedPlatform(t *testing.T) { }) require.NoError(t, err) - assert.Equal(t, true, updated) - assert.Equal(t, "docker.io/library/mongo", manifest.Name) - assert.Equal(t, "3.6.21", manifest.Tag) - assert.Equal(t, "application/vnd.docker.distribution.manifest.list.v2+json", manifest.MIMEType) - assert.Equal(t, "sha256:3cff2069adb34a330552695659c261bca69148e325863763b78b0285dd1a25c9", manifest.Digest.String()) - assert.Equal(t, "linux/amd64", manifest.Platform) + require.Equal(t, true, updated) + require.Equal(t, "docker.io/library/mongo", manifest.Name) + require.Equal(t, "3.6.21", manifest.Tag) + require.Equal(t, "application/vnd.docker.distribution.manifest.list.v2+json", manifest.MIMEType) + require.Equal(t, "sha256:3cff2069adb34a330552695659c261bca69148e325863763b78b0285dd1a25c9", manifest.Digest.String()) + require.Equal(t, "linux/amd64", manifest.Platform) } func TestManifestMultiNotUpdatedPlatform(t *testing.T) { - t.Parallel() - rc, err := New(Options{ + client := New(Options{ + Host: regconfig.HostNewName("docker.io"), CompareDigest: true, - ImageOs: "linux", - ImageArch: "amd64", + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, }) - if err != nil { - t.Error(err) - } img, err := ParseImage(ParseImageOptions{ Name: "mongo:3.6.21", }) - if err != nil { - t.Error(err) - } + require.NoError(t, err) // download manifest - _, _, err = rc.Manifest(img, Manifest{}) + _, _, err = client.Manifest(img, Manifest{}) require.NoError(t, err) // check manifest - manifest, updated, err := rc.Manifest(img, Manifest{ + manifest, updated, err := client.Manifest(img, Manifest{ Name: "docker.io/library/mongo", Tag: "3.6.21", MIMEType: "application/vnd.docker.distribution.manifest.list.v2+json", @@ -273,93 +554,83 @@ func TestManifestMultiNotUpdatedPlatform(t *testing.T) { }) require.NoError(t, err) - assert.Equal(t, false, updated) - assert.Equal(t, "docker.io/library/mongo", manifest.Name) - assert.Equal(t, "3.6.21", manifest.Tag) - assert.Equal(t, "application/vnd.docker.distribution.manifest.list.v2+json", manifest.MIMEType) - assert.Equal(t, "sha256:3cff2069adb34a330552695659c261bca69148e325863763b78b0285dd1a25c9", manifest.Digest.String()) - assert.Equal(t, "linux/amd64", manifest.Platform) + require.Equal(t, false, updated) + require.Equal(t, "docker.io/library/mongo", manifest.Name) + require.Equal(t, "3.6.21", manifest.Tag) + require.Equal(t, "application/vnd.docker.distribution.manifest.list.v2+json", manifest.MIMEType) + require.Equal(t, "sha256:3cff2069adb34a330552695659c261bca69148e325863763b78b0285dd1a25c9", manifest.Digest.String()) + require.Equal(t, "linux/amd64", manifest.Platform) } func TestManifestVariant(t *testing.T) { - t.Parallel() - rc, err := New(Options{ - ImageOs: "linux", - ImageArch: "arm", - ImageVariant: "v7", + client := New(Options{ + Host: regconfig.HostNewName("docker.io"), + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "arm", + Variant: "v7", + }, }) - if err != nil { - t.Error(err) - } img, err := ParseImage(ParseImageOptions{ Name: "crazymax/diun:2.5.0", }) - if err != nil { - t.Error(err) - } + require.NoError(t, err) - manifest, _, err := rc.Manifest(img, Manifest{}) + manifest, _, err := client.Manifest(img, Manifest{}) require.NoError(t, err) - assert.Equal(t, "docker.io/crazymax/diun", manifest.Name) - assert.Equal(t, "2.5.0", manifest.Tag) - assert.Equal(t, "application/vnd.docker.distribution.manifest.list.v2+json", manifest.MIMEType) - assert.Equal(t, "linux/arm/v7", 
manifest.Platform) - assert.Empty(t, manifest.DockerVersion) + require.Equal(t, "docker.io/crazymax/diun", manifest.Name) + require.Equal(t, "2.5.0", manifest.Tag) + require.Equal(t, "application/vnd.docker.distribution.manifest.list.v2+json", manifest.MIMEType) + require.Equal(t, "linux/arm/v7", manifest.Platform) } func TestManifestTaggedDigest(t *testing.T) { - t.Parallel() - rc, err := New(Options{ + client := New(Options{ + Host: regconfig.HostNewName("docker.io"), CompareDigest: true, - ImageOs: "linux", - ImageArch: "amd64", + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, }) - if err != nil { - t.Error(err) - } img, err := ParseImage(ParseImageOptions{ Name: "crazymax/diun:4.25.0@sha256:3fca3dd86c2710586208b0f92d1ec4ce25382f4cad4ae76a2275db8e8bb24031", }) - if err != nil { - t.Error(err) - } + require.NoError(t, err) // download manifest - _, _, err = rc.Manifest(img, Manifest{}) + _, _, err = client.Manifest(img, Manifest{}) require.NoError(t, err) // check manifest - manifest, updated, err := rc.Manifest(img, manifestCrazymaxDiun4250) + manifest, updated, err := client.Manifest(img, manifestCrazymaxDiun4250) require.NoError(t, err) - assert.Equal(t, false, updated) - assert.Equal(t, "docker.io/crazymax/diun", manifest.Name) - assert.Equal(t, "4.25.0", manifest.Tag) - assert.Equal(t, "application/vnd.oci.image.index.v1+json", manifest.MIMEType) - assert.Equal(t, "sha256:3fca3dd86c2710586208b0f92d1ec4ce25382f4cad4ae76a2275db8e8bb24031", manifest.Digest.String()) - assert.Equal(t, "linux/amd64", manifest.Platform) + require.Equal(t, false, updated) + require.Equal(t, "docker.io/crazymax/diun", manifest.Name) + require.Equal(t, "4.25.0", manifest.Tag) + require.Equal(t, "application/vnd.oci.image.index.v1+json", manifest.MIMEType) + require.Equal(t, "sha256:3fca3dd86c2710586208b0f92d1ec4ce25382f4cad4ae76a2275db8e8bb24031", manifest.Digest.String()) + require.Equal(t, "linux/amd64", manifest.Platform) } func TestManifestTaggedDigestUnknownTag(t *testing.T) { - t.Parallel() - rc, err := New(Options{ + client := New(Options{ + Host: regconfig.HostNewName("docker.io"), CompareDigest: true, - ImageOs: "linux", - ImageArch: "amd64", + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, }) - if err != nil { - t.Error(err) - } img, err := ParseImage(ParseImageOptions{ Name: "crazymax/diun:foo@sha256:3fca3dd86c2710586208b0f92d1ec4ce25382f4cad4ae76a2275db8e8bb24031", }) - if err != nil { - t.Error(err) - } + require.NoError(t, err) - _, _, err = rc.Manifest(img, Manifest{}) + _, _, err = client.Manifest(img, Manifest{}) require.Error(t, err) } diff --git a/pkg/registry/platform.go b/pkg/registry/platform.go new file mode 100644 index 000000000..23bfb84b7 --- /dev/null +++ b/pkg/registry/platform.go @@ -0,0 +1,24 @@ +package registry + +import ( + "path" + + regplatform "github.com/regclient/regclient/types/platform" +) + +func TargetPlatform(os, arch, variant string) (regplatform.Platform, error) { + local := regplatform.Local() + if os == "" && arch == "" && variant == "" { + return local, nil + } + if os == "" { + os = local.OS + } + if arch == "" { + arch = local.Architecture + } + if variant == "" && arch == local.Architecture { + variant = local.Variant + } + return regplatform.Parse(path.Join(os, arch, variant)) +} diff --git a/pkg/registry/platform_test.go b/pkg/registry/platform_test.go new file mode 100644 index 000000000..56bc52226 --- /dev/null +++ b/pkg/registry/platform_test.go @@ -0,0 +1,29 @@ +package registry + +import ( + 
"testing" + + regplatform "github.com/regclient/regclient/types/platform" + "github.com/stretchr/testify/require" +) + +func TestTargetPlatformDefault(t *testing.T) { + got, err := TargetPlatform("", "", "") + require.NoError(t, err) + require.Equal(t, regplatform.Local(), got) +} + +func TestTargetPlatformOverride(t *testing.T) { + got, err := TargetPlatform("linux", "arm", "v7") + require.NoError(t, err) + require.Equal(t, regplatform.Platform{ + OS: "linux", + Architecture: "arm", + Variant: "v7", + }, got) +} + +func TestTargetPlatformInvalid(t *testing.T) { + _, err := TargetPlatform("linux!", "", "") + require.Error(t, err) +} diff --git a/pkg/registry/ref.go b/pkg/registry/ref.go deleted file mode 100644 index 2af57fbce..000000000 --- a/pkg/registry/ref.go +++ /dev/null @@ -1,66 +0,0 @@ -package registry - -import ( - "fmt" - "strings" - - "github.com/distribution/reference" - "github.com/pkg/errors" - "go.podman.io/image/v5/docker" - "go.podman.io/image/v5/types" -) - -func ImageReference(name string) (types.ImageReference, error) { - ref, err := namedReference(name) - if err != nil { - return nil, errors.Wrap(err, "cannot parse reference") - } - refStr := ref.String() - if !strings.HasPrefix(refStr, "//") { - refStr = fmt.Sprintf("//%s", refStr) - } - return docker.ParseReference(refStr) -} - -func namedReference(name string) (reference.Named, error) { - name = strings.TrimPrefix(name, "//") - - ref, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, errors.Wrapf(err, "parsing normalized named %q", name) - } else if ref == nil { - return nil, errors.Errorf("%q is not a named reference", name) - } - - if _, hasTag := ref.(reference.NamedTagged); hasTag { - ref, err = normalizeTaggedDigestedNamed(ref) - if err != nil { - return nil, errors.Wrapf(err, "normalizing tagged digested name %q", name) - } - } else if _, hasDigest := ref.(reference.Digested); hasDigest { - ref = reference.TrimNamed(ref) - } - - return reference.TagNameOnly(ref), nil -} - -// normalizeTaggedDigestedNamed strips the digest off the specified named -// reference if it is tagged and digested. 
-func normalizeTaggedDigestedNamed(named reference.Named) (reference.Named, error) { - _, isDigested := named.(reference.Digested) - if !isDigested { - return named, nil - } - tag, isTagged := named.(reference.NamedTagged) - if !isTagged { - return named, nil - } - // strip off the tag and digest - newNamed := reference.TrimNamed(named) - // re-add the tag - newNamed, err := reference.WithTag(newNamed, tag.Tag()) - if err != nil { - return named, err - } - return newNamed, nil -} diff --git a/pkg/registry/ref_test.go b/pkg/registry/ref_test.go deleted file mode 100644 index 6588b8cd3..000000000 --- a/pkg/registry/ref_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package registry - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - sha256digest = "@sha256:" + sha256digestHex -) - -func TestImageReference(t *testing.T) { - testCases := []struct { - input string - expected string - wantErr bool - }{ - { - input: "busybox", - expected: "docker.io/library/busybox:latest", - }, - { - input: "docker.io/library/busybox", - expected: "docker.io/library/busybox:latest", - }, - { - input: "docker.io/library/busybox:latest", - expected: "docker.io/library/busybox:latest", - }, - { - input: "busybox:notlatest", - expected: "docker.io/library/busybox:notlatest", - }, - { - input: "busybox" + sha256digest, - expected: "docker.io/library/busybox:latest", - }, - { - input: "busybox:latest" + sha256digest, - expected: "docker.io/library/busybox:latest", - }, - { - input: "busybox:v1.0.0" + sha256digest, - expected: "docker.io/library/busybox:v1.0.0", - }, - { - input: "UPPERCASEISINVALID", - expected: "", - wantErr: true, - }, - } - - for _, tt := range testCases { - tt := tt - t.Run(tt.input, func(t *testing.T) { - ref, err := ImageReference(tt.input) - if tt.wantErr { - require.Error(t, err) - return - } - require.NoError(t, err) - assert.Equal(t, tt.expected, ref.DockerReference().String(), tt.input) - }) - } -} diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go index 006223661..7ad6abfa6 100644 --- a/pkg/registry/registry.go +++ b/pkg/registry/registry.go @@ -2,54 +2,48 @@ package registry import ( "context" + "log/slog" "time" - dockerregistry "github.com/docker/docker/api/types/registry" "github.com/pkg/errors" - "go.podman.io/image/v5/types" + "github.com/regclient/regclient" + regconfig "github.com/regclient/regclient/config" + regscheme "github.com/regclient/regclient/scheme/reg" + regplatform "github.com/regclient/regclient/types/platform" ) -// Client represents an active docker registry object type Client struct { opts Options - sysCtx *types.SystemContext + regctl *regclient.RegClient } -// Options holds docker registry object options type Options struct { - Auth dockerregistry.AuthConfig - InsecureTLS bool + Host *regconfig.Host + Platform regplatform.Platform + Logger *slog.Logger Timeout time.Duration UserAgent string CompareDigest bool - ImageOs string - ImageArch string - ImageVariant string } -// New creates new docker registry client instance -func New(opts Options) (*Client, error) { - var auth *types.DockerAuthConfig - if opts.Auth != (dockerregistry.AuthConfig{}) { - auth = &types.DockerAuthConfig{ - Username: opts.Auth.Username, - Password: opts.Auth.Password, - IdentityToken: opts.Auth.IdentityToken, - } +func New(opts Options) *Client { + regctlOpts := []regclient.Opt{ + regclient.WithDockerCreds(), + 
regclient.WithRegOpts(regscheme.WithDelay(2*time.Second, 60*time.Second)), + } + if opts.Host != nil { + regctlOpts = append(regctlOpts, regclient.WithConfigHost(*opts.Host)) + } + if opts.Logger != nil { + regctlOpts = append(regctlOpts, regclient.WithSlog(opts.Logger)) + } + if opts.UserAgent != "" { + regctlOpts = append(regctlOpts, regclient.WithUserAgent(opts.UserAgent)) } - return &Client{ - opts: opts, - sysCtx: &types.SystemContext{ - DockerAuthConfig: auth, - DockerDaemonInsecureSkipTLSVerify: opts.InsecureTLS, - DockerInsecureSkipTLSVerify: types.NewOptionalBool(opts.InsecureTLS), - DockerRegistryUserAgent: opts.UserAgent, - OSChoice: opts.ImageOs, - ArchitectureChoice: opts.ImageArch, - VariantChoice: opts.ImageVariant, - }, - }, nil + opts: opts, + regctl: regclient.New(regctlOpts...), + } } func (c *Client) timeoutContext() (context.Context, context.CancelFunc) { @@ -57,7 +51,7 @@ func (c *Client) timeoutContext() (context.Context, context.CancelFunc) { var cancelFunc context.CancelFunc = func() {} if c.opts.Timeout > 0 { cancelCtx, cancel := context.WithCancelCause(ctx) - ctx, _ = context.WithTimeoutCause(cancelCtx, c.opts.Timeout, errors.WithStack(context.DeadlineExceeded)) //nolint:govet // no need to manually cancel this context as we already rely on parent + ctx, _ = context.WithTimeoutCause(cancelCtx, c.opts.Timeout, errors.WithStack(context.DeadlineExceeded)) //nolint:govet // parent cancellation is enough cancelFunc = func() { cancel(errors.WithStack(context.Canceled)) } } return ctx, cancelFunc diff --git a/pkg/registry/registry_test.go b/pkg/registry/registry_test.go index ad4312b49..2a1556c92 100644 --- a/pkg/registry/registry_test.go +++ b/pkg/registry/registry_test.go @@ -1,58 +1,55 @@ package registry import ( - "os" "testing" - dockerregistry "github.com/docker/docker/api/types/registry" + regconfig "github.com/regclient/regclient/config" + regplatform "github.com/regclient/regclient/types/platform" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -var ( - rc *Client -) - -func TestMain(m *testing.M) { - var err error - - rc, err = New(Options{ - ImageOs: "linux", - ImageArch: "amd64", - }) - if err != nil { - panic(err.Error()) - } - - os.Exit(m.Run()) -} - func TestNew(t *testing.T) { - assert.NotNil(t, rc) + client := New(Options{ + Host: regconfig.HostNewName("docker.io"), + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, + }) + assert.NotNil(t, client) } -func TestNewMapsDockerRegistryAuth(t *testing.T) { - t.Parallel() - - rc, err := New(Options{ - Auth: dockerregistry.AuthConfig{ - Username: "janedoe", - Password: "s3cr3t", - IdentityToken: "token", +func TestNewMapsRegistryAuth(t *testing.T) { + client := New(Options{ + Host: ®config.Host{ + Name: "docker.io", + User: "janedoe", + Pass: "s3cr3t", + Token: "token", + TLS: regconfig.TLSInsecure, + }, + UserAgent: "diun/test", + Platform: regplatform.Platform{ + OS: "linux", + Architecture: "amd64", }, - InsecureTLS: true, - UserAgent: "diun/test", - ImageOs: "linux", - ImageArch: "amd64", }) - require.NoError(t, err) - require.NotNil(t, rc.sysCtx) - require.NotNil(t, rc.sysCtx.DockerAuthConfig) + require.NotNil(t, client.opts.Host) + require.Equal(t, "janedoe", client.opts.Host.User) + require.Equal(t, "s3cr3t", client.opts.Host.Pass) + require.Equal(t, "token", client.opts.Host.Token) + require.Equal(t, "diun/test", client.opts.UserAgent) + require.Equal(t, regplatform.Platform{ + OS: "linux", + Architecture: "amd64", + }, client.opts.Platform) +} - 
assert.Equal(t, "janedoe", rc.sysCtx.DockerAuthConfig.Username) - assert.Equal(t, "s3cr3t", rc.sysCtx.DockerAuthConfig.Password) - assert.Equal(t, "token", rc.sysCtx.DockerAuthConfig.IdentityToken) - assert.Equal(t, "diun/test", rc.sysCtx.DockerRegistryUserAgent) - assert.Equal(t, "linux", rc.sysCtx.OSChoice) - assert.Equal(t, "amd64", rc.sysCtx.ArchitectureChoice) +func TestNewUsesLocalPlatformByDefault(t *testing.T) { + client := New(Options{ + Platform: regplatform.Local(), + }) + require.Nil(t, client.opts.Host) + require.Equal(t, regplatform.Local(), client.opts.Platform) } diff --git a/pkg/registry/tags.go b/pkg/registry/tags.go index c51fe6a36..6fc480079 100644 --- a/pkg/registry/tags.go +++ b/pkg/registry/tags.go @@ -8,11 +8,9 @@ import ( "github.com/crazy-max/diun/v4/pkg/utl" "github.com/pkg/errors" - "go.podman.io/image/v5/docker" "golang.org/x/mod/semver" ) -// Tags holds information about image tags. type Tags struct { List []string NotIncluded int @@ -20,7 +18,6 @@ type Tags struct { Total int } -// TagsOptions holds docker tags image options type TagsOptions struct { Image Image Max int @@ -29,32 +26,31 @@ type TagsOptions struct { Exclude []string } -// Tags returns tags of a Docker repository func (c *Client) Tags(opts TagsOptions) (*Tags, error) { ctx, cancel := c.timeoutContext() defer cancel() - imgRef, err := ImageReference(opts.Image.String()) + regRef, err := opts.Image.regRef() if err != nil { - return nil, errors.Wrap(err, "cannot parse reference") + return nil, errors.Wrap(err, "cannot create regclient reference") } + regRef = regRef.SetTag("") - tags, err := docker.GetRepositoryTags(ctx, c.sysCtx, imgRef) + tags, err := c.regctl.TagList(ctx, regRef) if err != nil { - return nil, err + return nil, errors.Wrap(err, "cannot list repository tags") } + tagList := tags.Tags res := &Tags{ NotIncluded: 0, Excluded: 0, - Total: len(tags), + Total: len(tagList), } - // Sort tags - tags = SortTags(tags, opts.Sort) + tagList = SortTags(tagList, opts.Sort) - // Filter - for _, tag := range tags { + for _, tag := range tagList { if !utl.IsIncluded(tag, opts.Include) { res.NotIncluded++ continue @@ -72,7 +68,6 @@ func (c *Client) Tags(opts TagsOptions) (*Tags, error) { return res, nil } -// SortTags sorts tags list func SortTags(tags []string, sortTag SortTag) []string { switch sortTag { case SortTagReverse: @@ -114,10 +109,8 @@ func SortTags(tags []string, sortTag SortTag) []string { } } -// SortTag holds sort tag type type SortTag string -// SortTag constants const ( SortTagDefault = SortTag("default") SortTagReverse = SortTag("reverse") @@ -125,7 +118,6 @@ const ( SortTagSemver = SortTag("semver") ) -// SortTagTypes is the list of available sort tag types var SortTagTypes = []SortTag{ SortTagDefault, SortTagReverse, @@ -133,12 +125,10 @@ var SortTagTypes = []SortTag{ SortTagSemver, } -// Valid checks sort tag type is valid func (st *SortTag) Valid() bool { return st.OneOf(SortTagTypes) } -// OneOf checks if sort type is one of the values in the list func (st *SortTag) OneOf(stl []SortTag) bool { for _, n := range stl { if n == *st { diff --git a/pkg/registry/tags_test.go b/pkg/registry/tags_test.go index ecd452bed..7f77778c2 100644 --- a/pkg/registry/tags_test.go +++ b/pkg/registry/tags_test.go @@ -1,53 +1,199 @@ package registry import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" "testing" + "time" - "github.com/stretchr/testify/assert" + regconfig "github.com/regclient/regclient/config" + regplatform "github.com/regclient/regclient/types/platform" + 
"github.com/stretchr/testify/require" ) +func TestRepositoryTagsWithBearerPagination(t *testing.T) { + var issuedToken bool + var server *httptest.Server + server = httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + switch req.URL.Path { + case "/token": + issuedToken = true + rw.Header().Set("Content-Type", "application/json") + _, _ = rw.Write([]byte(`{"token":"test-token","expires_in":300}`)) + case "/v2/test/app/tags/list": + if req.URL.RawQuery == "n=2&last=v2" { + if req.Header.Get("Authorization") != "Bearer test-token" { + rw.WriteHeader(http.StatusUnauthorized) + return + } + rw.Header().Set("Content-Type", "application/json") + _, _ = rw.Write([]byte(`{"name":"test/app","tags":["v3"]}`)) + return + } + if req.Header.Get("Authorization") != "Bearer test-token" { + rw.Header().Set("Www-Authenticate", fmt.Sprintf(`Bearer realm="%s/token",service="registry.test"`, server.URL)) + rw.WriteHeader(http.StatusUnauthorized) + return + } + rw.Header().Set("Content-Type", "application/json") + rw.Header().Add("Link", `; rel="next"`) + _, _ = rw.Write([]byte(`{"name":"test/app","tags":["v1","v2"]}`)) + case "/v2/": + rw.WriteHeader(http.StatusOK) + default: + rw.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + imageName := strings.TrimPrefix(server.URL, "https://") + "/test/app:latest" + img, err := ParseImage(ParseImageOptions{Name: imageName}) + require.NoError(t, err) + + rc := New(Options{ + Host: ®config.Host{ + Name: img.Domain, + TLS: regconfig.TLSInsecure, + }, + Platform: regplatform.Local(), + }) + + tags, err := rc.Tags(TagsOptions{Image: img}) + require.NoError(t, err) + require.True(t, issuedToken) + require.Equal(t, []string{"v1", "v2", "v3"}, tags.List) + require.Equal(t, 3, tags.Total) +} + +func TestTagsAppliesIncludeExcludeAndMax(t *testing.T) { + server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + switch req.URL.Path { + case "/v2/test/app/tags/list": + rw.Header().Set("Content-Type", "application/json") + _, _ = rw.Write([]byte(`{"name":"test/app","tags":["old","v1","v2","keep","v3"]}`)) + case "/v2/": + rw.WriteHeader(http.StatusOK) + default: + rw.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + imageName := strings.TrimPrefix(server.URL, "https://") + "/test/app:latest" + img, err := ParseImage(ParseImageOptions{Name: imageName}) + require.NoError(t, err) + + rc := New(Options{ + Host: ®config.Host{ + Name: img.Domain, + TLS: regconfig.TLSInsecure, + }, + Platform: regplatform.Local(), + }) + + tags, err := rc.Tags(TagsOptions{ + Image: img, + Max: 2, + Include: []string{`^v`, `^keep$`}, + Exclude: []string{`^v2$`}, + }) + require.NoError(t, err) + require.Equal(t, []string{"v1", "keep"}, tags.List) + require.Equal(t, 5, tags.Total) + require.Equal(t, 1, tags.NotIncluded) + require.Equal(t, 1, tags.Excluded) +} + +func TestTagsRetries429WithLegacyBackoffDelay(t *testing.T) { + var tagListRequests int + + server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + switch req.URL.Path { + case "/v2/test/app/tags/list": + tagListRequests++ + if tagListRequests == 1 { + rw.WriteHeader(http.StatusTooManyRequests) + _, _ = rw.Write([]byte(`{"errors":[{"code":"TOOMANYREQUESTS","message":"rate limited"}]}`)) + return + } + rw.Header().Set("Content-Type", "application/json") + _, _ = rw.Write([]byte(`{"name":"test/app","tags":["v1"]}`)) + case "/v2/": + rw.WriteHeader(http.StatusOK) + default: + 
+func TestTagsAppliesIncludeExcludeAndMax(t *testing.T) {
+	server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+		switch req.URL.Path {
+		case "/v2/test/app/tags/list":
+			rw.Header().Set("Content-Type", "application/json")
+			_, _ = rw.Write([]byte(`{"name":"test/app","tags":["old","v1","v2","keep","v3"]}`))
+		case "/v2/":
+			rw.WriteHeader(http.StatusOK)
+		default:
+			rw.WriteHeader(http.StatusNotFound)
+		}
+	}))
+	defer server.Close()
+
+	imageName := strings.TrimPrefix(server.URL, "https://") + "/test/app:latest"
+	img, err := ParseImage(ParseImageOptions{Name: imageName})
+	require.NoError(t, err)
+
+	rc := New(Options{
+		Host: &regconfig.Host{
+			Name: img.Domain,
+			TLS:  regconfig.TLSInsecure,
+		},
+		Platform: regplatform.Local(),
+	})
+
+	tags, err := rc.Tags(TagsOptions{
+		Image:   img,
+		Max:     2,
+		Include: []string{`^v`, `^keep$`},
+		Exclude: []string{`^v2$`},
+	})
+	require.NoError(t, err)
+	require.Equal(t, []string{"v1", "keep"}, tags.List)
+	require.Equal(t, 5, tags.Total)
+	require.Equal(t, 1, tags.NotIncluded)
+	require.Equal(t, 1, tags.Excluded)
+}
+
+func TestTagsRetries429WithLegacyBackoffDelay(t *testing.T) {
+	var tagListRequests int
+
+	server := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+		switch req.URL.Path {
+		case "/v2/test/app/tags/list":
+			tagListRequests++
+			if tagListRequests == 1 {
+				rw.WriteHeader(http.StatusTooManyRequests)
+				_, _ = rw.Write([]byte(`{"errors":[{"code":"TOOMANYREQUESTS","message":"rate limited"}]}`))
+				return
+			}
+			rw.Header().Set("Content-Type", "application/json")
+			_, _ = rw.Write([]byte(`{"name":"test/app","tags":["v1"]}`))
+		case "/v2/":
+			rw.WriteHeader(http.StatusOK)
+		default:
+			rw.WriteHeader(http.StatusNotFound)
+		}
+	}))
+	defer server.Close()
+
+	imageName := strings.TrimPrefix(server.URL, "https://") + "/test/app:latest"
+	img, err := ParseImage(ParseImageOptions{Name: imageName})
+	require.NoError(t, err)
+
+	rc := New(Options{
+		Host: &regconfig.Host{
+			Name: img.Domain,
+			TLS:  regconfig.TLSInsecure,
+		},
+		Platform: regplatform.Local(),
+	})
+
+	start := time.Now()
+	tags, err := rc.Tags(TagsOptions{Image: img})
+	elapsed := time.Since(start)
+
+	require.NoError(t, err)
+	require.Equal(t, []string{"v1"}, tags.List)
+	require.Equal(t, 2, tagListRequests)
+	require.GreaterOrEqual(t, elapsed, 1500*time.Millisecond)
+}
+
 func TestTags(t *testing.T) {
-	assert.NotNil(t, rc)
+	client := New(Options{
+		Host: regconfig.HostNewName("docker.io"),
+		Platform: regplatform.Platform{
+			OS:           "linux",
+			Architecture: "amd64",
+		},
+	})
 
 	image, err := ParseImage(ParseImageOptions{
 		Name: "crazymax/diun:3.0.0",
 	})
-	if err != nil {
-		t.Error(err)
-	}
+	require.NoError(t, err)
 
-	tags, err := rc.Tags(TagsOptions{
+	tags, err := client.Tags(TagsOptions{
 		Image: image,
 	})
-	if err != nil {
-		t.Error(err)
-	}
+	require.NoError(t, err)
 
-	assert.Greater(t, tags.Total, 0)
-	assert.Greater(t, len(tags.List), 0)
+	require.Greater(t, tags.Total, 0)
+	require.Greater(t, len(tags.List), 0)
 }
 
 func TestTagsWithDigest(t *testing.T) {
-	t.Parallel()
-
-	assert.NotNil(t, rc)
+	client := New(Options{
+		Host: regconfig.HostNewName("docker.io"),
+		Platform: regplatform.Platform{
+			OS:           "linux",
+			Architecture: "amd64",
+		},
+	})
 
 	image, err := ParseImage(ParseImageOptions{
 		Name: "crazymax/diun:latest@sha256:3fca3dd86c2710586208b0f92d1ec4ce25382f4cad4ae76a2275db8e8bb24031",
 	})
-	if err != nil {
-		t.Error(err)
-	}
+	require.NoError(t, err)
 
-	tags, err := rc.Tags(TagsOptions{
+	tags, err := client.Tags(TagsOptions{
 		Image: image,
 	})
-	if err != nil {
-		t.Error(err)
-	}
+	require.NoError(t, err)
 
-	assert.Greater(t, tags.Total, 0)
-	assert.Greater(t, len(tags.List), 0)
+	require.Greater(t, tags.Total, 0)
+	require.Greater(t, len(tags.List), 0)
 }
 
 func TestTagsSort(t *testing.T) {
@@ -217,9 +363,7 @@ func TestTagsSort(t *testing.T) {
 		},
 	}
-
 	for _, tt := range testCases {
-		tt := tt
 		repotags := []string{
 			"0.1.0",
 			"0.4.0",
@@ -256,11 +400,9 @@ func TestTagsSort(t *testing.T) {
 			"edge",
 			"latest",
 		}
-
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			tags := SortTags(repotags, tt.sortTag)
-			assert.Equal(t, tt.expected, tags)
+			require.Equal(t, tt.expected, tags)
 		})
 	}
 }
diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
deleted file mode 100644
index a7d8acbfc..000000000
--- a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## The libtrust Project Community Code of Conduct
-
-The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md
deleted file mode 100644
index 966f4f053..000000000
--- a/vendor/github.com/containers/libtrust/SECURITY.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Security and Disclosure Information Policy for the libtrust Project
-
-The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.
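
One note on the tags_test.go changes above before the remaining vendor tree updates: TestRepositoryTagsWithBearerPagination leans on the registry pagination convention of RFC 5988 Link headers, where a truncated tags/list page points at the next page and the client keeps following rel="next" until no header remains, which is why the test ends up with all three tags. For illustration only, outside the patch, the hand-off in isolation, using the same values as the fake registry (the package name here is hypothetical):

    package registrytest

    import "net/http"

    // writeFirstPage serves page one (two tags) plus a pointer to the next
    // page. The Link URL's query string (page size n=2, resume after "v2")
    // is exactly what the test handler matches to serve the final page.
    func writeFirstPage(rw http.ResponseWriter) {
        rw.Header().Set("Content-Type", "application/json")
        rw.Header().Add("Link", `</v2/test/app/tags/list?n=2&last=v2>; rel="next"`)
        _, _ = rw.Write([]byte(`{"name":"test/app","tags":["v1","v2"]}`))
    }
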
diff --git a/vendor/github.com/containers/libtrust/ec_key_no_openssl.go b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go deleted file mode 100644 index d6cdaca3f..000000000 --- a/vendor/github.com/containers/libtrust/ec_key_no_openssl.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !libtrust_openssl - -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "fmt" - "io" - "math/big" -) - -func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) { - hasher := k.signatureAlgorithm.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return nil, nil, fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - return ecdsa.Sign(rand.Reader, k.PrivateKey, hash) -} diff --git a/vendor/github.com/containers/libtrust/ec_key_openssl.go b/vendor/github.com/containers/libtrust/ec_key_openssl.go deleted file mode 100644 index 4137511f1..000000000 --- a/vendor/github.com/containers/libtrust/ec_key_openssl.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build libtrust_openssl - -package libtrust - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/rand" - "fmt" - "io" - "math/big" -) - -func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) { - hId := k.signatureAlgorithm.HashID() - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(data) - if err != nil { - return nil, nil, fmt.Errorf("error reading data: %s", err) - } - - return ecdsa.HashSign(rand.Reader, k.PrivateKey, buf.Bytes(), hId) -} diff --git a/vendor/github.com/containers/ocicrypt/LICENSE b/vendor/github.com/containers/ocicrypt/LICENSE deleted file mode 100644 index 953563530..000000000 --- a/vendor/github.com/containers/ocicrypt/LICENSE +++ /dev/null @@ -1,189 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containers/ocicrypt/spec/spec.go b/vendor/github.com/containers/ocicrypt/spec/spec.go deleted file mode 100644 index c0c171824..000000000 --- a/vendor/github.com/containers/ocicrypt/spec/spec.go +++ /dev/null @@ -1,20 +0,0 @@ -package spec - -const ( - // MediaTypeLayerEnc is MIME type used for encrypted layers. - MediaTypeLayerEnc = "application/vnd.oci.image.layer.v1.tar+encrypted" - // MediaTypeLayerGzipEnc is MIME type used for encrypted gzip-compressed layers. - MediaTypeLayerGzipEnc = "application/vnd.oci.image.layer.v1.tar+gzip+encrypted" - // MediaTypeLayerZstdEnc is MIME type used for encrypted zstd-compressed layers. - MediaTypeLayerZstdEnc = "application/vnd.oci.image.layer.v1.tar+zstd+encrypted" - // MediaTypeLayerNonDistributableEnc is MIME type used for non distributable encrypted layers. - MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted" - // MediaTypeLayerNonDistributableGzipEnc is MIME type used for non distributable encrypted gzip-compressed layers. - MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted" - // MediaTypeLayerNonDistributableZstdEnc is MIME type used for non distributable encrypted zstd-compressed layers. - MediaTypeLayerNonDistributableZstdEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted" - // MediaTypeLayerNonDistributableZsdtEnc is MIME type used for non distributable encrypted zstd-compressed layers. - // - // Deprecated: Use [MediaTypeLayerNonDistributableZstdEnc]. 
- MediaTypeLayerNonDistributableZsdtEnc = MediaTypeLayerNonDistributableZstdEnc -) diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS deleted file mode 100644 index 57af08b20..000000000 --- a/vendor/github.com/docker/cli/AUTHORS +++ /dev/null @@ -1,945 +0,0 @@ -# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT. -# This file lists all contributors to the repository. -# See scripts/docs/generate-authors.sh to make modifications. - -A. Lester Buck III -Aanand Prasad -Aaron L. Xu -Aaron Lehmann -Aaron.L.Xu -Abdur Rehman -Abhinandan Prativadi -Abin Shahab -Abreto FU -Ace Tang -Addam Hardy -Adolfo Ochagavía -Adrian Plata -Adrien Duermael -Adrien Folie -Adyanth Hosavalike -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Akhil Mohan -Akihiro Suda -Akim Demaille -Alan Thompson -Alano Terblanche -Albert Callarisa -Alberto Roura -Albin Kerouanton -Aleksa Sarai -Aleksander Piotrowski -Alessandro Boch -Alex Couture-Beil -Alex Mavrogiannis -Alex Mayer -Alexander Boyd -Alexander Chneerov -Alexander Larsson -Alexander Morozov -Alexander Ryabov -Alexandre González -Alexey Igrychev -Alexis Couvreur -Alfred Landrum -Ali Rostami -Alicia Lauerman -Allen Sun -Allie Sadler -Alvin Deng -Amen Belayneh -Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> -Amir Goldstein -Amit Krishnan -Amit Shukla -Amy Lindburg -Anca Iordache -Anda Xu -Andrea Luzzardi -Andreas Köhler -Andres G. Aragoneses -Andres Leon Rangel -Andrew France -Andrew He -Andrew Hsu -Andrew Macpherson -Andrew McDonnell -Andrew Po -Andrew-Zipperer -Andrey Petrov -Andrii Berehuliak -André Martins -Andy Goldstein -Andy Rothfusz -Anil Madhavapeddy -Ankush Agarwal -Anne Henmi -Anton Polonskiy -Antonio Murdaca -Antonis Kalipetis -Anusha Ragunathan -Ao Li -Arash Deshmeh -Archimedes Trajano -Arko Dasgupta -Arnaud Porterie -Arnaud Rebillout -Arthur Flageul -Arthur Peka -Ashly Mathew -Ashwini Oruganti -Aslam Ahemad -Austin Vazquez -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Bastiaan Bakker -BastianHofmann -Ben Bodenmiller -Ben Bonnefoy -Ben Creasy -Ben Firshman -Benjamin Boudreau -Benjamin Böhmke -Benjamin Nater -Benoit Sigoure -Bhumika Bayani -Bill Wang -Bin Liu -Bingshen Wang -Bishal Das -Bjorn Neergaard -Boaz Shuster -Boban Acimovic -Bogdan Anton -Boris Pruessmann -Brad Baker -Bradley Cicenas -Brandon Mitchell -Brandon Philips -Brent Salisbury -Bret Fisher -Brian (bex) Exelbierd -Brian Goff -Brian Tracy -Brian Wieder -Bruno Sousa -Bryan Bess -Bryan Boreham -Bryan Murphy -bryfry -Calvin Liu -Cameron Spear -Cao Weiwei -Carlo Mion -Carlos Alexandro Becker -Carlos de Paula -carsontham -Carston Schilds -Casey Korver -Ce Gao -Cedric Davies -Cesar Talledo -Cezar Sa Espinola -Chad Faragher -Chao Wang -Charles Chan -Charles Law -Charles Smith -Charlie Drage -Charlotte Mach -ChaYoung You -Chee Hau Lim -Chen Chuanliang -Chen Hanxiao -Chen Mingjie -Chen Qiu -Chris Chinchilla -Chris Couzens -Chris Gavin -Chris Gibson -Chris McKinnel -Chris Snow -Chris Vermilion -Chris Weyl -Christian Persson -Christian Stefanescu -Christophe Robin -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Jones -Christopher Petito <47751006+krissetto@users.noreply.github.com> -Christopher Petito -Christopher Svensson -Christy Norman -Chun Chen -Clinton Kitson -Coenraad Loubser -Colin Hebert -Collin Guarino -Colm Hally -Comical Derskeal <27731088+derskeal@users.noreply.github.com> -Conner Crosby -Corey Farrell -Corey Quon -Cory Bennet -Cory Snider -Craig Osterhout -Craig Wilhite 
-Cristian Staretu -Daehyeok Mun -Dafydd Crosby -Daisuke Ito -dalanlan -Damien Nadé -Dan Cotora -Dan Wallis -Danial Gharib -Daniel Artine -Daniel Cassidy -Daniel Dao -Daniel Farrell -Daniel Gasienica -Daniel Goosen -Daniel Helfand -Daniel Hiltgen -Daniel J Walsh -Daniel Nephin -Daniel Norberg -Daniel Watkins -Daniel Zhang -Daniil Nikolenko -Danny Berger -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Dave Goodchild -Dave Henderson -Dave Tucker -David Alvarez -David Beitey -David Calavera -David Cramer -David Dooling -David Gageot -David Karlsson -David le Blanc -David Lechner -David Scott -David Sheets -David Williamson -David Xia -David Young -Deng Guangxing -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Docter -dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> -Derek McGowan -Des Preston -Deshi Xiao -Dharmit Shah -Dhawal Yogesh Bhanushali -Dieter Reuter -Dilep Dev <34891655+DilepDev@users.noreply.github.com> -Dima Stopel -Dimitry Andric -Ding Fei -Diogo Monica -Djordje Lukic -Dmitriy Fishman -Dmitry Gusev -Dmitry Smirnov -Dmitry V. Krivenok -Dominik Braun -Don Kjer -Dong Chen -DongGeon Lee -Doug Davis -Drew Erny -Ed Costello -Ed Morley <501702+edmorley@users.noreply.github.com> -Elango Sivanandam -Eli Uriegas -Eli Uriegas -Elias Faxö -Elliot Luo <956941328@qq.com> -Eng Zer Jun -Eric Bode -Eric Curtin -Eric Engestrom -Eric G. Noriega -Eric Rosenberg -Eric Sage -Eric-Olivier Lamey -Erica Windisch -Erik Hollensbe -Erik Humphrey -Erik St. Martin -Essam A. Hassan -Ethan Haynes -Euan Kemp -Eugene Yakubovich -Evan Allrich -Evan Hazlett -Evan Krall -Evan Lezar -Evelyn Xu -Everett Toews -Fabio Falci -Fabrizio Soppelsa -Felix Geyer -Felix Hupfeld -Felix Rabe -fezzik1620 -Filip Jareš -Flavio Crisciani -Florian Klein -Forest Johnson -Foysal Iqbal -François Scala -Fred Lifton -Frederic Hemberger -Frederick F. Kautz IV -Frederik Nordahl Jul Sabroe -Frieder Bluemle -Gabriel Gore -Gabriel Nicolas Avellaneda -Gabriela Georgieva -Gaetan de Villele -Gang Qiao -Gary Schaetz -Genki Takiuchi -George MacRorie -George Margaritis -George Xie -Gianluca Borello -Giau. Tran Minh -Giedrius Jonikas -Gildas Cuisinier -Gio d'Amelio -Gleb Stsenov -Goksu Toprak -Gou Rao -Govind Rai -Grace Choi -Graeme Wiebe -Grant Reaber -Greg Pflaum -Gsealy -Guilhem Lettron -Guillaume J. 
Charmes -Guillaume Le Floch -Guillaume Tardif -gwx296173 -Günther Jungbluth -Hakan Özler -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harold Cooper -Harry Zhang -He Simei -Hector S -Helen Xie -Henning Sprang -Henry N -Hernan Garcia -Hongbin Lu -Hossein Abbasi <16090309+hsnabszhdn@users.noreply.github.com> -Hu Keping -Huayi Zhang -Hugo Chastel -Hugo Gabriel Eyherabide -huqun -Huu Nguyen -Hyzhou Zhy -Iain MacDonald -Iain Samuel McLean Elder -Ian Campbell -Ian Philpot -Ignacio Capurro -Ilya Dmitrichenko -Ilya Khlopotov -Ilya Sotkov -Ioan Eugen Stan -Isabel Jimenez -Ivan Grcic -Ivan Grund -Ivan Markin -Jacob Atzen -Jacob Tomlinson -Jacopo Rigoli -Jaivish Kothari -Jake Lambert -Jake Sanders -Jake Stokes -Jakub Panek -James Nesbitt -James Turnbull -Jamie Hannaford -Jan Koprowski -Jan Pazdziora -Jan-Jaap Driessen -Jana Radhakrishnan -Jared Hocutt -Jasmine Hegman -Jason Hall -Jason Heiss -Jason Plum -Jay Kamat -Jean Lecordier -Jean Rouge -Jean-Christophe Sirot -Jean-Pierre Huynh -Jeff Lindsay -Jeff Nickoloff -Jeff Silberman -Jennings Zhang -Jeremy Chambers -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jesse Adametz -Jessica Frazelle -Jezeniel Zapanta -Jian Zhang -Jianyong Wu -Jie Luo -Jilles Oldenbeuving -Jim Chen -Jim Galasyn -Jim Lin -Jimmy Leger -Jimmy Song -jimmyxian -Jintao Zhang -Joao Fernandes -Joe Abbey -Joe Doliner -Joe Gordon -Joel Handwell -Joey Geiger -Joffrey F -Johan Euphrosine -Johannes 'fish' Ziemke -John Feminella -John Harris -John Howard -John Howard -John Laswell -John Maguire -John Mulhausen -John Starks -John Stephens -John Tims -John V. Martinez -John Willis -Jon Johnson -Jon Zeolla -Jonatas Baldin -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Lee -Jonathan Lomas -Jonathan McCrohan -Jonathan Warriss-Simmons -Jonh Wendell -Jordan Jennings -Jorge Vallecillo -Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com> -Joseph Kern -Josh Bodah -Josh Chorlton -Josh Hawn -Josh Horwitz -Josh Soref -Julian -Julien Barbier -Julien Kassar -Julien Maitrehenry -Julio Cesar Garcia -Justas Brazauskas -Justin Chadwell -Justin Cormack -Justin Simonelis -Justyn Temme -Jyrki Puttonen -Jérémie Drouet -Jérôme Petazzoni -Jörg Thalheim -Kai Blin -Kai Qiang Wu (Kennan) -Kara Alexandra -Kareem Khazem -Karthik Nayak -Kat Samperi -Kathryn Spiers -Katie McLaughlin -Ke Xu -Kei Ohmura -Keith Hudgins -Kelton Bassingthwaite -Ken Cochrane -Ken ICHIKAWA -Kenfe-Mickaël Laventure -Kevin Alvarez -Kevin Burke -Kevin Feyrer -Kevin Kern -Kevin Kirsche -Kevin Meredith -Kevin Richardson -Kevin Woblick -khaled souf -Kim Eik -Kir Kolyshkin -Kirill A. 
Korinsky -Kotaro Yoshimatsu -Krasi Georgiev -Kris-Mikael Krister -Kun Zhang -Kunal Kushwaha -Kyle Mitofsky -Lachlan Cooper -Lai Jiangshan -Lajos Papp -Lars Kellogg-Stedman -Laura Brehm -Laura Frank -Laurent Erignoux -Laurent Goderre -Lee Gaines -Lei Jitang -Lennie -lentil32 -Leo Gallucci -Leonid Skorospelov -Lewis Daly -Li Fu Bang -Li Yi -Li Zeghong -Liang-Chi Hsieh -Lihua Tang -Lily Guo -Lin Lu -Linus Heckemann -Liping Xue -Liron Levin -liwenqi -lixiaobing10051267 -Lloyd Dewolf -Lorenzo Fontana -Louis Opter -Lovekesh Kumar -Luca Favatella -Luca Marturana -Lucas Chan -Luis Henrique Mulinari -Luka Hartwig -Lukas Heeren -Lukasz Zajaczkowski -Lydell Manganti -Lénaïc Huard -Ma Shimiao -Mabin -Maciej Kalisz -Madhav Puri -Madhu Venugopal -Madhur Batra -Malte Janduda -Manjunath A Kumatagi -Mansi Nahar -mapk0y -Marc Bihlmaier -Marc Cornellà -Marco Mariani -Marco Spiess -Marco Vedovati -Marcus Martins -Marianna Tessel -Marius Ileana -Marius Meschter -Marius Sturm -Mark Oates -Marsh Macy -Martin Mosegaard Amdisen -Mary Anthony -Mason Fish -Mason Malone -Mateusz Major -Mathias Duedahl <64321057+Lussebullen@users.noreply.github.com> -Mathieu Champlon -Mathieu Rollet -Matt Gucci -Matt Robenolt -Matteo Orefice -Matthew Heon -Matthieu Hauglustaine -Matthieu MOREL -Mauro Porras P -Max Shytikov -Max-Julian Pogner -Maxime Petazzoni -Maximillian Fan Xavier -Mei ChunTao -Melroy van den Berg -Mert Şişmanoğlu -Metal <2466052+tedhexaflow@users.noreply.github.com> -Micah Zoltu -Michael A. Smith -Michael Bridgen -Michael Crosby -Michael Friis -Michael Irwin -Michael Käufl -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Steinert -Michael Tews -Michael West -Michal Minář -Michał Czeraszkiewicz -Miguel Angel Alvarez Cabrerizo -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Casas -Mike Dalton -Mike Danese -Mike Dillon -Mike Goelzer -Mike MacCana -mikelinjie <294893458@qq.com> -Mikhail Vasin -Milind Chawre -Mindaugas Rukas -Miroslav Gula -Misty Stanley-Jones -Mohammad Banikazemi -Mohammad Hossein -Mohammed Aaqib Ansari -Mohammed Aminu Futa -Mohini Anne Dsouza -Moorthy RS -Morgan Bauer -Morten Hekkvang -Morten Linderud -Moysés Borges -Mozi <29089388+pzhlkj6612@users.noreply.github.com> -Mrunal Patel -muicoder -Murukesh Mohanan -Muthukumar R -Máximo Cuadros -Mårten Cassel -Nace Oroz -Nahum Shalman -Nalin Dahyabhai -Nao YONASHIRO -Nassim 'Nass' Eddequiouaq -Natalie Parker -Nate Brennand -Nathan Hsieh -Nathan LeClaire -Nathan McCauley -Neil Peterson -Nick Adcock -Nick Santos -Nick Sieger -Nico Stapelbroek -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nikhil Chawla -Nikolas Garofil -Nikolay Milovanov -NinaLua -Nir Soffer -Nishant Totla -NIWA Hideyuki -Noah Silas -Noah Treuhaft -O.S. Tezer -Oded Arbel -Odin Ugedal -ohmystack -OKA Naoya -Oliver Pomeroy -Olle Jonsson -Olli Janatuinen -Oscar Wieman -Otto Kekäläinen -Ovidio Mallo -Pascal Borreli -Patrick Böänziger -Patrick Daigle <114765035+pdaig@users.noreply.github.com> -Patrick Hemmer -Patrick Lang -Patrick St. 
laurent -Paul -Paul Kehrer -Paul Lietar -Paul Mulders -Paul Rogalski -Paul Seyfert -Paul Weaver -Pavel Pospisil -Paweł Gronowski -Paweł Pokrywka -Paweł Szczekutowicz -Peeyush Gupta -Per Lundberg -Peter Dave Hello -Peter Edge -Peter Hsu -Peter Jaffe -Peter Kehl -Peter Nagy -Peter Salvatore -Peter Waller -Phil Estes -Philip Alexander Etling -Philipp Gillé -Philipp Schmied -Phong Tran -Pieter E Smit -pixelistik -Pratik Karki -Prayag Verma -Preston Cowley -Pure White -Qiang Huang -Qinglan Peng -QQ喵 -qudongfang -Raghavendra K T -Rahul Kadyan -Rahul Zoldyck -Ravi Shekhar Jethani -Ray Tsang -Reficul -Remy Suen -Renaud Gaubert -Ricardo N Feliciano -Rich Moyse -Richard Chen Zheng <58443436+rchenzheng@users.noreply.github.com> -Richard Mathie -Richard Scothern -Rick Wieman -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Gulewich -Rob Murray -Robert Wallis -Robin Naundorf -Robin Speekenbrink -Roch Feuillade -Rodolfo Ortiz -Rogelio Canedo -Rohan Verma -Roland Kammerer -Roman Dudin -Rory Hunter -Ross Boucher -Rubens Figueiredo -Rui Cao -Rui JingAn -Ryan Belgrave -Ryan Detzel -Ryan Stelly -Ryan Wilson-Perkin -Ryan Zhang -Sainath Grandhi -Sakeven Jiang -Sally O'Malley -Sam Neirinck -Sam Thibault -Samarth Shah -Sambuddha Basu -Sami Tabet -Samuel Cochran -Samuel Karp -Sandro Jäckel -Santhosh Manohar -Sarah Sanders -Sargun Dhillon -Saswat Bhattacharya -Saurabh Kumar -Scott Brenner -Scott Collier -Sean Christopherson -Sean Rodman -Sebastiaan van Stijn -Sergey Tryuber -Serhat Gülçiçek -Sevki Hasirci -Shaun Kaasten -Sheng Yang -Shijiang Wei -Shishir Mahajan -Shoubhik Bose -Shukui Yang -Sian Lerk Lau -Sidhartha Mani -sidharthamani -Silvin Lubecki -Simei He -Simon Ferquel -Simon Heimberg -Sindhu S -Slava Semushin -Solomon Hykes -Song Gao -Spencer Brown -Spring Lee -squeegels -Srini Brahmaroutu -Stavros Panakakis -Stefan S. -Stefan Scherer -Stefan Weil -Stephane Jeandeaux -Stephen Day -Stephen Rust -Steve Durrheimer -Steve Richards -Steven Burgess -Stoica-Marcu Floris-Andrei -Stuart Williams -Subhajit Ghosh -Sun Jianbo -Sune Keller -Sungwon Han -Sunny Gogoi -Sven Dowideit -Sylvain Baubeau -Sébastien HOUZÉ -T K Sourabh -TAGOMORI Satoshi -taiji-tech -Takeshi Koenuma -Takuya Noguchi -Taylor Jones -Teiva Harsanyi -Tejaswini Duggaraju -Tengfei Wang -Teppei Fukuda -Thatcher Peskens -Thibault Coupin -Thomas Gazagnaire -Thomas Krzero -Thomas Leonard -Thomas Léveil -Thomas Riccardi -Thomas Swift -Tianon Gravi -Tianyi Wang -Tibor Vass -Tim Dettrick -Tim Hockin -Tim Sampson -Tim Smith -Tim Waugh -Tim Welsh -Tim Wraight -timfeirg -Timothy Hobbs -Tobias Bradtke -Tobias Gesellchen -Todd Whiteman -Tom Denham -Tom Fotherby -Tom Klingenberg -Tom Milligan -Tom X. 
Tobin -Tomas Bäckman -Tomas Tomecek -Tomasz Kopczynski -Tomáš Hrčka -Tony Abboud -Tõnis Tiigi -Trapier Marshall -Travis Cline -Tristan Carel -Tycho Andersen -Tycho Andersen -uhayate -Ulrich Bareth -Ulysses Souza -Umesh Yadav -Vaclav Struhar -Valentin Lorentz -Vardan Pogosian -Venkateswara Reddy Bukkasamudram -Veres Lajos -Victor Vieux -Victoria Bialas -Viktor Stanchev -Ville Skyttä -Vimal Raghubir -Vincent Batts -Vincent Bernat -Vincent Demeester -Vincent Woo -Vishnu Kannan -Vivek Goyal -Wang Jie -Wang Lei -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wang Yumu <37442693@qq.com> -Wataru Ishida -Wayne Song -Wen Cheng Ma -Wenlong Zhang -Wenzhi Liang -Wes Morgan -Wewang Xiaorenfine -Will Wang -William Henry -Xianglin Gao -Xiaodong Liu -Xiaodong Zhang -Xiaoxi He -Xinbo Weng -Xuecong Liao -Yan Feng -Yanqiang Miao -Yassine Tijani -Yi EungJun -Ying Li -Yong Tang -Yosef Fertel -Yu Peng -Yuan Sun -Yucheng Wu -Yue Zhang -Yunxiang Huang -Zachary Romero -Zander Mackie -zebrilee -Zeel B Patel -Zhang Kun -Zhang Wei -Zhang Wentao -ZhangHang -zhenghenghuo -Zhiwei Liang -Zhou Hao -Zhoulin Xie -Zhu Guihua -Zhuo Zhi -Álex González -Álvaro Lázaro -Átila Camurça Alves -Александр Менщиков <__Singleton__@hackerdom.ru> -徐俊杰 -林博仁 Buo-ren Lin diff --git a/vendor/github.com/docker/cli/LICENSE b/vendor/github.com/docker/cli/LICENSE deleted file mode 100644 index 9c8e20ab8..000000000 --- a/vendor/github.com/docker/cli/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2017 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE deleted file mode 100644 index 1c40faaec..000000000 --- a/vendor/github.com/docker/cli/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go deleted file mode 100644 index 5a6378050..000000000 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ /dev/null @@ -1,176 +0,0 @@ -package config - -import ( - "fmt" - "io" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "sync" - - "github.com/docker/cli/cli/config/configfile" - "github.com/docker/cli/cli/config/credentials" - "github.com/docker/cli/cli/config/types" -) - -const ( - // EnvOverrideConfigDir is the name of the environment variable that can be - // used to override the location of the client configuration files (~/.docker). 
- // - // It takes priority over the default, but can be overridden by the "--config" - // command line option. - EnvOverrideConfigDir = "DOCKER_CONFIG" - - // ConfigFileName is the name of the client configuration file inside the - // config-directory. - ConfigFileName = "config.json" - configFileDir = ".docker" - contextsDir = "contexts" -) - -var ( - initConfigDir = new(sync.Once) - configDir string -) - -// resetConfigDir is used in testing to reset the "configDir" package variable -// and its sync.Once to force re-lookup between tests. -func resetConfigDir() { - configDir = "" - initConfigDir = new(sync.Once) -} - -// getHomeDir returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// On non-Windows platforms, it falls back to nss lookups, if the home -// directory cannot be obtained from environment-variables. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. -// -// getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker -// as dependency for consumers that only need to read the config-file. -// -// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v28.0.3+incompatible/pkg/homedir#Get -func getHomeDir() string { - home, _ := os.UserHomeDir() - if home == "" && runtime.GOOS != "windows" { - if u, err := user.Current(); err == nil { - return u.HomeDir - } - } - return home -} - -// Provider defines an interface for providing the CLI config. -type Provider interface { - ConfigFile() *configfile.ConfigFile -} - -// Dir returns the directory the configuration file is stored in -func Dir() string { - initConfigDir.Do(func() { - configDir = os.Getenv(EnvOverrideConfigDir) - if configDir == "" { - configDir = filepath.Join(getHomeDir(), configFileDir) - } - }) - return configDir -} - -// ContextStoreDir returns the directory the docker contexts are stored in -func ContextStoreDir() string { - return filepath.Join(Dir(), contextsDir) -} - -// SetDir sets the directory the configuration file is stored in -func SetDir(dir string) { - // trigger the sync.Once to synchronise with Dir() - initConfigDir.Do(func() {}) - configDir = filepath.Clean(dir) -} - -// Path returns the path to a file relative to the config dir -func Path(p ...string) (string, error) { - path := filepath.Join(append([]string{Dir()}, p...)...) - if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { - return "", fmt.Errorf("path %q is outside of root config directory %q", path, Dir()) - } - return path, nil -} - -// LoadFromReader is a convenience function that creates a ConfigFile object from -// a reader. It returns an error if configData is malformed. -func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LoadFromReader(configData) - return &configFile, err -} - -// Load reads the configuration file ([ConfigFileName]) from the given directory. -// If no directory is given, it uses the default [Dir]. A [*configfile.ConfigFile] -// is returned containing the contents of the configuration file, or a default -// struct if no configfile exists in the given location. 
-// -// Load returns an error if a configuration file exists in the given location, -// but cannot be read, or is malformed. Consumers must handle errors to prevent -// overwriting an existing configuration file. -func Load(configDir string) (*configfile.ConfigFile, error) { - if configDir == "" { - configDir = Dir() - } - return load(configDir) -} - -func load(configDir string) (*configfile.ConfigFile, error) { - filename := filepath.Join(configDir, ConfigFileName) - configFile := configfile.New(filename) - - file, err := os.Open(filename) - if err != nil { - if os.IsNotExist(err) { - // It is OK for no configuration file to be present, in which - // case we return a default struct. - return configFile, nil - } - // Any other error happening when failing to read the file must be returned. - return configFile, fmt.Errorf("loading config file: %w", err) - } - defer func() { _ = file.Close() }() - err = configFile.LoadFromReader(file) - if err != nil { - err = fmt.Errorf("parsing config file (%s): %w", filename, err) - } - return configFile, err -} - -// LoadDefaultConfigFile attempts to load the default config file and returns -// a reference to the ConfigFile struct. If none is found or when failing to load -// the configuration file, it initializes a default ConfigFile struct. If no -// credentials-store is set in the configuration file, it attempts to discover -// the default store to use for the current platform. -// -// Important: LoadDefaultConfigFile prints a warning to stderr when failing to -// load the configuration file, but otherwise ignores errors. Consumers should -// consider using [Load] (and [credentials.DetectDefaultStore]) to detect errors -// when updating the configuration file, to prevent discarding a (malformed) -// configuration file. 
-func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { - configFile, err := load(Dir()) - if err != nil { - // FIXME(thaJeztah): we should not proceed here to prevent overwriting existing (but malformed) config files; see https://github.com/docker/cli/issues/5075 - _, _ = fmt.Fprintln(stderr, "WARNING: Error", err) - } - if !configFile.ContainsAuth() { - configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) - } - return configFile -} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go deleted file mode 100644 index fab3ed4cb..000000000 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ /dev/null @@ -1,441 +0,0 @@ -package configfile - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/docker/cli/cli/config/credentials" - "github.com/docker/cli/cli/config/memorystore" - "github.com/docker/cli/cli/config/types" - "github.com/sirupsen/logrus" -) - -// ConfigFile ~/.docker/config.json file info -type ConfigFile struct { - AuthConfigs map[string]types.AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - NetworksFormat string `json:"networksFormat,omitempty"` - PluginsFormat string `json:"pluginsFormat,omitempty"` - VolumesFormat string `json:"volumesFormat,omitempty"` - StatsFormat string `json:"statsFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - CredentialHelpers map[string]string `json:"credHelpers,omitempty"` - Filename string `json:"-"` // Note: for internal use only - ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` - ServicesFormat string `json:"servicesFormat,omitempty"` - TasksFormat string `json:"tasksFormat,omitempty"` - SecretFormat string `json:"secretFormat,omitempty"` - ConfigFormat string `json:"configFormat,omitempty"` - NodesFormat string `json:"nodesFormat,omitempty"` - PruneFilters []string `json:"pruneFilters,omitempty"` - Proxies map[string]ProxyConfig `json:"proxies,omitempty"` - CurrentContext string `json:"currentContext,omitempty"` - CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` - Plugins map[string]map[string]string `json:"plugins,omitempty"` - Aliases map[string]string `json:"aliases,omitempty"` - Features map[string]string `json:"features,omitempty"` -} - -type configEnvAuth struct { - Auth string `json:"auth"` -} - -type configEnv struct { - AuthConfigs map[string]configEnvAuth `json:"auths"` -} - -// DockerEnvConfigKey is an environment variable that contains a JSON encoded -// credential config. It only supports storing the credentials as a base64 -// encoded string in the format base64("username:pat"). -// -// Adding additional fields will produce a parsing error. 
-// -// Example: -// -// { -// "auths": { -// "example.test": { -// "auth": base64-encoded-username-pat -// } -// } -// } -const DockerEnvConfigKey = "DOCKER_AUTH_CONFIG" - -// ProxyConfig contains proxy configuration settings -type ProxyConfig struct { - HTTPProxy string `json:"httpProxy,omitempty"` - HTTPSProxy string `json:"httpsProxy,omitempty"` - NoProxy string `json:"noProxy,omitempty"` - FTPProxy string `json:"ftpProxy,omitempty"` - AllProxy string `json:"allProxy,omitempty"` -} - -// New initializes an empty configuration file for the given filename 'fn' -func New(fn string) *ConfigFile { - return &ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - HTTPHeaders: make(map[string]string), - Filename: fn, - Plugins: make(map[string]map[string]string), - Aliases: make(map[string]string), - } -} - -// LoadFromReader reads the configuration data given and sets up the auth config -// information with given directory and populates the receiver object -func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) { - return err - } - var err error - for addr, ac := range configFile.AuthConfigs { - if ac.Auth != "" { - ac.Username, ac.Password, err = decodeAuth(ac.Auth) - if err != nil { - return err - } - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - return nil -} - -// ContainsAuth returns whether there is authentication configured -// in this file or not. -func (configFile *ConfigFile) ContainsAuth() bool { - return configFile.CredentialsStore != "" || - len(configFile.CredentialHelpers) > 0 || - len(configFile.AuthConfigs) > 0 -} - -// GetAuthConfigs returns the mapping of repo to auth configuration -func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig { - if configFile.AuthConfigs == nil { - configFile.AuthConfigs = make(map[string]types.AuthConfig) - } - return configFile.AuthConfigs -} - -// SaveToWriter encodes and writes out all the authorization information to -// the given writer -func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { - // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) - for k, authConfig := range configFile.AuthConfigs { - authCopy := authConfig - // encode and save the authstring, while blanking out the original fields - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - tmpAuthConfigs[k] = authCopy - } - - saveAuthConfigs := configFile.AuthConfigs - configFile.AuthConfigs = tmpAuthConfigs - defer func() { configFile.AuthConfigs = saveAuthConfigs }() - - // User-Agent header is automatically set, and should not be stored in the configuration - for v := range configFile.HTTPHeaders { - if strings.EqualFold(v, "User-Agent") { - delete(configFile.HTTPHeaders, v) - } - } - - data, err := json.MarshalIndent(configFile, "", "\t") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() (retErr error) { - if configFile.Filename == "" { - return errors.New("can't save config with empty filename") - } - - dir := filepath.Dir(configFile.Filename) - if err := os.MkdirAll(dir, 0o700); err != nil { - return err - } - temp, err := os.CreateTemp(dir, filepath.Base(configFile.Filename)) - if err != nil { - 
return err - } - defer func() { - // ignore error as the file may already be closed when we reach this. - _ = temp.Close() - if retErr != nil { - if err := os.Remove(temp.Name()); err != nil { - logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") - } - } - }() - - err = configFile.SaveToWriter(temp) - if err != nil { - return err - } - - if err := temp.Close(); err != nil { - return fmt.Errorf("error closing temp file: %w", err) - } - - // Handle situation where the configfile is a symlink, and allow for dangling symlinks - cfgFile := configFile.Filename - if f, err := filepath.EvalSymlinks(cfgFile); err == nil { - cfgFile = f - } else if os.IsNotExist(err) { - // extract the path from the error if the configfile does not exist or is a dangling symlink - var pathError *os.PathError - if errors.As(err, &pathError) { - cfgFile = pathError.Path - } - } - - // Try copying the current config file (if any) ownership and permissions - copyFilePermissions(cfgFile, temp.Name()) - return os.Rename(temp.Name(), cfgFile) -} - -// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and -// then checking this against any environment variables provided to the container -func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { - var cfgKey string - - if _, ok := configFile.Proxies[host]; !ok { - cfgKey = "default" - } else { - cfgKey = host - } - - config := configFile.Proxies[cfgKey] - permitted := map[string]*string{ - "HTTP_PROXY": &config.HTTPProxy, - "HTTPS_PROXY": &config.HTTPSProxy, - "NO_PROXY": &config.NoProxy, - "FTP_PROXY": &config.FTPProxy, - "ALL_PROXY": &config.AllProxy, - } - m := runOpts - if m == nil { - m = make(map[string]*string) - } - for k := range permitted { - if *permitted[k] == "" { - continue - } - if _, ok := m[k]; !ok { - m[k] = permitted[k] - } - if _, ok := m[strings.ToLower(k)]; !ok { - m[strings.ToLower(k)] = permitted[k] - } - } - return m -} - -// encodeAuth creates a base64 encoded string containing authorization information -func encodeAuth(authConfig *types.AuthConfig) string { - if authConfig.Username == "" && authConfig.Password == "" { - return "" - } - - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decodeAuth decodes a base64 encoded string and returns username and password -func decodeAuth(authStr string) (string, string, error) { - if authStr == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", errors.New("something went wrong decoding auth config") - } - userName, password, ok := strings.Cut(string(decoded), ":") - if !ok || userName == "" { - return "", "", errors.New("invalid auth configuration file") - } - return userName, strings.Trim(password, "\x00"), nil -} - -// GetCredentialsStore returns a new credentials store from the settings in the -// configuration file -func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { - store := credentials.NewFileStore(configFile) - - if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { - store =
newNativeStore(configFile, helper) - } - - envConfig := os.Getenv(DockerEnvConfigKey) - if envConfig == "" { - return store - } - - authConfig, err := parseEnvConfig(envConfig) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err) - return store - } - - // use DOCKER_AUTH_CONFIG if set - // it uses the native or file store as a fallback to fetch and store credentials - envStore, err := memorystore.New( - memorystore.WithAuthConfig(authConfig), - memorystore.WithFallbackStore(store), - ) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err) - return store - } - - return envStore -} - -func parseEnvConfig(v string) (map[string]types.AuthConfig, error) { - envConfig := &configEnv{} - decoder := json.NewDecoder(strings.NewReader(v)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(envConfig); err != nil && !errors.Is(err, io.EOF) { - return nil, err - } - if decoder.More() { - return nil, errors.New("DOCKER_AUTH_CONFIG does not support more than one JSON object") - } - - authConfigs := make(map[string]types.AuthConfig) - for addr, envAuth := range envConfig.AuthConfigs { - if envAuth.Auth == "" { - return nil, fmt.Errorf("DOCKER_AUTH_CONFIG environment variable is missing key `auth` for %s", addr) - } - username, password, err := decodeAuth(envAuth.Auth) - if err != nil { - return nil, err - } - authConfigs[addr] = types.AuthConfig{ - Username: username, - Password: password, - ServerAddress: addr, - } - } - return authConfigs, nil -} - -// var for unit testing. -var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { - return credentials.NewNativeStore(configFile, helperSuffix) -} - -// GetAuthConfig for a repository from the credential store -func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { - return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) -} - -// getConfiguredCredentialStore returns the credential helper configured for the -// given registry, the default credsStore, or the empty string if neither are -// configured. -func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { - if c.CredentialHelpers != nil && registryHostname != "" { - if helper, exists := c.CredentialHelpers[registryHostname]; exists { - return helper - } - } - return c.CredentialsStore -} - -// GetAllCredentials returns all of the credentials stored in all of the -// configured credential stores. -func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { - auths := make(map[string]types.AuthConfig) - addAll := func(from map[string]types.AuthConfig) { - for reg, ac := range from { - auths[reg] = ac - } - } - - defaultStore := configFile.GetCredentialsStore("") - newAuths, err := defaultStore.GetAll() - if err != nil { - return nil, err - } - addAll(newAuths) - - // Auth configs from a registry-specific helper should override those from the default store. - for registryHostname := range configFile.CredentialHelpers { - newAuth, err := configFile.GetAuthConfig(registryHostname) - if err != nil { - // TODO(thaJeztah): use context-logger, so that this output can be suppressed (in tests). 
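For reference, a hedged sketch of producing a DOCKER_AUTH_CONFIG value in the single-object shape parseEnvConfig accepts; the example.test host and username:pat pair are placeholders:

    package main

    import (
        "encoding/base64"
        "fmt"
        "os"
    )

    func main() {
        // base64("username:pat") is the only form the "auth" key supports here.
        auth := base64.StdEncoding.EncodeToString([]byte("username:pat"))
        val := fmt.Sprintf(`{"auths":{"example.test":{"auth":%q}}}`, auth)
        os.Setenv("DOCKER_AUTH_CONFIG", val)
    }

When the variable is set, GetCredentialsStore layers these credentials over the native or file store, so lookups hit the environment first and fall back to the configured helper.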
- logrus.WithError(err).Warnf("Failed to get credentials for registry: %s", registryHostname) - continue - } - auths[registryHostname] = newAuth - } - return auths, nil -} - -// GetFilename returns the file name that this config file is based on. -func (configFile *ConfigFile) GetFilename() string { - return configFile.Filename -} - -// PluginConfig retrieves the requested option for the given plugin. -func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { - if configFile.Plugins == nil { - return "", false - } - pluginConfig, ok := configFile.Plugins[pluginname] - if !ok { - return "", false - } - value, ok := pluginConfig[option] - return value, ok -} - -// SetPluginConfig sets the option to the given value for the given -// plugin. Passing a value of "" will remove the option. If removing -// the final config item for a given plugin then also cleans up the -// overall plugin entry. -func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { - if configFile.Plugins == nil { - configFile.Plugins = make(map[string]map[string]string) - } - pluginConfig, ok := configFile.Plugins[pluginname] - if !ok { - pluginConfig = make(map[string]string) - configFile.Plugins[pluginname] = pluginConfig - } - if value != "" { - pluginConfig[option] = value - } else { - delete(pluginConfig, option) - } - if len(pluginConfig) == 0 { - delete(configFile.Plugins, pluginname) - } -} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go deleted file mode 100644 index 06b811e7d..000000000 --- a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build !windows - -package configfile - -import ( - "os" - "syscall" -) - -// copyFilePermissions copies file ownership and permissions from "src" to "dst", -// ignoring any error during the process. -func copyFilePermissions(src, dst string) { - var ( - mode os.FileMode = 0o600 - uid, gid int - ) - - fi, err := os.Stat(src) - if err != nil { - return - } - if fi.Mode().IsRegular() { - mode = fi.Mode() - } - if err := os.Chmod(dst, mode); err != nil { - return - } - - uid = int(fi.Sys().(*syscall.Stat_t).Uid) - gid = int(fi.Sys().(*syscall.Stat_t).Gid) - - if uid > 0 && gid > 0 { - _ = os.Chown(dst, uid, gid) - } -} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go deleted file mode 100644 index 42fffc39a..000000000 --- a/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package configfile - -func copyFilePermissions(src, dst string) { - // TODO implement for Windows -} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go deleted file mode 100644 index 28d58ec48..000000000 --- a/vendor/github.com/docker/cli/cli/config/credentials/credentials.go +++ /dev/null @@ -1,17 +0,0 @@ -package credentials - -import ( - "github.com/docker/cli/cli/config/types" -) - -// Store is the interface that any credentials store must implement. -type Store interface { - // Erase removes credentials from the store for a given server. - Erase(serverAddress string) error - // Get retrieves credentials from the store for a given server. - Get(serverAddress string) (types.AuthConfig, error) - // GetAll retrieves all the credentials from the store. 
- GetAll() (map[string]types.AuthConfig, error) - // Store saves credentials in the store. - Store(authConfig types.AuthConfig) error -} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go deleted file mode 100644 index a36afc41f..000000000 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go +++ /dev/null @@ -1,22 +0,0 @@ -package credentials - -import "os/exec" - -// DetectDefaultStore return the default credentials store for the platform if -// no user-defined store is passed, and the store executable is available. -func DetectDefaultStore(store string) string { - if store != "" { - // use user-defined - return store - } - - platformDefault := defaultCredentialsStore() - if platformDefault == "" { - return "" - } - - if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err != nil { - return "" - } - return platformDefault -} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go deleted file mode 100644 index 5d42dec62..000000000 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go +++ /dev/null @@ -1,5 +0,0 @@ -package credentials - -func defaultCredentialsStore() string { - return "osxkeychain" -} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go deleted file mode 100644 index a9012c6d4..000000000 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go +++ /dev/null @@ -1,13 +0,0 @@ -package credentials - -import ( - "os/exec" -) - -func defaultCredentialsStore() string { - if _, err := exec.LookPath("pass"); err == nil { - return "pass" - } - - return "secretservice" -} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go deleted file mode 100644 index 40c16eb83..000000000 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !windows && !darwin && !linux - -package credentials - -func defaultCredentialsStore() string { - return "" -} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go deleted file mode 100644 index bb799ca61..000000000 --- a/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package credentials - -func defaultCredentialsStore() string { - return "wincred" -} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go deleted file mode 100644 index c69312b01..000000000 --- a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go +++ /dev/null @@ -1,118 +0,0 @@ -package credentials - -import ( - "fmt" - "net" - "net/url" - "os" - "strings" - "sync/atomic" - - "github.com/docker/cli/cli/config/types" -) - -type store interface { - Save() error - GetAuthConfigs() map[string]types.AuthConfig - GetFilename() string -} - -// fileStore implements a credentials store using -// the docker configuration file to keep the credentials in plain text. 
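Because the Store interface above is only four methods, a throwaway in-memory implementation is easy to sketch for tests; this double is illustrative and not part of the vendored code:

    package credtest

    import (
        "github.com/docker/cli/cli/config/credentials"
        "github.com/docker/cli/cli/config/types"
    )

    type memStore map[string]types.AuthConfig

    func (s memStore) Erase(serverAddress string) error { delete(s, serverAddress); return nil }

    func (s memStore) Get(serverAddress string) (types.AuthConfig, error) { return s[serverAddress], nil }

    func (s memStore) GetAll() (map[string]types.AuthConfig, error) { return s, nil }

    func (s memStore) Store(a types.AuthConfig) error { s[a.ServerAddress] = a; return nil }

    // compile-time check that memStore satisfies credentials.Store
    var _ credentials.Store = memStore{}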
-type fileStore struct { - file store -} - -// NewFileStore creates a new file credentials store. -func NewFileStore(file store) Store { - return &fileStore{file: file} -} - -// Erase removes the given credentials from the file store. This function is -// idempotent and does not update the file if credentials did not change. -func (c *fileStore) Erase(serverAddress string) error { - if _, exists := c.file.GetAuthConfigs()[serverAddress]; !exists { - // nothing to do; no credentials found for the given serverAddress - return nil - } - delete(c.file.GetAuthConfigs(), serverAddress) - return c.file.Save() -} - -// Get retrieves credentials for a specific server from the file store. -func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { - authConfig, ok := c.file.GetAuthConfigs()[serverAddress] - if !ok { - // Maybe they have a legacy config file; we will iterate the keys, - // converting them to the new format and testing each one - for r, ac := range c.file.GetAuthConfigs() { - if serverAddress == ConvertToHostname(r) { - return ac, nil - } - } - - authConfig = types.AuthConfig{} - } - return authConfig, nil -} - -func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { - return c.file.GetAuthConfigs(), nil -} - -// unencryptedWarning warns the user when using an insecure credential storage. -// After a deprecation period, the user will be prompted if stdin and stderr are a terminal. -// Otherwise, we'll assume they want it (sadly), because people may have been scripting -// insecure logins and we don't want to break them. Maybe they'll see the warning in their -// logs and fix things. -const unencryptedWarning = ` -WARNING! Your credentials are stored unencrypted in '%s'. -Configure a credential helper to remove this warning. See -https://docs.docker.com/go/credential-store/ -` - -// alreadyPrinted ensures that we only print the unencryptedWarning once per -// CLI invocation (no need to warn the user multiple times per command). -var alreadyPrinted atomic.Bool - -// Store saves the given credentials in the file store. This function is -// idempotent and does not update the file if credentials did not change. -func (c *fileStore) Store(authConfig types.AuthConfig) error { - authConfigs := c.file.GetAuthConfigs() - if oldAuthConfig, ok := authConfigs[authConfig.ServerAddress]; ok && oldAuthConfig == authConfig { - // Credentials didn't change, so skip updating the configuration file. - return nil - } - authConfigs[authConfig.ServerAddress] = authConfig - if err := c.file.Save(); err != nil { - return err - } - - if !alreadyPrinted.Load() && authConfig.Password != "" { - // Display a warning if we're storing the user's password (not a token). - // - // FIXME(thaJeztah): make output configurable instead of hardcoding to os.Stderr - _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf(unencryptedWarning, c.file.GetFilename())) - alreadyPrinted.Store(true) - } - - return nil -} - -// ConvertToHostname converts a registry URL which has http|https prepended -// to just a hostname. -// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies.
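The normalization performed by ConvertToHostname (defined just below) is easiest to see with worked inputs; a hedged example, assuming imports of fmt and this vendored credentials package (the second input is a placeholder registry):

    func ExampleConvertToHostname() {
        fmt.Println(credentials.ConvertToHostname("https://index.docker.io/v1/"))
        fmt.Println(credentials.ConvertToHostname("registry.example.test:5000/repo"))
        // Output:
        // index.docker.io
        // registry.example.test:5000
    }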
-func ConvertToHostname(maybeURL string) string { - stripped := maybeURL - if strings.Contains(stripped, "://") { - u, err := url.Parse(stripped) - if err == nil && u.Hostname() != "" { - if u.Port() == "" { - return u.Hostname() - } - return net.JoinHostPort(u.Hostname(), u.Port()) - } - } - hostName, _, _ := strings.Cut(stripped, "/") - return hostName -} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go deleted file mode 100644 index b9af145b9..000000000 --- a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go +++ /dev/null @@ -1,147 +0,0 @@ -package credentials - -import ( - "github.com/docker/cli/cli/config/types" - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" -) - -const ( - remoteCredentialsPrefix = "docker-credential-" //nolint:gosec // ignore G101: Potential hardcoded credentials - tokenUsername = "" -) - -// nativeStore implements a credentials store -// using native keychain to keep credentials secure. -// It piggybacks into a file store to keep users' emails. -type nativeStore struct { - programFunc client.ProgramFunc - fileStore Store -} - -// NewNativeStore creates a new native store that -// uses a remote helper program to manage credentials. -func NewNativeStore(file store, helperSuffix string) Store { - name := remoteCredentialsPrefix + helperSuffix - return &nativeStore{ - programFunc: client.NewShellProgramFunc(name), - fileStore: NewFileStore(file), - } -} - -// Erase removes the given credentials from the native store. -func (c *nativeStore) Erase(serverAddress string) error { - if err := client.Erase(c.programFunc, serverAddress); err != nil { - return err - } - - // Fallback to plain text store to remove email - return c.fileStore.Erase(serverAddress) -} - -// Get retrieves credentials for a specific server from the native store. -func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { - // load user email if it exists, or an empty auth config. - auth, _ := c.fileStore.Get(serverAddress) - - creds, err := c.getCredentialsFromStore(serverAddress) - if err != nil { - return auth, err - } - auth.Username = creds.Username - auth.IdentityToken = creds.IdentityToken - auth.Password = creds.Password - auth.ServerAddress = creds.ServerAddress - - return auth, nil -} - -// GetAll retrieves all the credentials from the native store. -func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { - auths, err := c.listCredentialsInStore() - if err != nil { - return nil, err - } - - // Emails are only stored in the file store. - // This call can be safely eliminated when emails are removed. - fileConfigs, _ := c.fileStore.GetAll() - - authConfigs := make(map[string]types.AuthConfig) - for registry := range auths { - creds, err := c.getCredentialsFromStore(registry) - if err != nil { - return nil, err - } - ac := fileConfigs[registry] // might contain Email - ac.Username = creds.Username - ac.Password = creds.Password - ac.IdentityToken = creds.IdentityToken - if ac.ServerAddress == "" { - ac.ServerAddress = creds.ServerAddress - } - authConfigs[registry] = ac - } - - return authConfigs, nil -} - -// Store saves the given credentials in the native store.
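The empty tokenUsername declared above is the sentinel that distinguishes identity tokens from passwords in helper output; a hypothetical helper that mirrors the same mapping, for illustration only:

    // fromHelper mirrors how a helper result is mapped back onto AuthConfig:
    // an empty username marks the secret as an identity token.
    func fromHelper(username, secret, server string) types.AuthConfig {
        var ac types.AuthConfig
        if username == "" { // tokenUsername sentinel
            ac.IdentityToken = secret
        } else {
            ac.Username = username
            ac.Password = secret
        }
        ac.ServerAddress = server
        return ac
    }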
-func (c *nativeStore) Store(authConfig types.AuthConfig) error { - if err := c.storeCredentialsInStore(authConfig); err != nil { - return err - } - authConfig.Username = "" - authConfig.Password = "" - authConfig.IdentityToken = "" - - // Fallback to old credential in plain text to save only the email - return c.fileStore.Store(authConfig) -} - -// storeCredentialsInStore executes the command to store the credentials in the native store. -func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { - creds := &credentials.Credentials{ - ServerURL: config.ServerAddress, - Username: config.Username, - Secret: config.Password, - } - - if config.IdentityToken != "" { - creds.Username = tokenUsername - creds.Secret = config.IdentityToken - } - - return client.Store(c.programFunc, creds) -} - -// getCredentialsFromStore executes the command to get the credentials from the native store. -func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { - var ret types.AuthConfig - - creds, err := client.Get(c.programFunc, serverAddress) - if err != nil { - if credentials.IsErrCredentialsNotFound(err) { - // do not return an error if the credentials are not - // in the keychain. Let docker ask for new credentials. - return ret, nil - } - return ret, err - } - - if creds.Username == tokenUsername { - ret.IdentityToken = creds.Secret - } else { - ret.Password = creds.Secret - ret.Username = creds.Username - } - - ret.ServerAddress = serverAddress - return ret, nil -} - -// listCredentialsInStore returns a listing of stored credentials as a map of -// URL -> username. -func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { - return client.List(c.programFunc) -} diff --git a/vendor/github.com/docker/cli/cli/config/memorystore/store.go b/vendor/github.com/docker/cli/cli/config/memorystore/store.go deleted file mode 100644 index f8ec62b95..000000000 --- a/vendor/github.com/docker/cli/cli/config/memorystore/store.go +++ /dev/null @@ -1,131 +0,0 @@ -// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.24 - -package memorystore - -import ( - "fmt" - "maps" - "os" - "sync" - - "github.com/docker/cli/cli/config/credentials" - "github.com/docker/cli/cli/config/types" -) - -// notFoundErr is the error returned when a plugin could not be found. 
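The native store shells out to a docker-credential-* binary through the docker-credential-helpers client package used above; a hedged sketch of driving one directly (the helper name is a placeholder and must exist on PATH):

    p := client.NewShellProgramFunc("docker-credential-pass")
    creds, err := client.Get(p, "registry.example.test") // placeholder server URL
    if err == nil {
        fmt.Println(creds.Username, creds.Secret)
    }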
-type notFoundErr string - -func (notFoundErr) NotFound() {} - -func (e notFoundErr) Error() string { - return string(e) -} - -var errValueNotFound notFoundErr = "value not found" - -type Config struct { - lock sync.RWMutex - memoryCredentials map[string]types.AuthConfig - fallbackStore credentials.Store -} - -func (e *Config) Erase(serverAddress string) error { - e.lock.Lock() - defer e.lock.Unlock() - delete(e.memoryCredentials, serverAddress) - - if e.fallbackStore != nil { - err := e.fallbackStore.Erase(serverAddress) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err) - } - } - - return nil -} - -func (e *Config) Get(serverAddress string) (types.AuthConfig, error) { - e.lock.RLock() - defer e.lock.RUnlock() - authConfig, ok := e.memoryCredentials[serverAddress] - if !ok { - if e.fallbackStore != nil { - return e.fallbackStore.Get(serverAddress) - } - return types.AuthConfig{}, errValueNotFound - } - return authConfig, nil -} - -func (e *Config) GetAll() (map[string]types.AuthConfig, error) { - e.lock.RLock() - defer e.lock.RUnlock() - creds := make(map[string]types.AuthConfig) - - if e.fallbackStore != nil { - fileCredentials, err := e.fallbackStore.GetAll() - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err) - } else { - creds = fileCredentials - } - } - - maps.Copy(creds, e.memoryCredentials) - return creds, nil -} - -func (e *Config) Store(authConfig types.AuthConfig) error { - e.lock.Lock() - defer e.lock.Unlock() - e.memoryCredentials[authConfig.ServerAddress] = authConfig - - if e.fallbackStore != nil { - return e.fallbackStore.Store(authConfig) - } - return nil -} - -// WithFallbackStore sets a fallback store. -// -// Write operations will be performed on both the memory store and the -// fallback store. -// -// Read operations will first check the memory store, and if the credential -// is not found, it will then check the fallback store. -// -// Retrieving all credentials will return from both the memory store and the -// fallback store, merging the results from both stores into a single map. -// -// Data stored in the memory store will take precedence over data in the -// fallback store. -func WithFallbackStore(store credentials.Store) Options { - return func(s *Config) error { - s.fallbackStore = store - return nil - } -} - -// WithAuthConfig allows to set the initial credentials in the memory store. 
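A sketch composing the memory store with a fallback, mirroring what GetCredentialsStore does for DOCKER_AUTH_CONFIG; host and credentials are placeholders, and the imports are the vendored configfile, credentials, memorystore, and types packages:

    fallback := credentials.NewFileStore(configfile.New(""))
    store, err := memorystore.New(
        memorystore.WithAuthConfig(map[string]types.AuthConfig{
            "example.test": {Username: "user", Password: "pat", ServerAddress: "example.test"},
        }),
        memorystore.WithFallbackStore(fallback),
    )
    if err == nil {
        ac, _ := store.Get("example.test") // served from memory, not the fallback
        _ = ac
    }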
-func WithAuthConfig(config map[string]types.AuthConfig) Options { - return func(s *Config) error { - s.memoryCredentials = config - return nil - } -} - -type Options func(*Config) error - -// New creates a new in memory credential store -func New(opts ...Options) (credentials.Store, error) { - m := &Config{ - memoryCredentials: make(map[string]types.AuthConfig), - } - for _, opt := range opts { - if err := opt(m); err != nil { - return nil, err - } - } - return m, nil -} diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go deleted file mode 100644 index 9fe90003b..000000000 --- a/vendor/github.com/docker/cli/cli/config/types/authconfig.go +++ /dev/null @@ -1,17 +0,0 @@ -package types - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/vendor/github.com/docker/distribution/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index 4c35b879a..000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,267 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. 
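Because ErrorCode itself implements error (note the var _ error assertion just below), clients can classify registry failures by code after decoding an Errors envelope; a hedged sketch against this vendored errcode package:

    // isUnauthorized reports whether any error in an errcode.Errors
    // envelope carries the UNAUTHORIZED code.
    func isUnauthorized(errs errcode.Errors) bool {
        for _, e := range errs {
            if ec, ok := e.(errcode.ErrorCoder); ok &&
                ec.ErrorCode() == errcode.ErrorCodeUnauthorized {
                return true
            }
        }
        return false
    }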
-type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returns the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// sets the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human-readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often capitalized with - // underscores, to identify the error code.
This value is used as the - // keyed value when serializing API errors. - Value string - - // Message is a short, human-readable description of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the error's purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. - HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. -func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts slice of error, ErrorCode or Error into a -// slice of Error - then serializes -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr := daErr.(type) { - case ErrorCode: - err = daErr.WithDetail(nil) - case Error: - err = daErr - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was set up and they forgot to set the - // Message field (meaning it's "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Errors w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Errors w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go deleted file mode 100644 index d77e70473..000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go +++ /dev/null @@ -1,40 +0,0 @@ -package errcode - -import ( - "encoding/json" - "net/http" -) - -// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err -// and sets the content-type header to 'application/json'. It will handle -// ErrorCoder and Errors, and if necessary will create an envelope.
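Tying ServeJSON to the registration machinery in register.go below: a hedged sketch that registers a one-off code and serves it with the HTTP status taken from its descriptor (the "myapp" group and descriptor values are placeholders):

    var ErrorCodeTeapot = errcode.Register("myapp", errcode.ErrorDescriptor{
        Value:          "TEAPOT",
        Message:        "short and stout",
        Description:    "Returned for illustration only.",
        HTTPStatusCode: http.StatusTeapot,
    })

    func handler(w http.ResponseWriter, r *http.Request) {
        // ServeJSON wraps the code in an Errors envelope and writes 418.
        _ = errcode.ServeJSON(w, ErrorCodeTeapot.WithDetail(r.URL.Path))
    }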
-func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - var sc int - - switch errs := err.(type) { - case Errors: - if len(errs) < 1 { - break - } - - if err, ok := errs[0].(ErrorCoder); ok { - sc = err.ErrorCode().Descriptor().HTTPStatusCode - } - case ErrorCoder: - sc = errs.ErrorCode().Descriptor().HTTPStatusCode - err = Errors{err} // create an envelope. - default: - // We just have an unhandled error type, so just place in an envelope - // and move along. - err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - return json.NewEncoder(w).Encode(err) -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index d1e8826c6..000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,138 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go deleted file mode 100644 index 7fceefbc6..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ /dev/null @@ -1,1613 +0,0 @@ -package v2 - -import ( - "net/http" - "regexp" - - "github.com/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/opencontainers/go-digest" -) - -var ( - nameParameterDescriptor = ParameterDescriptor{ - Name: "name", - Type: "string", - Format: reference.NameRegexp.String(), - Required: true, - Description: `Name of the target repository.`, - } - - referenceParameterDescriptor = ParameterDescriptor{ - Name: "reference", - Type: "string", - Format: reference.TagRegexp.String(), - Required: true, - Description: `Tag or digest of the target manifest.`, - } - - uuidParameterDescriptor = ParameterDescriptor{ - Name: "uuid", - Type: "opaque", - Required: true, - Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", - } - - digestPathParameter = ParameterDescriptor{ - Name: "digest", - Type: "path", - Required: true, - Format: digest.DigestRegexp.String(), - Description: `Digest of desired blob.`, - } - - hostHeader = ParameterDescriptor{ - Name: "Host", - Type: "string", - Description: "Standard HTTP Host Header. 
Should be set to the registry host.", - Format: "<registry host>", - Examples: []string{"registry-1.docker.io"}, - } - - authHeader = ParameterDescriptor{ - Name: "Authorization", - Type: "string", - Description: "An RFC7235 compliant authorization header.", - Format: "<scheme> <token>", - Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, - } - - authChallengeHeader = ParameterDescriptor{ - Name: "WWW-Authenticate", - Type: "string", - Description: "An RFC7235 compliant authentication challenge header.", - Format: `<scheme> realm="<realm>", ..."`, - Examples: []string{ - `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, - }, - } - - contentLengthZeroHeader = ParameterDescriptor{ - Name: "Content-Length", - Description: "The `Content-Length` header must be zero and the body must be empty.", - Type: "integer", - Format: "0", - } - - dockerUploadUUIDHeader = ParameterDescriptor{ - Name: "Docker-Upload-UUID", - Description: "Identifies the docker upload uuid for the current request.", - Type: "uuid", - Format: "<uuid>", - } - - digestHeader = ParameterDescriptor{ - Name: "Docker-Content-Digest", - Description: "Digest of the targeted content for the request.", - Type: "digest", - Format: "<digest>", - } - - linkHeader = ParameterDescriptor{ - Name: "Link", - Type: "link", - Description: "RFC5988 compliant rel='next' with URL to next result set, if available", - Format: `<<url>?n=<last n value>&last=<last entry from response>>; rel="next"`, - } - - paginationParameters = []ParameterDescriptor{ - { - Name: "n", - Type: "integer", - Description: "Limit the number of entries in each response. If not present, all entries will be returned.", - Format: "<integer>", - Required: false, - }, - { - Name: "last", - Type: "string", - Description: "Result set will include values lexically after last.", - Format: "<integer>", - Required: false, - }, - } - - unauthorizedResponseDescriptor = ResponseDescriptor{ - Name: "Authentication Required", - StatusCode: http.StatusUnauthorized, - Description: "The client is not authenticated.", - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "<length>", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - } - - invalidPaginationResponseDescriptor = ResponseDescriptor{ - Name: "Invalid pagination number", - Description: "The received parameter n was invalid in some way, as described by the error code.
The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodePaginationNumberInvalid, - }, - } - - repositoryNotFoundResponseDescriptor = ResponseDescriptor{ - Name: "No Such Repository Error", - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - } - - deniedResponseDescriptor = ResponseDescriptor{ - Name: "Access Denied", - StatusCode: http.StatusForbidden, - Description: "The client does not have required access to the repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeDenied, - }, - } - - tooManyRequestsDescriptor = ResponseDescriptor{ - Name: "Too Many Requests", - StatusCode: http.StatusTooManyRequests, - Description: "The client made too many requests within a time interval.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeTooManyRequests, - }, - } -) - -const ( - manifestBody = `{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -}` - - errorsBody = `{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -}` -) - -// APIDescriptor exports descriptions of the layout of the v2 registry API. -var APIDescriptor = struct { - // RouteDescriptors provides a list of the routes available in the API. - RouteDescriptors []RouteDescriptor -}{ - RouteDescriptors: routeDescriptors, -} - -// RouteDescriptor describes a route specified by name. -type RouteDescriptor struct { - // Name is the name of the route, as specified in RouteNameXXX exports. - // These names a should be considered a unique reference for a route. If - // the route is registered with gorilla, this is the name that will be - // used. - Name string - - // Path is a gorilla/mux-compatible regexp that can be used to match the - // route. For any incoming method and path, only one route descriptor - // should match. - Path string - - // Entity should be a short, human-readalbe description of the object - // targeted by the endpoint. - Entity string - - // Description should provide an accurate overview of the functionality - // provided by the route. - Description string - - // Methods should describe the various HTTP methods that may be used on - // this route, including request and response formats. - Methods []MethodDescriptor -} - -// MethodDescriptor provides a description of the requests that may be -// conducted with the target method. -type MethodDescriptor struct { - - // Method is an HTTP method, such as GET, PUT or POST. 
- Method string - - // Description should provide an overview of the functionality provided by - // the covered method, suitable for use in documentation. Use of markdown - // here is encouraged. - Description string - - // Requests is a slice of request descriptors enumerating how this - // endpoint may be used. - Requests []RequestDescriptor -} - -// RequestDescriptor covers a particular set of headers and parameters that -// can be carried out with the parent method. Its most helpful to have one -// RequestDescriptor per API use case. -type RequestDescriptor struct { - // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particular request. - Name string - - // Description should cover the requests purpose, covering any details for - // this particular use case. - Description string - - // Headers describes headers that must be used with the HTTP request. - Headers []ParameterDescriptor - - // PathParameters enumerate the parameterized path components for the - // given request, as defined in the route's regular expression. - PathParameters []ParameterDescriptor - - // QueryParameters provides a list of query parameters for the given - // request. - QueryParameters []ParameterDescriptor - - // Body describes the format of the request body. - Body BodyDescriptor - - // Successes enumerates the possible responses that are considered to be - // the result of a successful request. - Successes []ResponseDescriptor - - // Failures covers the possible failures from this particular request. - Failures []ResponseDescriptor -} - -// ResponseDescriptor describes the components of an API response. -type ResponseDescriptor struct { - // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particular response. - Name string - - // Description should provide a brief overview of the role of the - // response. - Description string - - // StatusCode specifies the status received by this particular response. - StatusCode int - - // Headers covers any headers that may be returned from the response. - Headers []ParameterDescriptor - - // Fields describes any fields that may be present in the response. - Fields []ParameterDescriptor - - // ErrorCodes enumerates the error codes that may be returned along with - // the response. - ErrorCodes []errcode.ErrorCode - - // Body describes the body of the response, if any. - Body BodyDescriptor -} - -// BodyDescriptor describes a request body and its expected content type. For -// the most part, it should be example json or some placeholder for body -// data in documentation. -type BodyDescriptor struct { - ContentType string - Format string -} - -// ParameterDescriptor describes the format of a request parameter, which may -// be a header, path parameter or query parameter. -type ParameterDescriptor struct { - // Name is the name of the parameter, either of the path component or - // query parameter. - Name string - - // Type specifies the type of the parameter, such as string, integer, etc. - Type string - - // Description provides a human-readable description of the parameter. - Description string - - // Required means the field is required when set. - Required bool - - // Format is a specifying the string format accepted by this parameter. - Format string - - // Regexp is a compiled regular expression that can be used to validate - // the contents of the parameter. 
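The descriptor types above make the v2 API self-describing: routes, methods, requests and responses are all plain data, so documentation can be generated mechanically from them. As a minimal sketch (assuming the upstream docker/distribution module, since this vendored copy is being removed), walking the exported APIDescriptor looks like:

package main

import (
	"fmt"

	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	// APIDescriptor.RouteDescriptors is the same table defined further
	// down in routeDescriptors; each entry carries its gorilla/mux path
	// and the method descriptors registered for it.
	for _, r := range v2.APIDescriptor.RouteDescriptors {
		fmt.Printf("%s %s\n", r.Name, r.Path)
		for _, m := range r.Methods {
			fmt.Printf("  %s: %s\n", m.Method, m.Description)
		}
	}
}
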
- Regexp *regexp.Regexp - - // Examples provides multiple examples for the values that might be valid - // for this parameter. - Examples []string -} - -var routeDescriptors = []RouteDescriptor{ - { - Name: RouteNameBase, - Path: "/v2/", - Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Check that the endpoint implements Docker Registry API V2.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Successes: []ResponseDescriptor{ - { - Description: "The API implements V2 protocol and is accessible.", - StatusCode: http.StatusOK, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The registry does not implement the V2 API.", - StatusCode: http.StatusNotFound, - }, - unauthorizedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameTags, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", - Entity: "Tags", - Description: "Retrieve information about tags.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the tags under the repository identified by `name`.", - Requests: []RequestDescriptor{ - { - Name: "Tags", - Description: "Return all tags for the repository", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ] -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Tags Paginated", - Description: "Return a portion of the tags for the specified repository.", - PathParameters: []ParameterDescriptor{nameParameterDescriptor}, - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ], -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - invalidPaginationResponseDescriptor, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameManifest, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", - Entity: "Manifest", - Description: "Create, update, delete and retrieve manifests.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The name or reference was invalid.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The canonical location url of the uploaded manifest.", - Format: "", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Manifest", - Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Missing Layer(s)", - Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... 
- ] -}`, - }, - }, - { - Name: "Not allowed", - Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Reference", - Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Unknown Manifest", - Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Name: "Not allowed", - Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlob, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", - Entity: "Blob", - Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Name: "Fetch Blob", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob content.", - Format: "", - }, - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - { - Description: "The blob identified by `digest` is available at the provided location.", - StatusCode: http.StatusTemporaryRedirect, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The location where the layer should be accessible.", - Format: "", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Fetch Blob Part", - Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Range", - Type: "string", - Description: "HTTP Range header specifying blob chunk.", - Format: "bytes=-", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", - StatusCode: http.StatusPartialContent, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob chunk.", - Format: "", - }, - { - Name: "Content-Range", - Type: "byte range", - Description: "Content range of blob chunk.", - Format: "bytes -/", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the blob identified by `name` and `digest`", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "0", - Format: "0", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - { - Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", - StatusCode: http.StatusMethodNotAllowed, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - - // TODO(stevvooe): We may want to add a PUT request here to - // kickoff an upload of a blob, integrated with the blob upload - // API. - }, - }, - - { - Name: RouteNameBlobUpload, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", - Entity: "Initiate Blob Upload", - Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", - Methods: []MethodDescriptor{ - { - Method: "POST", - Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", - Requests: []RequestDescriptor{ - { - Name: "Initiate Monolithic Blob Upload", - Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octect-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been created in the registry and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Initiate Resumable Blob Upload", - Description: "Initiate a resumable blob upload with an empty request body.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Format: "0-0", - Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", - }, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Mount Blob", - Description: "Mount a blob identified by the `mount` parameter from another repository.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "mount", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of blob to mount from the source repository.`, - }, - { - Name: "from", - Type: "query", - Format: "", - Regexp: reference.NameRegexp, - Description: `Name of the source repository.`, - }, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been mounted in the repository and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", - Entity: "Blob Upload", - Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", - Requests: []RequestDescriptor{ - { - Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Progress", - Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PATCH", - Description: "Upload a chunk of data for the specified upload.", - Requests: []RequestDescriptor{ - { - Name: "Stream upload", - Description: "Upload a stream of data to upload without completing the upload.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Data Accepted", - Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Chunked upload", - Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Required: true, - Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", - }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding the length of the request body.", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Chunk Accepted", - Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", - Requests: []RequestDescriptor{ - { - Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "string", - Format: "", - Regexp: digest.DigestRegexp, - Required: true, - Description: `Digest of uploaded blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Complete", - Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - Description: "The canonical location of the blob for retrieval", - }, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - errcode.ErrorCodeUnsupported, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", - Requests: []RequestDescriptor{ - { - Description: "Cancel the upload specified by `uuid`.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Deleted", - Description: "The upload has been successfully deleted.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "An error was encountered processing the delete. The client may ignore this error.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameCatalog, - Path: "/v2/_catalog", - Entity: "Catalog", - Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve a sorted, json list of repositories available in the registry.", - Requests: []RequestDescriptor{ - { - Name: "Catalog Fetch", - Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", - Successes: []ResponseDescriptor{ - { - Description: "Returns the unabridged list of repositories as a json response.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] -}`, - }, - }, - }, - }, - { - Name: "Catalog Fetch Paginated", - Description: "Return the specified portion of repositories.", - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] - "next": "?last=&n=" -}`, - }, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - invalidPaginationResponseDescriptor, - }, - }, - }, - }, - }, - }, -} - -var routeDescriptorsMap map[string]RouteDescriptor - -func init() { - routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - - for _, descriptor := range routeDescriptors { - routeDescriptorsMap[descriptor.Name] = descriptor - } -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go deleted file mode 100644 index cde011959..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package v2 describes routes, urls and the error codes used in the Docker -// Registry JSON HTTP API V2. In addition to declarations, descriptors are -// provided for routes and error codes that can be used for implementation and -// automatically generating documentation. -// -// Definitions here are considered to be locked down for the V2 registry api. -// Any changes must be considered carefully and should not proceed without a -// change proposal in docker core. 
-package v2 diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go deleted file mode 100644 index 87e9f3c14..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/errors.go +++ /dev/null @@ -1,145 +0,0 @@ -package v2 - -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -const errGroup = "registry.api.v2" - -var ( - // ErrorCodeDigestInvalid is returned when uploading a blob if the - // provided digest does not match the blob contents. - ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. If they do not match, this error - will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameInvalid is returned when the name in the manifest does not - // match the provided name. - ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeTagInvalid is returned when the tag in the manifest does not - // match the provided tag. - ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestUnknown returned when image manifest is unknown. - ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestInvalid returned when an image manifest is invalid, - // typically during a PUT operation. This error encompasses all errors - // encountered during manifest validation that aren't signature errors. 
- ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - the failed validation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verification. - ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestBlobUnknown is returned when a manifest blob is - // unknown to the registry. - ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a manifest blob is - unknown to the registry.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. 
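All of these codes go through the errcode.Register function shown earlier, which assigns the next integer code under a group name and panics on duplicate values. A hedged sketch of registering a service-specific code against the upstream errcode package (the group name and descriptor values here are illustrative, not part of this patch):

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// Illustrative only: a custom code registered under its own group.
var errorCodeExampleInvalid = errcode.Register("example.api", errcode.ErrorDescriptor{
	Value:          "EXAMPLE_INVALID",
	Message:        "example payload invalid",
	Description:    `Returned when an illustrative payload fails validation.`,
	HTTPStatusCode: http.StatusBadRequest,
})

func main() {
	// Per the nextCode counter in the deleted errcode source, codes
	// start at 1000 and increment with each registration.
	fmt.Println(errorCodeExampleInvalid)
}
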
- ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCode: http.StatusNotFound, - }) - - ErrorCodePaginationNumberInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PAGINATION_NUMBER_INVALID", - Message: "invalid number of results requested", - Description: `Returned when the "n" parameter (number of results - to return) is not an integer, "n" is negative or "n" is bigger than - the maximum allowed.`, - HTTPStatusCode: http.StatusBadRequest, - }) -) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go deleted file mode 100644 index 9bc41a3a6..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go +++ /dev/null @@ -1,161 +0,0 @@ -package v2 - -import ( - "fmt" - "regexp" - "strings" - "unicode" -) - -var ( - // according to rfc7230 - reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) - reQuotedValue = regexp.MustCompile(`^[^\\"]+`) - reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) -) - -// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains -// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The -// function parses only the first element of the list, which is set by the very first proxy. It returns a map -// of corresponding key-value pairs and an unparsed slice of the input string. -// -// Examples of Forwarded header values: -// -// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown -// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" -// -// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into -// {"for": "192.0.2.43:443", "host": "registry.example.org"}. -func parseForwardedHeader(forwarded string) (map[string]string, string, error) { - // Following are states of forwarded header parser. Any state could transition to a failure. 
- const ( - // terminating state; can transition to Parameter - stateElement = iota - // terminating state; can transition to KeyValueDelimiter - stateParameter - // can transition to Value - stateKeyValueDelimiter - // can transition to one of { QuotedValue, PairEnd } - stateValue - // can transition to one of { EscapedCharacter, PairEnd } - stateQuotedValue - // can transition to one of { QuotedValue } - stateEscapedCharacter - // terminating state; can transition to one of { Parameter, Element } - statePairEnd - ) - - var ( - parameter string - value string - parse = forwarded[:] - res = map[string]string{} - state = stateElement - ) - -Loop: - for { - // skip spaces unless in quoted value - if state != stateQuotedValue && state != stateEscapedCharacter { - parse = strings.TrimLeftFunc(parse, unicode.IsSpace) - } - - if len(parse) == 0 { - if state != stateElement && state != statePairEnd && state != stateParameter { - return nil, parse, fmt.Errorf("unexpected end of input") - } - // terminating - break - } - - switch state { - // terminate at list element delimiter - case stateElement: - if parse[0] == ',' { - parse = parse[1:] - break Loop - } - state = stateParameter - - // parse parameter (the key of key-value pair) - case stateParameter: - match := reToken.FindString(parse) - if len(match) == 0 { - return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) - } - parameter = strings.ToLower(match) - parse = parse[len(match):] - state = stateKeyValueDelimiter - - // parse '=' - case stateKeyValueDelimiter: - if parse[0] != '=' { - return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) - } - parse = parse[1:] - state = stateValue - - // parse value or quoted value - case stateValue: - if parse[0] == '"' { - parse = parse[1:] - state = stateQuotedValue - } else { - value = reToken.FindString(parse) - if len(value) == 0 { - return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) - } - if _, exists := res[parameter]; exists { - return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) - } - res[parameter] = value - parse = parse[len(value):] - value = "" - state = statePairEnd - } - - // parse a part of quoted value until the first backslash - case stateQuotedValue: - match := reQuotedValue.FindString(parse) - value += match - parse = parse[len(match):] - switch { - case len(parse) == 0: - return nil, parse, fmt.Errorf("unterminated quoted string") - case parse[0] == '"': - res[parameter] = value - value = "" - parse = parse[1:] - state = statePairEnd - case parse[0] == '\\': - parse = parse[1:] - state = stateEscapedCharacter - } - - // parse escaped character in a quoted string, ignore the backslash - // transition back to QuotedValue state - case stateEscapedCharacter: - c := reEscapedCharacter.FindString(parse) - if len(c) == 0 { - return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) - } - value += c - parse = parse[1:] - state = stateQuotedValue - - // expect either a new key-value pair, new list or end of input - case statePairEnd: - switch parse[0] { - case ';': - parse = parse[1:] - state = stateParameter - case ',': - state = stateElement - default: - return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) - } - } - } - - return res, parse, nil -} diff --git 
a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go deleted file mode 100644 index 9612ac2e5..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/routes.go +++ /dev/null @@ -1,40 +0,0 @@ -package v2 - -import "github.com/gorilla/mux" - -// The following are definitions of the name under which all V2 routes are -// registered. These symbols can be used to look up a route based on the name. -const ( - RouteNameBase = "base" - RouteNameManifest = "manifest" - RouteNameTags = "tags" - RouteNameBlob = "blob" - RouteNameBlobUpload = "blob-upload" - RouteNameBlobUploadChunk = "blob-upload-chunk" - RouteNameCatalog = "catalog" -) - -// Router builds a gorilla router with named routes for the various API -// methods. This can be used directly by both server implementations and -// clients. -func Router() *mux.Router { - return RouterWithPrefix("") -} - -// RouterWithPrefix builds a gorilla router with a configured prefix -// on all routes. -func RouterWithPrefix(prefix string) *mux.Router { - rootRouter := mux.NewRouter() - router := rootRouter - if prefix != "" { - router = router.PathPrefix(prefix).Subrouter() - } - - router.StrictSlash(true) - - for _, descriptor := range routeDescriptors { - router.Path(descriptor.Path).Name(descriptor.Name) - } - - return rootRouter -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go deleted file mode 100644 index ab6406335..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ /dev/null @@ -1,254 +0,0 @@ -package v2 - -import ( - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/distribution/reference" - "github.com/gorilla/mux" -) - -// URLBuilder creates registry API urls from a single base endpoint. It can be -// used to create urls for use in a registry client or server. -// -// All urls will be created from the given base, including the api version. -// For example, if a root of "/foo/" is provided, urls generated will be fall -// under "/foo/v2/...". Most application will only provide a schema, host and -// port, such as "https://localhost:5000/". -type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router - relative bool -} - -// NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { - return &URLBuilder{ - root: root, - router: Router(), - relative: relative, - } -} - -// NewURLBuilderFromString workes identically to NewURLBuilder except it takes -// a string argument for the root, returning an error if it is not a valid -// url. -func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { - u, err := url.Parse(root) - if err != nil { - return nil, err - } - - return NewURLBuilder(u, relative), nil -} - -// NewURLBuilderFromRequest uses information from an *http.Request to -// construct the root url. 
-func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { - var ( - scheme = "http" - host = r.Host - ) - - if r.TLS != nil { - scheme = "https" - } else if len(r.URL.Scheme) > 0 { - scheme = r.URL.Scheme - } - - // Handle fowarded headers - // Prefer "Forwarded" header as defined by rfc7239 if given - // see https://tools.ietf.org/html/rfc7239 - if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 { - forwardedHeader, _, err := parseForwardedHeader(forwarded) - if err == nil { - if fproto := forwardedHeader["proto"]; len(fproto) > 0 { - scheme = fproto - } - if fhost := forwardedHeader["host"]; len(fhost) > 0 { - host = fhost - } - } - } else { - if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 { - scheme = forwardedProto - } - if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 { - // According to the Apache mod_proxy docs, X-Forwarded-Host can be a - // comma-separated list of hosts, to which each proxy appends the - // requested host. We want to grab the first from this comma-separated - // list. - hosts := strings.SplitN(forwardedHost, ",", 2) - host = strings.TrimSpace(hosts[0]) - } - } - - basePath := routeDescriptorsMap[RouteNameBase].Path - - requestPath := r.URL.Path - index := strings.Index(requestPath, basePath) - - u := &url.URL{ - Scheme: scheme, - Host: host, - } - - if index > 0 { - // N.B. index+1 is important because we want to include the trailing / - u.Path = requestPath[0 : index+1] - } - - return NewURLBuilder(u, relative) -} - -// BuildBaseURL constructs a base url for the API, typically just "/v2/". -func (ub *URLBuilder) BuildBaseURL() (string, error) { - route := ub.cloneRoute(RouteNameBase) - - baseURL, err := route.URL() - if err != nil { - return "", err - } - - return baseURL.String(), nil -} - -// BuildCatalogURL constructs a url get a catalog of repositories -func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameCatalog) - - catalogURL, err := route.URL() - if err != nil { - return "", err - } - - return appendValuesURL(catalogURL, values...).String(), nil -} - -// BuildTagsURL constructs a url to list the tags in the named repository. -func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameTags) - - tagsURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return tagsURL.String(), nil -} - -// BuildManifestURL constructs a url for the manifest identified by name and -// reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameManifest) - - tagOrDigest := "" - switch v := ref.(type) { - case reference.Tagged: - tagOrDigest = v.Tag() - case reference.Digested: - tagOrDigest = v.Digest().String() - default: - return "", fmt.Errorf("reference must have a tag or digest") - } - - manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) - if err != nil { - return "", err - } - - return manifestURL.String(), nil -} - -// BuildBlobURL constructs the url for the blob identified by name and dgst. 
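The builder methods pair naturally with the reference package used throughout this patch. A small usage sketch against the upstream module (the vendored copy is deleted here), with an assumed registry host:

package main

import (
	"fmt"

	"github.com/distribution/reference"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com", false)
	if err != nil {
		panic(err)
	}

	named, _ := reference.WithName("library/ubuntu")
	tagged, _ := reference.WithTag(named, "latest")

	// Yields https://registry.example.com/v2/library/ubuntu/manifests/latest
	manifestURL, _ := ub.BuildManifestURL(tagged)
	fmt.Println(manifestURL)

	// Tags listing for the same repository.
	tagsURL, _ := ub.BuildTagsURL(named)
	fmt.Println(tagsURL)
}
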
-func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { - route := ub.cloneRoute(RouteNameBlob) - - layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) - if err != nil { - return "", err - } - - return layerURL.String(), nil -} - -// BuildBlobUploadURL constructs a url to begin a blob upload in the -// repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUpload) - - uploadURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, -// including any url values. This should generally not be used by clients, as -// this url is provided by server implementations during the blob upload -// process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUploadChunk) - - uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// cloneRoute returns a clone of the named route from the router. Routes -// must be cloned to avoid modifying them during url generation. -func (ub *URLBuilder) cloneRoute(name string) clonedRoute { - route := new(mux.Route) - root := new(url.URL) - - *route = *ub.router.GetRoute(name) // clone the route - *root = *ub.root - - return clonedRoute{Route: route, root: root, relative: ub.relative} -} - -type clonedRoute struct { - *mux.Route - root *url.URL - relative bool -} - -func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { - routeURL, err := cr.Route.URL(pairs...) - if err != nil { - return nil, err - } - - if cr.relative { - return routeURL, nil - } - - if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { - routeURL.Path = routeURL.Path[1:] - } - - url := cr.root.ResolveReference(routeURL) - url.Scheme = cr.root.Scheme - return url, nil -} - -// appendValuesURL appends the parameters to the url. -func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { - merged := u.Query() - - for _, v := range values { - for k, vv := range v { - merged[k] = append(merged[k], vv...) - } - } - - u.RawQuery = merged.Encode() - return u -} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go deleted file mode 100644 index 7ca5ab722..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ /dev/null @@ -1,114 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - - "github.com/docker/docker-credential-helpers/credentials" -) - -// isValidCredsMessage checks whether 'msg' contains an invalid-credentials error message. -// It returns nil if the message is free of such errors, or the corresponding error otherwise. -// Error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername.
-func isValidCredsMessage(msg string) error { - if credentials.IsCredentialsMissingServerURLMessage(msg) { - return credentials.NewErrCredentialsMissingServerURL() - } - if credentials.IsCredentialsMissingUsernameMessage(msg) { - return credentials.NewErrCredentialsMissingUsername() - } - return nil -} - -// Store uses an external program to save credentials. -func Store(program ProgramFunc, creds *credentials.Credentials) error { - cmd := program(credentials.ActionStore) - - buffer := new(bytes.Buffer) - if err := json.NewEncoder(buffer).Encode(creds); err != nil { - return err - } - cmd.Input(buffer) - - out, err := cmd.Output() - if err != nil { - if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { - err = isValidErr - } - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) - } - - return nil -} - -// Get executes an external program to get the credentials from a native store. -func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { - cmd := program(credentials.ActionGet) - cmd.Input(strings.NewReader(serverURL)) - - out, err := cmd.Output() - if err != nil { - if credentials.IsErrCredentialsNotFoundMessage(string(out)) { - return nil, credentials.NewErrCredentialsNotFound() - } - - if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { - err = isValidErr - } - - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) - } - - resp := &credentials.Credentials{ - ServerURL: serverURL, - } - - if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { - return nil, err - } - - return resp, nil -} - -// Erase executes a program to remove the server credentials from the native store. -func Erase(program ProgramFunc, serverURL string) error { - cmd := program(credentials.ActionErase) - cmd.Input(strings.NewReader(serverURL)) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// List executes a program to list server credentials in the native store. -func List(program ProgramFunc) (map[string]string, error) { - cmd := program(credentials.ActionList) - cmd.Input(strings.NewReader("unused")) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) - } - - var resp map[string]string - if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { - return nil, err - } - - return resp, nil -} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go deleted file mode 100644 index 93863480b..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ /dev/null @@ -1,57 +0,0 @@ -package client - -import ( - "io" - "os" - "os/exec" -) - -// Program is an interface to execute external programs. -type Program interface { - Output() ([]byte, error) - Input(in io.Reader) -} - -// ProgramFunc is a type of function that initializes programs based on arguments. 
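-// In practice such a program is a helper binary following the
-// docker-credential-<suffix> naming convention (for example
-// docker-credential-osxkeychain), invoked with the action as its single
-// argument and fed its payload on standard input, as the functions above do.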
-type ProgramFunc func(args ...string) Program - -// NewShellProgramFunc creates a [ProgramFunc] to run command in a [Shell]. -func NewShellProgramFunc(command string) ProgramFunc { - return func(args ...string) Program { - return createProgramCmdRedirectErr(command, args, nil) - } -} - -// NewShellProgramFuncWithEnv creates a [ProgramFunc] to run command -// in a [Shell] with the given environment variables. -func NewShellProgramFuncWithEnv(command string, env *map[string]string) ProgramFunc { - return func(args ...string) Program { - return createProgramCmdRedirectErr(command, args, env) - } -} - -func createProgramCmdRedirectErr(command string, args []string, env *map[string]string) *Shell { - ec := exec.Command(command, args...) - if env != nil { - for k, v := range *env { - ec.Env = append(ec.Environ(), k+"="+v) - } - } - ec.Stderr = os.Stderr - return &Shell{cmd: ec} -} - -// Shell invokes shell commands to talk with a remote credentials-helper. -type Shell struct { - cmd *exec.Cmd -} - -// Output returns responses from the remote credentials-helper. -func (s *Shell) Output() ([]byte, error) { - return s.cmd.Output() -} - -// Input sets the input to send to a remote credentials-helper. -func (s *Shell) Input(in io.Reader) { - s.cmd.Stdin = in -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go deleted file mode 100644 index eac551884..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ /dev/null @@ -1,209 +0,0 @@ -package credentials - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "strings" -) - -// Action defines the name of an action (sub-command) supported by a -// credential-helper binary. It is an alias for "string", and mostly -// for convenience. -type Action = string - -// List of actions (sub-commands) supported by credential-helper binaries. -const ( - ActionStore Action = "store" - ActionGet Action = "get" - ActionErase Action = "erase" - ActionList Action = "list" - ActionVersion Action = "version" -) - -// Credentials holds the information shared between docker and the credentials store. -type Credentials struct { - ServerURL string - Username string - Secret string -} - -// isValid checks the integrity of a Credentials object such that no credentials lack -// a server URL or a username. -// It returns whether the credentials are valid and the error if they aren't. -// Error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername -func (c *Credentials) isValid() (bool, error) { - if len(c.ServerURL) == 0 { - return false, NewErrCredentialsMissingServerURL() - } - - if len(c.Username) == 0 { - return false, NewErrCredentialsMissingUsername() - } - - return true, nil -} - -// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling. -// That label allows filtering out non-Docker credentials at lookup/search in macOS keychain, -// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials". -var CredsLabel = "Docker Credentials" - -// SetCredsLabel is a simple setter for CredsLabel -func SetCredsLabel(label string) { - CredsLabel = label } - -// Serve initializes the credentials-helper and parses the action argument. -// This function is designed to be called from a command line interface. -// It uses os.Args[1] as the key for the action.
-// It uses os.Stdin as input and os.Stdout as output. -// This function terminates the program with os.Exit(1) if there is an error. -func Serve(helper Helper) { - if len(os.Args) != 2 { - _, _ = fmt.Fprintln(os.Stdout, usage()) - os.Exit(1) - } - - switch os.Args[1] { - case "--version", "-v": - _ = PrintVersion(os.Stdout) - os.Exit(0) - case "--help", "-h": - _, _ = fmt.Fprintln(os.Stdout, usage()) - os.Exit(0) - } - - if err := HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout); err != nil { - _, _ = fmt.Fprintln(os.Stdout, err) - os.Exit(1) - } -} - -func usage() string { - return fmt.Sprintf("Usage: %s <store|get|erase|list|version>", Name) -} - -// HandleCommand runs a helper to execute a credential action. -func HandleCommand(helper Helper, action Action, in io.Reader, out io.Writer) error { - switch action { - case ActionStore: - return Store(helper, in) - case ActionGet: - return Get(helper, in, out) - case ActionErase: - return Erase(helper, in) - case ActionList: - return List(helper, out) - case ActionVersion: - return PrintVersion(out) - default: - return fmt.Errorf("%s: unknown action: %s", Name, action) - } -} - -// Store uses a helper and an input reader to save credentials. -// The reader must contain the JSON serialization of a Credentials struct. -func Store(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - var creds Credentials - if err := json.NewDecoder(buffer).Decode(&creds); err != nil { - return err - } - - if ok, err := creds.isValid(); !ok { - return err - } - - return helper.Add(&creds) -} - -// Get retrieves the credentials for a given server url. -// The reader must contain the server URL to search. -// The writer is used to write the JSON serialization of the credentials. -func Get(helper Helper, reader io.Reader, writer io.Writer) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - if len(serverURL) == 0 { - return NewErrCredentialsMissingServerURL() - } - - username, secret, err := helper.Get(serverURL) - if err != nil { - return err - } - - buffer.Reset() - err = json.NewEncoder(buffer).Encode(Credentials{ - ServerURL: serverURL, - Username: username, - Secret: secret, - }) - if err != nil { - return err - } - - _, _ = fmt.Fprint(writer, buffer.String()) - return nil -} - -// Erase removes credentials from the store. -// The reader must contain the server URL to remove. -func Erase(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - if len(serverURL) == 0 { - return NewErrCredentialsMissingServerURL() - } - - return helper.Delete(serverURL) -} - -// List returns all the serverURLs of keys in -// the OS store as a list of strings -func List(helper Helper, writer io.Writer) error { - accts, err := helper.List() - if err != nil { - return err - } - return json.NewEncoder(writer).Encode(accts) -} - -// PrintVersion outputs the current version.
-func PrintVersion(writer io.Writer) error { - _, _ = fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version) - return nil -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go deleted file mode 100644 index 2283d5a44..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ /dev/null @@ -1,124 +0,0 @@ -package credentials - -import ( - "errors" - "strings" -) - -const ( - // ErrCredentialsNotFound standardizes the not found error, so every helper returns - // the same message and docker can handle it properly. - errCredentialsNotFoundMessage = "credentials not found in native keychain" - - // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize - // invalid credentials or credentials management operations - errCredentialsMissingServerURLMessage = "no credentials server URL" - errCredentialsMissingUsernameMessage = "no credentials username" -) - -// errCredentialsNotFound represents an error -// raised when credentials are not in the store. -type errCredentialsNotFound struct{} - -// Error returns the standard error message -// for when the credentials are not in the store. -func (errCredentialsNotFound) Error() string { - return errCredentialsNotFoundMessage -} - -// NotFound implements the [ErrNotFound][errdefs.ErrNotFound] interface. -// -// [errdefs.ErrNotFound]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrNotFound -func (errCredentialsNotFound) NotFound() {} - -// NewErrCredentialsNotFound creates a new error -// for when the credentials are not in the store. -func NewErrCredentialsNotFound() error { - return errCredentialsNotFound{} -} - -// IsErrCredentialsNotFound returns true if the error -// was caused by not having a set of credentials in a store. -func IsErrCredentialsNotFound(err error) bool { - var target errCredentialsNotFound - return errors.As(err, &target) -} - -// IsErrCredentialsNotFoundMessage returns true if the error -// was caused by not having a set of credentials in a store. -// -// This function helps to check messages returned by an -// external program via its standard output. -func IsErrCredentialsNotFoundMessage(err string) bool { - return strings.TrimSpace(err) == errCredentialsNotFoundMessage -} - -// errCredentialsMissingServerURL represents an error raised -// when the credentials object has no server URL or when no -// server URL is provided to a credentials operation requiring -// one. -type errCredentialsMissingServerURL struct{} - -func (errCredentialsMissingServerURL) Error() string { - return errCredentialsMissingServerURLMessage -} - -// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter] -// interface. -// -// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter -func (errCredentialsMissingServerURL) InvalidParameter() {} - -// errCredentialsMissingUsername represents an error raised -// when the credentials object has no username or when no -// username is provided to a credentials operation requiring -// one. -type errCredentialsMissingUsername struct{} - -func (errCredentialsMissingUsername) Error() string { - return errCredentialsMissingUsernameMessage -} - -// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter] -// interface. 
-// -// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter -func (errCredentialsMissingUsername) InvalidParameter() {} - -// NewErrCredentialsMissingServerURL creates a new error for -// errCredentialsMissingServerURL. -func NewErrCredentialsMissingServerURL() error { - return errCredentialsMissingServerURL{} -} - -// NewErrCredentialsMissingUsername creates a new error for -// errCredentialsMissingUsername. -func NewErrCredentialsMissingUsername() error { - return errCredentialsMissingUsername{} -} - -// IsCredentialsMissingServerURL returns true if the error -// was an errCredentialsMissingServerURL. -func IsCredentialsMissingServerURL(err error) bool { - var target errCredentialsMissingServerURL - return errors.As(err, &target) -} - -// IsCredentialsMissingServerURLMessage checks for an -// errCredentialsMissingServerURL in the error message. -func IsCredentialsMissingServerURLMessage(err string) bool { - return strings.TrimSpace(err) == errCredentialsMissingServerURLMessage -} - -// IsCredentialsMissingUsername returns true if the error -// was an errCredentialsMissingUsername. -func IsCredentialsMissingUsername(err error) bool { - var target errCredentialsMissingUsername - return errors.As(err, &target) -} - -// IsCredentialsMissingUsernameMessage checks for an -// errCredentialsMissingUsername in the error message. -func IsCredentialsMissingUsernameMessage(err string) bool { - return strings.TrimSpace(err) == errCredentialsMissingUsernameMessage -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go deleted file mode 100644 index 135acd254..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go +++ /dev/null @@ -1,14 +0,0 @@ -package credentials - -// Helper is the interface a credentials store helper must implement. -type Helper interface { - // Add appends credentials to the store. - Add(*Credentials) error - // Delete removes credentials from the store. - Delete(serverURL string) error - // Get retrieves credentials from the store. - // It returns username and secret as strings. - Get(serverURL string) (string, string, error) - // List returns the stored serverURLs and their associated usernames. - List() (map[string]string, error) -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go deleted file mode 100644 index 84377c263..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go +++ /dev/null @@ -1,16 +0,0 @@ -package credentials - -var ( - // Name is filled at linking time - Name = "" - - // Package is filled at linking time - Package = "github.com/docker/docker-credential-helpers" - - // Version holds the complete version number. Filled in at linking time. - Version = "v0.0.0+unknown" - - // Revision is filled with the VCS (e.g. git) revision being used to build - // the program at linking time. 
- Revision = "" -) diff --git a/vendor/github.com/containers/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md similarity index 100% rename from vendor/github.com/containers/libtrust/CONTRIBUTING.md rename to vendor/github.com/docker/libtrust/CONTRIBUTING.md diff --git a/vendor/github.com/containers/libtrust/LICENSE b/vendor/github.com/docker/libtrust/LICENSE similarity index 100% rename from vendor/github.com/containers/libtrust/LICENSE rename to vendor/github.com/docker/libtrust/LICENSE diff --git a/vendor/github.com/containers/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS similarity index 100% rename from vendor/github.com/containers/libtrust/MAINTAINERS rename to vendor/github.com/docker/libtrust/MAINTAINERS diff --git a/vendor/github.com/containers/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md similarity index 100% rename from vendor/github.com/containers/libtrust/README.md rename to vendor/github.com/docker/libtrust/README.md diff --git a/vendor/github.com/containers/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go similarity index 100% rename from vendor/github.com/containers/libtrust/certificates.go rename to vendor/github.com/docker/libtrust/certificates.go diff --git a/vendor/github.com/containers/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go similarity index 100% rename from vendor/github.com/containers/libtrust/doc.go rename to vendor/github.com/docker/libtrust/doc.go diff --git a/vendor/github.com/containers/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go similarity index 98% rename from vendor/github.com/containers/libtrust/ec_key.go rename to vendor/github.com/docker/libtrust/ec_key.go index 0ee1b9110..00bbe4b3c 100644 --- a/vendor/github.com/containers/libtrust/ec_key.go +++ b/vendor/github.com/docker/libtrust/ec_key.go @@ -269,11 +269,17 @@ func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byt // The given hashID is only a suggestion, and since EC keys only support // one signature/hash algorithm given the curve name, we disregard it for // the elliptic curve JWK signature implementation.
- r, s, err := k.sign(data, hashID) + hasher := k.signatureAlgorithm.HashID().New() + _, err = io.Copy(hasher, data) if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) + return nil, "", fmt.Errorf("error reading data to sign: %s", err) } + hash := hasher.Sum(nil) + r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } rBytes, sBytes := r.Bytes(), s.Bytes() octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 // MUST include leading zeros in the output diff --git a/vendor/github.com/containers/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go similarity index 100% rename from vendor/github.com/containers/libtrust/filter.go rename to vendor/github.com/docker/libtrust/filter.go diff --git a/vendor/github.com/containers/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go similarity index 100% rename from vendor/github.com/containers/libtrust/hash.go rename to vendor/github.com/docker/libtrust/hash.go diff --git a/vendor/github.com/containers/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go similarity index 100% rename from vendor/github.com/containers/libtrust/jsonsign.go rename to vendor/github.com/docker/libtrust/jsonsign.go diff --git a/vendor/github.com/containers/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go similarity index 100% rename from vendor/github.com/containers/libtrust/key.go rename to vendor/github.com/docker/libtrust/key.go diff --git a/vendor/github.com/containers/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go similarity index 100% rename from vendor/github.com/containers/libtrust/key_files.go rename to vendor/github.com/docker/libtrust/key_files.go diff --git a/vendor/github.com/containers/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go similarity index 100% rename from vendor/github.com/containers/libtrust/key_manager.go rename to vendor/github.com/docker/libtrust/key_manager.go diff --git a/vendor/github.com/containers/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go similarity index 100% rename from vendor/github.com/containers/libtrust/rsa_key.go rename to vendor/github.com/docker/libtrust/rsa_key.go diff --git a/vendor/github.com/containers/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go similarity index 100% rename from vendor/github.com/containers/libtrust/util.go rename to vendor/github.com/docker/libtrust/util.go diff --git a/vendor/github.com/gorilla/mux/.editorconfig b/vendor/github.com/gorilla/mux/.editorconfig deleted file mode 100644 index c6b74c3e0..000000000 --- a/vendor/github.com/gorilla/mux/.editorconfig +++ /dev/null @@ -1,20 +0,0 @@ -; https://editorconfig.org/ - -root = true - -[*] -insert_final_newline = true -charset = utf-8 -trim_trailing_whitespace = true -indent_style = space -indent_size = 2 - -[{Makefile,go.mod,go.sum,*.go,.gitmodules}] -indent_style = tab -indent_size = 4 - -[*.md] -indent_size = 4 -trim_trailing_whitespace = false - -eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/.gitignore b/vendor/github.com/gorilla/mux/.gitignore deleted file mode 100644 index 84039fec6..000000000 --- a/vendor/github.com/gorilla/mux/.gitignore +++ /dev/null @@ -1 +0,0 @@ -coverage.coverprofile diff --git a/vendor/github.com/gorilla/mux/Makefile b/vendor/github.com/gorilla/mux/Makefile deleted file mode 100644 index 98f5ab75f..000000000 --- 
a/vendor/github.com/gorilla/mux/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') -GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest - -GO_SEC=$(shell which gosec 2> /dev/null || echo '') -GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest - -GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') -GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest - -.PHONY: golangci-lint -golangci-lint: - $(if $(GO_LINT), ,go install $(GO_LINT_URI)) - @echo "##### Running golangci-lint" - golangci-lint run -v - -.PHONY: gosec -gosec: - $(if $(GO_SEC), ,go install $(GO_SEC_URI)) - @echo "##### Running gosec" - gosec ./... - -.PHONY: govulncheck -govulncheck: - $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) - @echo "##### Running govulncheck" - govulncheck ./... - -.PHONY: verify -verify: golangci-lint gosec govulncheck - -.PHONY: test -test: - @echo "##### Running tests" - go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index 382513d57..000000000 --- a/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,812 +0,0 @@ -# gorilla/mux - -![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg) -[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux) -[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) - - -![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) - -Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to -their respective handler. - -The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - -* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. -* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. -* URL hosts, paths and query values can have variables with an optional regular expression. -* Registered URLs can be built, or "reversed", which helps maintain references to resources. -* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. - ---- - -* [Install](#install) -* [Examples](#examples) -* [Matching Routes](#matching-routes) -* [Static Files](#static-files) -* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.)
-* [Registered URLs](#registered-urls) -* [Walking Routes](#walking-routes) -* [Graceful Shutdown](#graceful-shutdown) -* [Middleware](#middleware) -* [Handling CORS Requests](#handling-cors-requests) -* [Testing Handlers](#testing-handlers) -* [Full Example](#full-example) - ---- - -## Install - -With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: - -```sh -go get -u github.com/gorilla/mux -``` - -## Examples - -Let's start registering a couple of URL paths and handlers: - -```go -func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) -} -``` - -Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. - -Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/products/{key}", ProductHandler) -r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: - -```go -func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, "Category: %v\n", vars["category"]) -} -``` - -And this is all you need to know about the basic usage. More advanced options are explained below. - -### Matching Routes - -Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: - -```go -r := mux.NewRouter() -// Only matches if domain is "www.example.com". -r.Host("www.example.com") -// Matches a dynamic subdomain. -r.Host("{subdomain:[a-z]+}.example.com") -``` - -There are several other matchers that can be added. To match path prefixes: - -```go -r.PathPrefix("/products/") -``` - -...or HTTP methods: - -```go -r.Methods("GET", "POST") -``` - -...or URL schemes: - -```go -r.Schemes("https") -``` - -...or header values: - -```go -r.Headers("X-Requested-With", "XMLHttpRequest") -``` - -...or query values: - -```go -r.Queries("key", "value") -``` - -...or to use a custom matcher function: - -```go -r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 -}) -``` - -...and finally, it is possible to combine several matchers in a single route: - -```go -r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") -``` - -Routes are tested in the order they were added to the router. If two routes match, the first one wins: - -```go -r := mux.NewRouter() -r.HandleFunc("/specific", specificHandler) -r.PathPrefix("/").Handler(catchAllHandler) -``` - -Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". - -For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it: - -```go -r := mux.NewRouter() -s := r.Host("www.example.com").Subrouter() -``` - -Then register routes in the subrouter: - -```go -s.HandleFunc("/products/", ProductsHandler) -s.HandleFunc("/products/{key}", ProductHandler) -s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: - -```go -r := mux.NewRouter() -s := r.PathPrefix("/products").Subrouter() -// "/products/" -s.HandleFunc("/", ProductsHandler) -// "/products/{key}/" -s.HandleFunc("/{key}/", ProductHandler) -// "/products/{key}/details" -s.HandleFunc("/{key}/details", ProductDetailsHandler) -``` - - -### Static Files - -Note that the path provided to `PathPrefix()` represents a "wildcard": calling -`PathPrefix("/static/").Handler(...)` means that the handler will be passed any -request that matches "/static/\*". This makes it easy to serve static files with mux: - -```go -func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Serving Single Page Applications - -Most of the time it makes sense to serve your SPA on a separate web server from your API, -but sometimes it's desirable to serve them both from one place. It's possible to write a simple -handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage -mux's powerful routing for your API endpoints. - -```go -package main - -import ( - "encoding/json" - "log" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/gorilla/mux" -) - -// spaHandler implements the http.Handler interface, so we can use it -// to respond to HTTP requests. The path to the static directory and -// path to the index file within that static directory are used to -// serve the SPA in the given static directory. -type spaHandler struct { - staticPath string - indexPath string -} - -// ServeHTTP inspects the URL path to locate a file within the static dir -// on the SPA handler. If a file is found, it will be served. If not, the -// file located at the index path on the SPA handler will be served. This -// is suitable behavior for serving an SPA (single page application).
-func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Join internally calls path.Clean to prevent directory traversal - path := filepath.Join(h.staticPath, r.URL.Path) - - // check whether a file exists or is a directory at the given path - fi, err := os.Stat(path) - if os.IsNotExist(err) || fi.IsDir() { - // file does not exist or path is a directory, serve index.html - http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) - return - } - - if err != nil { - // if we got an error (that wasn't that the file doesn't exist) stat-ing the - // file, return a 500 internal server error and stop - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // otherwise, use http.FileServer to serve the static file - http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) -} - -func main() { - router := mux.NewRouter() - - router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) { - // an example API handler - json.NewEncoder(w).Encode(map[string]bool{"ok": true}) - }) - - spa := spaHandler{staticPath: "build", indexPath: "index.html"} - router.PathPrefix("/").Handler(spa) - - srv := &http.Server{ - Handler: router, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Registered URLs - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") -``` - -To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: - -```go -url, err := r.Get("article").URL("category", "technology", "id", "42") -``` - -...and the result will be a `url.URL` with the following path: - -``` -"/articles/technology/42" -``` - -This also works for host and query value variables: - -```go -r := mux.NewRouter() -r.Host("{subdomain}.example.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - -// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") -``` - -All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - -```go -r.HeadersRegexp("Content-Type", "application/(text|json)") -``` - -...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`. - -There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead.
For the previous route, we would do: - -```go -// "http://news.example.com/" -host, err := r.Get("article").URLHost("subdomain", "news") - -// "/articles/technology/42" -path, err := r.Get("article").URLPath("category", "technology", "id", "42") -``` - -And if you use subrouters, host and path defined separately can be built as well: - -```go -r := mux.NewRouter() -s := r.Host("{subdomain}.example.com").Subrouter() -s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - -// "http://news.example.com/articles/technology/42" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -``` - -To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available: -```go -r := mux.NewRouter() -r.Host("{domain}"). - Path("/{group}/{item_id}"). - Queries("some_data1", "{some_data1}"). - Queries("some_data2", "{some_data2}"). - Name("article") - -// Will print [domain group item_id some_data1 some_data2] -fmt.Println(r.Get("article").GetVarNames()) - -``` -### Walking Routes - -The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, -the following prints all of the registered routes: - -```go -package main - -import ( - "fmt" - "net/http" - "strings" - - "github.com/gorilla/mux" -) - -func handler(w http.ResponseWriter, r *http.Request) { - return -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.HandleFunc("/products", handler).Methods("POST") - r.HandleFunc("/articles", handler).Methods("GET") - r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") - r.HandleFunc("/authors", handler).Queries("surname", "{surname}") - err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { - pathTemplate, err := route.GetPathTemplate() - if err == nil { - fmt.Println("ROUTE:", pathTemplate) - } - pathRegexp, err := route.GetPathRegexp() - if err == nil { - fmt.Println("Path regexp:", pathRegexp) - } - queriesTemplates, err := route.GetQueriesTemplates() - if err == nil { - fmt.Println("Queries templates:", strings.Join(queriesTemplates, ",")) - } - queriesRegexps, err := route.GetQueriesRegexp() - if err == nil { - fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ",")) - } - methods, err := route.GetMethods() - if err == nil { - fmt.Println("Methods:", strings.Join(methods, ",")) - } - fmt.Println() - return nil - }) - - if err != nil { - fmt.Println(err) - } - - http.Handle("/", r) -} -``` - -### Graceful Shutdown - -Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`: - -```go -package main - -import ( - "context" - "flag" - "log" - "net/http" - "os" - "os/signal" - "time" - - "github.com/gorilla/mux" -) - -func main() { - var wait time.Duration - flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully waits for existing connections to finish - e.g. 15s or 1m") - flag.Parse() - - r := mux.NewRouter() - // Add your routes as needed - - srv := &http.Server{ - Addr: "0.0.0.0:8080", - // Good practice to set timeouts to avoid Slowloris attacks. - WriteTimeout: time.Second * 15, - ReadTimeout: time.Second * 15, - IdleTimeout: time.Second * 60, - Handler: r, // Pass our instance of gorilla/mux in. - } - - // Run our server in a goroutine so that it doesn't block.
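- // Once srv.Shutdown is called below, ListenAndServe returns
- // http.ErrServerClosed, so an error logged here during a graceful
- // stop is expected rather than a sign of failure.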
- go func() { - if err := srv.ListenAndServe(); err != nil { - log.Println(err) - } - }() - - c := make(chan os.Signal, 1) - // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) - // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. - signal.Notify(c, os.Interrupt) - - // Block until we receive our signal. - <-c - - // Create a deadline to wait for. - ctx, cancel := context.WithTimeout(context.Background(), wait) - defer cancel() - // Doesn't block if no connections, but will otherwise wait - // until the timeout deadline. - srv.Shutdown(ctx) - // Optionally, you could run srv.Shutdown in a goroutine and block on - // <-ctx.Done() if your application should wait for other services - // to finalize based on context cancellation. - log.Println("shutting down") - os.Exit(0) -} -``` - -### Middleware - -Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters. -Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking. - -Mux middlewares are defined using the de facto standard type: - -```go -type MiddlewareFunc func(http.Handler) http.Handler -``` - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers. - -A very basic middleware which logs the URI of the request being handled could be written as: - -```go -func loggingMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler.
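- // Skipping this call instead stops the chain: a middleware that does
- // not invoke next.ServeHTTP must write the response itself (see the
- // note further below).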
- next.ServeHTTP(w, r) - }) -} -``` - -Middlewares can be added to a router using `Router.Use()`: - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) -r.Use(loggingMiddleware) -``` - -A more complex authentication middleware, which maps session token to users, could be written as: - -```go -// Define our struct -type authenticationMiddleware struct { - tokenUsers map[string]string -} - -// Initialize it somewhere -func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" -} - -// Middleware function, which will be called for each request -func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - // Pass down the request to the next middleware (or final handler) - next.ServeHTTP(w, r) - } else { - // Write an error and stop the handler chain - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) -} -``` - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) - -amw := authenticationMiddleware{tokenUsers: make(map[string]string)} -amw.Populate() - -r.Use(amw.Middleware) -``` - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. - -### Handling CORS Requests - -[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header. - -* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin` -* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route -* If you do not specify any methods, then: -> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers. - -Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers: - -```go -package main - -import ( - "net/http" - "github.com/gorilla/mux" -) - -func main() { - r := mux.NewRouter() - - // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers - r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions) - r.Use(mux.CORSMethodMiddleware(r)) - - http.ListenAndServe(":8080", r) -} - -func fooHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - if r.Method == http.MethodOptions { - return - } - - w.Write([]byte("foo")) -} -``` - -And a request to `/foo` using something like: - -```bash -curl localhost:8080/foo -v -``` - -Would look like: - -```bash -* Trying ::1...
-* TCP_NODELAY set -* Connected to localhost (::1) port 8080 (#0) -> GET /foo HTTP/1.1 -> Host: localhost:8080 -> User-Agent: curl/7.59.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS -< Access-Control-Allow-Origin: * -< Date: Fri, 28 Jun 2019 20:13:30 GMT -< Content-Length: 3 -< Content-Type: text/plain; charset=utf-8 -< -* Connection #0 to host localhost left intact -foo -``` - -### Testing Handlers - -Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. - -First, our simple HTTP handler: - -```go -// endpoints.go -package main - -func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { - // A very simple health check. - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - // In the future we could report back on the status of our DB, or our cache - // (e.g. Redis) by performing a simple PING, and include them in the response. - io.WriteString(w, `{"alive": true}`) -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/health", HealthCheckHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test code: - -```go -// endpoints_test.go -package main - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func TestHealthCheckHandler(t *testing.T) { - // Create a request to pass to our handler. We don't have any query parameters for now, so we'll - // pass 'nil' as the third parameter. - req, err := http.NewRequest("GET", "/health", nil) - if err != nil { - t.Fatal(err) - } - - // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response. - rr := httptest.NewRecorder() - handler := http.HandlerFunc(HealthCheckHandler) - - // Our handlers satisfy http.Handler, so we can call their ServeHTTP method - // directly and pass in our Request and ResponseRecorder. - handler.ServeHTTP(rr, req) - - // Check the status code is what we expect. - if status := rr.Code; status != http.StatusOK { - t.Errorf("handler returned wrong status code: got %v want %v", - status, http.StatusOK) - } - - // Check the response body is what we expect. - expected := `{"alive": true}` - if rr.Body.String() != expected { - t.Errorf("handler returned unexpected body: got %v want %v", - rr.Body.String(), expected) - } -} -``` - -In the case that our routes have [variables](#examples), we can pass those in the request. We could write -[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple -possible route variables as needed. - -```go -// endpoints.go -func main() { - r := mux.NewRouter() - // A route with a route variable: - r.HandleFunc("/metrics/{type}", MetricsHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test file, with a table-driven test of `routeVariables`: - -```go -// endpoints_test.go -func TestMetricsHandler(t *testing.T) { - tt := []struct{ - routeVariable string - shouldPass bool - }{ - {"goroutines", true}, - {"heap", true}, - {"counters", true}, - {"queries", true}, - {"adhadaeqm3k", false}, - } - - for _, tc := range tt { - path := fmt.Sprintf("/metrics/%s", tc.routeVariable) - req, err := http.NewRequest("GET", path, nil) - if err != nil { - t.Fatal(err) - } - - rr := httptest.NewRecorder() - - // To add the vars to the context, - // we need to create a router through which we can pass the request. 
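- // (mux.SetURLVars can alternatively inject route variables into a
- // request directly, which can be handy when testing a handler in
- // isolation without a router.)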
- router := mux.NewRouter() - router.HandleFunc("/metrics/{type}", MetricsHandler) - router.ServeHTTP(rr, req) - - // In this case, our MetricsHandler returns a non-200 response - // for a route variable it doesn't know about. - if rr.Code == http.StatusOK && !tc.shouldPass { - t.Errorf("handler should have failed on routeVariable %s: got %v want %v", - tc.routeVariable, rr.Code, http.StatusOK) - } - } -} -``` - -## Full Example - -Here's a complete, runnable example of a small `mux` based server: - -```go -package main - -import ( - "net/http" - "log" - "github.com/gorilla/mux" -) - -func YourHandler(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Gorilla!\n")) -} - -func main() { - r := mux.NewRouter() - // Routes consist of a path and a handler function. - r.HandleFunc("/", YourHandler) - - // Bind to a port and pass our router in - log.Fatal(http.ListenAndServe(":8000", r)) -} -``` - -## License - -BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go deleted file mode 100644 index 80601351f..000000000 --- a/vendor/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - - Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - - URL hosts, paths and query values can have variables with an optional - regular expression. - - Registered URLs can be built, or "reversed", which helps maintain - references to resources. - - Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - - It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -Groups can be used inside patterns, as long as they are non-capturing (?:re).
For example: - - r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -Note that if any capturing groups are present, mux will panic() during parsing. To prevent -this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to -"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably -when capturing groups were present. - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.example.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.example.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register their -paths relative to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Note that the path provided to PathPrefix() represents a "wildcard": calling -PathPrefix("/static/").Handler(...) means that the handler will be passed any -request that matches "/static/*". This makes it easy to serve static files with mux: - - func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from.
Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) - } - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host and query value variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. 
- - type MiddlewareFunc func(http.Handler) http.Handler - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). - -A very basic middleware which logs the URI of the request being handled could be written as: - - func simpleMw(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. - next.ServeHTTP(w, r) - }) - } - -Middlewares can be added to a router using `Router.Use()`: - - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.Use(simpleMw) - -A more complex authentication middleware, which maps session token to users, could be written as: - - // Define our struct - type authenticationMiddleware struct { - tokenUsers map[string]string - } - - // Initialize it somewhere - func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" - } - - // Middleware function, which will be called for each request - func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - next.ServeHTTP(w, r) - } else { - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) - } - - r := mux.NewRouter() - r.HandleFunc("/", handler) - - amw := authenticationMiddleware{tokenUsers: make(map[string]string)} - amw.Populate() - - r.Use(amw.Middleware) - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. -*/ -package mux diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go deleted file mode 100644 index cb51c565e..000000000 --- a/vendor/github.com/gorilla/mux/middleware.go +++ /dev/null @@ -1,74 +0,0 @@ -package mux - -import ( - "net/http" - "strings" -) - -// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. -// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed -// to it, and then calls the handler passed as parameter to the MiddlewareFunc. -type MiddlewareFunc func(http.Handler) http.Handler - -// middleware interface is anything which implements a MiddlewareFunc named Middleware. -type middleware interface { - Middleware(handler http.Handler) http.Handler -} - -// Middleware allows MiddlewareFunc to implement the middleware interface. -func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { - return mw(handler) -} - -// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. 
-func (r *Router) Use(mwf ...MiddlewareFunc) { - for _, fn := range mwf { - r.middlewares = append(r.middlewares, fn) - } -} - -// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) useInterface(mw middleware) { - r.middlewares = append(r.middlewares, mw) -} - -// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header -// on requests for routes that have an OPTIONS method matcher to all the method matchers on -// the route. Routes that do not explicitly handle OPTIONS requests will not be processed -// by the middleware. See examples for usage. -func CORSMethodMiddleware(r *Router) MiddlewareFunc { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - allMethods, err := getAllMethodsForRoute(r, req) - if err == nil { - for _, v := range allMethods { - if v == http.MethodOptions { - w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) - } - } - } - - next.ServeHTTP(w, req) - }) - } -} - -// getAllMethodsForRoute returns all the methods from method matchers matching a given -// request. -func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { - var allMethods []string - - for _, route := range r.routes { - var match RouteMatch - if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch { - methods, err := route.GetMethods() - if err != nil { - return nil, err - } - - allMethods = append(allMethods, methods...) - } - } - - return allMethods, nil -} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go deleted file mode 100644 index 1e089906f..000000000 --- a/vendor/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,608 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "context" - "errors" - "fmt" - "net/http" - "path" - "regexp" -) - -var ( - // ErrMethodMismatch is returned when the method in the request does not match - // the method defined against the route. - ErrMethodMismatch = errors.New("method is not allowed") - // ErrNotFound is returned when no route match is found. - ErrNotFound = errors.New("no matching route was found") -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route)} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - // This can be used to render your own 404 Not Found errors. - NotFoundHandler http.Handler - - // Configurable Handler to be used when the request method does not match the route. - // This can be used to render your own 405 Method Not Allowed errors. - MethodNotAllowedHandler http.Handler - - // Routes to be matched, in order. - routes []*Route - - // Routes by name for URL building. 
- namedRoutes map[string]*Route
-
- // If true, do not clear the request context after handling the request.
- //
- // Deprecated: No effect, since the context is stored on the request itself.
- KeepContext bool
-
- // Slice of middlewares to be called after a match is found
- middlewares []middleware
-
- // configuration shared with `Route`
- routeConf
-}
-
-// common route configuration shared between `Router` and `Route`
-type routeConf struct {
- // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
- useEncodedPath bool
-
- // If true, when the path pattern is "/path/", accessing "/path" will
- // redirect to the former and vice versa.
- strictSlash bool
-
- // If true, when the path pattern is "/path//to", accessing "/path//to"
- // will not redirect
- skipClean bool
-
- // Manager for the variables from host and path.
- regexp routeRegexpGroup
-
- // List of matchers.
- matchers []matcher
-
- // The scheme used when building URLs.
- buildScheme string
-
- buildVarsFunc BuildVarsFunc
-}
-
-// returns an effective deep copy of `routeConf`
-func copyRouteConf(r routeConf) routeConf {
- c := r
-
- if r.regexp.path != nil {
- c.regexp.path = copyRouteRegexp(r.regexp.path)
- }
-
- if r.regexp.host != nil {
- c.regexp.host = copyRouteRegexp(r.regexp.host)
- }
-
- c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
- for _, q := range r.regexp.queries {
- c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
- }
-
- c.matchers = make([]matcher, len(r.matchers))
- copy(c.matchers, r.matchers)
-
- return c
-}
-
-func copyRouteRegexp(r *routeRegexp) *routeRegexp {
- c := *r
- return &c
-}
-
-// Match attempts to match the given request against the router's registered routes.
-//
-// If the request matches a route of this router or one of its subrouters the Route,
-// Handler, and Vars fields of the match argument are filled and this function
-// returns true.
-//
-// If the request does not match any of this router's or its subrouters' routes
-// then this function returns false. If available, a reason for the match failure
-// will be filled in the match argument's MatchErr field. If the match failure type
-// (e.g. not found) has a registered handler, the handler is assigned to the Handler
-// field of the match argument.
-func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
- for _, route := range r.routes {
- if route.Match(req, match) {
- // Build middleware chain if no error was found
- if match.MatchErr == nil {
- for i := len(r.middlewares) - 1; i >= 0; i-- {
- match.Handler = r.middlewares[i].Middleware(match.Handler)
- }
- }
- return true
- }
- }
-
- if match.MatchErr == ErrMethodMismatch {
- if r.MethodNotAllowedHandler != nil {
- match.Handler = r.MethodNotAllowedHandler
- return true
- }
-
- return false
- }
-
- // Closest match for a router (includes sub-routers)
- if r.NotFoundHandler != nil {
- match.Handler = r.NotFoundHandler
- match.MatchErr = ErrNotFound
- return true
- }
-
- match.MatchErr = ErrNotFound
- return false
-}
-
-// ServeHTTP dispatches the handler registered in the matched route.
-//
-// When there is a match, the route variables can be retrieved calling
-// mux.Vars(request).
-func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- if !r.skipClean {
- path := req.URL.Path
- if r.useEncodedPath {
- path = req.URL.EscapedPath()
- }
- // Clean path to canonical form and redirect. 
- if p := cleanPath(path); p != path { - - // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - req = requestWithVars(req, match.Vars) - req = requestWithRoute(req, match.Route) - } - - if handler == nil && match.MatchErr == ErrMethodMismatch { - handler = methodNotAllowedHandler() - } - - if handler == nil { - handler = http.NotFoundHandler() - } - - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.namedRoutes[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.namedRoutes[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will perform a redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for -// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed -// request will be made as a GET by most clients. Use middleware or client settings -// to modify this behaviour as needed. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting. -func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// SkipClean defines the path cleaning behaviour for new routes. The initial -// value is false. Users should be careful about which routes are not cleaned -// -// When true, if the route path is "/path//to", it will remain with the double -// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ -// -// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will -// become /fetch/http/xkcd.com/534 -func (r *Router) SkipClean(value bool) *Router { - r.skipClean = value - return r -} - -// UseEncodedPath tells the router to match the encoded original path -// to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". -// -// If not called, the router will match the unencoded path to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" -func (r *Router) UseEncodedPath() *Router { - r.useEncodedPath = true - return r -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. 
-func (r *Router) NewRoute() *Route { - // initialize a route with a copy of the parent router's configuration - route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.routes = append(r.routes, route) - return route -} - -// Name registers a new route with a name. -// See Route.Name(). -func (r *Router) Name(name string) *Route { - return r.NewRoute().Name(name) -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). -func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// BuildVarsFunc registers a new route with a custom function for modifying -// route variables before building a URL. -func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { - return r.NewRoute().BuildVarsFunc(f) -} - -// Walk walks the router and all its sub-routers, calling walkFn for each route -// in the tree. The routes are walked in the order they were added. Sub-routers -// are explored depth-first. -func (r *Router) Walk(walkFn WalkFunc) error { - return r.walk(walkFn, []*Route{}) -} - -// SkipRouter is used as a return value from WalkFuncs to indicate that the -// router that walk is about to descend down to should be skipped. -var SkipRouter = errors.New("skip this router") - -// WalkFunc is the type of the function called for each route visited by Walk. -// At every invocation, it is given the current route, and the current router, -// and a list of ancestor routes that lead to the current route. 
-type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - if err != nil { - return err - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string - - // MatchErr is set to appropriate matching error - // It is set to ErrMethodMismatch if there is a mismatch in - // the request method and route method - MatchErr error -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. -func Vars(r *http.Request) map[string]string { - if rv := r.Context().Value(varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -// This only works when called inside the handler of the matched route -// because the matched route is stored in the request context which is cleared -// after the handler returns. -func CurrentRoute(r *http.Request) *Route { - if rv := r.Context().Value(routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func requestWithVars(r *http.Request, vars map[string]string) *http.Request { - ctx := context.WithValue(r.Context(), varsKey, vars) - return r.WithContext(ctx) -} - -func requestWithRoute(r *http.Request, route *Route) *http.Request { - ctx := context.WithValue(r.Context(), routeKey, route) - return r.WithContext(ctx) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// checkPairs returns the count of strings passed in, and an error if -// the count is not an even number. -func checkPairs(pairs ...string) (int, error) { - length := len(pairs) - if length%2 != 0 { - return length, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - return length, nil -} - -// mapFromPairsToString converts variadic string parameters to a -// string to string map. 
-func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// mapFromPairsToRegex converts variadic string parameters to a -// string to regex map. -func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]*regexp.Regexp, length/2) - for i := 0; i < length; i += 2 { - regex, err := regexp.Compile(pairs[i+1]) - if err != nil { - return nil, err - } - m[pairs[i]] = regex - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMapWithString returns true if the given key/value pairs exist in a given map. -func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against -// the given regex -func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != nil { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v.MatchString(value) { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// methodNotAllowed replies to the request with an HTTP status code 405. -func methodNotAllowed(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusMethodNotAllowed) -} - -// methodNotAllowedHandler returns a simple request handler -// that replies to each request with a status code 405. -func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go deleted file mode 100644 index 5d05cfa0e..000000000 --- a/vendor/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" -) - -type routeRegexpOptions struct { - strictSlash bool - useEncodedPath bool -} - -type regexpType int - -const ( - regexpTypePath regexpType = iota - regexpTypeHost - regexpTypePrefix - regexpTypeQuery -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if typ == regexpTypeQuery { - defaultPattern = ".*" - } else if typ == regexpTypeHost { - defaultPattern = "[^.]+" - } - // Only match strict slash if not matching - if typ != regexpTypePath { - options.strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if options.strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - pattern.WriteByte('^') - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. - raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) - - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if options.strictSlash { - pattern.WriteString("[/]?") - } - if typ == regexpTypeQuery { - // Add the default pattern if the query value is empty - if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { - pattern.WriteString(defaultPattern) - } - } - if typ != regexpTypePrefix { - pattern.WriteByte('$') - } - - var wildcardHostPort bool - if typ == regexpTypeHost { - if !strings.Contains(pattern.String(), ":") { - wildcardHostPort = true - } - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - - // Check for capturing groups which used to work in older versions - if reg.NumSubexp() != len(idxs)/2 { - panic(fmt.Sprintf("route %s contains capture groups in its regexp. 
", template) + - "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") - } - - // Done! - return &routeRegexp{ - template: template, - regexpType: typ, - options: options, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - wildcardHostPort: wildcardHostPort, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // The type of match - regexpType regexpType - // Options for matching - options routeRegexpOptions - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp - // Wildcard host-port (no strict port match in hostname) - wildcardHostPort bool -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if r.regexpType == regexpTypeHost { - host := getHost(req) - if r.wildcardHostPort { - // Don't be strict on the port match - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - } - return r.regexp.MatchString(host) - } - - if r.regexpType == regexpTypeQuery { - return r.matchQueryString(req) - } - path := req.URL.Path - if r.options.useEncodedPath { - path = req.URL.EscapedPath() - } - return r.regexp.MatchString(path) -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - if r.regexpType == regexpTypeQuery { - value = url.QueryEscape(value) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// getURLQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. -func (r *routeRegexp) getURLQuery(req *http.Request) string { - if r.regexpType != regexpTypeQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey) - if ok { - return templateKey + "=" + val - } - return "" -} - -// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0]. -// If key was not found, empty string and false is returned. -func findFirstQueryKey(rawQuery, key string) (value string, ok bool) { - query := []byte(rawQuery) - for len(query) > 0 { - foundKey := query - if i := bytes.IndexAny(foundKey, "&;"); i >= 0 { - foundKey, query = foundKey[:i], foundKey[i+1:] - } else { - query = query[:0] - } - if len(foundKey) == 0 { - continue - } - var value []byte - if i := bytes.IndexByte(foundKey, '='); i >= 0 { - foundKey, value = foundKey[:i], foundKey[i+1:] - } - if len(foundKey) < len(key) { - // Cannot possibly be key. 
- continue - } - keyString, err := url.QueryUnescape(string(foundKey)) - if err != nil { - continue - } - if keyString != key { - continue - } - valueString, err := url.QueryUnescape(string(value)) - if err != nil { - continue - } - return valueString, true - } - return "", false -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getURLQuery(req)) -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. -func braceIndices(s string) ([]int, error) { - var level, idx int - var idxs []int - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// varGroupName builds a capturing group name for the indexed variable. -func varGroupName(idx int) string { - return "v" + strconv.Itoa(idx) -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - host := getHost(req) - if v.host.wildcardHostPort { - // Don't be strict on the port match - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - } - matches := v.host.regexp.FindStringSubmatchIndex(host) - if len(matches) > 0 { - extractVars(host, matches, v.host.varsN, m.Vars) - } - } - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Store path variables. - if v.path != nil { - matches := v.path.regexp.FindStringSubmatchIndex(path) - if len(matches) > 0 { - extractVars(path, matches, v.path.varsN, m.Vars) - // Check if we should redirect. - if v.path.options.strictSlash { - p1 := strings.HasSuffix(path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) - } - } - } - } - // Store query string variables. - for _, q := range v.queries { - queryURL := q.getURLQuery(req) - matches := q.regexp.FindStringSubmatchIndex(queryURL) - if len(matches) > 0 { - extractVars(queryURL, matches, q.varsN, m.Vars) - } - } -} - -// getHost tries its best to return the request host. -// According to section 14.23 of RFC 2616 the Host header -// can include the port number if the default value of 80 is not used. 
-func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - return r.Host -} - -func extractVars(input string, matches []int, names []string, output map[string]string) { - for i, name := range names { - output[name] = input[matches[2*i+2]:matches[2*i+3]] - } -} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go deleted file mode 100644 index e8f11df22..000000000 --- a/vendor/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,765 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Request handler for the route. - handler http.Handler - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. - err error - - // "global" reference to all named routes - namedRoutes map[string]*Route - - // config possibly passed in from `Router` - routeConf -} - -// SkipClean reports whether path cleaning is enabled for this route via -// Router.SkipClean. -func (r *Route) SkipClean() bool { - return r.skipClean -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - - var matchErr error - - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - if _, ok := m.(methodMatcher); ok { - matchErr = ErrMethodMismatch - continue - } - - // Ignore ErrNotFound errors. These errors arise from match call - // to Subrouters. - // - // This prevents subsequent matching subrouters from failing to - // run middleware. If not ignored, the middleware would see a - // non-nil MatchErr and be skipped, even when there was a - // matching route. - if match.MatchErr == ErrNotFound { - match.MatchErr = nil - } - - matchErr = nil // nolint:ineffassign - return false - } else { - // Multiple routes may share the same path but use different HTTP methods. For instance: - // Route 1: POST "/users/{id}". - // Route 2: GET "/users/{id}", parameters: "id": "[0-9]+". - // - // The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2", - // The router should return a "Not Found" error as no route fully matches this request. - if match.MatchErr == ErrMethodMismatch { - match.MatchErr = nil - } - } - } - - if matchErr != nil { - match.MatchErr = matchErr - return false - } - - if match.MatchErr == ErrMethodMismatch && r.handler != nil { - // We found a route which matches request method, clear MatchErr - match.MatchErr = nil - // Then override the mis-matched handler - match.Handler = r.handler - } - - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - - // Set variables. 
- r.regexp.setMatch(req, match, r) - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// It is an error to call Name more than once on a route. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.namedRoutes[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. -func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { - if r.err != nil { - return r.err - } - if typ == regexpTypePath || typ == regexpTypePrefix { - if len(tpl) > 0 && tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ - strictSlash: r.strictSlash, - useEncodedPath: r.useEncodedPath, - }) - if err != nil { - return err - } - for _, q := range r.regexp.queries { - if err = uniqueVars(rr.varsN, q.varsN); err != nil { - return err - } - } - if typ == regexpTypeHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - if typ == regexpTypeQuery { - r.regexp.queries = append(r.regexp.queries, rr) - } else { - r.regexp.path = rr - } - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. 
-type headerMatcher map[string]string
-
-func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
- return matchMapWithString(m, r.Header, true)
-}
-
-// Headers adds a matcher for request header values.
-// It accepts a sequence of key/value pairs to be matched. For example:
-//
-// r := mux.NewRouter().NewRoute()
-// r.Headers("Content-Type", "application/json",
-// "X-Requested-With", "XMLHttpRequest")
-//
-// The above route will only match if both request header values match.
-// If the value is an empty string, it will match any value if the key is set.
-func (r *Route) Headers(pairs ...string) *Route {
- if r.err == nil {
- var headers map[string]string
- headers, r.err = mapFromPairsToString(pairs...)
- return r.addMatcher(headerMatcher(headers))
- }
- return r
-}
-
-// headerRegexMatcher matches the request against the route given a regex for the header
-type headerRegexMatcher map[string]*regexp.Regexp
-
-func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
- return matchMapWithRegex(m, r.Header, true)
-}
-
-// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
-// support. For example:
-//
-// r := mux.NewRouter().NewRoute()
-// r.HeadersRegexp("Content-Type", "application/(text|json)",
-// "X-Requested-With", "XMLHttpRequest")
-//
-// The above route will only match if both request headers match their respective regular expressions.
-// If the value is an empty string, it will match any value if the key is set.
-// Use the start and end of string anchors (^ and $) to match an exact value.
-func (r *Route) HeadersRegexp(pairs ...string) *Route {
- if r.err == nil {
- var headers map[string]*regexp.Regexp
- headers, r.err = mapFromPairsToRegex(pairs...)
- return r.addMatcher(headerRegexMatcher(headers))
- }
- return r
-}
-
-// Host -----------------------------------------------------------------------
-
-// Host adds a matcher for the URL host.
-// It accepts a template with zero or more URL variables enclosed by {}.
-// Variables can define an optional regexp pattern to be matched:
-//
-// - {name} matches anything until the next dot.
-//
-// - {name:pattern} matches the given regexp pattern.
-//
-// For example:
-//
-// r := mux.NewRouter().NewRoute()
-// r.Host("www.example.com")
-// r.Host("{subdomain}.domain.com")
-// r.Host("{subdomain:[a-z]+}.domain.com")
-//
-// Variable names must be unique in a given route. They can be retrieved
-// calling mux.Vars(request).
-func (r *Route) Host(tpl string) *Route {
- r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
- return r
-}
-
-// MatcherFunc ----------------------------------------------------------------
-
-// MatcherFunc is the function signature used by custom matchers.
-type MatcherFunc func(*http.Request, *RouteMatch) bool
-
-// Match returns the match for a given request.
-func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
- return m(r, match)
-}
-
-// MatcherFunc adds a custom function to be used as request matcher.
-func (r *Route) MatcherFunc(f MatcherFunc) *Route {
- return r.addMatcher(f)
-}
-
-// Methods --------------------------------------------------------------------
-
-// methodMatcher matches the request against HTTP methods.
-type methodMatcher []string
-
-func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
- return matchInArray(m, r.Method)
-}
-
-// Methods adds a matcher for HTTP methods.
-// It accepts a sequence of one or more methods to be matched, e.g.:
-// "GET", "POST", "PUT". 
-func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter().NewRoute() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePath) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. This matches if the given -// template is a prefix of the full URL path. See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. -// For example: -// -// r := mux.NewRouter().NewRoute() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&id=42. -// -// If the value is an empty string, it will match any value if the key is set. -// -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -func (r *Route) Queries(pairs ...string) *Route { - length := len(pairs) - if length%2 != 0 { - r.err = fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - return nil - } - for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { - return r - } - } - - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - scheme := r.URL.Scheme - // https://golang.org/pkg/net/http/#Request - // "For [most] server requests, fields other than Path and RawQuery will be - // empty." - // Since we're an http muxer, the scheme is either going to be http or https - // though, so we can just set it based on the tls termination state. - if scheme == "" { - if r.TLS == nil { - scheme = "http" - } else { - scheme = "https" - } - } - return matchInArray(m, scheme) -} - -// Schemes adds a matcher for URL schemes. 
-// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
-// If the request's URL has a scheme set, it will be matched against.
-// Generally, the URL scheme will only be set if a previous handler set it,
-// such as the ProxyHeaders handler from gorilla/handlers.
-// If unset, the scheme will be determined based on the request's TLS
-// termination state.
-// The first argument to Schemes will be used when constructing a route URL.
-func (r *Route) Schemes(schemes ...string) *Route {
- for k, v := range schemes {
- schemes[k] = strings.ToLower(v)
- }
- if len(schemes) > 0 {
- r.buildScheme = schemes[0]
- }
- return r.addMatcher(schemeMatcher(schemes))
-}
-
-// BuildVarsFunc --------------------------------------------------------------
-
-// BuildVarsFunc is the function signature used by custom build variable
-// functions (which can modify route variables before a route's URL is built).
-type BuildVarsFunc func(map[string]string) map[string]string
-
-// BuildVarsFunc adds a custom function to be used to modify build variables
-// before a route's URL is built.
-func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
- if r.buildVarsFunc != nil {
- // compose the old and new functions
- old := r.buildVarsFunc
- r.buildVarsFunc = func(m map[string]string) map[string]string {
- return f(old(m))
- }
- } else {
- r.buildVarsFunc = f
- }
- return r
-}
-
-// Subrouter ------------------------------------------------------------------
-
-// Subrouter creates a subrouter for the route.
-//
-// It will test the inner routes only if the parent route matched. For example:
-//
-// r := mux.NewRouter().NewRoute()
-// s := r.Host("www.example.com").Subrouter()
-// s.HandleFunc("/products/", ProductsHandler)
-// s.HandleFunc("/products/{key}", ProductHandler)
-// s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
-//
-// Here, the routes registered in the subrouter won't be tested if the host
-// doesn't match.
-func (r *Route) Subrouter() *Router {
- // initialize a subrouter with a copy of the parent route's configuration
- router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
- r.addMatcher(router)
- return router
-}
-
-// ----------------------------------------------------------------------------
-// URL building
-// ----------------------------------------------------------------------------
-
-// URL builds a URL for the route.
-//
-// It accepts a sequence of key/value pairs for the route variables. For
-// example, given this route:
-//
-// r := mux.NewRouter()
-// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
-// Name("article")
-//
-// ...a URL for it can be built using:
-//
-// url, err := r.Get("article").URL("category", "technology", "id", "42")
-//
-// ...which will return an url.URL with the following path:
-//
-// "/articles/technology/42"
-//
-// This also works for host variables:
-//
-// r := mux.NewRouter()
-// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
-// Host("{subdomain}.domain.com"). 
-// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// The scheme of the resulting url will be the first argument that was passed to Schemes: -// -// // url.String() will be "https://example.com" -// r := mux.NewRouter().NewRoute() -// url, err := r.Host("example.com") -// .Schemes("https", "http").URL() -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - var scheme, host, path string - queries := make([]string, 0, len(r.regexp.queries)) - if r.regexp.host != nil { - if host, err = r.regexp.host.url(values); err != nil { - return nil, err - } - scheme = "http" - if r.buildScheme != "" { - scheme = r.buildScheme - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(values); err != nil { - return nil, err - } - } - for _, q := range r.regexp.queries { - var query string - if query, err = q.url(values); err != nil { - return nil, err - } - queries = append(queries, query) - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - RawQuery: strings.Join(queries, "&"), - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) - if err != nil { - return nil, err - } - u := &url.URL{ - Scheme: "http", - Host: host, - } - if r.buildScheme != "" { - u.Scheme = r.buildScheme - } - return u, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// GetPathTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route doesn't have a path") - } - return r.regexp.path.template, nil -} - -// GetPathRegexp returns the expanded regular expression used to match route path. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. 
-func (r *Route) GetPathRegexp() (string, error) {
-	if r.err != nil {
-		return "", r.err
-	}
-	if r.regexp.path == nil {
-		return "", errors.New("mux: route does not have a path")
-	}
-	return r.regexp.path.regexp.String(), nil
-}
-
-// GetQueriesRegexp returns the expanded regular expressions used to match the
-// route queries.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An error will be returned if the route does not have queries.
-func (r *Route) GetQueriesRegexp() ([]string, error) {
-	if r.err != nil {
-		return nil, r.err
-	}
-	if r.regexp.queries == nil {
-		return nil, errors.New("mux: route doesn't have queries")
-	}
-	queries := make([]string, 0, len(r.regexp.queries))
-	for _, query := range r.regexp.queries {
-		queries = append(queries, query.regexp.String())
-	}
-	return queries, nil
-}
-
-// GetQueriesTemplates returns the templates used to build the
-// query matching.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An error will be returned if the route does not define queries.
-func (r *Route) GetQueriesTemplates() ([]string, error) {
-	if r.err != nil {
-		return nil, r.err
-	}
-	if r.regexp.queries == nil {
-		return nil, errors.New("mux: route doesn't have queries")
-	}
-	queries := make([]string, 0, len(r.regexp.queries))
-	for _, query := range r.regexp.queries {
-		queries = append(queries, query.template)
-	}
-	return queries, nil
-}
-
-// GetMethods returns the methods the route matches against.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An error will be returned if the route does not have methods.
-func (r *Route) GetMethods() ([]string, error) {
-	if r.err != nil {
-		return nil, r.err
-	}
-	for _, m := range r.matchers {
-		if methods, ok := m.(methodMatcher); ok {
-			return []string(methods), nil
-		}
-	}
-	return nil, errors.New("mux: route doesn't have methods")
-}
-
-// GetHostTemplate returns the template used to build the
-// route match.
-// This is useful for building simple REST API documentation and for instrumentation
-// against third-party services.
-// An error will be returned if the route does not define a host.
-func (r *Route) GetHostTemplate() (string, error) {
-	if r.err != nil {
-		return "", r.err
-	}
-	if r.regexp.host == nil {
-		return "", errors.New("mux: route doesn't have a host")
-	}
-	return r.regexp.host.template, nil
-}
-
-// GetVarNames returns the names of all variables added by regexp matchers.
-// These can be used to know which route variables should be passed into r.URL().
-func (r *Route) GetVarNames() ([]string, error) {
-	if r.err != nil {
-		return nil, r.err
-	}
-	var varNames []string
-	if r.regexp.host != nil {
-		varNames = append(varNames, r.regexp.host.varsN...)
-	}
-	if r.regexp.path != nil {
-		varNames = append(varNames, r.regexp.path.varsN...)
-	}
-	for _, regx := range r.regexp.queries {
-		varNames = append(varNames, regx.varsN...)
-	}
-	return varNames, nil
-}
-
-// prepareVars converts the route variable pairs into a map. If the route has a
-// BuildVarsFunc, it is invoked.
-func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
-	m, err := mapFromPairsToString(pairs...)
- if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go deleted file mode 100644 index 5f5c496de..000000000 --- a/vendor/github.com/gorilla/mux/test_helpers.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import "net/http" - -// SetURLVars sets the URL variables for the given request, to be accessed via -// mux.Vars for testing route behaviour. Arguments are not modified, a shallow -// copy is returned. -// -// This API should only be used for testing purposes; it provides a way to -// inject variables into the request context. Alternatively, URL variables -// can be set by making a route that captures the required variables, -// starting a server and sending the request to that server. -func SetURLVars(r *http.Request, val map[string]string) *http.Request { - return requestWithVars(r, val) -} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 000000000..402433593 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 000000000..d31b37815 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 000000000..4528059ca --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,123 @@ +version: 2 + +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - 
s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + version_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm diff --git a/vendor/github.com/moby/sys/user/LICENSE b/vendor/github.com/klauspost/compress/LICENSE similarity index 67% rename from vendor/github.com/moby/sys/user/LICENSE rename to vendor/github.com/klauspost/compress/LICENSE index d64569567..87d557477 100644 --- a/vendor/github.com/moby/sys/user/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -1,3 +1,35 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* Apache License Version 2.0, January 2004 @@ -187,7 +219,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2016-2017 The New York Times Company Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -200,3 +232,73 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
new file mode 100644
index 000000000..5125c1f26
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -0,0 +1,693 @@
+# compress
+
+This package provides various compression algorithms.
+
+* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go.
+* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
+* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
+* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) provides client and server wrappers for handling gzipped/zstd HTTP requests efficiently.
+* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
+
+[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
+[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
+
+# package usage
+
+Use `go get github.com/klauspost/compress@latest` to add it to your project.
+
+This package will support the current Go version and 2 versions back.
+
+* Use the `nounsafe` tag to disable all use of the "unsafe" package.
+* Use the `noasm` tag to disable all assembly across packages.
+
+Use the links above for more information on each. A minimal drop-in gzip sketch is also included after the changelog below.
+
+# changelog
+* Jan 16th, 2026 - [1.18.3](https://github.com/klauspost/compress/releases/tag/v1.18.3)
+	* Downstream CVE-2025-61728. See [golang/go#77102](https://github.com/golang/go/issues/77102).
+ +* Dec 1st, 2025 - [1.18.2](https://github.com/klauspost/compress/releases/tag/v1.18.2) + * flate: Fix invalid encoding on level 9 with single value input in https://github.com/klauspost/compress/pull/1115 + * flate: reduce stateless allocations by @RXamzin in https://github.com/klauspost/compress/pull/1106 + +* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) - RETRACTED + * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079 + * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059 + * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080 + * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086 + * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090 + * flate: Faster load+store https://github.com/klauspost/compress/pull/1104 + * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101 + * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103 + +* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) + * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 + * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 + * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043 + * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045 + * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048 + * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 + * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 + +
+ See changes to v1.17.x + +* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) + * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 + * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 + * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011 + * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013 + +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in 
https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+
+ See changes to v1.16.x + + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 +
+ +
+ See changes to v1.15.x + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593
+	* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+	* flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+	* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+	* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+	* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+	* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+	* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+	* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+	* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+	* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+	* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+	* Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+	* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+	* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+	* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+	* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+	* zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)
+	* zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)
+	* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+	* flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)
+	* gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)
+	* gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Asynchronous stream decompression is now faster, since the goroutine allocation splits the workload much more effectively. A typical stream will fully use 2 cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, testing is recommended when upgrading.
+
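+A minimal sketch of the synchronous mode described above, using the real
+`WithEncoderConcurrency`/`WithDecoderConcurrency` options; the buffer and payload
+names are illustrative only:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+func main() {
+	var buf bytes.Buffer
+
+	// Concurrency 1 selects the synchronous mode: the encoder runs on the
+	// calling goroutine and spawns none of its own.
+	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
+	if err != nil {
+		log.Fatal(err)
+	}
+	if _, err := enc.Write([]byte("hello zstd")); err != nil {
+		log.Fatal(err)
+	}
+	if err := enc.Close(); err != nil {
+		log.Fatal(err)
+	}
+
+	// A nil reader is fine when the decoder is only used via DecodeAll.
+	// With concurrency 1 no goroutines linger, so decoders can be pooled.
+	dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(1))
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer dec.Close()
+
+	out, err := dec.DecodeAll(buf.Bytes(), nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("%s\n", out)
+}
+```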
+ +
+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
+ +
+ See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+	See changes prior to v1.10.0
+
+* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
+* Jan 11, 2020: S2 Encode/Decode will use the provided buffer if its capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
+* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
+* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
+* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
+* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
+* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
+* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features.
+* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
+* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
+* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
+* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
+* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
+* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
+* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
+* Nov 28, 2019 (v1.9.3) Fewer allocations in stateless deflate.
+* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
+* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
+* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
+* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
+* Nov 11, 2019: Reduce inflate memory use by 1KB.
+* Nov 10, 2019: Fewer allocations in deflate bit writer.
+* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
+* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
+* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
+* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
+* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
+
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples, this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1, 2, 3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file.
+* Oct 15 2015: Added skipping on incompressible data. Random data speedup >5x.
+
+</details>
+
+
+# deflate usage
+
+The packages are drop-in replacements for standard library [deflate](https://godoc.org/github.com/klauspost/compress/flate), [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip), and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). Typical speed is about 2x that of the standard library packages. Simply replace the import path to use them:
+
+| old import | new import | Documentation |
+|------------------|---------------------------------------|-------------------------------------------------------------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and in the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages implement the same API as the standard library, so you can use the original godoc documentation: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer. The stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+To disable all assembly, add `-tags=noasm`. This works across all packages.
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression but without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) and the sketch below.
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```go
+	// replace 'ioutil.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
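+
+Direct stateless deflate, as mentioned above, can be sketched like this (`data` is a placeholder; error handling elided):
+
+```go
+	// Each Write is compressed independently; no state is kept between calls.
+	w := flate.NewStatelessWriter(ioutil.Discard) // replace 'ioutil.Discard' with your output.
+	defer w.Close()
+
+	_, err := w.Write(data)
+```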
+
+Compression is almost always worse than the fastest compression level,
+and each write will allocate (a little) memory.
+
+
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
+
+
+
+
diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
new file mode 100644
index 000000000..ca6685e2b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/SECURITY.md
@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt, you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity, this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible, please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 000000000..ea5a692d5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+	if len(b) < 16 {
+		return 0
+	}
+
+	// Correctly predicted order 1
+	hits := 0
+	lastMatch := false
+	var o1 [256]byte
+	var hist [256]int
+	c1 := byte(0)
+	for _, c := range b {
+		if c == o1[c1] {
+			// We only count a hit if there were two correct predictions in a row.
+			if lastMatch {
+				hits++
+			}
+			lastMatch = true
+		} else {
+			lastMatch = false
+		}
+		o1[c1] = c
+		c1 = c
+		hist[c]++
+	}
+
+	// Use x^0.6 to give better spread
+	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+	// Calculate histogram distribution
+	variance := float64(0)
+	avg := float64(len(b)) / 256
+
+	for _, v := range hist {
+		Δ := float64(v) - avg
+		variance += Δ * Δ
+	}
+
+	stddev := math.Sqrt(float64(variance)) / float64(len(b))
+	exp := math.Sqrt(1 / float64(len(b)))
+
+	// Subtract expected stddev
+	stddev -= exp
+	if stddev < 0 {
+		stddev = 0
+	}
+	stddev *= 1 + exp
+
+	// Use x^0.4 to give better spread
+	entropy := math.Pow(stddev, 0.4)
+
+	// 50/50 weight between prediction and histogram distribution
+	return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the number of bits minimum required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy
+func ShannonEntropyBits(b []byte) int {
+	if len(b) == 0 {
+		return 0
+	}
+	var hist [256]int
+	for _, c := range b {
+		hist[c]++
+	}
+	shannon := float64(0)
+	invTotal := 1.0 / float64(len(b))
+	for _, v := range hist[:] {
+		if v > 0 {
+			n := float64(v)
+			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+		}
+	}
+	return int(math.Ceil(shannon))
+}
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 000000000..ea7324da6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides a fast near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low-level interface that allows compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and possibly an error.
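+
+A minimal round-trip sketch (`input` is a placeholder; the special error values must be handled as normal outcomes, see the table below):
+
+```go
+	var s fse.Scratch
+	comp, err := fse.Compress(input, &s)
+	if err != nil {
+		// ErrIncompressible and ErrUseRLE are normal outcomes; store such blocks raw or as RLE.
+	}
+	comp = append([]byte(nil), comp...) // comp aliases s.Out; copy it before re-using the scratch.
+
+	out, err := fse.Decompress(comp, &s)
+```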
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, some errors will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+A lot of factors affect speed. Block sizes and the compressibility of the material are the primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64, for instance, it may be
+beneficial to transpose all your input values down by 64.
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented, likely compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
\ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go
new file mode 100644
index 000000000..f65eb3909
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitreader.go
@@ -0,0 +1,122 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+ +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 000000000..d58b3fe42 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,167 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. 
+type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. 
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := range nbBytes {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go
new file mode 100644
index 000000000..abade2d60
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bytereader.go
@@ -0,0 +1,47 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream b by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 {
+	b2 := b.b[b.off:]
+	b2 = b2[:4]
+	v3 := uint32(b2[3])
+	v2 := uint32(b2[2])
+	v1 := uint32(b2[1])
+	v0 := uint32(b2[0])
+	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
new file mode 100644
index 000000000..8c8baa4fc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -0,0 +1,683 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Compress the input bytes. Input must be < 2GB.
+// Provide a Scratch buffer to avoid memory allocations.
+// Note that the output is also kept in the scratch buffer.
+// If input is too hard to compress, ErrIncompressible is returned.
+// If input is a single byte value repeated, ErrUseRLE is returned.
+func Compress(in []byte, s *Scratch) ([]byte, error) {
+	if len(in) <= 1 {
+		return nil, ErrIncompressible
+	}
+	if len(in) > (2<<30)-1 {
+		return nil, errors.New("input too big, must be < 2GB")
+	}
+	s, err := s.prepare(in)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create histogram, if none was provided.
+	maxCount := s.maxCount
+	if maxCount == 0 {
+		maxCount = s.countSimple(in)
+	}
+	// Reset for next run.
+	s.clearCount = true
+	s.maxCount = 0
+	if maxCount == len(in) {
+		// One symbol, use RLE
+		return nil, ErrUseRLE
+	}
+	if maxCount == 1 || maxCount < (len(in)>>7) {
+		// Each symbol present maximum once or too well distributed.
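+		// (maxCount below len(in)/128, i.e. under ~0.8%, means a near-uniform distribution not worth coding.)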
+ return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. 
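+		// With tableLog above 8, four encodes could exceed the 64-bit container's headroom, so flush after every two.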
+ for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + s.bw.close() + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. +func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. 
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *Scratch) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [maxSymbolValue + 2]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for range v { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. 
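+// Symbols whose count falls at or below the low threshold get the special value -1,
+// which still reserves a single table slot but marks the symbol as low probability.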
+func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 15
+)
+
+// Decompress a block of data.
+// Provide a Scratch object to avoid memory allocations.
+// Decoded output is limited by s.DecompressLimit.
+func Decompress(b []byte, s *Scratch) ([]byte, error) {
+	s, err := s.prepare(b)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+	err = s.readNCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.buildDtable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.decompress()
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Out, nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol, and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 000000000..535cbadfd --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type Scratch struct {
+	// Private
+	count    [maxSymbolValue + 1]uint32
+	norm     [maxSymbolValue + 1]int16
+	br       byteReader
+	bits     bitReader
+	bw       bitWriter
+	ct       cTable      // Compression tables.
+	decTable []decSymbol // Decompression table.
+	maxCount int         // count of the most probable symbol
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// DecompressLimit limits the maximum decoded size acceptable.
+	// If > 0 decompression will stop when approximately this many bytes
+	// have been decoded.
+	// If 0, maximum size will be 2GB.
+	DecompressLimit int
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	zeroBits       bool   // no bits have prob > 50%.
+	clearCount     bool   // clear count
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
+}
+
+// Histogram allows populating the histogram and skipping that step in the compression.
+// It also allows inspecting the histogram when compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *Scratch) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = 255
+	}
+	if s.TableLog == 0 {
+		s.TableLog = defaultTablelog
+	}
+	if s.TableLog > maxTableLog {
+		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	s.br.init(in)
+	if s.DecompressLimit == 0 {
+		// Max size 2GB.
+		s.DecompressLimit = (2 << 30) - 1
+	}
+
+	return s, nil
+}
+
+// tableStep returns the next table index.
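+// The step (tableSize*5/8 + 3) is odd and therefore coprime to the power-of-two
+// table size, so repeated stepping modulo the size visits every slot exactly once.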
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 000000000..aff942205
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 000000000..b3d262958
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 000000000..8b6e5c663
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, exploiting OoO (Out of Order) execution on multiple ALUs
+(Arithmetic Logic Units) to achieve extremely fast compression and decompression speeds.
+
+It can be used for compressing input with a lot of similar values down to the smallest number of bytes.
+It does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low-level interface that allows compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and possibly an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
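+
+A minimal compression round looks like this (a sketch; the input data and the
+handling of each case are illustrative only):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("gopher"), 128)
+	var s huff0.Scratch
+	out, reused, err := huff0.Compress1X(in, &s)
+	switch err {
+	case nil:
+		fmt.Printf("%d -> %d bytes (table reused: %v)\n", len(in), len(out), reused)
+	case huff0.ErrIncompressible:
+		// Store the block uncompressed.
+	case huff0.ErrUseRLE:
+		// Store the repeated value and a count.
+	default:
+		panic(err) // internal error
+	}
+}
+```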
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do, however, note that this information is *not* stored in the output block, and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the `CompressXX` call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested; it will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 000000000..bfc7a523d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,224 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+// bitReaderBytes reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
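+// For example: if the final byte of a stream is 0b0001_0110, the highest set
+// bit is bit 4, so init skips the three zero padding bits plus the marker bit
+// itself before the first symbol is decoded.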
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekByteFast requires that at least one byte is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + low := le.Load32(b.in, b.off-4) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = le.Load64(b.in, b.off-8) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + low := le.Load32(b.in, b.off-4) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. 
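+// The usual decode step peeks tableLog bits to index the decode table, then
+// advances by however many bits the matched entry actually consumed.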
+func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + low := le.Load32(b.in, b.off-4) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + b.value = le.Load64(b.in, b.off-8) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + low := le.Load32(b.in, b.off-4) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 000000000..41db94cde --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,102 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
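+// Both codes are packed into one word before a single OR into the bit
+// container: av's code occupies the low bits and bv's code is shifted up by
+// av's bit length, saving one container update per symbol pair.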
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
+	encA := ct[av]
+	encB := ct[bv]
+	sh := b.nBits & 63
+	combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63))
+	b.bitContainer |= combined << sh
+	if false {
+		if encA.nBits == 0 {
+			panic("nbitsA 0")
+		}
+		if encB.nBits == 0 {
+			panic("nbitsB 0")
+		}
+	}
+	b.nBits += encA.nBits + encB.nBits
+}
+
+// encFourSymbols adds up to 32 bits from four symbols.
+// It will not check if there is space for them,
+// so the caller must ensure that b has been flushed recently.
+func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
+	bitsA := encA.nBits
+	bitsB := bitsA + encB.nBits
+	bitsC := bitsB + encC.nBits
+	bitsD := bitsC + encD.nBits
+	combined := uint64(encA.val) |
+		(uint64(encB.val) << (bitsA & 63)) |
+		(uint64(encC.val) << (bitsB & 63)) |
+		(uint64(encD.val) << (bitsC & 63))
+	b.bitContainer |= combined << (b.nBits & 63)
+	b.nBits += bitsD
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := range nbBytes {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
new file mode 100644
index 000000000..a97cf1b5d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -0,0 +1,742 @@
+package huff0
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"sync"
+)
+
+// Compress1X will compress the input.
+// The output can be decoded using Decompress1X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	return compress(in, s, s.compress1X)
+}
+
+// Compress4X will compress the input. The input is split into 4 independent blocks
+// and compressed similarly to Compress1X.
+// The output can be decoded using Decompress4X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	if false {
+		// TODO: compress4Xp only slightly faster.
+		const parallelThreshold = 8 << 10
+		if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 {
+			return compress(in, s, s.compress4X)
+		}
+		return compress(in, s, s.compress4Xp)
+	}
+	return compress(in, s, s.compress4X)
+}
+
+func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) {
+	// Nuke previous table if we cannot reuse anyway.
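+	// Policy summary for the branches below: ReusePolicyNone always builds a
+	// fresh table, ReusePolicyPrefer and ReusePolicyMust try the previous
+	// table first, and ReusePolicyAllow builds a new table and keeps whichever
+	// is estimated to be smaller.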
+ if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. + s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. 
+ tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src), nil +} + +func (s *Scratch) compress1xDo(dst, src []byte) []byte { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + bw.close() + return bw.out +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := range 4 { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + idx := len(s.Out) + s.Out = s.compress1xDo(s.Out, toDo) + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + wg.Add(4) + for i := range 4 { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. 
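+		// toDo is a fresh variable on every iteration, so each goroutine
+		// captures its own segment; i is passed as an argument rather than
+		// captured.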
+ go func(i int) { + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := range 4 { + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
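+	// huffNode0[0] is given a count of 1<<31 below and acts as a sentinel:
+	// when lowS reaches -1, huffNode0[lowS+1] reads this maximal count, so the
+	// leaf-versus-internal-node comparisons need no explicit bounds checks.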
+	huffNode0 := s.nodes[0 : huffNodesLen+1]
+
+	for huffNode[nonNullRank].count() == 0 {
+		nonNullRank--
+	}
+
+	lowS := int16(nonNullRank)
+	nodeRoot := nodeNb + lowS - 1
+	lowN := nodeNb
+	huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
+	huffNode[lowS].setParent(nodeNb)
+	huffNode[lowS-1].setParent(nodeNb)
+	nodeNb++
+	lowS -= 2
+	for n := nodeNb; n <= nodeRoot; n++ {
+		huffNode[n].setCount(1 << 30)
+	}
+	// fake entry, strong barrier
+	huffNode0[0].setCount(1 << 31)
+
+	// create parents
+	for nodeNb <= nodeRoot {
+		var n1, n2 int16
+		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
+			n1 = lowS
+			lowS--
+		} else {
+			n1 = lowN
+			lowN++
+		}
+		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
+			n2 = lowS
+			lowS--
+		} else {
+			n2 = lowN
+			lowN++
+		}
+
+		huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
+		huffNode0[n1+1].setParent(nodeNb)
+		huffNode0[n2+1].setParent(nodeNb)
+		nodeNb++
+	}
+
+	// distribute weights (unlimited tree height)
+	huffNode[nodeRoot].setNbBits(0)
+	for n := nodeRoot - 1; n >= startNode; n-- {
+		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
+	}
+	for n := uint16(0); n <= nonNullRank; n++ {
+		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
+	}
+	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
+	maxNbBits := s.actualTableLog
+
+	// fill result into tree (val, nbBits)
+	if maxNbBits > tableLogMax {
+		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
+	}
+	var nbPerRank [tableLogMax + 1]uint16
+	var valPerRank [16]uint16
+	for _, v := range huffNode[:nonNullRank+1] {
+		nbPerRank[v.nbBits()]++
+	}
+	// determine starting value per rank
+	{
+		min := uint16(0)
+		for n := maxNbBits; n > 0; n-- {
+			// get starting value within each rank
+			valPerRank[n] = min
+			min += nbPerRank[n]
+			min >>= 1
+		}
+	}
+
+	// push nbBits per symbol, symbol order
+	for _, v := range huffNode[:nonNullRank+1] {
+		s.cTable[v.symbol()].nBits = v.nbBits()
+	}
+
+	// assign value within rank, symbol order
+	t := s.cTable[:s.symbolLen]
+	for n, val := range t {
+		nbits := val.nBits & 15
+		v := valPerRank[nbits]
+		t[n].val = v
+		valPerRank[nbits] = v + 1
+	}
+
+	return nil
+}
+
+// huffSort will sort symbols in decreasing order of count.
+func (s *Scratch) huffSort() {
+	type rankPos struct {
+		base    uint32
+		current uint32
+	}
+
+	// Clear nodes
+	nodes := s.nodes[:huffNodesLen+1]
+	s.nodes = nodes
+	nodes = nodes[1 : huffNodesLen+1]
+
+	// Sort into buckets based on length of symbol count.
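+	// Buckets are indexed by the bit length of each count (highBit32(count+1));
+	// a prefix sum turns bucket sizes into start offsets, and symbols are then
+	// insertion-sorted within their bucket.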
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 000000000..7d0efa881 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1161 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
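+//
+// A typical decode sequence is sketched below; block framing and tracking of
+// dstSize are assumed to be handled by the caller:
+//
+//	s, data, err := ReadTable(block, s)
+//	if err != nil {
+//		return err
+//	}
+//	// The capacity of the destination bounds the decoded size.
+//	dst, err := s.Decoder().Decompress1X(make([]byte, 0, dstSize), data)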
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + 
single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 0 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := range 3 { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
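+			// Streams 0 and 1 are decoded in lockstep: the two independent
+			// dependency chains can execute in parallel on superscalar CPUs,
+			// emitting 4 bytes per stream per round.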
+			const stream = 0
+			const stream2 = 1
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
+
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
+
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
+		}
+
+		off += 4
+
+		if off == 0 {
+			if bufoff > dstEvery {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			// There must be at least 3 buffers left.
+			if len(out)-bufoff < dstEvery*3 {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := min(offset+remainBytes, len(out)) + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := range 3 { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+			const stream = 0
+			const stream2 = 1
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
+
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
+
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
+		}
+
+		off += 4
+
+		if off == 0 {
+			if bufoff > dstEvery {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			// There must at least be 3 buffers left.
+			if len(out)-bufoff < dstEvery*3 {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			// copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			// Return the temp buffer to the pool, as on every other error path.
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := min(offset+remainBytes, len(out))
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			if br.finished() {
+				d.bufs.Put(buf)
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			d.bufs.Put(buf)
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 000000000..99ddd4af9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,223 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
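+// The Go wrappers below prepare a context struct for the asm kernels and
+// finish the last few symbols of every stream in pure Go.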
+package huff0
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/klauspost/compress/internal/cpuinfo"
+)
+
+// decompress4x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+//
+//go:noescape
+func decompress4x_main_loop_amd64(ctx *decompress4xContext)
+
+// decompress4x_8b_main_loop_amd64 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8 which decodes 4 entries
+// per loop.
+//
+//go:noescape
+func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
+
+// fallback8BitSize is the output size below which the pure Go version is faster.
+const fallback8BitSize = 800
+
+type decompress4xContext struct {
+	pbr      *[4]bitReaderShifted
+	peekBits uint8
+	out      *byte
+	dstEvery int
+	tbl      *dEntrySingle
+	decoded  int
+	limit    *byte
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	if len(src) < 6+(4*1) {
+		return nil, errors.New("input too small")
+	}
+
+	use8BitTables := d.actualTableLog <= 8
+	if cap(dst) < fallback8BitSize && use8BitTables {
+		return d.decompress4X8bit(dst, src)
+	}
+
+	var br [4]bitReaderShifted
+	// Decode "jump table"
+	start := 6
+	for i := range 3 {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	var decoded int
+
+	if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
+		ctx := decompress4xContext{
+			pbr:      &br,
+			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+			out:      &out[0],
+			dstEvery: dstEvery,
+			tbl:      &single[0],
+			limit:    &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
+		}
+		if use8BitTables {
+			decompress4x_8b_main_loop_amd64(&ctx)
+		} else {
+			decompress4x_main_loop_amd64(&ctx)
+		}
+
+		decoded = ctx.decoded
+		out = out[decoded/4:]
+	}
+
+	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := min(offset+remainBytes, len(out))
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			br.fill()
+			if offset >= endsAt {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// decompress1x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+
+// decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+
+type decompress1xContext struct {
+	pbr      *bitReaderShifted
+	peekBits uint8
+	out      *byte
+	outCap   int
+	tbl      *dEntrySingle
+	decoded  int
+}
+
+// Error reported by asm implementations
+const error_max_decoded_size_exceeded = -1
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	var br bitReaderShifted
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:maxDecodedSize]
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+
+	if maxDecodedSize >= 4 {
+		ctx := decompress1xContext{
+			pbr:      &br,
+			out:      &dst[0],
+			outCap:   maxDecodedSize,
+			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+			tbl:      &d.dt.single[0],
+		}
+
+		if cpuinfo.HasBMI2() {
+			decompress1x_main_loop_bmi2(&ctx)
+		} else {
+			decompress1x_main_loop_amd64(&ctx)
+		}
+		if ctx.decoded == error_max_decoded_size_exceeded {
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+
+		dst = dst[:ctx.decoded]
+	}
+
+	// br < 8, so uint8 is fine
+	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+	for bitsLeft > 0 {
+		br.fill()
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= nBits
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	return dst, br.close()
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 000000000..c4c7ab2d1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,830 @@
+// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
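+// (These kernels are driven by decompress_amd64.go: each consumes its
+// decompress4xContext/decompress1xContext and reports progress back through
+// the decoded field; -1 in that field signals the size-limit error.)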
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + 
MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + 
+ // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + 
SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), 
CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 000000000..908c17de6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 000000000..67d9e05b6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
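+//
+// A minimal single-stream round trip looks roughly like this (illustrative
+// sketch only; it assumes the package-level Compress1X and ReadTable helpers
+// from upstream huff0, which this patch does not include, and elides error
+// handling):
+//
+//	var s huff0.Scratch
+//	comp, _, err := huff0.Compress1X(in, &s) // may report ErrIncompressible or ErrUseRLE
+//	dec, remain, err := huff0.ReadTable(comp, nil)
+//	out, err := dec.Decompress1X(remain)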
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+	"sync"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is the maximum input size for a single block uncompressed.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if the input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned when the decoded output would exceed
+	// the configured maximum output size.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+// ReusePolicy controls whether a table from a previous block may be reused.
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+
+	// ReusePolicyMust must allow reuse and produce smaller output.
+	ReusePolicyMust
+)
+
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	srcLen int
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse will specify the reuse policy
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that should at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction should then at least be (input size >> WantLogLess).
+	// If WantLogLess == 0 any improvement will do.
+	WantLogLess uint8
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.srcLen = len(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := range maxSymbolValue { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
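+	// A header byte with the high bit set marks this raw encoding; the FSE
+	// branch above instead stores len(b) (always < 128) as its header byte.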
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := range maxSymbolValue { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 000000000..3954c5121 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 000000000..e802579c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 000000000..4465fbe9e --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/le/le.go b/vendor/github.com/klauspost/compress/internal/le/le.go new file mode 100644 index 000000000..e54909e16 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/le.go @@ -0,0 +1,5 @@ +package le + +type Indexer interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 +} diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go new file mode 100644 index 000000000..4f2a0d8c5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -0,0 +1,42 @@ +//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine + +package le + +import ( + "encoding/binary" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + return b[i] +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + return binary.LittleEndian.Uint16(b[i:]) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +// Load64 will load from b at index i. +func Load64[I Indexer](b []byte, i I) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + binary.LittleEndian.PutUint16(b, v) +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + binary.LittleEndian.PutUint32(b, v) +} + +// Store64 will store v at b. 
+func Store64[I Indexer](b []byte, i I, v uint64) { + binary.LittleEndian.PutUint64(b[i:], v) +} diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go new file mode 100644 index 000000000..218a38bc4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -0,0 +1,52 @@ +// We enable 64 bit LE platforms: + +//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine + +package le + +import ( + "unsafe" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + //return binary.LittleEndian.Uint32(b[i:]) + //return *(*uint32)(unsafe.Pointer(&b[i])) + return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load64 will load from b at index i. +func Load64[I Indexer](b []byte, i I) uint64 { + //return binary.LittleEndian.Uint64(b[i:]) + //return *(*uint64)(unsafe.Pointer(&b[i])) + return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store64 will store v at b[i:]. +func Store64[I Indexer](b []byte, i I, v uint64) { + *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) = v +} diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE similarity index 83% rename from vendor/github.com/gorilla/mux/LICENSE rename to vendor/github.com/klauspost/compress/internal/snapref/LICENSE index bb9d80bc9..6050c10f4 100644 --- a/vendor/github.com/gorilla/mux/LICENSE +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -1,16 +1,16 @@ -Copyright (c) 2023 The Gorilla Authors. All rights reserved. +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 000000000..a2c82fcd2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := range len(magicBody) { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 000000000..77395a6b8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. 
Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 000000000..860a99416 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,291 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. 
It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. 
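A sketch of that reuse pattern, using the public `github.com/golang/snappy` equivalent of this internal package: `Reset` clears all state, including the post-`Close` error, so one buffered `Writer` can serve many outputs.

```Go
package main

import (
	"os"

	"github.com/golang/snappy" // assumed public counterpart of snapref
)

// compressInto writes payload to each named file with a single reusable
// buffered Writer. Close flushes the final chunks; Reset then clears all
// state (and the errClosed error) before the next file.
func compressInto(payload []byte, names ...string) error {
	w := snappy.NewBufferedWriter(nil)
	for _, name := range names {
		f, err := os.Create(name)
		if err != nil {
			return err
		}
		w.Reset(f)
		if _, err := w.Write(payload); err != nil {
			f.Close()
			return err
		}
		if err := w.Close(); err != nil {
			f.Close()
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := compressInto([]byte("hello snappy"), "a.sz", "b.sz"); err != nil {
		panic(err)
	}
}
```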
+func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
+func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 000000000..2754bac6f --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. 
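Since `snapref` is internal, outside callers cannot reach `EncodeBlockInto`; but as a hypothetical in-package sketch of its calling convention: the destination must be pre-sized with `MaxEncodedLen`, and the caller is responsible for the uvarint length header that `Encode` would normally write first.

```Go
// Hypothetical in-package helper (compiles only inside package snapref).
package snapref

import "encoding/binary"

// encodeWithHeader produces a complete Snappy block: the varint-encoded
// decoded length, followed by the tag bytes emitted by EncodeBlockInto.
func encodeWithHeader(src []byte) []byte {
	max := MaxEncodedLen(len(src))
	if max < 0 {
		return nil // src too large to encode
	}
	dst := make([]byte, binary.MaxVarintLen32+max)
	n := binary.PutUvarint(dst, uint64(len(src)))
	d := EncodeBlockInto(dst[n:], src)
	if d == 0 && len(src) > 0 {
		return nil // dst too small; cannot happen with the sizing above
	}
	return dst[:n+d]
}
```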
+func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
+ d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 000000000..34d01f4aa --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. 
+ +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 000000000..81bda5e29 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,3 @@ +module github.com/klauspost/compress + +go 1.22 diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 000000000..c11d7fa28 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. 
+ +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features. + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. + +Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part +of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change +in the future. So if you want to limit concurrency for future updates, specify the concurrency +you would like. + +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. 
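For example, a fully synchronous stream compressor might look like this (a sketch using the option documented above):

```Go
import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// CompressSync compresses in to out without spawning encoder goroutines:
// each completed block is compressed on the calling goroutine.
func CompressSync(in io.Reader, out io.Writer) error {
	enc, err := zstd.NewWriter(out, zstd.WithEncoderConcurrency(1))
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, in); err != nil {
		enc.Close()
		return err
	}
	return enc.Close()
}
```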
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks you should take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some examples comparing speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. +There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. + +For streaming use a simple setup could look like this: + +```Go +import "github.com/klauspost/compress/zstd" + +func Decompress(in io.Reader, out io.Writer) error { + d, err := zstd.NewReader(in) + if err != nil { + return err + } + defer d.Close() + + // Copy content... + _, err = io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. 
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data
+only as it is requested.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+A dictionary will be used automatically for the data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time. Just remember to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use the `Reset(r io.Reader) error` method to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. +In this case no unneeded allocations should be made. + +### Concurrency + +The buffer decoder does everything on the same goroutine and does nothing concurrently. +It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. + +The stream decoder will create goroutines that: + +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. + +The concurrency level will, for streams, determine how many blocks ahead the compression will start. + +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + +### Benchmarks + +The first two are streaming decodes and the last are smaller inputs. + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + +``` +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2022, but this may be out of date. + +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. +While this isn't widely supported it can be useful for internal files. + +To support the compression and decompression of these files you must register a compressor and decompressor. + +It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT +use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. 
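A sketch of the per-archive registration recommended above, using the `ZipCompressor` helper and the WinZip method ID exported by this package (error handling is minimal; names of the surrounding helper are illustrative):

```Go
import (
	"archive/zip"
	"bytes"

	"github.com/klauspost/compress/zstd"
)

// WriteZstdZip registers zstd on one zip.Writer only, avoiding the
// global (and panic-prone, if duplicated) registration functions.
func WriteZstdZip(payload []byte) ([]byte, error) {
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())

	w, err := zw.CreateHeader(&zip.FileHeader{Name: "data.bin", Method: zstd.ZipMethodWinZip})
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(payload); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```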
+ +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 000000000..d41e3e170 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,135 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" + "math/bits" + + "github.com/klauspost/compress/internal/le" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + value uint64 // Maybe use [16]byte, but shifting is awkward. + cursor int // offset where next read should end + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.cursor = len(in) + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) + b.bitsRead -= 32 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + b.cursor -= 8 + b.value = le.Load64(b.in, b.cursor) + b.bitsRead = 0 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.cursor >= 4 { + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) + b.bitsRead -= 32 + return + } + + b.bitsRead -= uint8(8 * b.cursor) + for b.cursor > 0 { + b.cursor -= 1 + b.value = (b.value << 8) | uint64(b.in[b.cursor]) + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.cursor == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return 8*uint(b.cursor) + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + b.cursor = 0 + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 000000000..b22b297e6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,112 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. +func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. 
+// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := range nbBytes { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 000000000..2329e996f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,712 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "hash/crc32" + "io" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. + maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() any { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() any { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool + + // Frame to use for singlethreaded decoding. 
+ // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. + RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. + if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. 
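+// Currently a no-op: buffers are simply dropped and reclaimed by the
+// garbage collector.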
+func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
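+ // The 3-byte header is read little endian: 2 bits Literals_Block_Type,
+ // 2 bits Size_Format, 10 bits Regenerated_Size, 10 bits Compressed_Size.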
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
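+ // The full slice expression [:0:litRegenSize] below caps capacity, so
+ // huff0 cannot grow the output past the declared regenerated size.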
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
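+ // Literals-only block: the literals were appended above; nothing to execute.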
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } + for i := range uint(3) { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to 0x%x, code: %v", symb, v) + } + case compModeFSE: + if debugDecoder { + println("Reading table for", tableIndex(i)) + } + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil + } + 
br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 000000000..fd35ea148 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,892 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + "slices" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. 
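+// With lowMem set, the buffers start at 1KB and grow on demand; otherwise
+// the literal and output buffers are pre-sized to maxCompressedBlockSize.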
+func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. 
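+// The header is 1, 2 or 3 bytes, corresponding to the 5, 12 and 20 bit
+// size formats (see decodeLiterals).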
+func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
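+ // Codes 1-3 select a recent (repeat) offset; any other offset is stored
+ // as the real offset plus 3. With zero literals the numbering shifts,
+ // and recentOffsets[0]-1 becomes code 3.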
+ if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 16 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) 
+ return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + cnt := int(slices.Max(hist[:maxSym])) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 6) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. 
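+ // Four streams let the decoder use the interleaved Decompress4X path.
+ // Literals of 17 bytes up to 1KB use a single stream; anything smaller
+ // (or raw mode) skips entropy coding entirely.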
+ out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 16 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... + if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). 
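+ // The adjustment below adds roughly nSize/16 plus 16 bits, so a new
+ // table must beat the alternatives by about 2 bytes to be selected.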
+ nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... 
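+ // (The FSE bitstream is read back to front, so the last sequence was
+ // encoded first above and the loop now walks backwards.)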
+ for seq >= 0 {
+ s = b.sequences[seq]
+
+ ofB := ofTT[s.ofCode]
+ wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+ //of.encode(ofB)
+ nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+ dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+ wr.addBits16NC(of.state, uint8(nbBitsOut))
+ of.state = of.stateTable[dstState]
+
+ // Accumulate extra bits.
+ outBits := ofB.outBits & 31
+ extraBits := uint64(s.offset & bitMask32[outBits])
+ extraBitsN := outBits
+
+ mlB := mlTT[s.mlCode]
+ //ml.encode(mlB)
+ nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+ dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+ wr.addBits16NC(ml.state, uint8(nbBitsOut))
+ ml.state = ml.stateTable[dstState]
+
+ outBits = mlB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ llB := llTT[s.llCode]
+ //ll.encode(llB)
+ nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+ dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+ wr.addBits16NC(ll.state, uint8(nbBitsOut))
+ ll.state = ll.stateTable[dstState]
+
+ outBits = llB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ wr.flush32()
+ wr.addBits64NC(extraBits, extraBitsN)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ seq--
+ }
+ ml.flush(mlEnc.actualTableLog)
+ of.flush(ofEnc.actualTableLog)
+ ll.flush(llEnc.actualTableLog)
+ wr.close()
+ b.output = wr.out
+
+ // Maybe even add a bigger margin.
+ if len(b.output)-3-bhOffset >= b.size {
+ // Discard and encode as raw block.
+ b.output = b.encodeRawTo(b.output[:bhOffset], org)
+ b.popOffsets()
+ b.litEnc.Reuse = huff0.ReusePolicyNone
+ return nil
+ }
+
+ // Size is output minus block header.
+ bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+ if debugEncoder {
+ println("Rewriting block header", bh)
+ }
+ _ = bh.appendTo(b.output[bhOffset:bhOffset])
+ b.coders.setPrev(llEnc, mlEnc, ofEnc)
+ return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+ if len(b.sequences) == 0 {
+ // nothing to do
+ return
+ }
+ if len(b.sequences) > math.MaxUint16 {
+ panic("can only encode up to 64K sequences")
+ }
+ // No bounds checks after here:
+ llH := b.coders.llEnc.Histogram()
+ ofH := b.coders.ofEnc.Histogram()
+ mlH := b.coders.mlEnc.Histogram()
+ for i := range llH {
+ llH[i] = 0
+ }
+ for i := range ofH {
+ ofH[i] = 0
+ }
+ for i := range mlH {
+ mlH[i] = 0
+ }
+
+ var llMax, ofMax, mlMax uint8
+ for i := range b.sequences {
+ seq := &b.sequences[i]
+ v := llCode(seq.litLen)
+ seq.llCode = v
+ llH[v]++
+ if v > llMax {
+ llMax = v
+ }
+
+ v = ofCode(seq.offset)
+ seq.ofCode = v
+ ofH[v]++
+ if v > ofMax {
+ ofMax = v
+ }
+
+ v = mlCode(seq.matchLen)
+ seq.mlCode = v
+ mlH[v]++
+ if v > mlMax {
+ mlMax = v
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+ }
+ }
+ }
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+ }
+ if debugAsserts && ofMax > maxOffsetBits {
+ panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+ }
+ if debugAsserts && llMax > maxLiteralLengthSymbol {
+ panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+ }
+
+ b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
+ b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
+ b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 000000000..01a01e486
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
+ +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 000000000..55a388553 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. 
+ readByte() (byte, error) + + // Skip n bytes. + skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := io.ReadFull(r.r, r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 000000000..0e59a242d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. 
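+// If fewer than 4 bytes remain, the remaining bytes are returned
+// zero-extended in the low bits.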
+func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 000000000..6a5a2988b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,261 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. 
+ // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { + *h = Header{} + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return nil, ErrMagicMismatch + } + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return in[4:], nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return nil, errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := 
fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return in, nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return in, nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 000000000..c7e500f02 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,957 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. 
+ current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Initialize dict map if needed. + if d.o.dicts == nil { + d.o.dicts = make(map[uint32]*dict) + } + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes read and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. 
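+//
+// A minimal reuse sketch (illustrative, not part of upstream; r1 and r2 are
+// assumed io.Readers supplied by the caller):
+//
+//	dec, err := zstd.NewReader(r1)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer dec.Close()
+//	if _, err := io.Copy(io.Discard, dec); err != nil {
+//		// handle error
+//	}
+//	if err := dec.Reset(r2); err != nil { // reuse the decoder for a second stream
+//		// handle error
+//	}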
+func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. + d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// ResetWithOptions will reset the decoder and apply the given options +// for the next stream or DecodeAll operation. +// Options are applied on top of the existing options. +// Some options cannot be changed on reset and will return an error. +func (d *Decoder) ResetWithOptions(r io.Reader, opts ...DOption) error { + d.o.resetOpt = true + defer func() { d.o.resetOpt = false }() + for _, o := range opts { + if err := o(&d.o); err != nil { + return err + } + } + return d.Reset(r) +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. 
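+//
+// Illustrative sketch (not part of upstream; w is an assumed io.Writer and r
+// the compressed input):
+//
+//	dec, err := zstd.NewReader(r)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer dec.Close()
+//	n, err := dec.WriteTo(w) // stream-decompress r into w without buffering it all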
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. + block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + frame.history.decoders.br.cursor = 0 + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := min( + // Cap to 1 MB. + len(input)*2, 1<<20) + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. 
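+ // Once set, the error is sticky: every later call is a no-op until
+ // Reset installs a fresh stream and clears it.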
+ return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... + d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + d.current.crc.Write(next.b) + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d 
*Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. +func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... 
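+ // This stage drains seqDecode, rebuilding the sequence decoders whenever
+ // a block carries a new frame history, then forwards each block to the
+ // execute stage through seqExecute.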
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... + frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && 
decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.o.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. 
+ err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 000000000..537627a07 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,213 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts map[uint32]*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int + resetOpt bool +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +// Cannot be changed with ResetWithOptions. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { + if o.resetOpt && b != o.lowMem { + return errors.New("WithDecoderLowmem cannot be changed on Reset") + } + o.lowMem = b + return nil + } +} + +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// The value supplied must be at least 0. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. +// Cannot be changed with ResetWithOptions. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 0") + } + newVal := n + if n == 0 { + newVal = runtime.GOMAXPROCS(0) + } + if o.resetOpt && newVal != o.concurrent { + return errors.New("WithDecoderConcurrency cannot be changed on Reset") + } + o.concurrent = newVal + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +// Can be changed with ResetWithOptions. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. 
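+//
+// A minimal sketch (illustrative; r and dictBytes are assumed caller values):
+//
+//	dec, err := zstd.NewReader(r, zstd.WithDecoderDicts(dictBytes))
+//	if err != nil {
+//		// handle error
+//	}
+//	defer dec.Close()
+//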
+// Can be changed with ResetWithOptions. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + if o.dicts == nil { + o.dicts = make(map[uint32]*dict) + } + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts[d.id] = d + } + return nil + } +} + +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +// Can be changed with ResetWithOptions. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + if o.dicts == nil { + o.dicts = make(map[uint32]*dict) + } + o.dicts[id] = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +// Can be changed with ResetWithOptions. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. +// Can be changed with ResetWithOptions. +func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +// Cannot be changed with ResetWithOptions. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + if o.resetOpt && size != o.decodeBufsBelow { + return errors.New("WithDecodeBuffersBelow cannot be changed on Reset") + } + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +// Can be changed with ResetWithOptions. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} + +// WithDecoderDictDelete removes dictionaries by ID. +// If no ids are passed, all dictionaries are deleted. +// Should be used with ResetWithOptions. 
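+//
+// Sketch (illustrative; dec is an existing *Decoder and r the next stream):
+//
+//	// Drop dictionary 42, then start decoding the next stream.
+//	if err := dec.ResetWithOptions(r, zstd.WithDecoderDictDelete(42)); err != nil {
+//		// handle error
+//	}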
+func WithDecoderDictDelete(ids ...uint32) DOption { + return func(o *decoderOptions) error { + if len(ids) == 0 { + clear(o.dicts) + } + for _, id := range ids { + delete(o.dicts, id) + } + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 000000000..2ffbfdf37 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,559 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. + DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...any) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) + } + } + printf := func(s string, args ...any) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...any) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) 
+ } + } + + if int64(len(hist)) > dictMaxLength { + return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) + } + if len(hist) < 8 { + return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) + } + if len(contents) == 0 { + return nil, errors.New("no content provided") + } + d := dict{ + id: o.ID, + litEnc: nil, + llDec: sequenceDec{}, + ofDec: sequenceDec{}, + mlDec: sequenceDec{}, + offsets: o.Offsets, + content: hist, + } + block := blockEnc{lowMem: false} + block.init() + enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) + if o.Level != 0 { + eOpts := encoderOptions{ + level: o.Level, + blockSize: maxMatchLen, + windowSize: maxMatchLen, + dict: &d, + lowMem: false, + } + enc = eOpts.encoder() + } else { + o.Level = SpeedBestCompression + } + var ( + remain [256]int + ll [256]int + ml [256]int + of [256]int + ) + addValues := func(dst *[256]int, src []byte) { + for _, v := range src { + dst[v]++ + } + } + addHist := func(dst *[256]int, src *[256]uint32) { + for i, v := range src { + dst[i] += int(v) + } + } + seqs := 0 + nUsed := 0 + litTotal := 0 + newOffsets := make(map[uint32]int, 1000) + for _, b := range contents { + block.reset(nil) + if len(b) < 8 { + continue + } + nUsed++ + enc.Reset(&d, true) + enc.Encode(&block, b) + addValues(&remain, block.literals) + litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } + seqs += len(block.sequences) + block.genCodes() + addHist(&ll, block.coders.llEnc.Histogram()) + addHist(&ml, block.coders.mlEnc.Histogram()) + addHist(&of, block.coders.ofEnc.Histogram()) + for i, seq := range block.sequences { + if i > 3 { + break + } + offset := seq.offset + if offset == 0 { + continue + } + if int(offset) >= len(o.History) { + continue + } + if offset > 3 { + newOffsets[offset-3]++ + } else { + newOffsets[uint32(o.Offsets[offset-1])]++ + } + } + } + // Find most used offsets. + var sortedOffsets []uint32 + for k := range newOffsets { + sortedOffsets = append(sortedOffsets, k) + } + sort.Slice(sortedOffsets, func(i, j int) bool { + a, b := sortedOffsets[i], sortedOffsets[j] + if a == b { + // Prefer the longer offset + return sortedOffsets[i] > sortedOffsets[j] + } + return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] + }) + if len(sortedOffsets) > 3 { + if debug { + print("Offsets:") + for i, v := range sortedOffsets { + if i > 20 { + break + } + printf("[%d: %d],", v, newOffsets[v]) + } + println("") + } + + sortedOffsets = sortedOffsets[:3] + } + for i, v := range sortedOffsets { + o.Offsets[i] = int(v) + } + if debug { + println("New repeat offsets", o.Offsets) + } + + if nUsed == 0 || seqs == 0 { + return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) + } + if debug { + println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) + } + if seqs/nUsed < 512 { + // Use 512 as minimum. + nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } + } + copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { + hist := dst.Histogram() + var maxSym uint8 + var maxCount int + var fakeLength int + for i, v := range src { + if v > 0 { + v = v / nUsed + if v == 0 { + v = 1 + } + } + if v > maxCount { + maxCount = v + } + if v != 0 { + maxSym = uint8(i) + } + fakeLength += v + hist[i] = uint32(v) + } + + // Ensure we aren't trying to represent RLE. 
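+ // A histogram with all mass on one symbol cannot be normalized into an
+ // FSE table, so a second symbol is given a count of one below.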
+ if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := min(litTotal, huff0.BlockSizeMax/2) + huffBuff := make([]byte, 0, avgSize) + // Target size + div := max(litTotal/avgSize, 1) + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := range 255 { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := range 128 { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
+ } + } + } + + var out bytes.Buffer + out.Write([]byte(dictMagic)) + out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) + out.Write(scratch.OutTable) + if debug { + println("huff table:", len(scratch.OutTable), "bytes") + println("of table:", len(ofTable), "bytes") + println("ml table:", len(mlTable), "bytes") + println("ll table:", len(llTable), "bytes") + } + out.Write(ofTable) + out.Write(mlTable) + out.Write(llTable) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) + out.Write(hist) + if debug { + _, err := loadDict(out.Bytes()) + if err != nil { + panic(err) + } + i, err := InspectDictionary(out.Bytes()) + if err != nil { + panic(err) + } + println("ID:", i.ID()) + println("Content size:", i.ContentSize()) + println("Encoder:", i.LitEncoder() != nil) + println("Offsets:", i.Offsets()) + var totalSize int + for _, b := range contents { + totalSize += len(b) + } + + encWith := func(opts ...EOption) int { + enc, err := NewWriter(nil, opts...) + if err != nil { + panic(err) + } + defer enc.Close() + var dst []byte + var totalSize int + for _, b := range contents { + dst = enc.EncodeAll(b, dst[:0]) + totalSize += len(dst) + } + return totalSize + } + plain := encWith(WithEncoderLevel(o.Level)) + withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) + println("Input size:", totalSize) + println("Plain Compressed:", plain) + println("Dict Compressed:", withDict) + println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 000000000..c1192ec38 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,171 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 7 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := max( + // Keep minimum window. 
+ int32(1)<<uint(bits.Len(uint(size))), 1024)
+ return b
+ }
+ return e.maxMatchOff
+}
+
+// Block returns the current block.
+func (e *fastBase) Block() *blockEnc {
+ return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+ if debugAsserts && e.cur > e.bufferReset {
+ panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
+ }
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ e.ensureHist(len(src))
+ } else {
+ if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
+ panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
+ }
+ // Move down
+ offset := int32(len(e.hist)) - e.maxMatchOff
+ copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:e.maxMatchOff]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// ensureHist will ensure that history can keep at least this many bytes.
+func (e *fastBase) ensureHist(n int) {
+ if cap(e.hist) >= n {
+ return
+ }
+ l := e.maxMatchOff
+ if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
+ l += maxCompressedBlockSize
+ } else {
+ l += e.maxMatchOff
+ }
+ // Make it at least 1MB.
+ if l < 1<<20 && !e.lowMem {
+ l = 1 << 20
+ }
+ // Make it at least the requested size.
+ if l < int32(n) {
+ l = int32(n)
+ }
+ e.hist = make([]byte, 0, l)
+}
+
+// useBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+ enc.reset(e.blk)
+ e.blk = enc
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+ if debugAsserts {
+ if s < 0 {
+ err := fmt.Sprintf("s (%d) < 0", s)
+ panic(err)
+ }
+ if t < 0 {
+ err := fmt.Sprintf("t (%d) < 0", t)
+ panic(err)
+ }
+ if s-t > e.maxMatchOff {
+ err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+ panic(err)
+ }
+ if len(src)-int(s) > maxCompressedBlockSize {
+ panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+ }
+ }
+ return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+ if e.blk == nil {
+ e.blk = &blockEnc{lowMem: e.lowMem}
+ e.blk.init()
+ } else {
+ e.blk.reset(nil)
+ }
+ e.blk.initNewEncode()
+ if e.crc == nil {
+ e.crc = xxhash.New()
+ } else {
+ e.crc.Reset()
+ }
+ e.blk.dictLitEnc = nil
+ if d != nil {
+ low := e.lowMem
+ if singleBlock {
+ e.lowMem = true
+ }
+ e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
+ e.lowMem = low
+ }
+
+ // We offset current position so everything will be out of reach.
+ // If above reset line, history will be purged.
+ if e.cur < e.bufferReset {
+ e.cur += e.maxMatchOff + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+ if d != nil {
+ // Set offsets (currently not used)
+ for i, off := range d.offsets {
+ e.blk.recentOffsets[i] = uint32(off)
+ e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+ }
+ // Transfer litenc.
+ e.blk.dictLitEnc = d.litEnc
+ e.hist = append(e.hist, d.content...)
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
new file mode 100644
index 000000000..c1581cfcb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -0,0 +1,549 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+ +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = maxMatchLen * 8 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep) & 3) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
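+ // Entries older than minOff are cleared; the rest are rebased so they
+ // remain valid once e.cur is reset to e.maxMatchOff below.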
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := max( + // Huffman can never go < 1 bit/byte + int32((compress.ShannonEntropyBits(src)*1024)/len(src)), 1024) + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 250 + + cv := load6432(src, s) + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + return + } + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if m.rep <= 0 { + // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
+ tMin := max(s-e.maxMatchOff, 0) + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + continue + } + + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) + } + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. 
+ if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + } + + if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + s = best.s + if best.rep > 0 { + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + addLiterals(&seq, best.s) + + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) + if debugSequences { + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) + } + blk.sequences = append(blk.sequences, seq) + + // Index old s + 1 -> s - 1 + s = best.s + best.length + nextEmit = s + + // Index skipped... + end := min(s, sLimit+4) + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + + switch best.rep { + case 2, 4 | 1: + offset1, offset2 = offset2, offset1 + case 3, 4 | 2: + offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Write our sequence + var seq seq + l := best.length + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + + // Index old s + 1 -> s - 1 or sLimit + end := min(s, sLimit-4) + + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + off++ + } + if s >= sLimit { + break encodeLoop + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go 
b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 000000000..85dcd28c3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1234 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
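+		// Illustrative example (numbers far smaller than the real window):
+		// with e.cur = 100, len(hist) = 50 and maxMatchOff = 64, minOff is
+		// 86; an entry at offset 80 is dropped, one at 90 becomes 90-100+64 = 54.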
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
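+					// (With litLen == 0 the repeat-offset codes are
+					// renumbered in the zstd format, hence the extra byte of
+					// headroom below.)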
+ startLimit := nextEmit + 1 + + tMin := max(s-e.maxMatchOff, 0) + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := max(s-e.maxMatchOff, 0) + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
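+					// The chained previous entry may hold an even longer
+					// match; keep whichever candidate is longest.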
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
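+				// Accept it only if it beats the current length; the bytes
+				// skipped at the start are recovered by the backward
+				// extension further down.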
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := max(s-e.maxMatchOff, 0) + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. 
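+					// Extension is capped so that matchLen stays within the
+					// encoder's maxMatchLength limit.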
+ start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := max(s-e.maxMatchOff, 0) + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := max(s-e.maxMatchOff, 0) + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
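+				// At least 8 because the full 64-bit value at the candidate
+				// matched; matchlen below only measures the bytes beyond it.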
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := max(s-e.maxMatchOff, 0) + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d != nil {
+		panic("betterFastEncoder: Reset with dict")
+	}
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]tableEntry, len(e.table))
+		}
+		end := int32(len(d.content)) - 8 + e.maxMatchOff
+		for i := e.maxMatchOff; i < end; i += 4 {
+			const hashLog = betterShortTableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hashLen(cv, hashLog, betterShortLen)      // 0 -> 4
+			nextHash1 := hashLen(cv>>8, hashLog, betterShortLen)  // 1 -> 5
+			nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
+			nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
+			e.dictTable[nextHash] = tableEntry{
+				val:    uint32(cv),
+				offset: i,
+			}
+			e.dictTable[nextHash1] = tableEntry{
+				val:    uint32(cv >> 8),
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = tableEntry{
+				val:    uint32(cv >> 16),
+				offset: i + 2,
+			}
+			e.dictTable[nextHash3] = tableEntry{
+				val:    uint32(cv >> 24),
+				offset: i + 3,
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	// Init or copy dict table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]prevEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			h := hashLen(cv, betterLongTableBits, betterLongLen)
+			e.dictLongTable[h] = prevEntry{
+				offset: e.maxMatchOff,
+				prev:   e.dictLongTable[h].offset,
+			}
+
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			off := 8 // First to read
+			for i := e.maxMatchOff + 1; i < end; i++ {
+				cv = cv>>8 | (uint64(d.content[off]) << 56)
+				h := hashLen(cv, betterLongTableBits, betterLongLen)
+				e.dictLongTable[h] = prevEntry{
+					offset: i,
+					prev:   e.dictLongTable[h].offset,
+				}
+				off++
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	// Reset table to initial state
+	{
+		dirtyShardCnt := 0
+		if !e.allDirty {
+			for i := range e.shortTableShardDirty {
+				if e.shortTableShardDirty[i] {
+					dirtyShardCnt++
+				}
+			}
+		}
+		const shardCnt = betterShortTableShardCnt
+		const shardSize = betterShortTableShardSize
+		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+			copy(e.table[:], e.dictTable)
+			for i := range e.shortTableShardDirty {
+				e.shortTableShardDirty[i] = false
+			}
+		} else {
+			for i := range e.shortTableShardDirty {
+				if !e.shortTableShardDirty[i] {
+					continue
+				}
+
+				copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+				e.shortTableShardDirty[i] = false
+			}
+		}
+	}
+	{
+		dirtyShardCnt := 0
+		if !e.allDirty {
+			for i := range e.longTableShardDirty {
+				if e.longTableShardDirty[i] {
+					dirtyShardCnt++
+				}
+			}
+		}
+		const shardCnt = betterLongTableShardCnt
+		const shardSize = betterLongTableShardSize
+		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+			copy(e.longTable[:], e.dictLongTable)
+			for i := range e.longTableShardDirty {
+				e.longTableShardDirty[i] = false
+			}
+		} else {
+			for i := range e.longTableShardDirty {
+				if !e.longTableShardDirty[i] {
+					continue
+				}
+
+				copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
+				e.longTableShardDirty[i] = false
+			}
+		}
+	}
+	e.cur = e.maxMatchOff
+	e.allDirty = false
+}
+
+func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
+	e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
+}
+
+func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
+	e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 000000000..cf8cad00d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,1105 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+	dFastLongTableBits = 17                      // Bits used in the long match table
+	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	dFastLongLen       = 8                       // Bytes used for table hash
+
+	dLongTableShardCnt  = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
+	dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt   // Size of an individual shard
+
+	dFastShortTableBits = tableBits                // Bits used in the short match table
+	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	dFastShortLen       = 5                        // Bytes used for table hash
+
+)
+
+type doubleFastEncoder struct {
+	fastEncoder
+	longTable [dFastLongTableSize]tableEntry
+}
+
+type doubleFastEncoderDict struct {
+	fastEncoderDict
+	longTable           [dFastLongTableSize]tableEntry
+	dictLongTable       []tableEntry
+	longTableShardDirty [dLongTableShardCnt]bool
+}
+
+// Encode mimics functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
+		if len(e.hist) == 0 {
+			e.table = [dFastShortTableSize]tableEntry{}
+			e.longTable = [dFastLongTableSize]tableEntry{}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.longTable[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
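+	// Together with kSearchStrength below, the loop advances by
+	// stepSize + ((s - nextEmit) >> (kSearchStrength - 1)), so scanning
+	// accelerates over stretches that produce no matches.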
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := max(s-e.maxMatchOff, 0) + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := max(s-e.maxMatchOff, 0) + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := max(s-e.maxMatchOff, 0) + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. 
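+			// The long candidate is preferred, so it is tested before the
+			// short one.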
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := max(s-e.maxMatchOff, 0) + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := max(s-e.maxMatchOff, 0) + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := max(s-e.maxMatchOff, 0) + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
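+ // (Per-shard dirty tracking is only a win for small inputs; past 64K it
+ // is assumed that most shards have been touched anyway.)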
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 000000000..9180a3a58 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,873 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
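+ // maxMatchLength caps how far the extension loops below will grow a
+ // single match (see the "l < maxMatchLength" guards in Encode).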
+ maxMatchLength = 131074
+)
+
+type tableEntry struct {
+ val uint32
+ offset int32
+}
+
+type fastEncoder struct {
+ fastBase
+ table [tableSize]tableEntry
+}
+
+type fastEncoderDict struct {
+ fastEncoder
+ dictTable []tableEntry
+ tableShardDirty [tableShardCnt]bool
+ allDirty bool
+}
+
+// Encode mimics functionality in zstd_fast.c
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks;
+ // this is enforced by not using them for the first 3 matches.
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
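+ // (startLimit leaves at least one literal in front of the repeat
+ // sequence, so litLen can never be 0 here.)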
+ startLimit := nextEmit + 1 + + sMin := max(s-e.maxMatchOff, 0) + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := max(s-e.maxMatchOff, 0) + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// The most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugEncoder {
+ if len(src) > maxCompressedBlockSize {
+ panic("src too big")
+ }
+ }
+
+ // Protect against e.cur wraparound.
+ if e.cur >= e.bufferReset {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := int32(0)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks;
+ // this is enforced by not using them for the first 3 matches.
+
+ for {
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1 + + sMin := max(s-e.maxMatchOff, 0) + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := max(s-e.maxMatchOff, 0) + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+ // We do not store history, so we must offset e.cur to avoid false matches for next user.
+ if e.cur < e.bufferReset {
+ e.cur += int32(len(src))
+ }
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if e.allDirty || len(src) > 32<<10 {
+ e.fastEncoder.Encode(blk, src)
+ e.allDirty = true
+ return
+ }
+ // Protect against e.cur wraparound.
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
+ if len(e.hist) == 0 {
+ e.table = [tableSize]tableEntry{}
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 7
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks;
+ // this is enforced by not using them for the first 3 matches.
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.markShardDirty(nextHash)
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+ e.markShardDirty(nextHash2)
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1 + + sMin := max(s-e.maxMatchOff, 0) + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := max(s-e.maxMatchOff, 0) + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 2 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 000000000..19e730acc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,658 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
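+//
+// A minimal usage sketch (w and r are placeholder writer/reader values,
+// error handling elided):
+//
+//	enc, _ := NewWriter(w, WithEncoderLevel(SpeedDefault))
+//	_, _ = io.Copy(enc, r)
+//	_ = enc.Close()
+//
+// For many small independent payloads, create one Encoder with a nil
+// writer, keep it around, and call EncodeAll instead:
+//
+//	compressed := enc.EncodeAll(src, nil)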
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetWithOptions will re-initialize the writer and apply the given options +// as a new, independent stream. +// Options are applied on top of the existing options. +// Some options cannot be changed on reset and will return an error. +func (e *Encoder) ResetWithOptions(w io.Writer, opts ...EOption) error { + e.o.resetOpt = true + defer func() { e.o.resetOpt = false }() + for _, o := range opts { + if err := o(&e.o); err != nil { + return err + } + } + e.Reset(w) + return nil +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. 
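+//
+// Writes smaller than the block size are only buffered; compression work
+// happens once a full block (e.o.blockSize bytes) has accumulated, so many
+// small writes stay cheap.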
+func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) + p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst := fh.appendTo(tmp[:0]) + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. 
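+ // The three buffers rotate: the block just filled becomes "current" and
+ // is handed to the encode goroutine below, while the oldest buffer is
+ // recycled as the new (empty) fill buffer.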
+ s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + if final { + s.eofWritten = true + } + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. 
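+//
+// Calling Close more than once is safe: a second call observes the
+// internal ErrEncoderClosed state and returns nil.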
+func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst = fh.appendTo(dst) + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
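+ // dst is temporarily borrowed as the block's output buffer, so the
+ // encoded block is appended to dst without an extra copy; the original
+ // buffer is put back afterwards.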
+ oldout := blk.output
+ // Output directly to dst
+ blk.output = dst
+
+ err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ if err != nil {
+ panic(err)
+ }
+ dst = blk.output
+ blk.output = oldout
+ } else {
+ enc.Reset(e.o.dict, false)
+ blk := enc.Block()
+ for len(src) > 0 {
+ todo := src
+ if len(todo) > e.o.blockSize {
+ todo = todo[:e.o.blockSize]
+ }
+ src = src[len(todo):]
+ if e.o.crc {
+ _, _ = enc.CRC().Write(todo)
+ }
+ blk.pushOffsets()
+ enc.Encode(blk, todo)
+ if len(src) == 0 {
+ blk.last = true
+ }
+ err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+ if err != nil {
+ panic(err)
+ }
+ dst = append(dst, blk.output...)
+ blk.reset(nil)
+ }
+ }
+ if e.o.crc {
+ dst = enc.AppendCRC(dst)
+ }
+ // Add padding with content from crypto/rand.Reader
+ if e.o.pad > 0 {
+ add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+ var err error
+ dst, err = skippableFrame(dst, add, rand.Reader)
+ if err != nil {
+ panic(err)
+ }
+ }
+ return dst
+}
+
+// MaxEncodedSize returns the expected maximum
+// size of an encoded block or stream.
+func (e *Encoder) MaxEncodedSize(size int) int {
+ frameHeader := 4 + 2 // magic + frame header & window descriptor
+ if e.o.dict != nil {
+ frameHeader += 4
+ }
+ // Frame content size:
+ if size < 256 {
+ frameHeader++
+ } else if size < 65536+256 {
+ frameHeader += 2
+ } else if size < math.MaxInt32 {
+ frameHeader += 4
+ } else {
+ frameHeader += 8
+ }
+ // Final crc
+ if e.o.crc {
+ frameHeader += 4
+ }
+
+ // Max overhead is 3 bytes/block.
+ // There cannot be 0 blocks.
+ blocks := (size + e.o.blockSize) / e.o.blockSize
+
+ // Combine, add padding.
+ maxSz := frameHeader + 3*blocks + size
+ if e.o.pad > 1 {
+ maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
+ }
+ return maxSz
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
new file mode 100644
index 000000000..8e0f5cac7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -0,0 +1,377 @@
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+ "runtime"
+ "strings"
+)
+
+// EOption is an option for creating an encoder.
+type EOption func(*encoderOptions) error
+
+// encoderOptions retains the accumulated state of multiple options.
+type encoderOptions struct {
+ resetOpt bool
+ concurrent int
+ level EncoderLevel
+ single *bool
+ pad int
+ blockSize int
+ windowSize int
+ crc bool
+ fullZero bool
+ noEntropy bool
+ allLitEntropy bool
+ customWindow bool
+ customALEntropy bool
+ customBlockSize bool
+ lowMem bool
+ dict *dict
+}
+
+func (o *encoderOptions) setDefault() {
+ *o = encoderOptions{
+ concurrent: runtime.GOMAXPROCS(0),
+ crc: true,
+ single: nil,
+ blockSize: maxCompressedBlockSize,
+ windowSize: 8 << 20,
+ level: SpeedDefault,
+ allLitEntropy: false,
+ lowMem: false,
+ }
+}
+
+// encoder returns an encoder with the selected options.
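+// Level to implementation, as selected below (the *Dict variants are used
+// when a dictionary is configured): SpeedFastest -> fastEncoder,
+// SpeedDefault -> doubleFastEncoder, SpeedBetterCompression ->
+// betterFastEncoder, SpeedBestCompression -> bestFastEncoder.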
+func (o encoderOptions) encoder() encoder {
+ switch o.level {
+ case SpeedFastest:
+ if o.dict != nil {
+ return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+ }
+ return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+
+ case SpeedDefault:
+ if o.dict != nil {
+ return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
+ }
+ return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+ case SpeedBetterCompression:
+ if o.dict != nil {
+ return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+ }
+ return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+ case SpeedBestCompression:
+ return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+ }
+ panic("unknown compression level")
+}
+
+// WithEncoderCRC will add CRC value to output.
+// Output will be 4 bytes larger.
+// Can be changed with ResetWithOptions.
+func WithEncoderCRC(b bool) EOption {
+ return func(o *encoderOptions) error { o.crc = b; return nil }
+}
+
+// WithEncoderConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 0.
+// When a value of 0 is provided, GOMAXPROCS will be used.
+// For streams, setting a value of 1 will disable async compression.
+// By default this will be set to GOMAXPROCS.
+// Cannot be changed with ResetWithOptions.
+func WithEncoderConcurrency(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n < 0 {
+ return errors.New("concurrency must be at least 0")
+ }
+ if n == 0 {
+ n = runtime.GOMAXPROCS(0)
+ }
+ if o.resetOpt && n != o.concurrent {
+ return errors.New("WithEncoderConcurrency cannot be changed on Reset")
+ }
+ o.concurrent = n
+ return nil
+ }
+}
+
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level and is at most 8MB.
+// Cannot be changed with ResetWithOptions.
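+//
+// For example, to cap back-references at 1 MiB (w being any io.Writer):
+//
+//	enc, err := NewWriter(w, WithWindowSize(1<<20))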
+func WithWindowSize(n int) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case n < MinWindowSize:
+ return fmt.Errorf("window size must be at least %d", MinWindowSize)
+ case n > MaxWindowSize:
+ return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+ case (n & (n - 1)) != 0:
+ return errors.New("window size must be a power of 2")
+ }
+ if o.resetOpt && n != o.windowSize {
+ return errors.New("WithWindowSize cannot be changed on Reset")
+ }
+
+ o.windowSize = n
+ o.customWindow = true
+ if o.blockSize > o.windowSize {
+ o.blockSize = o.windowSize
+ o.customBlockSize = true
+ }
+ return nil
+ }
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB, 1<<30 bytes.
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+// Can be changed with ResetWithOptions.
+func WithEncoderPadding(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("padding must be at least 1")
+ }
+ // No need to waste our time.
+ if n == 1 {
+ n = 0
+ }
+ if n > 1<<30 {
+ return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+ }
+ o.pad = n
+ return nil
+ }
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+ speedNotSet EncoderLevel = iota
+
+ // SpeedFastest will choose the fastest reasonable compression.
+ // This is roughly equivalent to the fastest Zstandard mode.
+ SpeedFastest
+
+ // SpeedDefault is the default "pretty fast" compression option.
+ // This is roughly equivalent to the default Zstandard mode (level 3).
+ SpeedDefault
+
+ // SpeedBetterCompression will yield better compression than the default.
+ // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+ // By using this, note that CPU usage may go up in the future.
+ SpeedBetterCompression
+
+ // SpeedBestCompression will choose the best available compression option.
+ // This will offer the best compression no matter the CPU cost.
+ SpeedBestCompression
+
+ // speedLast should be kept as the last actual compression option.
+ // This is not for external usage, but is used to keep track of the valid options.
+ speedLast
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+ for l := speedNotSet + 1; l < speedLast; l++ {
+ if strings.EqualFold(s, l.String()) {
+ return true, l
+ }
+ }
+ return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return an encoder level that most closely matches the compression
+// ratio of a specific zstd compression level.
+// Many input values will provide the same compression level.
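+// The mapping implemented below: level < 3 -> SpeedFastest, 3-5 ->
+// SpeedDefault, 6-9 -> SpeedBetterCompression, 10+ -> SpeedBestCompression.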
+func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +// Cannot be changed with ResetWithOptions. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + if o.resetOpt && l != o.level { + return errors.New("WithEncoderLevel cannot be changed on Reset") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 8 << 20 + case SpeedBestCompression: + o.windowSize = 8 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedDefault + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +// Can be changed with ResetWithOptions. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +// Can be changed with ResetWithOptions. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +// Can be changed with ResetWithOptions. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. 
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
+// This setting has no effect on streamed encodes.
+// Can be changed with ResetWithOptions.
+func WithSingleSegment(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.single = &b
+ return nil
+ }
+}
+
+// WithLowerEncoderMem will, in some cases, trade lower memory usage for
+// slower encoding speed.
+// This will not change the window size, which is the primary means of reducing
+// memory usage. See WithWindowSize.
+// Cannot be changed with ResetWithOptions.
+func WithLowerEncoderMem(b bool) EOption {
+ return func(o *encoderOptions) error {
+ if o.resetOpt && b != o.lowMem {
+ return errors.New("WithLowerEncoderMem cannot be changed on Reset")
+ }
+ o.lowMem = b
+ return nil
+ }
+}
+
+// WithEncoderDict allows registering a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+// Can be changed with ResetWithOptions.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+func WithEncoderDict(dict []byte) EOption {
+ return func(o *encoderOptions) error {
+ d, err := loadDict(dict)
+ if err != nil {
+ return err
+ }
+ o.dict = d
+ return nil
+ }
+}
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
+// Can be changed with ResetWithOptions.
+func WithEncoderDictRaw(id uint32, content []byte) EOption {
+ return func(o *encoderOptions) error {
+ if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+ return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+ }
+ o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
+ return nil
+ }
+}
+
+// WithEncoderDictDelete clears the dictionary, so no dictionary will be used.
+// Should be used with ResetWithOptions.
+func WithEncoderDictDelete() EOption {
+ return func(o *encoderOptions) error {
+ o.dict = nil
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 000000000..d88f067e5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,412 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "io"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+ o decoderOptions
+ crc *xxhash.Digest
+
+ WindowSize uint64
+
+ // Frame history passed between blocks
+ history history
+
+ rawInput byteBuffer
+
+ // Byte buffer that can be reused for small input blocks.
+ bBuf byteBuf
+
+ FrameContentSize uint64
+
+ DictionaryID uint32
+ HasCheckSum bool
+ SingleSegment bool
+}
+
+const (
+ // MinWindowSize is the minimum Window Size, which is 1 KB.
+ MinWindowSize = 1 << 10
+
+ // MaxWindowSize is the maximum encoder window size
+ // and the default decoder maximum window size.
+ MaxWindowSize = 1 << 29
+)
+
+const (
+ frameMagic = "\x28\xb5\x2f\xfd"
+ skippableFrameMagic = "\x2a\x4d\x18"
+)
+
+func newFrameDec(o decoderOptions) *frameDec {
+ if o.maxWindowSize > o.maxDecodedSize {
+ o.maxWindowSize = o.maxDecodedSize
+ }
+ d := frameDec{
+ o: o,
+ }
+ return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
+func (d *frameDec) reset(br byteBuffer) error {
+ d.HasCheckSum = false
+ d.WindowSize = 0
+ var signature [4]byte
+ for {
+ var err error
+ // Check if we can read more...
+ b, err := br.readSmall(1)
+ switch err {
+ case io.EOF, io.ErrUnexpectedEOF:
+ return io.EOF
+ case nil:
+ signature[0] = b[0]
+ default:
+ return err
+ }
+ // Read the rest, don't allow io.ErrUnexpectedEOF
+ b, err = br.readSmall(3)
+ switch err {
+ case io.EOF:
+ return io.EOF
+ case nil:
+ copy(signature[1:], b)
+ default:
+ return err
+ }
+
+ if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
+ if debugDecoder {
+ println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
+ }
+ // Break if not skippable frame.
+ break
+ }
+ // Read size to skip
+ b, err = br.readSmall(4)
+ if err != nil {
+ if debugDecoder {
+ println("Reading Frame Size", err)
+ }
+ return err
+ }
+ n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ println("Skipping frame with", n, "bytes.")
+ err = br.skipN(int64(n))
+ if err != nil {
+ if debugDecoder {
+ println("Reading discarded frame", err)
+ }
+ return err
+ }
+ }
+ if string(signature[:]) != frameMagic {
+ if debugDecoder {
+ println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
+ }
+ return ErrMagicMismatch
+ }
+
+ // Read Frame_Header_Descriptor
+ fhd, err := br.readByte()
+ if err != nil {
+ if debugDecoder {
+ println("Reading Frame_Header_Descriptor", err)
+ }
+ return err
+ }
+ d.SingleSegment = fhd&(1<<5) != 0
+
+ if fhd&(1<<3) != 0 {
+ return errors.New("reserved bit set on frame header")
+ }
+
+ // Read Window_Descriptor
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+ d.WindowSize = 0
+ if !d.SingleSegment {
+ wd, err := br.readByte()
+ if err != nil {
+ if debugDecoder {
+ println("Reading Window_Descriptor", err)
+ }
+ return err
+ }
+ if debugDecoder {
+ printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ }
+ windowLog := 10 + (wd >> 3)
+ windowBase := uint64(1) << windowLog
+ windowAdd := (windowBase / 8) * uint64(wd&0x7)
+ d.WindowSize = windowBase + windowAdd
+ }
+
+ // Read Dictionary_ID
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
+ d.DictionaryID = 0
+ if size := fhd & 3; size != 0 {
+ if size == 3 {
+ size = 4
+ }
+
+ b, err := br.readSmall(int(size))
+ if err != nil {
+ println("Reading Dictionary_ID", err)
+ return err
+ }
+ var id uint32
+ switch len(b) {
+ case 1:
+ id = uint32(b[0])
+ case 2:
+ id = uint32(b[0]) | (uint32(b[1]) << 8)
+ case 4:
+ id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+ }
+ if debugDecoder {
+ println("Dict size", size, "ID:", id)
+ }
+ d.DictionaryID = id
+ }
+
+ // Read Frame_Content_Size
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
+ var fcsSize int
+ v := fhd >> 6
+ switch v {
+ case 0:
+ if 
d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = max(d.FrameContentSize, MinWindowSize) + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum, assuming the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC skips over the checksum, assuming the frame has one. 
+func (d *frameDec) consumeCRC() error { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + } + return err +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. + crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 000000000..667ca0679 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) []byte { + dst = append(dst, frameMagic...) 
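+ // Build the Frame_Header_Descriptor byte (RFC 8878): bits 0-1 carry the
+ // Dictionary_ID field size, bit 2 the Content_Checksum_flag, bit 5 the
+ // Single_Segment_flag, and bits 6-7 the Frame_Content_Size field size.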
+ var fhd uint8
+ if f.Checksum {
+ fhd |= 1 << 2
+ }
+ if f.SingleSegment {
+ fhd |= 1 << 5
+ }
+
+ var dictIDContent []byte
+ if f.DictID > 0 {
+ var tmp [4]byte
+ if f.DictID < 256 {
+ fhd |= 1
+ tmp[0] = uint8(f.DictID)
+ dictIDContent = tmp[:1]
+ } else if f.DictID < 1<<16 {
+ fhd |= 2
+ binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+ dictIDContent = tmp[:2]
+ } else {
+ fhd |= 3
+ binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+ dictIDContent = tmp[:4]
+ }
+ }
+ var fcs uint8
+ if f.ContentSize >= 256 {
+ fcs++
+ }
+ if f.ContentSize >= 65536+256 {
+ fcs++
+ }
+ if f.ContentSize >= 0xffffffff {
+ fcs++
+ }
+
+ fhd |= fcs << 6
+
+ dst = append(dst, fhd)
+ if !f.SingleSegment {
+ const winLogMin = 10
+ windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+ dst = append(dst, uint8(windowLog))
+ }
+ if f.DictID > 0 {
+ dst = append(dst, dictIDContent...)
+ }
+ switch fcs {
+ case 0:
+ if f.SingleSegment {
+ dst = append(dst, uint8(f.ContentSize))
+ }
+ // Unless SingleSegment is set, frame sizes < 256 are not stored.
+ case 1:
+ f.ContentSize -= 256
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+ case 2:
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+ case 3:
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+ uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+ default:
+ panic("invalid fcs")
+ }
+ return dst
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size to be added so that the
+// written size is divisible by wantMultiple.
+// The value will always be 0 or greater than skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+ if wantMultiple <= 0 {
+ panic("wantMultiple <= 0")
+ }
+ if written < 0 {
+ panic("written < 0")
+ }
+ leftOver := written % wantMultiple
+ if leftOver == 0 {
+ return 0
+ }
+ toAdd := wantMultiple - leftOver
+ for toAdd < skippableFrameHeader {
+ toAdd += wantMultiple
+ }
+ return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of total bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+ if total == 0 {
+ return dst, nil
+ }
+ if total < skippableFrameHeader {
+ return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+ }
+ if int64(total) > math.MaxUint32 {
+ return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+ }
+ dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+ f := uint32(total - skippableFrameHeader)
+ dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+ start := len(dst)
+ dst = append(dst, make([]byte, f)...)
+ _, err := io.ReadFull(r, dst[start:])
+ return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 000000000..2f8860a72
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,307 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++
+ previous0 = count == 0
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> (bitCount & 31)
+ } else {
+ bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+ b.off = len(b.b) - 4
+ bitStream = b.Uint32() >> (bitCount & 31)
+ }
+ }
+ s.symbolLen = charnum
+ if s.symbolLen <= 1 {
+ return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+ }
+ if s.symbolLen > maxSymbolValue+1 {
+ return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+ }
+ if remaining != 1 {
+ return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+ }
+ if bitCount > 32 {
+ return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+ }
+ if gotTotal != 1<<s.actualTableLog {
+ return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+ }
+ b.advance((bitCount + 7) >> 3)
+ return s.buildDtable()
+}
+
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ // dt [maxTablesize]decSymbol // Decompression table.
+ // symbolLen uint16 // Length of active part of the symbol table.
+ // actualTableLog uint8 // Selected tablelog.
+ // maxBits uint8 // Maximum number of additional bits
+ // // used for table creation to avoid allocations.
+ // stateTable [256]uint16
+ // norm [maxSymbolValue + 1]int16
+ // preDefined bool
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+ return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+ return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+ return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+ return uint16(d >> 16)
+}
+
+func (d decSymbol) baselineInt() int {
+ return int(d >> 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+ const mask = 0xffffffffffffff00
+ *d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+ const mask = 0xffffffffffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+ const mask = 0xffffffff0000ffff
+ *d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+ const mask = 0xffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+ if int(symb) >= len(t) {
+ return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+ }
+ lu := t[symb]
+ return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
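+// In RLE mode the table collapses to a single zero-bit entry, so every
+// decoded symbol is the same value.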
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 000000000..d04a829b0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 000000000..bcde39869 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 000000000..8adfebb02 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,73 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
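+// This is the portable Go implementation, used when the amd64 assembly
+// version is excluded by the build constraints above.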
+func (s *fseDecoder) buildDtable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ symbolNext := s.stateTable[:256]
+
+ // Init, lay down lowprob symbols
+ {
+ for i, v := range s.norm[:s.symbolLen] {
+ if v == -1 {
+ s.dt[highThreshold].setAddBits(uint8(i))
+ highThreshold--
+ v = 1
+ }
+ symbolNext[i] = uint16(v)
+ }
+ }
+
+ // Spread symbols
+ {
+ tableMask := tableSize - 1
+ step := tableStep(tableSize)
+ position := uint32(0)
+ for ss, v := range s.norm[:s.symbolLen] {
+ for i := 0; i < int(v); i++ {
+ s.dt[position].setAddBits(uint8(ss))
+ for {
+ // lowprob area
+ position = (position + step) & tableMask
+ if position <= highThreshold {
+ break
+ }
+ }
+ }
+ }
+ if position != 0 {
+ // position must reach all cells once, otherwise normalizedCounter is incorrect
+ return errors.New("corrupted input (position != 0)")
+ }
+ }
+
+ // Build Decoding table
+ {
+ tableSize := uint16(1 << s.actualTableLog)
+ for u, v := range s.dt[:tableSize] {
+ symbol := v.addBits()
+ nextState := symbolNext[symbol]
+ symbolNext[symbol] = nextState + 1
+ nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+ s.dt[u&maxTableMask].setNBits(nBits)
+ newState := (nextState << nBits) - tableSize
+ if newState > tableSize {
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+ }
+ if newState == uint16(u) && nBits == 0 {
+ // Seems weird that this is possible with nbits > 0.
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+ }
+ s.dt[u&maxTableMask].setNewState(newState)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
new file mode 100644
index 000000000..3a0f4e7fb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -0,0 +1,701 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "math"
+)
+
+const (
+ // For encoding we only support tables up to maxEncTableLog.
+ maxEncTableLog = 8
+ maxEncTablesize = 1 << maxTableLog
+ maxEncTableMask = (1 << maxTableLog) - 1
+ minEncTablelog = 5
+ maxEncSymbolValue = maxMatchLengthSymbol
+)
+
+// fseEncoder provides temporary storage for compression and decompression.
+type fseEncoder struct {
+ symbolLen uint16 // Length of active part of the symbol table.
+ actualTableLog uint8 // Selected tablelog.
+ ct cTable // Compression tables.
+ maxCount int // count of the most probable symbol
+ zeroBits bool // no bits have prob > 50%.
+ clearCount bool // clear count
+ useRLE bool // This encoder is for RLE
+ preDefined bool // This encoder is predefined.
+ reUsed bool // Set to know when the encoder has been reused.
+ rleVal uint8 // RLE Symbol
+ maxBits uint8 // Maximum output bits after transform.
+
+ // TODO: Technically zstd should be fine with 64 bytes.
+ count [256]uint32
+ norm [256]int16
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+ tableSymbol []byte
+ stateTable []uint16
+ symbolTT []symbolTransform
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+ deltaNbBits uint32
+ deltaFindState int16
+ outBits uint8
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+ return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram lets the caller populate the histogram and skip that step of the
+// compression. It also allows inspecting the histogram after compression is done.
+// To indicate that you have populated the histogram call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+func (s *fseEncoder) Histogram() *[256]uint32 {
+ return &s.count
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+ s.maxCount = maxCount
+ s.symbolLen = uint16(maxSymbol) + 1
+ s.clearCount = maxCount != 0
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+ tableSize := 1 << s.actualTableLog
+ // get tableSymbol that is big enough.
+ if cap(s.ct.tableSymbol) < tableSize {
+ s.ct.tableSymbol = make([]byte, tableSize)
+ }
+ s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+ ctSize := tableSize
+ if cap(s.ct.stateTable) < ctSize {
+ s.ct.stateTable = make([]uint16, ctSize)
+ }
+ s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+ if cap(s.ct.symbolTT) < 256 {
+ s.ct.symbolTT = make([]symbolTransform, 256)
+ }
+ s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ var cumul [256]int16
+
+ s.allocCtable()
+ tableSymbol := s.ct.tableSymbol[:tableSize]
+ // symbol start positions
+ {
+ cumul[0] = 0
+ for ui, v := range s.norm[:s.symbolLen-1] {
+ u := byte(ui) // one less than reference
+ if v == -1 {
+ // Low proba symbol
+ cumul[u+1] = cumul[u] + 1
+ tableSymbol[highThreshold] = u
+ highThreshold--
+ } else {
+ cumul[u+1] = cumul[u] + v
+ }
+ }
+ // Encode last symbol separately to avoid overflowing u
+ u := int(s.symbolLen - 1)
+ v := s.norm[s.symbolLen-1]
+ if v == -1 {
+ // Low proba symbol
+ cumul[u+1] = cumul[u] + 1
+ tableSymbol[highThreshold] = byte(u)
+ highThreshold--
+ } else {
+ cumul[u+1] = cumul[u] + v
+ }
+ if uint32(cumul[s.symbolLen]) != tableSize {
+ return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+ }
+ cumul[s.symbolLen] = int16(tableSize) + 1
+ }
+ // Spread symbols
+ s.zeroBits = false
+ {
+ step := tableStep(tableSize)
+ tableMask := tableSize - 1
+ var position uint32
+ // if any symbol > largeLimit, we may have 0 bits output.
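+ // (an FSE state can emit a symbol using zero bits when that symbol's
+ // probability exceeds 50%)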
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for range v { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
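+// The normalized counts sum to 1<<actualTableLog; every symbol present in the
+// input keeps at least a weight of 1 (a count of -1 marks a below-threshold
+// probability).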
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart
+ )
+ if weight < 1 {
+ return errors.New("weight < 1")
+ }
+ s.norm[i] = int16(weight)
+ tmpTotal = end
+ }
+ }
+ return nil
+}
+
+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+ tableLog := uint8(maxEncTableLog)
+ minBitsSrc := highBit(uint32(length)) + 1
+ minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+ minBits := uint8(minBitsSymbols)
+ if minBitsSrc < minBitsSymbols {
+ minBits = uint8(minBitsSrc)
+ }
+
+ maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+ if maxBitsSrc < tableLog {
+ // Accuracy can be reduced
+ tableLog = maxBitsSrc
+ }
+ if minBits > tableLog {
+ tableLog = minBits
+ }
+ // Need a minimum to safely represent all symbol values
+ if tableLog < minEncTablelog {
+ tableLog = minEncTablelog
+ }
+ if tableLog > maxEncTableLog {
+ tableLog = maxEncTableLog
+ }
+ s.actualTableLog = tableLog
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+ var total int
+ for _, v := range s.norm[:s.symbolLen] {
+ if v >= 0 {
+ total += int(v)
+ } else {
+ total -= int(v)
+ }
+ }
+ defer func() {
+ if err == nil {
+ return
+ }
+ fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+ for i, v := range s.norm[:s.symbolLen] {
+ fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+ }
+ }()
+ if total != (1 << s.actualTableLog) {
+ return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+ }
+ for i, v := range s.norm[s.symbolLen:] {
+ if v != 0 {
+ return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+ }
+ }
+ return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+ if s.useRLE {
+ return append(out, s.rleVal), nil
+ }
+ var (
+ tableLog = s.actualTableLog
+ tableSize = 1 << tableLog
+ previous0 bool
+ charnum uint16
+
+ // maximum header size plus 2 extra bytes for final output if bitCount == 0.
+ maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + 2
+
+ // Write Table Size
+ bitStream = uint32(tableLog - minEncTablelog)
+ bitCount = uint(4)
+ remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+ threshold = int16(tableSize)
+ nbBits = uint(tableLog + 1)
+ outP = len(out)
+ )
+ if cap(out) < outP+maxHeaderSize {
+ out = append(out, make([]byte, maxHeaderSize*3)...)
+ out = out[:len(out)-maxHeaderSize*3]
+ }
+ out = out[:outP+maxHeaderSize]
+
+ // stops at 1
+ for remaining > 1 {
+ if previous0 {
+ start := charnum
+ for s.norm[charnum] == 0 {
+ charnum++
+ }
+ for charnum >= start+24 {
+ start += 24
+ bitStream += uint32(0xFFFF) << bitCount
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ }
+ for charnum >= start+3 {
+ start += 3
+ bitStream += 3 << bitCount
+ bitCount += 2
+ }
+ bitStream += uint32(charnum-start) << bitCount
+ bitCount += 2
+ if bitCount > 16 {
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ bitCount -= 16
+ }
+ }
+
+ count := s.norm[charnum]
+ charnum++
+ max := (2*threshold - 1) - remaining
+ if count < 0 {
+ remaining += count
+ } else {
+ remaining -= count
+ }
+ count++ // +1 for extra accuracy
+ if count >= threshold {
+ count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+ }
+ bitStream += uint32(count) << bitCount
+ bitCount += nbBits
+ if count < max {
+ bitCount--
+ }
+
+ previous0 = count == 1
+ if remaining < 1 {
+ return nil, errors.New("internal error: remaining < 1")
+ }
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if bitCount > 16 {
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ bitCount -= 16
+ }
+ }
+
+ if outP+2 > len(out) {
+ return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
+ }
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += int((bitCount + 7) / 8)
+
+ if charnum > s.symbolLen {
+ return nil, errors.New("internal error: charnum > s.symbolLen")
+ }
+ return out[:outP], nil
+}
+
+// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+// note 1 : assume symbolValue is valid (<= maxSymbolValue)
+// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits
+func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
+ minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
+ threshold := (minNbBits + 1) << 16
+ if debugAsserts {
+ if !(s.actualTableLog < 16) {
+ panic("!s.actualTableLog < 16")
+ }
+ // ensure enough room for renormalization double shift
+ if !(uint8(accuracyLog) < 31-s.actualTableLog) {
+ panic("!uint8(accuracyLog) < 31-s.actualTableLog")
+ }
+ }
+ tableSize := uint32(1) << s.actualTableLog
+ deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
+ // linear interpolation (very approximate)
+ normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
+ bitMultiplier := uint32(1) << accuracyLog
+ if debugAsserts {
+ if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
+ panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
+ }
+ if normalizedDeltaFromThreshold > bitMultiplier {
+ panic("normalizedDeltaFromThreshold > bitMultiplier")
+ }
+ }
+ return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
+}
+
+// Returns the cost in bits of encoding the distribution in count using ctable.
+// Histogram should only be up to the last non-zero symbol.
+// Returns math.MaxUint32 if ctable cannot represent all the symbols in count.
+func (s *fseEncoder) approxSize(hist []uint32) uint32 {
+ if int(s.symbolLen) < len(hist) {
+ // More symbols than we have.
+ return math.MaxUint32
+ }
+ if s.useRLE {
+ // We will never reuse RLE encoders.
+ return math.MaxUint32
+ }
+ const kAccuracyLog = 8
+ badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
+ var cost uint32
+ for i, v := range hist {
+ if v == 0 {
+ continue
+ }
+ if s.norm[i] == 0 {
+ return math.MaxUint32
+ }
+ bitCost := s.bitCost(uint8(i), kAccuracyLog)
+ if bitCost > badCost {
+ return math.MaxUint32
+ }
+ cost += v * bitCost
+ }
+ return cost >> kAccuracyLog
+}
+
+// maxHeaderSize returns the maximum header size in bits.
+// This is not the exact size, but we want a penalty for new tables anyway.
+func (s *fseEncoder) maxHeaderSize() uint32 {
+ if s.preDefined {
+ return 0
+ }
+ if s.useRLE {
+ return 8
+ }
+ return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8
+}
+
+// cState contains the compression state of a stream.
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 000000000..474cb77d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
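+// Each entry i covers 1<<bits[i] values starting at the running base, so
+// successive entries partition the value range without gaps.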
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 000000000..5d73c21eb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 000000000..09164856d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. 
+ // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt similarity index 64% rename from vendor/github.com/docker/docker-credential-helpers/LICENSE rename to vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt index 1ea555e2a..24b53065f 100644 --- a/vendor/github.com/docker/docker-credential-helpers/LICENSE +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -1,4 +1,6 @@ -Copyright (c) 2016 David Calavera +Copyright (c) 2016 Caleb Spare + +MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the @@ -13,8 +15,8 @@ included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 000000000..777290d44 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. 
Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 000000000..fc40c8200 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. 
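+//
+// Typical streaming use, with names from this package (firstChunk and
+// secondChunk are illustrative placeholder []byte values):
+//
+//	d := New()
+//	d.Write(firstChunk)
+//	d.Write(secondChunk)
+//	sum := d.Sum64()
+//
+// which yields the same value as Sum64 over the concatenated input.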
+func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 000000000..ddb63aa91 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
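+	// (v1 = prime1+prime2, v2 = prime2, v3 = 0, v4 = -prime1, matching
+	// what Digest.Reset does in the Go code.)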
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 000000000..ae7d4d329 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
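+// nblocks is computed as n / 32, and p is advanced past every block consumed.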
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(s *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD s+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 000000000..d4221edf4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
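+// The implementations live in xxhash_amd64.s and xxhash_arm64.s.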
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 000000000..0be16cefc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 000000000..6f3b0cb10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 000000000..f41932b7a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 000000000..0782b86e3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,66 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SHRL $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 000000000..bea1779e9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,38 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "math/bits" + + "github.com/klauspost/compress/internal/le" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + left := len(a) + for left >= 8 { + diff := le.Load64(a, n) ^ le.Load64(b, n) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + left -= 8 + } + a = a[n:] + b = b[n:] + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 000000000..0bfb0e43c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,500 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. 
+ llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. +func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history. 
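+// The assembly fast path (decodeSyncSimple) is tried first; the generic Go
+// loop below only runs when that path reports it is not supported.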
+func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) + + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) 
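+		// Drop the literals that were just copied to the output.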
+ s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
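+	// (offset bits are read first, then match length, then literal length)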
+ br.fill() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fill() + } + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 000000000..1f8c3cec2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,388 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + "io" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
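+// It returns false when it cannot run (a dictionary is in use, or too little
+// output capacity is available), letting the caller fall back to the generic
+// decoder.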
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorOverread: + return true, io.ErrUnexpectedEOF + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// error reported when bits are overread. +const errorOverread = 6 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. 
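+// When s.maxBits plus the three FSE table logs total at most 56, the faster
+// *_56 assembly variants are selected.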
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
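+		// Re-slice below so only the capacity grows, not the visible length.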
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 000000000..a708ca6d3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4151 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 40(CX), BX + MOVQ (CX), AX + MOVQ 32(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
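+	// Fast path: refill 8 bytes at once when at least 8 input bytes remain;
+	// otherwise fall back to the byte-by-byte loop below.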
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + 
MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 40(CX), BX + MOVQ (CX), AX + MOVQ 32(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + 
MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + 
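+	// offsetB <= 1 marks a repeat offset rather than a literal one: a
+	// zero literal length shifts the repeat code up by one, and a code
+	// of zero reuses the most recent offset without rotating the
+	// history.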
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 40(BX), DX + MOVQ (BX), CX + MOVQ 32(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
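+	// Fast path: with at least 8 input bytes left, step the read pointer
+	// back by bits/8 bytes and reload a whole 64-bit word, keeping only
+	// bits%8 pending; otherwise shift bytes in one at a time. A pending
+	// bit count above 64 signals an overread.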
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + 
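+	// Repeat codes 0-2 select prevOffset[0], prevOffset[1] and
+	// prevOffset[2]; code 3 yields prevOffset[0]-1. A zero result is
+	// clamped to 1 before the history is rotated below.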
+sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 40(BX), DX + MOVQ (BX), CX + MOVQ 32(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
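+	// The 56-bit variants do one refill per sequence: ofB+mlB+llB is
+	// assumed to fit in 56 bits, so no second fill is needed before the
+	// literal length is read below.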
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
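+	// prevOffset[0]-1 can come out as zero, which is not a valid match
+	// offset; clamp the result to 1.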
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
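+	// 8-16 byte copies load the first and the last 8 bytes of the
+	// source; the two stores may overlap in the middle, which is
+	// harmless since source and destination are disjoint here.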
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB 
R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 
+ JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 40(CX), BX + MOVQ (CX), AX + MOVQ 32(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + 
MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + 
ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 
40(BX), DX + MOVQ (BX), CX + MOVQ 32(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal 
Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + 
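+	// copy_4_end: advance the output position by the match length and
+	// loop to the next sequence (the trailing JMP loop_finished below is
+	// unreachable).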
+copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 40(CX), BX + MOVQ (CX), AX + MOVQ 32(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 
24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + 
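+	// The _safe copy never writes past the end of the destination: the
+	// 16-byte loop above ends with a tail store aligned to the copy
+	// length, and the small-move ladder below uses exact-length moves.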
+copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 
+ JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ -8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 40(BX), DX + MOVQ (BX), CX + MOVQ 32(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
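+	// Editorial note: with at least 8 bytes of input left the refill is one
+	// unaligned 8-byte load; otherwise bytes are shifted in one at a time,
+	// with an overread check once the input runs dry.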
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 
2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error 
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 000000000..7cec2197c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
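+				// Editorial example: with prevOffset = [8, 4, 2] (Repeated_Offset1..3)
+				// and literals_length = 0, an offset_value of 1 resolves to 4, an
+				// offset_value of 2 to 2, and an offset_value of 3 to 8-1 = 7; with
+				// literals_length > 0 the values 1, 2 and 3 select 8, 4 and 2 directly.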
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
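+				// Editorial note: v bytes are served from history here; the
+				// remaining seq.ml-v bytes fall through to the current-buffer
+				// copy below.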
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 000000000..65045eabd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,112 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
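+// For litLength <= 63 the code comes straight from llCodeTable; above that it
+// is highBit(litLength) + llDeltaCode. Editorial example: llCode(100) =
+// highBit(100) + 19 = 6 + 19 = 25.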
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/simple_go124.go b/vendor/github.com/klauspost/compress/zstd/simple_go124.go new file mode 100644 index 000000000..2efc0497b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/simple_go124.go @@ -0,0 +1,56 @@ +// Copyright 2025+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +//go:build go1.24 + +package zstd + +import ( + "errors" + "runtime" + "sync" + "weak" +) + +var weakMu sync.Mutex +var simpleEnc weak.Pointer[Encoder] +var simpleDec weak.Pointer[Decoder] + +// EncodeTo appends the encoded data from src to dst. +func EncodeTo(dst []byte, src []byte) []byte { + weakMu.Lock() + enc := simpleEnc.Value() + if enc == nil { + var err error + enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true)) + if err != nil { + panic("failed to create simple encoder: " + err.Error()) + } + simpleEnc = weak.Make(enc) + } + weakMu.Unlock() + + return enc.EncodeAll(src, dst) +} + +// DecodeTo appends the decoded data from src to dst. +// The maximum decoded size is 1GiB, +// not including what may already be in dst. +func DecodeTo(dst []byte, src []byte) ([]byte, error) { + weakMu.Lock() + dec := simpleDec.Value() + if dec == nil { + var err error + dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30)) + if err != nil { + weakMu.Unlock() + return nil, errors.New("failed to create simple decoder: " + err.Error()) + } + runtime.SetFinalizer(dec, func(d *Decoder) { + d.Close() + }) + simpleDec = weak.Make(dec) + } + weakMu.Unlock() + return dec.DecodeAll(src, dst) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 000000000..336c28893 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,434 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. + ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. 
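+//
+// A minimal usage sketch (editorial; variable names are illustrative):
+//
+//	var c SnappyConverter
+//	written, err := c.Convert(snappyIn, zstdOut)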
+func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + var n int + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, r.err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). 
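+			// Editorial note: per the framing spec the chunk body is a 4-byte
+			// masked CRC-32C of the uncompressed data followed by the data
+			// itself; the checksum is verified below before the bytes are
+			// re-emitted as zstd literals.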
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, r.err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := range len(snappyMagicBody) { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. + if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 000000000..3198d7189 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. 
All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() any { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) + if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. 
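+//
+// A hedged usage sketch with the standard library archive/zip:
+//
+//	zr, _ := zip.NewReader(readerAt, size)
+//	zr.RegisterDecompressor(ZipMethodWinZip, ZipDecompressor())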
+func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 000000000..1a869710d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,126 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "errors" + "log" + "math" + + "github.com/klauspost/compress/internal/le" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. + ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. 
+ ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. + ErrEncoderClosed = errors.New("encoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...any) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...any) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) + } +} + +func load3232(b []byte, i int32) uint32 { + return le.Load32(b, i) +} + +func load6432(b []byte, i int32) uint64 { + return le.Load64(b, i) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/moby/sys/capability/CHANGELOG.md b/vendor/github.com/moby/sys/capability/CHANGELOG.md deleted file mode 100644 index 299b36d92..000000000 --- a/vendor/github.com/moby/sys/capability/CHANGELOG.md +++ /dev/null @@ -1,124 +0,0 @@ -# Changelog -This file documents all notable changes made to this project since the initial fork -from https://github.com/syndtr/gocapability/commit/42c35b4376354fd5. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [0.4.0] - 2024-11-11 - -### Added -* New separate API for ambient ([GetAmbient], [SetAmbient], [ResetAmbient]) - and bound ([GetBound], [DropBound]) capabilities, modelled after libcap. (#176) - -### Fixed -* [Apply] now returns an error if called for non-zero `pid`. Before this change, - it could silently change some capabilities of the current process, instead of - the one identified by the `pid`. (#168, #174) -* Fixed tests that change capabilities to be run in a separate process. (#173) -* Other improvements in tests. (#169, #170) - -### Changed -* Use raw syscalls (which are slightly faster). (#176) -* Most tests are now limited to testing the public API of the package. (#162) -* Simplify parsing /proc/*pid*/status, add a test case. (#162) -* Optimize the number of syscall to set ambient capabilities in Apply - by clearing them first; add a test case. (#163, #164) -* Better documentation for [Apply], [NewFile], [NewFile2], [NewPid], [NewPid2]. (#175) - -### Removed -* `.golangci.yml` and `.codespellrc` are no longer part of the package. (#158) - -## [0.3.0] - 2024-09-25 - -### Added -* Added [ListKnown] and [ListSupported] functions. (#153) -* [LastCap] is now available on non-Linux platforms (where it returns an error). (#152) - -### Changed -* [List] is now deprecated in favor of [ListKnown] and [ListSupported]. (#153) - -### Fixed -* Various documentation improvements. (#151) -* Fix "generated code" comment. (#153) - -## [0.2.0] - 2024-09-16 - -This is the first release after the move to a new home in -github.com/moby/sys/capability. - -### Fixed - * Fixed URLs in documentation to reflect the new home. - -## [0.1.1] - 2024-08-01 - -This is a maintenance release, fixing a few minor issues. - -### Fixed - * Fixed future kernel compatibility, for real this time. [#11] - * Fixed [LastCap] to be a function. 
[#12] - -## [0.1.0] - 2024-07-31 - -This is an initial release since the fork. - -### Breaking changes - - * The `CAP_LAST_CAP` variable is removed; users need to modify the code to - use [LastCap] to get the value. [#6] - * The code now requires Go >= 1.21. - -### Added - * `go.mod` and `go.sum` files. [#2] - * New [LastCap] function. [#6] - * Basic CI using GHA infra. [#8], [#9] - * README and CHANGELOG. [#10] - -### Fixed - * Fixed ambient capabilities error handling in [Apply]. [#3] - * Fixed future kernel compatibility. [#1] - * Fixed various linter warnings. [#4], [#7] - -### Changed - * Go build tags changed from old-style (`+build`) to new Go 1.17+ style (`go:build`). [#2] - -### Removed - * Removed support for capabilities v1 and v2. [#1] - * Removed init function so programs that use this package start faster. [#6] - * Removed `CAP_LAST_CAP` (use [LastCap] instead). [#6] - - -[Apply]: https://pkg.go.dev/github.com/moby/sys/capability#Capabilities.Apply -[DropBound]: https://pkg.go.dev/github.com/moby/sys/capability#DropBound -[GetAmbient]: https://pkg.go.dev/github.com/moby/sys/capability#GetAmbient -[GetBound]: https://pkg.go.dev/github.com/moby/sys/capability#GetBound -[LastCap]: https://pkg.go.dev/github.com/moby/sys/capability#LastCap -[ListKnown]: https://pkg.go.dev/github.com/moby/sys/capability#ListKnown -[ListSupported]: https://pkg.go.dev/github.com/moby/sys/capability#ListSupported -[List]: https://pkg.go.dev/github.com/moby/sys/capability#List -[NewFile2]: https://pkg.go.dev/github.com/moby/sys/capability#NewFile2 -[NewFile]: https://pkg.go.dev/github.com/moby/sys/capability#NewFile -[NewPid2]: https://pkg.go.dev/github.com/moby/sys/capability#NewPid2 -[NewPid]: https://pkg.go.dev/github.com/moby/sys/capability#NewPid -[ResetAmbient]: https://pkg.go.dev/github.com/moby/sys/capability#ResetAmbient -[SetAmbient]: https://pkg.go.dev/github.com/moby/sys/capability#SetAmbient - - -[0.4.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.4.0 -[0.3.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.3.0 -[0.2.0]: https://github.com/moby/sys/releases/tag/capability%2Fv0.2.0 -[0.1.1]: https://github.com/kolyshkin/capability/compare/v0.1.0...v0.1.1 -[0.1.0]: https://github.com/kolyshkin/capability/compare/42c35b4376354fd5...v0.1.0 - - -[#1]: https://github.com/kolyshkin/capability/pull/1 -[#2]: https://github.com/kolyshkin/capability/pull/2 -[#3]: https://github.com/kolyshkin/capability/pull/3 -[#4]: https://github.com/kolyshkin/capability/pull/4 -[#6]: https://github.com/kolyshkin/capability/pull/6 -[#7]: https://github.com/kolyshkin/capability/pull/7 -[#8]: https://github.com/kolyshkin/capability/pull/8 -[#9]: https://github.com/kolyshkin/capability/pull/9 -[#10]: https://github.com/kolyshkin/capability/pull/10 -[#11]: https://github.com/kolyshkin/capability/pull/11 -[#12]: https://github.com/kolyshkin/capability/pull/12 diff --git a/vendor/github.com/moby/sys/capability/LICENSE b/vendor/github.com/moby/sys/capability/LICENSE deleted file mode 100644 index 08adcd6ec..000000000 --- a/vendor/github.com/moby/sys/capability/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright 2023 The Capability Authors. -Copyright 2013 Suryandaru Triandana -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/moby/sys/capability/README.md b/vendor/github.com/moby/sys/capability/README.md deleted file mode 100644 index 84b74871a..000000000 --- a/vendor/github.com/moby/sys/capability/README.md +++ /dev/null @@ -1,13 +0,0 @@ -This is a fork of (apparently no longer maintained) -https://github.com/syndtr/gocapability package. It provides basic primitives to -work with [Linux capabilities][capabilities(7)]. - -For changes, see [CHANGELOG.md](./CHANGELOG.md). - -[![Go Reference](https://pkg.go.dev/badge/github.com/moby/sys/capability/capability.svg)](https://pkg.go.dev/github.com/moby/sys/capability) - -## Alternatives - - * https://pkg.go.dev/kernel.org/pub/linux/libs/security/libcap/cap - -[capabilities(7)]: https://man7.org/linux/man-pages/man7/capabilities.7.html diff --git a/vendor/github.com/moby/sys/capability/capability.go b/vendor/github.com/moby/sys/capability/capability.go deleted file mode 100644 index 11e47bed7..000000000 --- a/vendor/github.com/moby/sys/capability/capability.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2023 The Capability Authors. -// Copyright 2013 Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package capability provides utilities for manipulating POSIX capabilities. -package capability - -type Capabilities interface { - // Get check whether a capability present in the given - // capabilities set. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Get(which CapType, what Cap) bool - - // Empty check whether all capability bits of the given capabilities - // set are zero. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Empty(which CapType) bool - - // Full check whether all capability bits of the given capabilities - // set are one. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Full(which CapType) bool - - // Set sets capabilities of the given capabilities sets. The - // 'which' value should be one or combination (OR'ed) of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Set(which CapType, caps ...Cap) - - // Unset unsets capabilities of the given capabilities sets. The - // 'which' value should be one or combination (OR'ed) of EFFECTIVE, - // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. - Unset(which CapType, caps ...Cap) - - // Fill sets all bits of the given capabilities kind to one. 
The - // 'kind' value should be one or combination (OR'ed) of CAPS, - // BOUNDS or AMBS. - Fill(kind CapType) - - // Clear sets all bits of the given capabilities kind to zero. The - // 'kind' value should be one or combination (OR'ed) of CAPS, - // BOUNDS or AMBS. - Clear(kind CapType) - - // String return current capabilities state of the given capabilities - // set as string. The 'which' value should be one of EFFECTIVE, - // PERMITTED, INHERITABLE BOUNDING or AMBIENT - StringCap(which CapType) string - - // String return current capabilities state as string. - String() string - - // Load load actual capabilities value. This will overwrite all - // outstanding changes. - Load() error - - // Apply apply the capabilities settings, so all changes made by - // [Set], [Unset], [Fill], or [Clear] will take effect. - Apply(kind CapType) error -} - -// NewPid initializes a new [Capabilities] object for given pid when -// it is nonzero, or for the current process if pid is 0. -// -// Deprecated: replace with [NewPid2] followed by optional [Capabilities.Load] -// (only if needed). For example, replace: -// -// c, err := NewPid(0) -// if err != nil { -// return err -// } -// -// with: -// -// c, err := NewPid2(0) -// if err != nil { -// return err -// } -// err = c.Load() -// if err != nil { -// return err -// } -func NewPid(pid int) (Capabilities, error) { - c, err := newPid(pid) - if err != nil { - return c, err - } - err = c.Load() - return c, err -} - -// NewPid2 initializes a new [Capabilities] object for given pid when -// it is nonzero, or for the current process if pid is 0. This -// does not load the process's current capabilities; if needed, -// call [Capabilities.Load]. -func NewPid2(pid int) (Capabilities, error) { - return newPid(pid) -} - -// NewFile initializes a new Capabilities object for given file path. -// -// Deprecated: replace with [NewFile2] followed by optional [Capabilities.Load] -// (only if needed). For example, replace: -// -// c, err := NewFile(path) -// if err != nil { -// return err -// } -// -// with: -// -// c, err := NewFile2(path) -// if err != nil { -// return err -// } -// err = c.Load() -// if err != nil { -// return err -// } -func NewFile(path string) (Capabilities, error) { - c, err := newFile(path) - if err != nil { - return c, err - } - err = c.Load() - return c, err -} - -// NewFile2 creates a new initialized [Capabilities] object for given -// file path. This does not load the process's current capabilities; -// if needed, call [Capabilities.Load]. -func NewFile2(path string) (Capabilities, error) { - return newFile(path) -} - -// LastCap returns highest valid capability of the running kernel, -// or an error if it can not be obtained. -// -// See also: [ListSupported]. -func LastCap() (Cap, error) { - return lastCap() -} - -// GetAmbient determines if a specific ambient capability is raised in the -// calling thread. -func GetAmbient(c Cap) (bool, error) { - return getAmbient(c) -} - -// SetAmbient raises or lowers specified ambient capabilities for the calling -// thread. To complete successfully, the prevailing effective capability set -// must have a raised CAP_SETPCAP. Further, to raise a specific ambient -// capability the inheritable and permitted sets of the calling thread must -// already contain the specified capability. -func SetAmbient(raise bool, caps ...Cap) error { - return setAmbient(raise, caps...) -} - -// ResetAmbient resets all of the ambient capabilities for the calling thread -// to their lowered value. 
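For context on the API this patch drops from the vendor tree: the Capabilities interface above is meant to be used as a load/modify/apply cycle. A minimal sketch of that cycle, assuming the pre-removal github.com/moby/sys/capability module is still importable (Apply will fail without the required privileges):

package main

import (
    "fmt"

    "github.com/moby/sys/capability"
)

func main() {
    // NewPid2(0) targets the current process without reading its
    // state; Load is the optional second step recommended by the
    // deprecation notes above.
    c, err := capability.NewPid2(0)
    if err != nil {
        panic(err)
    }
    if err := c.Load(); err != nil {
        panic(err)
    }
    // Stage a change in memory, then Apply commits it.
    c.Set(capability.EFFECTIVE|capability.PERMITTED, capability.CAP_NET_BIND_SERVICE)
    if err := c.Apply(capability.CAPS); err != nil {
        panic(err)
    }
    fmt.Println(c.String())
}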
-func ResetAmbient() error { - return resetAmbient() -} - -// GetBound determines if a specific bounding capability is raised in the -// calling thread. -func GetBound(c Cap) (bool, error) { - return getBound(c) -} - -// DropBound lowers the specified bounding set capability. -func DropBound(caps ...Cap) error { - return dropBound(caps...) -} diff --git a/vendor/github.com/moby/sys/capability/capability_linux.go b/vendor/github.com/moby/sys/capability/capability_linux.go deleted file mode 100644 index 234b1efb2..000000000 --- a/vendor/github.com/moby/sys/capability/capability_linux.go +++ /dev/null @@ -1,591 +0,0 @@ -// Copyright 2023 The Capability Authors. -// Copyright 2013 Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package capability - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" - "sync" - "syscall" -) - -const ( - linuxCapVer1 = 0x19980330 // No longer supported. - linuxCapVer2 = 0x20071026 // No longer supported. - linuxCapVer3 = 0x20080522 -) - -var lastCap = sync.OnceValues(func() (Cap, error) { - f, err := os.Open("/proc/sys/kernel/cap_last_cap") - if err != nil { - return 0, err - } - - buf := make([]byte, 11) - l, err := f.Read(buf) - f.Close() - if err != nil { - return 0, err - } - buf = buf[:l] - - last, err := strconv.Atoi(strings.TrimSpace(string(buf))) - if err != nil { - return 0, err - } - return Cap(last), nil -}) - -func capUpperMask() uint32 { - last, err := lastCap() - if err != nil || last < 32 { - return 0 - } - return (uint32(1) << (uint(last) - 31)) - 1 -} - -func mkStringCap(c Capabilities, which CapType) (ret string) { - last, err := lastCap() - if err != nil { - return "" - } - for i, first := Cap(0), true; i <= last; i++ { - if !c.Get(which, i) { - continue - } - if first { - first = false - } else { - ret += ", " - } - ret += i.String() - } - return -} - -func mkString(c Capabilities, max CapType) (ret string) { - ret = "{" - for i := CapType(1); i <= max; i <<= 1 { - ret += " " + i.String() + "=\"" - if c.Empty(i) { - ret += "empty" - } else if c.Full(i) { - ret += "full" - } else { - ret += c.StringCap(i) - } - ret += "\"" - } - ret += " }" - return -} - -var capVersion = sync.OnceValues(func() (uint32, error) { - var hdr capHeader - err := capget(&hdr, nil) - return hdr.version, err -}) - -func newPid(pid int) (c Capabilities, retErr error) { - ver, err := capVersion() - if err != nil { - retErr = fmt.Errorf("unable to get capability version from the kernel: %w", err) - return - } - switch ver { - case linuxCapVer1, linuxCapVer2: - retErr = errors.New("old/unsupported capability version (kernel older than 2.6.26?)") - default: - // Either linuxCapVer3, or an unknown/future version (such as v4). - // In the latter case, we fall back to v3 as the latest version known - // to this package, as kernel should be backward-compatible to v3. 
- p := new(capsV3) - p.hdr.version = linuxCapVer3 - p.hdr.pid = int32(pid) - c = p - } - return -} - -func ignoreEINVAL(err error) error { - if errors.Is(err, syscall.EINVAL) { - err = nil - } - return err -} - -type capsV3 struct { - hdr capHeader - data [2]capData - bounds [2]uint32 - ambient [2]uint32 -} - -func (c *capsV3) Get(which CapType, what Cap) bool { - var i uint - if what > 31 { - i = uint(what) >> 5 - what %= 32 - } - - switch which { - case EFFECTIVE: - return (1< 31 { - i = uint(what) >> 5 - what %= 32 - } - - if which&EFFECTIVE != 0 { - c.data[i].effective |= 1 << uint(what) - } - if which&PERMITTED != 0 { - c.data[i].permitted |= 1 << uint(what) - } - if which&INHERITABLE != 0 { - c.data[i].inheritable |= 1 << uint(what) - } - if which&BOUNDING != 0 { - c.bounds[i] |= 1 << uint(what) - } - if which&AMBIENT != 0 { - c.ambient[i] |= 1 << uint(what) - } - } -} - -func (c *capsV3) Unset(which CapType, caps ...Cap) { - for _, what := range caps { - var i uint - if what > 31 { - i = uint(what) >> 5 - what %= 32 - } - - if which&EFFECTIVE != 0 { - c.data[i].effective &= ^(1 << uint(what)) - } - if which&PERMITTED != 0 { - c.data[i].permitted &= ^(1 << uint(what)) - } - if which&INHERITABLE != 0 { - c.data[i].inheritable &= ^(1 << uint(what)) - } - if which&BOUNDING != 0 { - c.bounds[i] &= ^(1 << uint(what)) - } - if which&AMBIENT != 0 { - c.ambient[i] &= ^(1 << uint(what)) - } - } -} - -func (c *capsV3) Fill(kind CapType) { - if kind&CAPS == CAPS { - c.data[0].effective = 0xffffffff - c.data[0].permitted = 0xffffffff - c.data[0].inheritable = 0 - c.data[1].effective = 0xffffffff - c.data[1].permitted = 0xffffffff - c.data[1].inheritable = 0 - } - - if kind&BOUNDS == BOUNDS { - c.bounds[0] = 0xffffffff - c.bounds[1] = 0xffffffff - } - if kind&AMBS == AMBS { - c.ambient[0] = 0xffffffff - c.ambient[1] = 0xffffffff - } -} - -func (c *capsV3) Clear(kind CapType) { - if kind&CAPS == CAPS { - c.data[0].effective = 0 - c.data[0].permitted = 0 - c.data[0].inheritable = 0 - c.data[1].effective = 0 - c.data[1].permitted = 0 - c.data[1].inheritable = 0 - } - - if kind&BOUNDS == BOUNDS { - c.bounds[0] = 0 - c.bounds[1] = 0 - } - if kind&AMBS == AMBS { - c.ambient[0] = 0 - c.ambient[1] = 0 - } -} - -func (c *capsV3) StringCap(which CapType) (ret string) { - return mkStringCap(c, which) -} - -func (c *capsV3) String() (ret string) { - return mkString(c, BOUNDING) -} - -func (c *capsV3) Load() (err error) { - err = capget(&c.hdr, &c.data[0]) - if err != nil { - return - } - - path := "/proc/self/status" - if c.hdr.pid != 0 { - path = fmt.Sprintf("/proc/%d/status", c.hdr.pid) - } - - f, err := os.Open(path) - if err != nil { - return - } - b := bufio.NewReader(f) - for { - line, e := b.ReadString('\n') - if e != nil { - if e != io.EOF { - err = e - } - break - } - if val, ok := strings.CutPrefix(line, "CapBnd:\t"); ok { - _, err = fmt.Sscanf(val, "%08x%08x", &c.bounds[1], &c.bounds[0]) - if err != nil { - break - } - continue - } - if val, ok := strings.CutPrefix(line, "CapAmb:\t"); ok { - _, err = fmt.Sscanf(val, "%08x%08x", &c.ambient[1], &c.ambient[0]) - if err != nil { - break - } - continue - } - } - f.Close() - - return -} - -func (c *capsV3) Apply(kind CapType) error { - if c.hdr.pid != 0 { - return errors.New("unable to modify capabilities of another process") - } - last, err := LastCap() - if err != nil { - return err - } - if kind&BOUNDS == BOUNDS { - var data [2]capData - err = capget(&c.hdr, &data[0]) - if err != nil { - return err - } - if (1< 0, nil -} - -func setAmbient(raise 
bool, caps ...Cap) error { - op := pr_CAP_AMBIENT_RAISE - if !raise { - op = pr_CAP_AMBIENT_LOWER - } - for _, val := range caps { - err := prctl(pr_CAP_AMBIENT, op, uintptr(val)) - if err != nil { - return err - } - } - return nil -} - -func resetAmbient() error { - return prctl(pr_CAP_AMBIENT, pr_CAP_AMBIENT_CLEAR_ALL, 0) -} - -func getBound(c Cap) (bool, error) { - res, err := prctlRetInt(syscall.PR_CAPBSET_READ, uintptr(c), 0) - if err != nil { - return false, err - } - return res > 0, nil -} - -func dropBound(caps ...Cap) error { - for _, val := range caps { - err := prctl(syscall.PR_CAPBSET_DROP, uintptr(val), 0) - if err != nil { - return err - } - } - return nil -} - -func newFile(path string) (c Capabilities, err error) { - c = &capsFile{path: path} - return -} - -type capsFile struct { - path string - data vfscapData -} - -func (c *capsFile) Get(which CapType, what Cap) bool { - var i uint - if what > 31 { - if c.data.version == 1 { - return false - } - i = uint(what) >> 5 - what %= 32 - } - - switch which { - case EFFECTIVE: - return (1< 31 { - if c.data.version == 1 { - continue - } - i = uint(what) >> 5 - what %= 32 - } - - if which&EFFECTIVE != 0 { - c.data.effective[i] |= 1 << uint(what) - } - if which&PERMITTED != 0 { - c.data.data[i].permitted |= 1 << uint(what) - } - if which&INHERITABLE != 0 { - c.data.data[i].inheritable |= 1 << uint(what) - } - } -} - -func (c *capsFile) Unset(which CapType, caps ...Cap) { - for _, what := range caps { - var i uint - if what > 31 { - if c.data.version == 1 { - continue - } - i = uint(what) >> 5 - what %= 32 - } - - if which&EFFECTIVE != 0 { - c.data.effective[i] &= ^(1 << uint(what)) - } - if which&PERMITTED != 0 { - c.data.data[i].permitted &= ^(1 << uint(what)) - } - if which&INHERITABLE != 0 { - c.data.data[i].inheritable &= ^(1 << uint(what)) - } - } -} - -func (c *capsFile) Fill(kind CapType) { - if kind&CAPS == CAPS { - c.data.effective[0] = 0xffffffff - c.data.data[0].permitted = 0xffffffff - c.data.data[0].inheritable = 0 - if c.data.version == 2 { - c.data.effective[1] = 0xffffffff - c.data.data[1].permitted = 0xffffffff - c.data.data[1].inheritable = 0 - } - } -} - -func (c *capsFile) Clear(kind CapType) { - if kind&CAPS == CAPS { - c.data.effective[0] = 0 - c.data.data[0].permitted = 0 - c.data.data[0].inheritable = 0 - if c.data.version == 2 { - c.data.effective[1] = 0 - c.data.data[1].permitted = 0 - c.data.data[1].inheritable = 0 - } - } -} - -func (c *capsFile) StringCap(which CapType) (ret string) { - return mkStringCap(c, which) -} - -func (c *capsFile) String() (ret string) { - return mkString(c, INHERITABLE) -} - -func (c *capsFile) Load() (err error) { - return getVfsCap(c.path, &c.data) -} - -func (c *capsFile) Apply(kind CapType) (err error) { - if kind&CAPS == CAPS { - return setVfsCap(c.path, &c.data) - } - return -} diff --git a/vendor/github.com/moby/sys/capability/capability_noop.go b/vendor/github.com/moby/sys/capability/capability_noop.go deleted file mode 100644 index b766e444f..000000000 --- a/vendor/github.com/moby/sys/capability/capability_noop.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2023 The Capability Authors. -// Copyright 2013 Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
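The ambient and bounding helpers above are thin wrappers over prctl(2). A hedged sketch of the call pattern, under the same import assumption as before:

package main

import (
    "log"

    "github.com/moby/sys/capability"
)

func main() {
    // Raising an ambient capability needs CAP_SETPCAP in the
    // effective set, and the target capability must already be in
    // the permitted and inheritable sets (see SetAmbient's contract
    // above); expect an error otherwise.
    if err := capability.SetAmbient(true, capability.CAP_NET_BIND_SERVICE); err != nil {
        log.Fatal(err)
    }
    ok, err := capability.GetAmbient(capability.CAP_NET_BIND_SERVICE)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("ambient CAP_NET_BIND_SERVICE raised: %v", ok)
    // ResetAmbient lowers all ambient capabilities again.
    _ = capability.ResetAmbient()
}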
- -//go:build !linux - -package capability - -import "errors" - -var errNotSup = errors.New("not supported") - -func newPid(_ int) (Capabilities, error) { - return nil, errNotSup -} - -func newFile(_ string) (Capabilities, error) { - return nil, errNotSup -} - -func lastCap() (Cap, error) { - return -1, errNotSup -} - -func getAmbient(_ Cap) (bool, error) { - return false, errNotSup -} - -func setAmbient(_ bool, _ ...Cap) error { - return errNotSup -} - -func resetAmbient() error { - return errNotSup -} - -func getBound(_ Cap) (bool, error) { - return false, errNotSup -} - -func dropBound(_ ...Cap) error { - return errNotSup -} diff --git a/vendor/github.com/moby/sys/capability/enum.go b/vendor/github.com/moby/sys/capability/enum.go deleted file mode 100644 index f88593310..000000000 --- a/vendor/github.com/moby/sys/capability/enum.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2024 The Capability Authors. -// Copyright 2013 Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package capability - -import "slices" - -type CapType uint - -func (c CapType) String() string { - switch c { - case EFFECTIVE: - return "effective" - case PERMITTED: - return "permitted" - case INHERITABLE: - return "inheritable" - case BOUNDING: - return "bounding" - case CAPS: - return "caps" - case AMBIENT: - return "ambient" - } - return "unknown" -} - -const ( - EFFECTIVE CapType = 1 << iota - PERMITTED - INHERITABLE - BOUNDING - AMBIENT - - CAPS = EFFECTIVE | PERMITTED | INHERITABLE - BOUNDS = BOUNDING - AMBS = AMBIENT -) - -//go:generate go run enumgen/gen.go -type Cap int - -// POSIX-draft defined capabilities and Linux extensions. -// -// Defined in https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h -const ( - // In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this - // overrides the restriction of changing file ownership and group - // ownership. - CAP_CHOWN = Cap(0) - - // Override all DAC access, including ACL execute access if - // [_POSIX_ACL] is defined. Excluding DAC access covered by - // CAP_LINUX_IMMUTABLE. - CAP_DAC_OVERRIDE = Cap(1) - - // Overrides all DAC restrictions regarding read and search on files - // and directories, including ACL restrictions if [_POSIX_ACL] is - // defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE. - CAP_DAC_READ_SEARCH = Cap(2) - - // Overrides all restrictions about allowed operations on files, where - // file owner ID must be equal to the user ID, except where CAP_FSETID - // is applicable. It doesn't override MAC and DAC restrictions. - CAP_FOWNER = Cap(3) - - // Overrides the following restrictions that the effective user ID - // shall match the file owner ID when setting the S_ISUID and S_ISGID - // bits on that file; that the effective group ID (or one of the - // supplementary group IDs) shall match the file owner ID when setting - // the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are - // cleared on successful return from chown(2) (not implemented). - CAP_FSETID = Cap(4) - - // Overrides the restriction that the real or effective user ID of a - // process sending a signal must match the real or effective user ID - // of the process receiving the signal. - CAP_KILL = Cap(5) - - // Allows setgid(2) manipulation - // Allows setgroups(2) - // Allows forged gids on socket credentials passing. - CAP_SETGID = Cap(6) - - // Allows set*uid(2) manipulation (including fsuid). 
- // Allows forged pids on socket credentials passing. - CAP_SETUID = Cap(7) - - // Linux-specific capabilities - - // Without VFS support for capabilities: - // Transfer any capability in your permitted set to any pid, - // remove any capability in your permitted set from any pid - // With VFS support for capabilities (neither of above, but) - // Add any capability from current's capability bounding set - // to the current process' inheritable set - // Allow taking bits out of capability bounding set - // Allow modification of the securebits for a process - CAP_SETPCAP = Cap(8) - - // Allow modification of S_IMMUTABLE and S_APPEND file attributes - CAP_LINUX_IMMUTABLE = Cap(9) - - // Allows binding to TCP/UDP sockets below 1024 - // Allows binding to ATM VCIs below 32 - CAP_NET_BIND_SERVICE = Cap(10) - - // Allow broadcasting, listen to multicast - CAP_NET_BROADCAST = Cap(11) - - // Allow interface configuration - // Allow administration of IP firewall, masquerading and accounting - // Allow setting debug option on sockets - // Allow modification of routing tables - // Allow setting arbitrary process / process group ownership on - // sockets - // Allow binding to any address for transparent proxying (also via NET_RAW) - // Allow setting TOS (type of service) - // Allow setting promiscuous mode - // Allow clearing driver statistics - // Allow multicasting - // Allow read/write of device-specific registers - // Allow activation of ATM control sockets - CAP_NET_ADMIN = Cap(12) - - // Allow use of RAW sockets - // Allow use of PACKET sockets - // Allow binding to any address for transparent proxying (also via NET_ADMIN) - CAP_NET_RAW = Cap(13) - - // Allow locking of shared memory segments - // Allow mlock and mlockall (which doesn't really have anything to do - // with IPC) - CAP_IPC_LOCK = Cap(14) - - // Override IPC ownership checks - CAP_IPC_OWNER = Cap(15) - - // Insert and remove kernel modules - modify kernel without limit - CAP_SYS_MODULE = Cap(16) - - // Allow ioperm/iopl access - // Allow sending USB messages to any device via /proc/bus/usb - CAP_SYS_RAWIO = Cap(17) - - // Allow use of chroot() - CAP_SYS_CHROOT = Cap(18) - - // Allow ptrace() of any process - CAP_SYS_PTRACE = Cap(19) - - // Allow configuration of process accounting - CAP_SYS_PACCT = Cap(20) - - // Allow configuration of the secure attention key - // Allow administration of the random device - // Allow examination and configuration of disk quotas - // Allow setting the domainname - // Allow setting the hostname - // Allow calling bdflush() - // Allow mount() and umount(), setting up new smb connection - // Allow some autofs root ioctls - // Allow nfsservctl - // Allow VM86_REQUEST_IRQ - // Allow to read/write pci config on alpha - // Allow irix_prctl on mips (setstacksize) - // Allow flushing all cache on m68k (sys_cacheflush) - // Allow removing semaphores - // Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores - // and shared memory - // Allow locking/unlocking of shared memory segment - // Allow turning swap on/off - // Allow forged pids on socket credentials passing - // Allow setting readahead and flushing buffers on block devices - // Allow setting geometry in floppy driver - // Allow turning DMA on/off in xd driver - // Allow administration of md devices (mostly the above, but some - // extra ioctls) - // Allow tuning the ide driver - // Allow access to the nvram device - // Allow administration of apm_bios, serial and bttv (TV) device - // Allow manufacturer commands in isdn CAPI support 
driver - // Allow reading non-standardized portions of pci configuration space - // Allow DDI debug ioctl on sbpcd driver - // Allow setting up serial ports - // Allow sending raw qic-117 commands - // Allow enabling/disabling tagged queuing on SCSI controllers and sending - // arbitrary SCSI commands - // Allow setting encryption key on loopback filesystem - // Allow setting zone reclaim policy - // Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility - CAP_SYS_ADMIN = Cap(21) - - // Allow use of reboot() - CAP_SYS_BOOT = Cap(22) - - // Allow raising priority and setting priority on other (different - // UID) processes - // Allow use of FIFO and round-robin (realtime) scheduling on own - // processes and setting the scheduling algorithm used by another - // process. - // Allow setting cpu affinity on other processes - CAP_SYS_NICE = Cap(23) - - // Override resource limits. Set resource limits. - // Override quota limits. - // Override reserved space on ext2 filesystem - // Modify data journaling mode on ext3 filesystem (uses journaling - // resources) - // NOTE: ext2 honors fsuid when checking for resource overrides, so - // you can override using fsuid too - // Override size restrictions on IPC message queues - // Allow more than 64hz interrupts from the real-time clock - // Override max number of consoles on console allocation - // Override max number of keymaps - // Control memory reclaim behavior - CAP_SYS_RESOURCE = Cap(24) - - // Allow manipulation of system clock - // Allow irix_stime on mips - // Allow setting the real-time clock - CAP_SYS_TIME = Cap(25) - - // Allow configuration of tty devices - // Allow vhangup() of tty - CAP_SYS_TTY_CONFIG = Cap(26) - - // Allow the privileged aspects of mknod() - CAP_MKNOD = Cap(27) - - // Allow taking of leases on files - CAP_LEASE = Cap(28) - - CAP_AUDIT_WRITE = Cap(29) - CAP_AUDIT_CONTROL = Cap(30) - CAP_SETFCAP = Cap(31) - - // Override MAC access. - // The base kernel enforces no MAC policy. - // An LSM may enforce a MAC policy, and if it does and it chooses - // to implement capability based overrides of that policy, this is - // the capability it should use to do so. - CAP_MAC_OVERRIDE = Cap(32) - - // Allow MAC configuration or state changes. - // The base kernel requires no MAC configuration. - // An LSM may enforce a MAC policy, and if it does and it chooses - // to implement capability based checks on modifications to that - // policy or the data required to maintain it, this is the - // capability it should use to do so. 
- CAP_MAC_ADMIN = Cap(33) - - // Allow configuring the kernel's syslog (printk behaviour) - CAP_SYSLOG = Cap(34) - - // Allow triggering something that will wake the system - CAP_WAKE_ALARM = Cap(35) - - // Allow preventing system suspends - CAP_BLOCK_SUSPEND = Cap(36) - - // Allow reading the audit log via multicast netlink socket - CAP_AUDIT_READ = Cap(37) - - // Allow system performance and observability privileged operations - // using perf_events, i915_perf and other kernel subsystems - CAP_PERFMON = Cap(38) - - // CAP_BPF allows the following BPF operations: - // - Creating all types of BPF maps - // - Advanced verifier features - // - Indirect variable access - // - Bounded loops - // - BPF to BPF function calls - // - Scalar precision tracking - // - Larger complexity limits - // - Dead code elimination - // - And potentially other features - // - Loading BPF Type Format (BTF) data - // - Retrieve xlated and JITed code of BPF programs - // - Use bpf_spin_lock() helper - // - // CAP_PERFMON relaxes the verifier checks further: - // - BPF progs can use of pointer-to-integer conversions - // - speculation attack hardening measures are bypassed - // - bpf_probe_read to read arbitrary kernel memory is allowed - // - bpf_trace_printk to print kernel memory is allowed - // - // CAP_SYS_ADMIN is required to use bpf_probe_write_user. - // - // CAP_SYS_ADMIN is required to iterate system wide loaded - // programs, maps, links, BTFs and convert their IDs to file descriptors. - // - // CAP_PERFMON and CAP_BPF are required to load tracing programs. - // CAP_NET_ADMIN and CAP_BPF are required to load networking programs. - CAP_BPF = Cap(39) - - // Allow checkpoint/restore related operations. - // Introduced in kernel 5.9 - CAP_CHECKPOINT_RESTORE = Cap(40) -) - -// List returns the list of all capabilities known to the package. -// -// Deprecated: use [ListKnown] or [ListSupported] instead. -func List() []Cap { - return ListKnown() -} - -// ListKnown returns the list of all capabilities known to the package. -func ListKnown() []Cap { - return list() -} - -// ListSupported returns the list of all capabilities known to the package, -// except those that are not supported by the currently running Linux kernel. -func ListSupported() ([]Cap, error) { - last, err := LastCap() - if err != nil { - return nil, err - } - return slices.DeleteFunc(list(), func(c Cap) bool { - // Remove caps not supported by the kernel. - return c > last - }), nil -} diff --git a/vendor/github.com/moby/sys/capability/enum_gen.go b/vendor/github.com/moby/sys/capability/enum_gen.go deleted file mode 100644 index f72cd43a6..000000000 --- a/vendor/github.com/moby/sys/capability/enum_gen.go +++ /dev/null @@ -1,137 +0,0 @@ -// Code generated by go generate; DO NOT EDIT. 
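ListSupported above is ListKnown trimmed to the running kernel via LastCap, so it never reports capabilities newer than cap_last_cap. A small usage sketch, assuming the module is importable:

package main

import (
    "fmt"

    "github.com/moby/sys/capability"
)

func main() {
    caps, err := capability.ListSupported()
    if err != nil {
        fmt.Println("lookup failed:", err)
        return
    }
    for _, c := range caps {
        fmt.Println(c.String()) // e.g. "chown", "bpf"
    }
}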
- -package capability - -func (c Cap) String() string { - switch c { - case CAP_CHOWN: - return "chown" - case CAP_DAC_OVERRIDE: - return "dac_override" - case CAP_DAC_READ_SEARCH: - return "dac_read_search" - case CAP_FOWNER: - return "fowner" - case CAP_FSETID: - return "fsetid" - case CAP_KILL: - return "kill" - case CAP_SETGID: - return "setgid" - case CAP_SETUID: - return "setuid" - case CAP_SETPCAP: - return "setpcap" - case CAP_LINUX_IMMUTABLE: - return "linux_immutable" - case CAP_NET_BIND_SERVICE: - return "net_bind_service" - case CAP_NET_BROADCAST: - return "net_broadcast" - case CAP_NET_ADMIN: - return "net_admin" - case CAP_NET_RAW: - return "net_raw" - case CAP_IPC_LOCK: - return "ipc_lock" - case CAP_IPC_OWNER: - return "ipc_owner" - case CAP_SYS_MODULE: - return "sys_module" - case CAP_SYS_RAWIO: - return "sys_rawio" - case CAP_SYS_CHROOT: - return "sys_chroot" - case CAP_SYS_PTRACE: - return "sys_ptrace" - case CAP_SYS_PACCT: - return "sys_pacct" - case CAP_SYS_ADMIN: - return "sys_admin" - case CAP_SYS_BOOT: - return "sys_boot" - case CAP_SYS_NICE: - return "sys_nice" - case CAP_SYS_RESOURCE: - return "sys_resource" - case CAP_SYS_TIME: - return "sys_time" - case CAP_SYS_TTY_CONFIG: - return "sys_tty_config" - case CAP_MKNOD: - return "mknod" - case CAP_LEASE: - return "lease" - case CAP_AUDIT_WRITE: - return "audit_write" - case CAP_AUDIT_CONTROL: - return "audit_control" - case CAP_SETFCAP: - return "setfcap" - case CAP_MAC_OVERRIDE: - return "mac_override" - case CAP_MAC_ADMIN: - return "mac_admin" - case CAP_SYSLOG: - return "syslog" - case CAP_WAKE_ALARM: - return "wake_alarm" - case CAP_BLOCK_SUSPEND: - return "block_suspend" - case CAP_AUDIT_READ: - return "audit_read" - case CAP_PERFMON: - return "perfmon" - case CAP_BPF: - return "bpf" - case CAP_CHECKPOINT_RESTORE: - return "checkpoint_restore" - } - return "unknown" -} - -func list() []Cap { - return []Cap{ - CAP_CHOWN, - CAP_DAC_OVERRIDE, - CAP_DAC_READ_SEARCH, - CAP_FOWNER, - CAP_FSETID, - CAP_KILL, - CAP_SETGID, - CAP_SETUID, - CAP_SETPCAP, - CAP_LINUX_IMMUTABLE, - CAP_NET_BIND_SERVICE, - CAP_NET_BROADCAST, - CAP_NET_ADMIN, - CAP_NET_RAW, - CAP_IPC_LOCK, - CAP_IPC_OWNER, - CAP_SYS_MODULE, - CAP_SYS_RAWIO, - CAP_SYS_CHROOT, - CAP_SYS_PTRACE, - CAP_SYS_PACCT, - CAP_SYS_ADMIN, - CAP_SYS_BOOT, - CAP_SYS_NICE, - CAP_SYS_RESOURCE, - CAP_SYS_TIME, - CAP_SYS_TTY_CONFIG, - CAP_MKNOD, - CAP_LEASE, - CAP_AUDIT_WRITE, - CAP_AUDIT_CONTROL, - CAP_SETFCAP, - CAP_MAC_OVERRIDE, - CAP_MAC_ADMIN, - CAP_SYSLOG, - CAP_WAKE_ALARM, - CAP_BLOCK_SUSPEND, - CAP_AUDIT_READ, - CAP_PERFMON, - CAP_BPF, - CAP_CHECKPOINT_RESTORE, - } -} diff --git a/vendor/github.com/moby/sys/capability/syscall_linux.go b/vendor/github.com/moby/sys/capability/syscall_linux.go deleted file mode 100644 index 2d8faa85f..000000000 --- a/vendor/github.com/moby/sys/capability/syscall_linux.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2024 The Capability Authors. -// Copyright 2013 Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
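The generated String/list pair gives every known Cap a stable lowercase name. capByName below is a hypothetical helper (not part of the package) showing how that pair can resolve names back to values:

package main

import (
    "fmt"
    "strings"

    "github.com/moby/sys/capability"
)

// capByName resolves "net_admin" or "CAP_NET_ADMIN" back to a Cap by
// scanning ListKnown for a matching generated String(). Hypothetical
// helper, not part of the package.
func capByName(name string) (capability.Cap, bool) {
    name = strings.ToLower(strings.TrimPrefix(strings.ToUpper(name), "CAP_"))
    for _, c := range capability.ListKnown() {
        if c.String() == name {
            return c, true
        }
    }
    return 0, false
}

func main() {
    if c, ok := capByName("CAP_SYS_ADMIN"); ok {
        fmt.Println(int(c)) // 21
    }
}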
- -package capability - -import ( - "syscall" - "unsafe" -) - -type capHeader struct { - version uint32 - pid int32 -} - -type capData struct { - effective uint32 - permitted uint32 - inheritable uint32 -} - -func capget(hdr *capHeader, data *capData) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = e1 - } - return -} - -func capset(hdr *capHeader, data *capData) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = e1 - } - return -} - -// not yet in syscall -const ( - pr_CAP_AMBIENT = 47 - pr_CAP_AMBIENT_IS_SET = uintptr(1) - pr_CAP_AMBIENT_RAISE = uintptr(2) - pr_CAP_AMBIENT_LOWER = uintptr(3) - pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4) -) - -func prctl(option int, arg2, arg3 uintptr) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_PRCTL, uintptr(option), arg2, arg3) - if e1 != 0 { - err = e1 - } - return -} - -func prctlRetInt(option int, arg2, arg3 uintptr) (int, error) { - ret, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, uintptr(option), arg2, arg3) - if err != 0 { - return 0, err - } - return int(ret), nil -} - -const ( - vfsXattrName = "security.capability" - - vfsCapVerMask = 0xff000000 - vfsCapVer1 = 0x01000000 - vfsCapVer2 = 0x02000000 - - vfsCapFlagMask = ^vfsCapVerMask - vfsCapFlageffective = 0x000001 - - vfscapDataSizeV1 = 4 * (1 + 2*1) - vfscapDataSizeV2 = 4 * (1 + 2*2) -) - -type vfscapData struct { - magic uint32 - data [2]struct { - permitted uint32 - inheritable uint32 - } - effective [2]uint32 - version int8 -} - -var _vfsXattrName *byte - -func init() { - _vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName) -} - -func getVfsCap(path string, dest *vfscapData) (err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := syscall.RawSyscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0) - if e1 != 0 { - if e1 == syscall.ENODATA { - dest.version = 2 - return - } - err = e1 - } - switch dest.magic & vfsCapVerMask { - case vfsCapVer1: - dest.version = 1 - if r0 != vfscapDataSizeV1 { - return syscall.EINVAL - } - dest.data[1].permitted = 0 - dest.data[1].inheritable = 0 - case vfsCapVer2: - dest.version = 2 - if r0 != vfscapDataSizeV2 { - return syscall.EINVAL - } - default: - return syscall.EINVAL - } - if dest.magic&vfsCapFlageffective != 0 { - dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable - dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable - } else { - dest.effective[0] = 0 - dest.effective[1] = 0 - } - return -} - -func setVfsCap(path string, data *vfscapData) (err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(path) - if err != nil { - return - } - var size uintptr - if data.version == 1 { - data.magic = vfsCapVer1 - size = vfscapDataSizeV1 - } else if data.version == 2 { - data.magic = vfsCapVer2 - if data.effective[0] != 0 || data.effective[1] != 0 { - data.magic |= vfsCapFlageffective - } - size = vfscapDataSizeV2 - } else { - return syscall.EINVAL - } - _, _, e1 := syscall.RawSyscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/moby/sys/mountinfo/LICENSE 
b/vendor/github.com/moby/sys/mountinfo/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/moby/sys/mountinfo/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/moby/sys/mountinfo/doc.go b/vendor/github.com/moby/sys/mountinfo/doc.go
deleted file mode 100644
index b80e05efd..000000000
--- a/vendor/github.com/moby/sys/mountinfo/doc.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Package mountinfo provides a set of functions to retrieve information about OS mounts.
-//
-// Currently it supports Linux. For historical reasons, there is also some support for FreeBSD and OpenBSD,
-// and a shallow implementation for Windows, but in general this is Linux-only package, so
-// the rest of the document only applies to Linux, unless explicitly specified otherwise.
-//
-// In Linux, information about mounts seen by the current process is available from
-// /proc/self/mountinfo. Note that due to mount namespaces, different processes can
-// see different mounts. A per-process mountinfo table is available from /proc/<pid>/mountinfo,
-// where <pid> is a numerical process identifier.
-//
-// In general, /proc is not a very efficient interface, and mountinfo is not an exception.
-// For example, there is no way to get information about a specific mount point (i.e. it
-// is all-or-nothing). This package tries to hide the /proc ineffectiveness by using
-// parse filters while reading mountinfo. A filter can skip some entries, or stop
-// processing the rest of the file once the needed information is found.
-//
-// For mountinfo filters that accept path as an argument, the path must be absolute,
-// having all symlinks resolved, and being cleaned (i.e. no extra slashes or dots).
-// One way to achieve all of the above is to employ filepath.Abs followed by
-// filepath.EvalSymlinks (the latter calls filepath.Clean on the result so
-// there is no need to explicitly call filepath.Clean).
-//
-// NOTE that in many cases there is no need to consult mountinfo at all. Here are some
-// of the cases where mountinfo should not be parsed:
-//
-// 1. Before performing a mount. Usually, this is not needed, but if required (say to
-//    prevent over-mounts), to check whether a directory is mounted, call os.Lstat
-//    on it and its parent directory, and compare their st.Sys().(*syscall.Stat_t).Dev
-//    fields -- if they differ, then the directory is the mount point. NOTE this does
-//    not work for bind mounts. Optionally, the filesystem type can also be checked
-//    by calling unix.Statfs and checking the Type field (i.e. filesystem type).
-//
-// 2. After performing a mount. If there is no error returned, the mount succeeded;
-//    checking the mount table for a new mount is redundant and expensive.
-//
-// 3. Before performing an unmount. It is more efficient to do an unmount and ignore
-//    a specific error (EINVAL) which tells the directory is not mounted.
-//
-// 4. After performing an unmount. If there is no error returned, the unmount succeeded.
-//
-// 5. To find the mount point root of a specific directory. You can perform os.Stat()
-//    on the directory and traverse up until the Dev field of a parent directory differs.
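Item 1 above compresses the stat-based check into one sentence; a minimal sketch of that device-number comparison (Linux-centric, and, as the note says, blind to bind mounts):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "syscall"
)

// isMountPoint compares the st_dev of dir and its parent, per item 1
// above. The path should be absolute, symlink-free and clean.
func isMountPoint(dir string) (bool, error) {
    st, err := os.Lstat(dir)
    if err != nil {
        return false, err
    }
    parent, err := os.Lstat(filepath.Dir(dir))
    if err != nil {
        return false, err
    }
    return st.Sys().(*syscall.Stat_t).Dev != parent.Sys().(*syscall.Stat_t).Dev, nil
}

func main() {
    ok, err := isMountPoint("/proc")
    fmt.Println(ok, err)
}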
-package mountinfo diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go deleted file mode 100644 index 58f13c269..000000000 --- a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go +++ /dev/null @@ -1,101 +0,0 @@ -package mountinfo - -import ( - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -// MountedFast is a method of detecting a mount point without reading -// mountinfo from procfs. A caller can only trust the result if no error -// and sure == true are returned. Otherwise, other methods (e.g. parsing -// /proc/mounts) have to be used. If unsure, use Mounted instead (which -// uses MountedFast, but falls back to parsing mountinfo if needed). -// -// If a non-existent path is specified, an appropriate error is returned. -// In case the caller is not interested in this particular error, it should -// be handled separately using e.g. errors.Is(err, fs.ErrNotExist). -// -// This function is only available on Linux. When available (since kernel -// v5.6), openat2(2) syscall is used to reliably detect all mounts. Otherwise, -// the implementation falls back to using stat(2), which can reliably detect -// normal (but not bind) mounts. -func MountedFast(path string) (mounted, sure bool, err error) { - // Root is always mounted. - if path == string(os.PathSeparator) { - return true, true, nil - } - - path, err = normalizePath(path) - if err != nil { - return false, false, err - } - mounted, sure, err = mountedFast(path) - return -} - -// mountedByOpenat2 is a method of detecting a mount that works for all kinds -// of mounts (incl. bind mounts), but requires a recent (v5.6+) linux kernel. -func mountedByOpenat2(path string) (bool, error) { - dir, last := filepath.Split(path) - - dirfd, err := unix.Openat2(unix.AT_FDCWD, dir, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - }) - if err != nil { - return false, &os.PathError{Op: "openat2", Path: dir, Err: err} - } - fd, err := unix.Openat2(dirfd, last, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC | unix.O_NOFOLLOW, - Resolve: unix.RESOLVE_NO_XDEV, - }) - _ = unix.Close(dirfd) - switch err { - case nil: // definitely not a mount - _ = unix.Close(fd) - return false, nil - case unix.EXDEV: // definitely a mount - return true, nil - } - // not sure - return false, &os.PathError{Op: "openat2", Path: path, Err: err} -} - -// mountedFast is similar to MountedFast, except it expects a normalized path. -func mountedFast(path string) (mounted, sure bool, err error) { - // Root is always mounted. - if path == string(os.PathSeparator) { - return true, true, nil - } - - // Try a fast path, using openat2() with RESOLVE_NO_XDEV. - mounted, err = mountedByOpenat2(path) - if err == nil { - return mounted, true, nil - } - - // Another fast path: compare st.st_dev fields. - mounted, err = mountedByStat(path) - // This does not work for bind mounts, so false negative - // is possible, therefore only trust if return is true. - if mounted && err == nil { - return true, true, nil - } - - return -} - -func mounted(path string) (bool, error) { - path, err := normalizePath(path) - if err != nil { - return false, err - } - mounted, sure, err := mountedFast(path) - if sure && err == nil { - return mounted, nil - } - - // Fallback to parsing mountinfo. 
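MountedFast's three-value return is easy to misuse; per its doc comment, the result is only trustworthy when sure is true, with Mounted as the fallback. A sketch of the intended pattern (the path is a placeholder):

package main

import (
    "log"

    "github.com/moby/sys/mountinfo"
)

func main() {
    path := "/var/lib/docker" // hypothetical path
    mounted, sure, err := mountinfo.MountedFast(path)
    if err != nil {
        log.Fatal(err)
    }
    if !sure {
        // openat2/stat could not decide (e.g. a possible bind mount
        // on a pre-5.6 kernel): fall back to the full check, which
        // ends up parsing mountinfo.
        mounted, err = mountinfo.Mounted(path)
        if err != nil {
            log.Fatal(err)
        }
    }
    log.Printf("%s mounted: %v", path, mounted)
}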
- return mountedByMountinfo(path) -} diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go deleted file mode 100644 index c7b7678f9..000000000 --- a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build linux || freebsd || openbsd || darwin -// +build linux freebsd openbsd darwin - -package mountinfo - -import ( - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -func mountedByStat(path string) (bool, error) { - var st unix.Stat_t - - if err := unix.Lstat(path, &st); err != nil { - return false, &os.PathError{Op: "stat", Path: path, Err: err} - } - dev := st.Dev - parent := filepath.Dir(path) - if err := unix.Lstat(parent, &st); err != nil { - return false, &os.PathError{Op: "stat", Path: parent, Err: err} - } - if dev != st.Dev { - // Device differs from that of parent, - // so definitely a mount point. - return true, nil - } - // NB: this does not detect bind mounts on Linux. - return false, nil -} - -func normalizePath(path string) (realPath string, err error) { - if realPath, err = filepath.Abs(path); err != nil { - return "", err - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", err - } - if _, err := os.Stat(realPath); err != nil { - return "", err - } - return realPath, nil -} - -func mountedByMountinfo(path string) (bool, error) { - entries, err := GetMounts(SingleEntryFilter(path)) - if err != nil { - return false, err - } - - return len(entries) > 0, nil -} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo.go b/vendor/github.com/moby/sys/mountinfo/mountinfo.go deleted file mode 100644 index 574aeb876..000000000 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo.go +++ /dev/null @@ -1,67 +0,0 @@ -package mountinfo - -import ( - "os" -) - -// GetMounts retrieves a list of mounts for the current running process, -// with an optional filter applied (use nil for no filter). -func GetMounts(f FilterFunc) ([]*Info, error) { - return parseMountTable(f) -} - -// Mounted determines if a specified path is a mount point. In case of any -// error, false (and an error) is returned. -// -// If a non-existent path is specified, an appropriate error is returned. -// In case the caller is not interested in this particular error, it should -// be handled separately using e.g. errors.Is(err, fs.ErrNotExist). -func Mounted(path string) (bool, error) { - // root is always mounted - if path == string(os.PathSeparator) { - return true, nil - } - return mounted(path) -} - -// Info reveals information about a particular mounted filesystem. This -// struct is populated from the content in the /proc//mountinfo file. -type Info struct { - // ID is a unique identifier of the mount (may be reused after umount). - ID int - - // Parent is the ID of the parent mount (or of self for the root - // of this mount namespace's mount tree). - Parent int - - // Major and Minor are the major and the minor components of the Dev - // field of unix.Stat_t structure returned by unix.*Stat calls for - // files on this filesystem. - Major, Minor int - - // Root is the pathname of the directory in the filesystem which forms - // the root of this mount. - Root string - - // Mountpoint is the pathname of the mount point relative to the - // process's root directory. - Mountpoint string - - // Options is a comma-separated list of mount options. - Options string - - // Optional are zero or more fields of the form "tag[:value]", - // separated by a space. 
Currently, the possible optional fields are - // "shared", "master", "propagate_from", and "unbindable". For more - // information, see mount_namespaces(7) Linux man page. - Optional string - - // FSType is the filesystem type in the form "type[.subtype]". - FSType string - - // Source is filesystem-specific information, or "none". - Source string - - // VFSOptions is a comma-separated list of superblock options. - VFSOptions string -} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go deleted file mode 100644 index 8420f58c7..000000000 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go +++ /dev/null @@ -1,56 +0,0 @@ -//go:build freebsd || openbsd || darwin -// +build freebsd openbsd darwin - -package mountinfo - -import "golang.org/x/sys/unix" - -// parseMountTable returns information about mounted filesystems -func parseMountTable(filter FilterFunc) ([]*Info, error) { - count, err := unix.Getfsstat(nil, unix.MNT_WAIT) - if err != nil { - return nil, err - } - - entries := make([]unix.Statfs_t, count) - _, err = unix.Getfsstat(entries, unix.MNT_WAIT) - if err != nil { - return nil, err - } - - var out []*Info - for _, entry := range entries { - var skip, stop bool - mountinfo := getMountinfo(&entry) - - if filter != nil { - // filter out entries we're not interested in - skip, stop = filter(mountinfo) - if skip { - continue - } - } - - out = append(out, mountinfo) - if stop { - break - } - } - return out, nil -} - -func mounted(path string) (bool, error) { - path, err := normalizePath(path) - if err != nil { - return false, err - } - // Fast path: compare st.st_dev fields. - // This should always work for FreeBSD and OpenBSD. - mounted, err := mountedByStat(path) - if err == nil { - return mounted, nil - } - - // Fallback to parsing mountinfo - return mountedByMountinfo(path) -} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go deleted file mode 100644 index 16079c3c5..000000000 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go +++ /dev/null @@ -1,63 +0,0 @@ -package mountinfo - -import "strings" - -// FilterFunc is a type defining a callback function for GetMount(), -// used to filter out mountinfo entries we're not interested in, -// and/or stop further processing if we found what we wanted. -// -// It takes a pointer to the Info struct (fully populated with all available -// fields on the GOOS platform), and returns two booleans: -// -// skip: true if the entry should be skipped; -// -// stop: true if parsing should be stopped after the entry. -type FilterFunc func(*Info) (skip, stop bool) - -// PrefixFilter discards all entries whose mount points do not start with, or -// are equal to the path specified in prefix. The prefix path must be absolute, -// have all symlinks resolved, and cleaned (i.e. no extra slashes or dots). -// -// PrefixFilter treats prefix as a path, not a partial prefix, which means that -// given "/foo", "/foo/bar" and "/foobar" entries, PrefixFilter("/foo") returns -// "/foo" and "/foo/bar", and discards "/foobar". -func PrefixFilter(prefix string) FilterFunc { - return func(m *Info) (bool, bool) { - skip := !strings.HasPrefix(m.Mountpoint+"/", prefix+"/") - return skip, false - } -} - -// SingleEntryFilter looks for a specific entry. 
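A FilterFunc, as described above, both prunes entries and can short-circuit parsing. A sketch combining the built-in PrefixFilter with a hand-rolled filter:

package main

import (
    "fmt"

    "github.com/moby/sys/mountinfo"
)

func main() {
    // Built-in filter: everything mounted at or under /var.
    mounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter("/var"))
    if err != nil {
        panic(err)
    }
    for _, m := range mounts {
        fmt.Println(m.Mountpoint, m.FSType)
    }

    // Hand-rolled FilterFunc: keep the first overlay entry and stop.
    overlays, err := mountinfo.GetMounts(func(m *mountinfo.Info) (skip, stop bool) {
        if m.FSType == "overlay" {
            return false, true // keep it, stop scanning
        }
        return true, false // skip, keep going
    })
    if err == nil && len(overlays) > 0 {
        fmt.Println("first overlay:", overlays[0].Mountpoint)
    }
}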
-func SingleEntryFilter(mp string) FilterFunc { - return func(m *Info) (bool, bool) { - if m.Mountpoint == mp { - return false, true // don't skip, stop now - } - return true, false // skip, keep going - } -} - -// ParentsFilter returns all entries whose mount points -// can be parents of a path specified, discarding others. -// -// For example, given /var/lib/docker/something, entries -// like /var/lib/docker, /var and / are returned. -func ParentsFilter(path string) FilterFunc { - return func(m *Info) (bool, bool) { - skip := !strings.HasPrefix(path, m.Mountpoint) - return skip, false - } -} - -// FSTypeFilter returns all entries that match provided fstype(s). -func FSTypeFilter(fstype ...string) FilterFunc { - return func(m *Info) (bool, bool) { - for _, t := range fstype { - if m.FSType == t { - return false, false // don't skip, keep going - } - } - return true, false // skip, keep going - } -} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go deleted file mode 100644 index ecaaa7a9c..000000000 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsdlike.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build freebsd || darwin -// +build freebsd darwin - -package mountinfo - -import "golang.org/x/sys/unix" - -func getMountinfo(entry *unix.Statfs_t) *Info { - return &Info{ - Mountpoint: unix.ByteSliceToString(entry.Mntonname[:]), - FSType: unix.ByteSliceToString(entry.Fstypename[:]), - Source: unix.ByteSliceToString(entry.Mntfromname[:]), - } -} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go deleted file mode 100644 index b32b5c9b1..000000000 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go +++ /dev/null @@ -1,250 +0,0 @@ -package mountinfo - -import ( - "bufio" - "fmt" - "io" - "os" - "runtime" - "strconv" - "strings" - "sync" - - "golang.org/x/sys/unix" -) - -// GetMountsFromReader retrieves a list of mounts from the -// reader provided, with an optional filter applied (use nil -// for no filter). This can be useful in tests or benchmarks -// that provide fake mountinfo data, or when a source other -// than /proc/thread-self/mountinfo needs to be read from. -// -// This function is Linux-specific. 
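
// Editor's illustrative sketch, not part of the patch: exercising
// GetMountsFromReader with fake mountinfo data, as its doc comment above
// suggests for tests. The sample line is the one from proc(5) quoted in the
// parser's own comment.
package main

import (
	"fmt"
	"strings"

	"github.com/moby/sys/mountinfo"
)

func main() {
	data := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue\n"
	infos, err := mountinfo.GetMountsFromReader(strings.NewReader(data), nil)
	if err != nil {
		panic(err)
	}
	for _, m := range infos {
		// Prints: /mnt2 ext3 /dev/root
		fmt.Println(m.Mountpoint, m.FSType, m.Source)
	}
}
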
-func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { - s := bufio.NewScanner(r) - out := []*Info{} - for s.Scan() { - var err error - - /* - See http://man7.org/linux/man-pages/man5/proc.5.html - - 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue - (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) - - (1) mount ID: unique identifier of the mount (may be reused after umount) - (2) parent ID: ID of parent (or of self for the top of the mount tree) - (3) major:minor: value of st_dev for files on filesystem - (4) root: root of the mount within the filesystem - (5) mount point: mount point relative to the process's root - (6) mount options: per mount options - (7) optional fields: zero or more fields of the form "tag[:value]" - (8) separator: marks the end of the optional fields - (9) filesystem type: name of filesystem of the form "type[.subtype]" - (10) mount source: filesystem specific information or "none" - (11) super options: per super block options - - In other words, we have: - * 6 mandatory fields (1)..(6) - * 0 or more optional fields (7) - * a separator field (8) - * 3 mandatory fields (9)..(11) - */ - - text := s.Text() - fields := strings.Split(text, " ") - numFields := len(fields) - if numFields < 10 { - // should be at least 10 fields - return nil, fmt.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields) - } - - // separator field - sepIdx := numFields - 4 - // In Linux <= 3.9 mounting a cifs with spaces in a share - // name (like "//srv/My Docs") _may_ end up having a space - // in the last field of mountinfo (like "unc=//serv/My Docs"). - // Since kernel 3.10-rc1, cifs option "unc=" is ignored, - // so spaces should not appear. - // - // Check for a separator, and work around the spaces bug - for fields[sepIdx] != "-" { - sepIdx-- - if sepIdx == 5 { - return nil, fmt.Errorf("parsing '%s' failed: missing - separator", text) - } - } - - p := &Info{} - - p.Mountpoint, err = unescape(fields[4]) - if err != nil { - return nil, fmt.Errorf("parsing '%s' failed: mount point: %w", fields[4], err) - } - p.FSType, err = unescape(fields[sepIdx+1]) - if err != nil { - return nil, fmt.Errorf("parsing '%s' failed: fstype: %w", fields[sepIdx+1], err) - } - p.Source, err = unescape(fields[sepIdx+2]) - if err != nil { - return nil, fmt.Errorf("parsing '%s' failed: source: %w", fields[sepIdx+2], err) - } - p.VFSOptions = fields[sepIdx+3] - - // ignore any numbers parsing errors, as there should not be any - p.ID, _ = strconv.Atoi(fields[0]) - p.Parent, _ = strconv.Atoi(fields[1]) - mm := strings.SplitN(fields[2], ":", 3) - if len(mm) != 2 { - return nil, fmt.Errorf("parsing '%s' failed: unexpected major:minor pair %s", text, mm) - } - p.Major, _ = strconv.Atoi(mm[0]) - p.Minor, _ = strconv.Atoi(mm[1]) - - p.Root, err = unescape(fields[3]) - if err != nil { - return nil, fmt.Errorf("parsing '%s' failed: root: %w", fields[3], err) - } - - p.Options = fields[5] - - // zero or more optional fields - p.Optional = strings.Join(fields[6:sepIdx], " ") - - // Run the filter after parsing all fields. 
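		// Editor's note: FilterFunc's two results map directly onto the loop
		// controls below: skip drops the entry just parsed, while stop ends
		// the scan once that entry has been handled.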
-		var skip, stop bool
-		if filter != nil {
-			skip, stop = filter(p)
-			if skip {
-				continue
-			}
-		}
-
-		out = append(out, p)
-		if stop {
-			break
-		}
-	}
-	if err := s.Err(); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-var (
-	haveProcThreadSelf     bool
-	haveProcThreadSelfOnce sync.Once
-)
-
-func parseMountTable(filter FilterFunc) (_ []*Info, err error) {
-	haveProcThreadSelfOnce.Do(func() {
-		_, err := os.Stat("/proc/thread-self/mountinfo")
-		haveProcThreadSelf = err == nil
-	})
-
-	// We need to lock ourselves to the current OS thread in order to make sure
-	// that the thread referenced by /proc/thread-self stays alive until we
-	// finish parsing the file.
-	runtime.LockOSThread()
-	defer runtime.UnlockOSThread()
-
-	var f *os.File
-	if haveProcThreadSelf {
-		f, err = os.Open("/proc/thread-self/mountinfo")
-	} else {
-		// On pre-3.17 kernels (such as CentOS 7), we don't have
-		// /proc/thread-self/ so we need to manually construct
-		// /proc/self/task/<tid>/ as a fallback.
-		f, err = os.Open("/proc/self/task/" + strconv.Itoa(unix.Gettid()) + "/mountinfo")
-		if os.IsNotExist(err) {
-			// If /proc/self/task/... failed, it means that our active pid
-			// namespace doesn't match the pid namespace of the /proc mount. In
-			// this case we just have to make do with /proc/self, since there
-			// is no other way of figuring out our tid in a parent pid
-			// namespace on pre-3.17 kernels.
-			f, err = os.Open("/proc/self/mountinfo")
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	return GetMountsFromReader(f, filter)
-}
-
-// PidMountInfo retrieves the list of mounts from a given process' mount
-// namespace. Unless there is a need to get mounts from a mount namespace
-// different from that of a calling process, use GetMounts.
-//
-// This function is Linux-specific.
-//
-// Deprecated: this will be removed before v1; use GetMountsFromReader with
-// opened /proc/<pid>/mountinfo as an argument instead.
-func PidMountInfo(pid int) ([]*Info, error) {
-	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	return GetMountsFromReader(f, nil)
-}
-
-// A few specific characters in mountinfo path entries (root and mountpoint)
-// are escaped using a backslash followed by a character's ascii code in octal.
-//
-// space -- as \040
-// tab (aka \t) -- as \011
-// newline (aka \n) -- as \012
-// backslash (aka \\) -- as \134
-//
-// This function converts path from mountinfo back, i.e. it unescapes the above sequences.
-func unescape(path string) (string, error) {
-	// try to avoid copying
-	if strings.IndexByte(path, '\\') == -1 {
-		return path, nil
-	}
-
-	// The following code is UTF-8 transparent as it only looks for some
-	// specific characters (backslash and 0..7) with values < utf8.RuneSelf,
-	// and everything else is passed through as is.
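	// Editor's worked example for the loop below: in "a\040b" the escape
	// starts at the backslash, c is '0', and folding in the next two octal
	// digits gives v = (0<<3|4)<<3 | 0 = 32, an ASCII space, so the decoded
	// path is "a b".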
- buf := make([]byte, len(path)) - bufLen := 0 - for i := 0; i < len(path); i++ { - if path[i] != '\\' { - buf[bufLen] = path[i] - bufLen++ - continue - } - s := path[i:] - if len(s) < 4 { - // too short - return "", fmt.Errorf("bad escape sequence %q: too short", s) - } - c := s[1] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7': - v := c - '0' - for j := 2; j < 4; j++ { // one digit already; two more - if s[j] < '0' || s[j] > '7' { - return "", fmt.Errorf("bad escape sequence %q: not a digit", s[:3]) - } - x := s[j] - '0' - v = (v << 3) | x - } - if v > 255 { - return "", fmt.Errorf("bad escape sequence %q: out of range" + s[:3]) - } - buf[bufLen] = v - bufLen++ - i += 3 - continue - default: - return "", fmt.Errorf("bad escape sequence %q: not a digit" + s[:3]) - - } - } - - return string(buf[:bufLen]), nil -} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go deleted file mode 100644 index f682c2d3b..000000000 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_openbsd.go +++ /dev/null @@ -1,11 +0,0 @@ -package mountinfo - -import "golang.org/x/sys/unix" - -func getMountinfo(entry *unix.Statfs_t) *Info { - return &Info{ - Mountpoint: unix.ByteSliceToString(entry.F_mntonname[:]), - FSType: unix.ByteSliceToString(entry.F_fstypename[:]), - Source: unix.ByteSliceToString(entry.F_mntfromname[:]), - } -} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go deleted file mode 100644 index c2e64bc81..000000000 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build !windows && !linux && !freebsd && !openbsd && !darwin -// +build !windows,!linux,!freebsd,!openbsd,!darwin - -package mountinfo - -import ( - "fmt" - "runtime" -) - -var errNotImplemented = fmt.Errorf("not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) - -func parseMountTable(_ FilterFunc) ([]*Info, error) { - return nil, errNotImplemented -} - -func mounted(path string) (bool, error) { - return false, errNotImplemented -} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go deleted file mode 100644 index 13fad165e..000000000 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package mountinfo - -func parseMountTable(_ FilterFunc) ([]*Info, error) { - // Do NOT return an error! - return nil, nil -} - -func mounted(_ string) (bool, error) { - return false, nil -} diff --git a/vendor/github.com/moby/sys/user/idtools.go b/vendor/github.com/moby/sys/user/idtools.go deleted file mode 100644 index 595b7a927..000000000 --- a/vendor/github.com/moby/sys/user/idtools.go +++ /dev/null @@ -1,141 +0,0 @@ -package user - -import ( - "fmt" - "os" -) - -// MkdirOpt is a type for options to pass to Mkdir calls -type MkdirOpt func(*mkdirOptions) - -type mkdirOptions struct { - onlyNew bool -} - -// WithOnlyNew is an option for MkdirAllAndChown that will only change ownership and permissions -// on newly created directories. If the directory already exists, it will not be modified -func WithOnlyNew(o *mkdirOptions) { - o.onlyNew = true -} - -// MkdirAllAndChown creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. By default, if the directory already exists, this -// function will still change ownership and permissions. 
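// For example (editor's sketch; the path, mode, and ids are illustrative):
//
//	// create /srv/app/data plus any missing parents, owned by uid/gid 1000
//	err := user.MkdirAllAndChown("/srv/app/data", 0o755, 1000, 1000)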
If WithOnlyNew is passed as an -// option, then only the newly created directories will have ownership and permissions changed. -func MkdirAllAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error { - var options mkdirOptions - for _, opt := range opts { - opt(&options) - } - - return mkdirAs(path, mode, uid, gid, true, options.onlyNew) -} - -// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. -// By default, if the directory already exists, this function still changes ownership and permissions. -// If WithOnlyNew is passed as an option, then only the newly created directory will have ownership -// and permissions changed. -// Note that unlike os.Mkdir(), this function does not return IsExist error -// in case path already exists. -func MkdirAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error { - var options mkdirOptions - for _, opt := range opts { - opt(&options) - } - return mkdirAs(path, mode, uid, gid, false, options.onlyNew) -} - -// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - uid, err := toHost(0, uidMap) - if err != nil { - return -1, -1, err - } - gid, err := toHost(0, gidMap) - if err != nil { - return -1, -1, err - } - return uid, gid, nil -} - -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (int64(hostID) >= m.ParentID) && (int64(hostID) <= (m.ParentID + m.Count - 1)) { - contID := int(m.ID + (int64(hostID) - m.ParentID)) - return contID, nil - } - } - return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID) -} - -// toHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func toHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (int64(contID) >= m.ID) && (int64(contID) <= (m.ID + m.Count - 1)) { - hostID := int(m.ParentID + (int64(contID) - m.ID)) - return hostID, nil - } - } - return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID) -} - -// IdentityMapping contains a mappings of UIDs and GIDs. -// The zero value represents an empty mapping. -type IdentityMapping struct { - UIDMaps []IDMap `json:"UIDMaps"` - GIDMaps []IDMap `json:"GIDMaps"` -} - -// RootPair returns a uid and gid pair for the root user. The error is ignored -// because a root user always exists, and the defaults are correct when the uid -// and gid maps are empty. -func (i IdentityMapping) RootPair() (int, int) { - uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps) - return uid, gid -} - -// ToHost returns the host UID and GID for the container uid, gid. 
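// For instance (editor's worked example, values illustrative): with
// UIDMaps = [{ID: 0, ParentID: 100000, Count: 65536}], container uid 1000
// maps to host uid 100000 + 1000 = 101000.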
-// Remapping is only performed if the ids aren't already the remapped root ids -func (i IdentityMapping) ToHost(uid, gid int) (int, int, error) { - var err error - ruid, rgid := i.RootPair() - - if uid != ruid { - ruid, err = toHost(uid, i.UIDMaps) - if err != nil { - return ruid, rgid, err - } - } - - if gid != rgid { - rgid, err = toHost(gid, i.GIDMaps) - } - return ruid, rgid, err -} - -// ToContainer returns the container UID and GID for the host uid and gid -func (i IdentityMapping) ToContainer(uid, gid int) (int, int, error) { - ruid, err := toContainer(uid, i.UIDMaps) - if err != nil { - return -1, -1, err - } - rgid, err := toContainer(gid, i.GIDMaps) - return ruid, rgid, err -} - -// Empty returns true if there are no id mappings -func (i IdentityMapping) Empty() bool { - return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 -} diff --git a/vendor/github.com/moby/sys/user/idtools_unix.go b/vendor/github.com/moby/sys/user/idtools_unix.go deleted file mode 100644 index 4e39d2446..000000000 --- a/vendor/github.com/moby/sys/user/idtools_unix.go +++ /dev/null @@ -1,143 +0,0 @@ -//go:build !windows - -package user - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "syscall" -) - -func mkdirAs(path string, mode os.FileMode, uid, gid int, mkAll, onlyNew bool) error { - path, err := filepath.Abs(path) - if err != nil { - return err - } - - stat, err := os.Stat(path) - if err == nil { - if !stat.IsDir() { - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - if onlyNew { - return nil - } - - // short-circuit -- we were called with an existing directory and chown was requested - return setPermissions(path, mode, uid, gid, stat) - } - - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. If onlyNew is true, we won't - // chown the full directory path if it exists - var paths []string - if os.IsNotExist(err) { - paths = append(paths, path) - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err = os.Stat(dirPath); os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err = os.MkdirAll(path, mode); err != nil { - return err - } - } else if err = os.Mkdir(path, mode); err != nil { - return err - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err = setPermissions(pathComponent, mode, uid, gid, nil); err != nil { - return err - } - } - return nil -} - -// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested -// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the -// dir is on an NFS share, so don't call chown unless we absolutely must. -// Likewise for setting permissions. 
-func setPermissions(p string, mode os.FileMode, uid, gid int, stat os.FileInfo) error { - if stat == nil { - var err error - stat, err = os.Stat(p) - if err != nil { - return err - } - } - if stat.Mode().Perm() != mode.Perm() { - if err := os.Chmod(p, mode.Perm()); err != nil { - return err - } - } - ssi := stat.Sys().(*syscall.Stat_t) - if ssi.Uid == uint32(uid) && ssi.Gid == uint32(gid) { - return nil - } - return os.Chown(p, uid, gid) -} - -// LoadIdentityMapping takes a requested username and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func LoadIdentityMapping(name string) (IdentityMapping, error) { - // TODO: Consider adding support for calling out to "getent" - usr, err := LookupUser(name) - if err != nil { - return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %w", name, err) - } - - subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr) - if err != nil { - return IdentityMapping{}, err - } - subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr) - if err != nil { - return IdentityMapping{}, err - } - - return IdentityMapping{ - UIDMaps: subuidRanges, - GIDMaps: subgidRanges, - }, nil -} - -func lookupSubRangesFile(path string, usr User) ([]IDMap, error) { - uidstr := strconv.Itoa(usr.Uid) - rangeList, err := ParseSubIDFileFilter(path, func(sid SubID) bool { - return sid.Name == usr.Name || sid.Name == uidstr - }) - if err != nil { - return nil, err - } - if len(rangeList) == 0 { - return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name) - } - - idMap := []IDMap{} - - var containerID int64 - for _, idrange := range rangeList { - idMap = append(idMap, IDMap{ - ID: containerID, - ParentID: idrange.SubID, - Count: idrange.Count, - }) - containerID = containerID + idrange.Count - } - return idMap, nil -} diff --git a/vendor/github.com/moby/sys/user/idtools_windows.go b/vendor/github.com/moby/sys/user/idtools_windows.go deleted file mode 100644 index 9de730caf..000000000 --- a/vendor/github.com/moby/sys/user/idtools_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -package user - -import ( - "os" -) - -// This is currently a wrapper around [os.MkdirAll] since currently -// permissions aren't set through this path, the identity isn't utilized. -// Ownership is handled elsewhere, but in the future could be support here -// too. -func mkdirAs(path string, _ os.FileMode, _, _ int, _, _ bool) error { - return os.MkdirAll(path, 0) -} diff --git a/vendor/github.com/moby/sys/user/lookup_unix.go b/vendor/github.com/moby/sys/user/lookup_unix.go deleted file mode 100644 index f95c1409f..000000000 --- a/vendor/github.com/moby/sys/user/lookup_unix.go +++ /dev/null @@ -1,157 +0,0 @@ -//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" - "strconv" - - "golang.org/x/sys/unix" -) - -// Unix-specific path to the passwd and group formatted files. -const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -// LookupUser looks up a user by their username in /etc/passwd. If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUserFunc(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. 
If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupId -// returns an error. -func LookupUid(uid int) (User, error) { - return lookupUserFunc(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupUserFunc(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. - users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, ErrNoPasswdEntries - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroupFunc(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroupFunc(func(g Group) bool { - return g.Gid == gid - }) -} - -func lookupGroupFunc(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the users. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No user entries found. - if len(groups) == 0 { - return Group{}, ErrNoGroupEntries - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(unix.Getuid()) -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. 
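// Editor's usage sketch for the lookup helpers above; the user name and gid
// are illustrative:
//
//	u, err := user.LookupUser("nobody") // scans /etc/passwd
//	g, err := user.LookupGid(0)         // scans /etc/group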
-func CurrentGroup() (Group, error) { - return LookupGid(unix.Getgid()) -} - -func currentUserSubIDs(fileName string) ([]SubID, error) { - u, err := CurrentUser() - if err != nil { - return nil, err - } - filter := func(entry SubID) bool { - return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid) - } - return ParseSubIDFileFilter(fileName, filter) -} - -func CurrentUserSubUIDs() ([]SubID, error) { - return currentUserSubIDs("/etc/subuid") -} - -func CurrentUserSubGIDs() ([]SubID, error) { - return currentUserSubIDs("/etc/subgid") -} - -func CurrentProcessUIDMap() ([]IDMap, error) { - return ParseIDMapFile("/proc/self/uid_map") -} - -func CurrentProcessGIDMap() ([]IDMap, error) { - return ParseIDMapFile("/proc/self/gid_map") -} diff --git a/vendor/github.com/moby/sys/user/user.go b/vendor/github.com/moby/sys/user/user.go deleted file mode 100644 index 198c49367..000000000 --- a/vendor/github.com/moby/sys/user/user.go +++ /dev/null @@ -1,604 +0,0 @@ -package user - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minID = 0 - maxID = 1<<31 - 1 // for 32-bit systems compatibility -) - -var ( - // ErrNoPasswdEntries is returned if no matching entries were found in /etc/group. - ErrNoPasswdEntries = errors.New("no matching entries in passwd file") - // ErrNoGroupEntries is returned if no matching entries were found in /etc/passwd. - ErrNoGroupEntries = errors.New("no matching entries in group file") - // ErrRange is returned if a UID or GID is outside of the valid range. - ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -// SubID represents an entry in /etc/sub{u,g}id -type SubID struct { - Name string - SubID int64 - Count int64 -} - -// IDMap represents an entry in /proc/PID/{u,g}id_map -type IDMap struct { - ID int64 - ParentID int64 - Count int64 -} - -func parseLine(line []byte, v ...interface{}) { - parseParts(bytes.Split(line, []byte(":")), v...) -} - -func parseParts(parts [][]byte, v ...interface{}) { - if len(parts) == 0 { - return - } - - for i, p := range parts { - // Ignore cases where we don't have enough fields to populate the arguments. - // Some configuration files like to misbehave. - if len(v) <= i { - break - } - - // Use the type of the argument to figure out how to parse it, scanf() style. - // This is legit. - switch e := v[i].(type) { - case *string: - *e = string(p) - case *int: - // "numbers", with conversion errors ignored because of some misbehaving configuration files. - *e, _ = strconv.Atoi(string(p)) - case *int64: - *e, _ = strconv.ParseInt(string(p), 10, 64) - case *[]string: - // Comma-separated lists. - if len(p) != 0 { - *e = strings.Split(string(p), ",") - } else { - *e = []string{} - } - default: - // Someone goof'd when writing code using this function. Scream so they can hear us. - panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! 
%#v is not a pointer!", e)) - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, errors.New("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, errors.New("nil source for group-formatted data") - } - rd := bufio.NewReader(r) - out := []Group{} - - // Read the file line-by-line. - for { - var ( - isPrefix bool - wholeLine []byte - err error - ) - - // Read the next line. We do so in chunks (as much as reader's - // buffer is able to keep), check if we read enough columns - // already on each step and store final result in wholeLine. - for { - var line []byte - line, isPrefix, err = rd.ReadLine() - if err != nil { - // We should return no error if EOF is reached - // without a match. - if err == io.EOF { - err = nil - } - return out, err - } - - // Simple common case: line is short enough to fit in a - // single reader's buffer. - if !isPrefix && len(wholeLine) == 0 { - wholeLine = line - break - } - - wholeLine = append(wholeLine, line...) - - // Check if we read the whole line already. - if !isPrefix { - break - } - } - - // There's no spec for /etc/passwd or /etc/group, but we try to follow - // the same rules as the glibc parser, which allows comments and blank - // space at the beginning of a line. - wholeLine = bytes.TrimSpace(wholeLine) - if len(wholeLine) == 0 || wholeLine[0] == '#' { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List) - - if filter == nil || filter(p) { - out = append(out, p) - } - } -} - -type ExecUser struct { - Uid int - Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. 
It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. -func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - var passwd, group io.Reader - - if passwdFile, err := os.Open(passwdPath); err == nil { - passwd = passwdFile - defer passwdFile.Close() - } - - if groupFile, err := os.Open(groupPath); err == nil { - group = groupFile - defer groupFile.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. -// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// - "" -// - "user" -// - "uid" -// - "user:group" -// - "uid:gid -// - "user:gid" -// - "uid:group" -// -// It should be noted that if you specify a numeric user or group id, they will -// not be evaluated as usernames (only the metadata will be filled). So attempting -// to parse a user with user.Name = "1337" will produce the user with a UID of -// 1337. -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. - if user.Sgids == nil { - user.Sgids = []int{} - } - - // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax - var userArg, groupArg string - parseLine([]byte(userSpec), &userArg, &groupArg) - - // Convert userArg and groupArg to be numeric, so we don't have to execute - // Atoi *twice* for each iteration over lines. - uidArg, uidErr := strconv.Atoi(userArg) - gidArg, gidErr := strconv.Atoi(groupArg) - - // Find the matching user. - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - // Default to current state of the user. - return u.Uid == user.Uid - } - - if uidErr == nil { - // If the userArg is numeric, always treat it as a UID. - return uidArg == u.Uid - } - - return u.Name == userArg - }) - - // If we can't find the user, we have to bail. - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("unable to find user %s: %w", userArg, err) - } - - var matchedUserName string - if len(users) > 0 { - // First match wins, even if there's more than one matching entry. - matchedUserName = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // If we can't find a user with the given username, the only other valid - // option is if it's a numeric username with no associated entry in passwd. - - if uidErr != nil { - // Not numeric. - return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries) - } - user.Uid = uidArg - - // Must be inside valid uid range. - if user.Uid < minID || user.Uid > maxID { - return nil, ErrRange - } - - // Okay, so it's numeric. We can just roll with this. - } - - // On to the groups. 
If we matched a username, we need to do this because of - // the supplementary group IDs. - if groupArg != "" || matchedUserName != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // If the group argument isn't explicit, we'll just search for it. - if groupArg == "" { - // Check if user is a member of this group. - for _, u := range g.List { - if u == matchedUserName { - return true - } - } - return false - } - - if gidErr == nil { - // If the groupArg is numeric, always treat it as a GID. - return gidArg == g.Gid - } - - return g.Name == groupArg - }) - if err != nil && group != nil { - return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err) - } - - // Only start modifying user.Gid if it is in explicit form. - if groupArg != "" { - if len(groups) > 0 { - // First match wins, even if there's more than one matching entry. - user.Gid = groups[0].Gid - } else { - // If we can't find a group with the given name, the only other valid - // option is if it's a numeric group name with no associated entry in group. - - if gidErr != nil { - // Not numeric. - return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries) - } - user.Gid = gidArg - - // Must be inside valid gid range. - if user.Gid < minID || user.Gid > maxID { - return nil, ErrRange - } - - // Okay, so it's numeric. We can just roll with this. - } - } else if len(groups) > 0 { - // Supplementary group ids only make sense if in the implicit form. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroups looks up a list of groups by name or group id -// against the given /etc/group formatted data. If a group name cannot -// be found, an error will be returned. If a group id cannot be found, -// or the given group data is nil, the id will be returned as-is -// provided it is in the legal range. -func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { - groups := []Group{} - if group != nil { - var err error - groups, err = ParseGroupFilter(group, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err) - } - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.ParseInt(ag, 10, 64) - if err != nil { - // Not a numeric ID either. - return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries) - } - // Ensure gid is inside gid range. - if gid < minID || gid > maxID { - return nil, ErrRange - } - gidMap[int(gid)] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} - -// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups -// that opens the groupPath given and gives it as an argument to -// GetAdditionalGroups. 
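
// Editor's sketch: resolving a user spec against in-memory passwd and group
// data, as GetExecUser above is documented to accept. The entries below are
// made up for the example.
package main

import (
	"fmt"
	"strings"

	"github.com/moby/sys/user"
)

func main() {
	passwd := strings.NewReader("www:x:33:33:web:/var/www:/bin/false\n")
	group := strings.NewReader("www:x:33:www\n")
	// "www" resolves through passwd; its gid and supplementary groups come
	// from the group data.
	eu, err := user.GetExecUser("www", nil, passwd, group)
	if err != nil {
		panic(err)
	}
	fmt.Println(eu.Uid, eu.Gid, eu.Home) // 33 33 /var/www
}
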
-func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - var group io.Reader - - if groupFile, err := os.Open(groupPath); err == nil { - group = groupFile - defer groupFile.Close() - } - return GetAdditionalGroups(additionalGroups, group) -} - -func ParseSubIDFile(path string) ([]SubID, error) { - subid, err := os.Open(path) - if err != nil { - return nil, err - } - defer subid.Close() - return ParseSubID(subid) -} - -func ParseSubID(subid io.Reader) ([]SubID, error) { - return ParseSubIDFilter(subid, nil) -} - -func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { - subid, err := os.Open(path) - if err != nil { - return nil, err - } - defer subid.Close() - return ParseSubIDFilter(subid, filter) -} - -func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { - if r == nil { - return nil, errors.New("nil source for subid-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []SubID{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 5 subuid - p := SubID{} - parseLine(line, &p.Name, &p.SubID, &p.Count) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -func ParseIDMapFile(path string) ([]IDMap, error) { - r, err := os.Open(path) - if err != nil { - return nil, err - } - defer r.Close() - return ParseIDMap(r) -} - -func ParseIDMap(r io.Reader) ([]IDMap, error) { - return ParseIDMapFilter(r, nil) -} - -func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { - r, err := os.Open(path) - if err != nil { - return nil, err - } - defer r.Close() - return ParseIDMapFilter(r, filter) -} - -func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { - if r == nil { - return nil, errors.New("nil source for idmap-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []IDMap{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 7 user_namespaces - p := IDMap{} - parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} diff --git a/vendor/github.com/moby/sys/user/user_fuzzer.go b/vendor/github.com/moby/sys/user/user_fuzzer.go deleted file mode 100644 index e018eae61..000000000 --- a/vendor/github.com/moby/sys/user/user_fuzzer.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package user - -import ( - "io" - "strings" -) - -func IsDivisbleBy(n int, divisibleby int) bool { - return (n % divisibleby) == 0 -} - -func FuzzUser(data []byte) int { - if len(data) == 0 { - return -1 - } - if !IsDivisbleBy(len(data), 5) { - return -1 - } - - var divided [][]byte - - chunkSize := len(data) / 5 - - for i := 0; i < len(data); i += chunkSize { - end := i + chunkSize - - divided = append(divided, data[i:end]) - } - - _, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil) - - var passwd, group io.Reader - - group = strings.NewReader(string(divided[1])) - _, _ = GetAdditionalGroups([]string{string(divided[2])}, group) - - passwd = strings.NewReader(string(divided[3])) - _, _ = GetExecUser(string(divided[4]), nil, passwd, group) - return 1 -} diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go 
b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go deleted file mode 100644 index 3ef333387..000000000 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ /dev/null @@ -1,1067 +0,0 @@ -package specs - -import "os" - -// Spec is the base configuration for the container. -type Spec struct { - // Version of the Open Container Initiative Runtime Specification with which the bundle complies. - Version string `json:"ociVersion"` - // Process configures the container process. - Process *Process `json:"process,omitempty"` - // Root configures the container's root filesystem. - Root *Root `json:"root,omitempty"` - // Hostname configures the container's hostname. - Hostname string `json:"hostname,omitempty"` - // Domainname configures the container's domainname. - Domainname string `json:"domainname,omitempty"` - // Mounts configures additional mounts (on top of Root). - Mounts []Mount `json:"mounts,omitempty"` - // Hooks configures callbacks for container lifecycle events. - Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris,zos"` - // Annotations contains arbitrary metadata for the container. - Annotations map[string]string `json:"annotations,omitempty"` - - // Linux is platform-specific configuration for Linux based containers. - Linux *Linux `json:"linux,omitempty" platform:"linux"` - // Solaris is platform-specific configuration for Solaris based containers. - Solaris *Solaris `json:"solaris,omitempty" platform:"solaris"` - // Windows is platform-specific configuration for Windows based containers. - Windows *Windows `json:"windows,omitempty" platform:"windows"` - // VM specifies configuration for virtual-machine-based containers. - VM *VM `json:"vm,omitempty" platform:"vm"` - // ZOS is platform-specific configuration for z/OS based containers. - ZOS *ZOS `json:"zos,omitempty" platform:"zos"` - // FreeBSD is platform-specific configuration for FreeBSD based containers. - FreeBSD *FreeBSD `json:"freebsd,omitempty" platform:"freebsd"` -} - -// Scheduler represents the scheduling attributes for a process. It is based on -// the Linux sched_setattr(2) syscall. -type Scheduler struct { - // Policy represents the scheduling policy (e.g., SCHED_FIFO, SCHED_RR, SCHED_OTHER). - Policy LinuxSchedulerPolicy `json:"policy"` - - // Nice is the nice value for the process, which affects its priority. - Nice int32 `json:"nice,omitempty"` - - // Priority represents the static priority of the process. - Priority int32 `json:"priority,omitempty"` - - // Flags is an array of scheduling flags. - Flags []LinuxSchedulerFlag `json:"flags,omitempty"` - - // The following ones are used by the DEADLINE scheduler. - - // Runtime is the amount of time in nanoseconds during which the process - // is allowed to run in a given period. - Runtime uint64 `json:"runtime,omitempty"` - - // Deadline is the absolute deadline for the process to complete its execution. - Deadline uint64 `json:"deadline,omitempty"` - - // Period is the length of the period in nanoseconds used for determining the process runtime. - Period uint64 `json:"period,omitempty"` -} - -// Process contains information to start a specific application inside the container. -type Process struct { - // Terminal creates an interactive terminal for the container. - Terminal bool `json:"terminal,omitempty"` - // ConsoleSize specifies the size of the console. - ConsoleSize *Box `json:"consoleSize,omitempty"` - // User specifies user information for the process. 
- User User `json:"user"` - // Args specifies the binary and arguments for the application to execute. - Args []string `json:"args,omitempty"` - // CommandLine specifies the full command line for the application to execute on Windows. - CommandLine string `json:"commandLine,omitempty" platform:"windows"` - // Env populates the process environment for the process. - Env []string `json:"env,omitempty"` - // Cwd is the current working directory for the process and must be - // relative to the container's root. - Cwd string `json:"cwd"` - // Capabilities are Linux capabilities that are kept for the process. - Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"` - // Rlimits specifies rlimit options to apply to the process. - Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"` - // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. - NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux,zos"` - // ApparmorProfile specifies the apparmor profile for the container. - ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` - // Specify an oom_score_adj for the container. - OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"` - // Scheduler specifies the scheduling attributes for a process - Scheduler *Scheduler `json:"scheduler,omitempty" platform:"linux"` - // SelinuxLabel specifies the selinux context that the container process is run as. - SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` - // IOPriority contains the I/O priority settings for the cgroup. - IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"` - // ExecCPUAffinity specifies CPU affinity for exec processes. - ExecCPUAffinity *CPUAffinity `json:"execCPUAffinity,omitempty" platform:"linux"` -} - -// LinuxCapabilities specifies the list of allowed capabilities that are kept for a process. -// https://man7.org/linux/man-pages/man7/capabilities.7.html -type LinuxCapabilities struct { - // Bounding is the set of capabilities checked by the kernel. - Bounding []string `json:"bounding,omitempty" platform:"linux"` - // Effective is the set of capabilities checked by the kernel. - Effective []string `json:"effective,omitempty" platform:"linux"` - // Inheritable is the capabilities preserved across execve. - Inheritable []string `json:"inheritable,omitempty" platform:"linux"` - // Permitted is the limiting superset for effective capabilities. - Permitted []string `json:"permitted,omitempty" platform:"linux"` - // Ambient is the ambient set of capabilities that are kept. - Ambient []string `json:"ambient,omitempty" platform:"linux"` -} - -// IOPriority represents I/O priority settings for the container's processes within the process group. -type LinuxIOPriority struct { - Class IOPriorityClass `json:"class"` - Priority int `json:"priority"` -} - -// IOPriorityClass represents an I/O scheduling class. -type IOPriorityClass string - -// Possible values for IOPriorityClass. -const ( - IOPRIO_CLASS_RT IOPriorityClass = "IOPRIO_CLASS_RT" - IOPRIO_CLASS_BE IOPriorityClass = "IOPRIO_CLASS_BE" - IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE" -) - -// CPUAffinity specifies process' CPU affinity. -type CPUAffinity struct { - Initial string `json:"initial,omitempty"` - Final string `json:"final,omitempty"` -} - -// Box specifies dimensions of a rectangle. Used for specifying the size of a console. 
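
// Editor's sketch, not part of the spec package: building a minimal Spec from
// the types in this file and marshaling it to config.json form. The version
// string and field values are illustrative only.
package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	spec := specs.Spec{
		Version: "1.2.0", // illustrative OCI spec version
		Process: &specs.Process{
			Args: []string{"/bin/sh"},
			Cwd:  "/",
		},
		Root: &specs.Root{Path: "rootfs", Readonly: true},
	}
	b, err := json.MarshalIndent(spec, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
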
-type Box struct { - // Height is the vertical dimension of a box. - Height uint `json:"height"` - // Width is the horizontal dimension of a box. - Width uint `json:"width"` -} - -// User specifies specific user (and group) information for the container process. -type User struct { - // UID is the user id. - UID uint32 `json:"uid" platform:"linux,solaris,zos"` - // GID is the group id. - GID uint32 `json:"gid" platform:"linux,solaris,zos"` - // Umask is the umask for the init process. - Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris,zos"` - // AdditionalGids are additional group ids set for the container's process. - AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"` - // Username is the user name. - Username string `json:"username,omitempty" platform:"windows"` -} - -// Root contains information about the container's root filesystem on the host. -type Root struct { - // Path is the absolute path to the container's root filesystem. - Path string `json:"path"` - // Readonly makes the root filesystem for the container readonly before the process is executed. - Readonly bool `json:"readonly,omitempty"` -} - -// Mount specifies a mount for a container. -type Mount struct { - // Destination is the absolute path where the mount will be placed in the container. - Destination string `json:"destination"` - // Type specifies the mount kind. - Type string `json:"type,omitempty" platform:"linux,solaris,zos,freebsd"` - // Source specifies the source path of the mount. - Source string `json:"source,omitempty"` - // Options are fstab style mount options. - Options []string `json:"options,omitempty"` - - // UID/GID mappings used for changing file owners w/o calling chown, fs should support it. - // Every mount point could have its own mapping. - UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty" platform:"linux"` - GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty" platform:"linux"` -} - -// Hook specifies a command that is run at a particular event in the lifecycle of a container -type Hook struct { - Path string `json:"path"` - Args []string `json:"args,omitempty"` - Env []string `json:"env,omitempty"` - Timeout *int `json:"timeout,omitempty"` -} - -// Hooks specifies a command that is run in the container at a particular event in the lifecycle of a container -// Hooks for container setup and teardown -type Hooks struct { - // Prestart is Deprecated. Prestart is a list of hooks to be run before the container process is executed. - // It is called in the Runtime Namespace - // - // Deprecated: use [Hooks.CreateRuntime], [Hooks.CreateContainer], and - // [Hooks.StartContainer] instead, which allow more granular hook control - // during the create and start phase. 
- Prestart []Hook `json:"prestart,omitempty"` - // CreateRuntime is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called - // It is called in the Runtime Namespace - CreateRuntime []Hook `json:"createRuntime,omitempty"` - // CreateContainer is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called - // It is called in the Container Namespace - CreateContainer []Hook `json:"createContainer,omitempty"` - // StartContainer is a list of hooks to be run after the start operation is called but before the container process is started - // It is called in the Container Namespace - StartContainer []Hook `json:"startContainer,omitempty"` - // Poststart is a list of hooks to be run after the container process is started. - // It is called in the Runtime Namespace - Poststart []Hook `json:"poststart,omitempty"` - // Poststop is a list of hooks to be run after the container process exits. - // It is called in the Runtime Namespace - Poststop []Hook `json:"poststop,omitempty"` -} - -// Linux contains platform-specific configuration for Linux based containers. -type Linux struct { - // UIDMapping specifies user mappings for supporting user namespaces. - UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty"` - // GIDMapping specifies group mappings for supporting user namespaces. - GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty"` - // Sysctl are a set of key value pairs that are set for the container on start - Sysctl map[string]string `json:"sysctl,omitempty"` - // Resources contain cgroup information for handling resource constraints - // for the container - Resources *LinuxResources `json:"resources,omitempty"` - // CgroupsPath specifies the path to cgroups that are created and/or joined by the container. - // The path is expected to be relative to the cgroups mountpoint. - // If resources are specified, the cgroups at CgroupsPath will be updated based on resources. - CgroupsPath string `json:"cgroupsPath,omitempty"` - // Namespaces contains the namespaces that are created and/or joined by the container - Namespaces []LinuxNamespace `json:"namespaces,omitempty"` - // Devices are a list of device nodes that are created for the container - Devices []LinuxDevice `json:"devices,omitempty"` - // NetDevices are key-value pairs, keyed by network device name on the host, moved to the container's network namespace. - NetDevices map[string]LinuxNetDevice `json:"netDevices,omitempty"` - // Seccomp specifies the seccomp security settings for the container. - Seccomp *LinuxSeccomp `json:"seccomp,omitempty"` - // RootfsPropagation is the rootfs mount propagation mode for the container. - RootfsPropagation string `json:"rootfsPropagation,omitempty"` - // MaskedPaths masks over the provided paths inside the container. - MaskedPaths []string `json:"maskedPaths,omitempty"` - // ReadonlyPaths sets the provided paths as RO inside the container. - ReadonlyPaths []string `json:"readonlyPaths,omitempty"` - // MountLabel specifies the selinux context for the mounts in the container. - MountLabel string `json:"mountLabel,omitempty"` - // IntelRdt contains Intel Resource Director Technology (RDT) information for - // handling resource constraints and monitoring metrics (e.g., L3 cache, memory bandwidth) for the container - IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` - // MemoryPolicy contains NUMA memory policy for the container. 
-	MemoryPolicy *LinuxMemoryPolicy `json:"memoryPolicy,omitempty"`
-	// Personality contains configuration for the Linux personality syscall
-	Personality *LinuxPersonality `json:"personality,omitempty"`
-	// TimeOffsets specifies the offset for supporting time namespaces.
-	TimeOffsets map[string]LinuxTimeOffset `json:"timeOffsets,omitempty"`
-}
-
-// LinuxNamespace is the configuration for a Linux namespace
-type LinuxNamespace struct {
-	// Type is the type of namespace
-	Type LinuxNamespaceType `json:"type"`
-	// Path is a path to an existing namespace persisted on disk that can be joined
-	// and is of the same type
-	Path string `json:"path,omitempty"`
-}
-
-// LinuxNamespaceType is one of the Linux namespaces
-type LinuxNamespaceType string
-
-const (
-	// PIDNamespace for isolating process IDs
-	PIDNamespace LinuxNamespaceType = "pid"
-	// NetworkNamespace for isolating network devices, stacks, ports, etc
-	NetworkNamespace LinuxNamespaceType = "network"
-	// MountNamespace for isolating mount points
-	MountNamespace LinuxNamespaceType = "mount"
-	// IPCNamespace for isolating System V IPC, POSIX message queues
-	IPCNamespace LinuxNamespaceType = "ipc"
-	// UTSNamespace for isolating hostname and NIS domain name
-	UTSNamespace LinuxNamespaceType = "uts"
-	// UserNamespace for isolating user and group IDs
-	UserNamespace LinuxNamespaceType = "user"
-	// CgroupNamespace for isolating cgroup hierarchies
-	CgroupNamespace LinuxNamespaceType = "cgroup"
-	// TimeNamespace for isolating the clocks
-	TimeNamespace LinuxNamespaceType = "time"
-)
-
-// LinuxIDMapping specifies UID/GID mappings
-type LinuxIDMapping struct {
-	// ContainerID is the starting UID/GID in the container
-	ContainerID uint32 `json:"containerID"`
-	// HostID is the starting UID/GID on the host to be mapped to 'ContainerID'
-	HostID uint32 `json:"hostID"`
-	// Size is the number of IDs to be mapped
-	Size uint32 `json:"size"`
-}
-
-// LinuxTimeOffset specifies the offset for Time Namespace
-type LinuxTimeOffset struct {
-	// Secs is the offset of clock (in secs) in the container
-	Secs int64 `json:"secs,omitempty"`
-	// Nanosecs is the additional offset for Secs (in nanosecs)
-	Nanosecs uint32 `json:"nanosecs,omitempty"`
-}
-
-// POSIXRlimit type and restrictions
-type POSIXRlimit struct {
-	// Type of the rlimit to set
-	Type string `json:"type"`
-	// Hard is the hard limit for the specified type
-	Hard uint64 `json:"hard"`
-	// Soft is the soft limit for the specified type
-	Soft uint64 `json:"soft"`
-}
-
-// LinuxHugepageLimit structure corresponds to limiting kernel hugepages.
-// Default to reservation limits if supported. Otherwise fallback to page fault limits.
-type LinuxHugepageLimit struct {
-	// Pagesize is the hugepage size.
-	// Format: "<size><unit-prefix>B' (e.g. 64KB, 2MB, 1GB, etc.).
-	Pagesize string `json:"pageSize"`
-	// Limit is the limit of "hugepagesize" hugetlb reservations (if supported) or usage.
-	Limit uint64 `json:"limit"`
-}
-
-// LinuxInterfacePriority for network interfaces
-type LinuxInterfacePriority struct {
-	// Name is the name of the network interface
-	Name string `json:"name"`
-	// Priority for the interface
-	Priority uint32 `json:"priority"`
-}
-
-// LinuxBlockIODevice holds major:minor format supported in blkio cgroup
-type LinuxBlockIODevice struct {
-	// Major is the device's major number.
-	Major int64 `json:"major"`
-	// Minor is the device's minor number.
- Minor int64 `json:"minor"` -} - -// LinuxWeightDevice struct holds a `major:minor weight` pair for weightDevice -type LinuxWeightDevice struct { - LinuxBlockIODevice - // Weight is the bandwidth rate for the device. - Weight *uint16 `json:"weight,omitempty"` - // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only - LeafWeight *uint16 `json:"leafWeight,omitempty"` -} - -// LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair -type LinuxThrottleDevice struct { - LinuxBlockIODevice - // Rate is the IO rate limit per cgroup per device - Rate uint64 `json:"rate"` -} - -// LinuxBlockIO for Linux cgroup 'blkio' resource management -type LinuxBlockIO struct { - // Specifies per cgroup weight - Weight *uint16 `json:"weight,omitempty"` - // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, CFQ scheduler only - LeafWeight *uint16 `json:"leafWeight,omitempty"` - // Weight per cgroup per device, can override BlkioWeight - WeightDevice []LinuxWeightDevice `json:"weightDevice,omitempty"` - // IO read rate limit per cgroup per device, bytes per second - ThrottleReadBpsDevice []LinuxThrottleDevice `json:"throttleReadBpsDevice,omitempty"` - // IO write rate limit per cgroup per device, bytes per second - ThrottleWriteBpsDevice []LinuxThrottleDevice `json:"throttleWriteBpsDevice,omitempty"` - // IO read rate limit per cgroup per device, IO per second - ThrottleReadIOPSDevice []LinuxThrottleDevice `json:"throttleReadIOPSDevice,omitempty"` - // IO write rate limit per cgroup per device, IO per second - ThrottleWriteIOPSDevice []LinuxThrottleDevice `json:"throttleWriteIOPSDevice,omitempty"` -} - -// LinuxMemory for Linux cgroup 'memory' resource management -type LinuxMemory struct { - // Memory limit (in bytes). - Limit *int64 `json:"limit,omitempty"` - // Memory reservation or soft_limit (in bytes). - Reservation *int64 `json:"reservation,omitempty"` - // Total memory limit (memory + swap). - Swap *int64 `json:"swap,omitempty"` - // Kernel memory limit (in bytes). - // - // Deprecated: kernel-memory limits are not supported in cgroups v2, and - // were obsoleted in [kernel v5.4]. This field should no longer be used, - // as it may be ignored by runtimes. - // - // [kernel v5.4]: https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0 - Kernel *int64 `json:"kernel,omitempty"` - // Kernel memory limit for tcp (in bytes) - KernelTCP *int64 `json:"kernelTCP,omitempty"` - // How aggressive the kernel will swap memory pages. - Swappiness *uint64 `json:"swappiness,omitempty"` - // DisableOOMKiller disables the OOM killer for out of memory conditions - DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"` - // Enables hierarchical memory accounting - UseHierarchy *bool `json:"useHierarchy,omitempty"` - // CheckBeforeUpdate enables checking if a new memory limit is lower - // than the current usage during update, and if so, rejecting the new - // limit. - CheckBeforeUpdate *bool `json:"checkBeforeUpdate,omitempty"` -} - -// LinuxCPU for Linux cgroup 'cpu' resource management -type LinuxCPU struct { - // CPU shares (relative weight (ratio) vs. other cgroups with cpu shares). - Shares *uint64 `json:"shares,omitempty"` - // CPU hardcap limit (in usecs). Allowed cpu time in a given period. - Quota *int64 `json:"quota,omitempty"` - // CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a - // given period. 
- Burst *uint64 `json:"burst,omitempty"` - // CPU period to be used for hardcapping (in usecs). - Period *uint64 `json:"period,omitempty"` - // How much time realtime scheduling may use (in usecs). - RealtimeRuntime *int64 `json:"realtimeRuntime,omitempty"` - // CPU period to be used for realtime scheduling (in usecs). - RealtimePeriod *uint64 `json:"realtimePeriod,omitempty"` - // CPUs to use within the cpuset. Default is to use any CPU available. - Cpus string `json:"cpus,omitempty"` - // List of memory nodes in the cpuset. Default is to use any available memory node. - Mems string `json:"mems,omitempty"` - // cgroups are configured with minimum weight, 0: default behavior, 1: SCHED_IDLE. - Idle *int64 `json:"idle,omitempty"` -} - -// LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3) -type LinuxPids struct { - // Maximum number of PIDs. Default is "no limit". - Limit *int64 `json:"limit,omitempty"` -} - -// LinuxNetwork identification and priority configuration -type LinuxNetwork struct { - // Set class identifier for container's network packets - ClassID *uint32 `json:"classID,omitempty"` - // Set priority of network traffic for container - Priorities []LinuxInterfacePriority `json:"priorities,omitempty"` -} - -// LinuxRdma for Linux cgroup 'rdma' resource management (Linux 4.11) -type LinuxRdma struct { - // Maximum number of HCA handles that can be opened. Default is "no limit". - HcaHandles *uint32 `json:"hcaHandles,omitempty"` - // Maximum number of HCA objects that can be created. Default is "no limit". - HcaObjects *uint32 `json:"hcaObjects,omitempty"` -} - -// LinuxResources has container runtime resource constraints -type LinuxResources struct { - // Devices configures the device allowlist. - Devices []LinuxDeviceCgroup `json:"devices,omitempty"` - // Memory restriction configuration - Memory *LinuxMemory `json:"memory,omitempty"` - // CPU resource restriction configuration - CPU *LinuxCPU `json:"cpu,omitempty"` - // Task resource restriction configuration. - Pids *LinuxPids `json:"pids,omitempty"` - // BlockIO restriction configuration - BlockIO *LinuxBlockIO `json:"blockIO,omitempty"` - // Hugetlb limits (in bytes). Default to reservation limits if supported. - HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"` - // Network restriction configuration - Network *LinuxNetwork `json:"network,omitempty"` - // Rdma resource restriction configuration. - // Limits are a set of key value pairs that define RDMA resource limits, - // where the key is device name and value is resource limits. - Rdma map[string]LinuxRdma `json:"rdma,omitempty"` - // Unified resources. - Unified map[string]string `json:"unified,omitempty"` -} - -// LinuxDevice represents the mknod information for a Linux special device file -type LinuxDevice struct { - // Path to the device. - Path string `json:"path"` - // Device type, block, char, etc. - Type string `json:"type"` - // Major is the device's major number. - Major int64 `json:"major"` - // Minor is the device's minor number. - Minor int64 `json:"minor"` - // FileMode permission bits for the device. - FileMode *os.FileMode `json:"fileMode,omitempty"` - // UID of the device. - UID *uint32 `json:"uid,omitempty"` - // Gid of the device. 
- GID *uint32 `json:"gid,omitempty"` -} - -// LinuxNetDevice represents a single network device to be added to the container's network namespace -type LinuxNetDevice struct { - // Name of the device in the container namespace - Name string `json:"name,omitempty"` -} - -// LinuxDeviceCgroup represents a device rule for the devices specified to -// the device controller -type LinuxDeviceCgroup struct { - // Allow or deny - Allow bool `json:"allow"` - // Device type, block, char, etc. - Type string `json:"type,omitempty"` - // Major is the device's major number. - Major *int64 `json:"major,omitempty"` - // Minor is the device's minor number. - Minor *int64 `json:"minor,omitempty"` - // Cgroup access permissions format, rwm. - Access string `json:"access,omitempty"` -} - -// LinuxPersonalityDomain refers to a personality domain. -type LinuxPersonalityDomain string - -// LinuxPersonalityFlag refers to an additional personality flag. None are currently defined. -type LinuxPersonalityFlag string - -// Define domain and flags for Personality -const ( - // PerLinux is the standard Linux personality - PerLinux LinuxPersonalityDomain = "LINUX" - // PerLinux32 sets personality to 32 bit - PerLinux32 LinuxPersonalityDomain = "LINUX32" -) - -// LinuxPersonality represents the Linux personality syscall input -type LinuxPersonality struct { - // Domain for the personality - Domain LinuxPersonalityDomain `json:"domain"` - // Additional flags - Flags []LinuxPersonalityFlag `json:"flags,omitempty"` -} - -// Solaris contains platform-specific configuration for Solaris application containers. -type Solaris struct { - // SMF FMRI which should go "online" before we start the container process. - Milestone string `json:"milestone,omitempty"` - // Maximum set of privileges any process in this container can obtain. - LimitPriv string `json:"limitpriv,omitempty"` - // The maximum amount of shared memory allowed for this container. - MaxShmMemory string `json:"maxShmMemory,omitempty"` - // Specification for automatic creation of network resources for this container. - Anet []SolarisAnet `json:"anet,omitempty"` - // Set limit on the amount of CPU time that can be used by container. - CappedCPU *SolarisCappedCPU `json:"cappedCPU,omitempty"` - // The physical and swap caps on the memory that can be used by this container. - CappedMemory *SolarisCappedMemory `json:"cappedMemory,omitempty"` -} - -// SolarisCappedCPU allows users to set limit on the amount of CPU time that can be used by container. -type SolarisCappedCPU struct { - Ncpus string `json:"ncpus,omitempty"` -} - -// SolarisCappedMemory allows users to set the physical and swap caps on the memory that can be used by this container. -type SolarisCappedMemory struct { - Physical string `json:"physical,omitempty"` - Swap string `json:"swap,omitempty"` -} - -// SolarisAnet provides the specification for automatic creation of network resources for this container. -type SolarisAnet struct { - // Specify a name for the automatically created VNIC datalink. - Linkname string `json:"linkname,omitempty"` - // Specify the link over which the VNIC will be created. - Lowerlink string `json:"lowerLink,omitempty"` - // The set of IP addresses that the container can use. - Allowedaddr string `json:"allowedAddress,omitempty"` - // Specifies whether allowedAddress limitation is to be applied to the VNIC. - Configallowedaddr string `json:"configureAllowedAddress,omitempty"` - // The value of the optional default router. 
- Defrouter string `json:"defrouter,omitempty"` - // Enable one or more types of link protection. - Linkprotection string `json:"linkProtection,omitempty"` - // Set the VNIC's macAddress - Macaddress string `json:"macAddress,omitempty"` -} - -// Windows defines the runtime configuration for Windows based containers, including Hyper-V containers. -type Windows struct { - // LayerFolders contains a list of absolute paths to directories containing image layers. - LayerFolders []string `json:"layerFolders"` - // Devices are the list of devices to be mapped into the container. - Devices []WindowsDevice `json:"devices,omitempty"` - // Resources contains information for handling resource constraints for the container. - Resources *WindowsResources `json:"resources,omitempty"` - // CredentialSpec contains a JSON object describing a group Managed Service Account (gMSA) specification. - CredentialSpec interface{} `json:"credentialSpec,omitempty"` - // Servicing indicates if the container is being started in a mode to apply a Windows Update servicing operation. - Servicing bool `json:"servicing,omitempty"` - // IgnoreFlushesDuringBoot indicates if the container is being started in a mode where disk writes are not flushed during its boot process. - IgnoreFlushesDuringBoot bool `json:"ignoreFlushesDuringBoot,omitempty"` - // HyperV contains information for running a container with Hyper-V isolation. - HyperV *WindowsHyperV `json:"hyperv,omitempty"` - // Network restriction configuration. - Network *WindowsNetwork `json:"network,omitempty"` -} - -// WindowsDevice represents information about a host device to be mapped into the container. -type WindowsDevice struct { - // Device identifier: interface class GUID, etc. - ID string `json:"id"` - // Device identifier type: "class", etc. - IDType string `json:"idType"` -} - -// WindowsResources has container runtime resource constraints for containers running on Windows. -type WindowsResources struct { - // Memory restriction configuration. - Memory *WindowsMemoryResources `json:"memory,omitempty"` - // CPU resource restriction configuration. - CPU *WindowsCPUResources `json:"cpu,omitempty"` - // Storage restriction configuration. - Storage *WindowsStorageResources `json:"storage,omitempty"` -} - -// WindowsMemoryResources contains memory resource management settings. -type WindowsMemoryResources struct { - // Memory limit in bytes. - Limit *uint64 `json:"limit,omitempty"` -} - -// WindowsCPUResources contains CPU resource management settings. -type WindowsCPUResources struct { - // Count is the number of CPUs available to the container. It represents the - // fraction of the configured processor `count` in a container in relation - // to the processors available in the host. The fraction ultimately - // determines the portion of processor cycles that the threads in a - // container can use during each scheduling interval, as the number of - // cycles per 10,000 cycles. - Count *uint64 `json:"count,omitempty"` - // Shares limits the share of processor time given to the container relative - // to other workloads on the processor. The processor `shares` (`weight` at - // the platform level) is a value between 0 and 10000. - Shares *uint16 `json:"shares,omitempty"` - // Maximum determines the portion of processor cycles that the threads in a - // container can use during each scheduling interval, as the number of - // cycles per 10,000 cycles. Set processor `maximum` to a percentage times - // 100. 
- Maximum *uint16 `json:"maximum,omitempty"` - // Set of CPUs to affinitize for this container. - Affinity []WindowsCPUGroupAffinity `json:"affinity,omitempty"` -} - -// Similar to _GROUP_AFFINITY struct defined in -// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/miniport/ns-miniport-_group_affinity -type WindowsCPUGroupAffinity struct { - // CPU mask relative to this CPU group. - Mask uint64 `json:"mask,omitempty"` - // Processor group the mask refers to, as returned by GetLogicalProcessorInformationEx. - Group uint32 `json:"group,omitempty"` -} - -// WindowsStorageResources contains storage resource management settings. -type WindowsStorageResources struct { - // Specifies maximum Iops for the system drive. - Iops *uint64 `json:"iops,omitempty"` - // Specifies maximum bytes per second for the system drive. - Bps *uint64 `json:"bps,omitempty"` - // Sandbox size specifies the minimum size of the system drive in bytes. - SandboxSize *uint64 `json:"sandboxSize,omitempty"` -} - -// WindowsNetwork contains network settings for Windows containers. -type WindowsNetwork struct { - // List of HNS endpoints that the container should connect to. - EndpointList []string `json:"endpointList,omitempty"` - // Specifies if unqualified DNS name resolution is allowed. - AllowUnqualifiedDNSQuery bool `json:"allowUnqualifiedDNSQuery,omitempty"` - // Comma separated list of DNS suffixes to use for name resolution. - DNSSearchList []string `json:"DNSSearchList,omitempty"` - // Name (ID) of the container that we will share with the network stack. - NetworkSharedContainerName string `json:"networkSharedContainerName,omitempty"` - // name (ID) of the network namespace that will be used for the container. - NetworkNamespace string `json:"networkNamespace,omitempty"` -} - -// WindowsHyperV contains information for configuring a container to run with Hyper-V isolation. -type WindowsHyperV struct { - // UtilityVMPath is an optional path to the image used for the Utility VM. - UtilityVMPath string `json:"utilityVMPath,omitempty"` -} - -// IOMems contains information about iomem addresses that should be passed to the VM. -type IOMems struct { - // Guest Frame Number to map the iomem range. If GFN is not specified, the mapping will be done to the same Frame Number as was provided in FirstMFN. - FirstGFN *uint64 `json:"firstGFN,omitempty"` - // Physical page number of iomem regions. - FirstMFN *uint64 `json:"firstMFN"` - // Number of pages to be mapped. - NrMFNs *uint64 `json:"nrMFNs"` -} - -// Hardware configuration for the VM image -type HWConfig struct { - // Path to the container device-tree file that should be passed to the VM configuration. - DeviceTree string `json:"deviceTree,omitempty"` - // Number of virtual cpus for the VM. - VCPUs *uint32 `json:"vcpus,omitempty"` - // Maximum memory in bytes allocated to the VM. - Memory *uint64 `json:"memory,omitempty"` - // Host device tree nodes to passthrough to the VM. - DtDevs []string `json:"dtdevs,omitempty"` - // Allow auto-translated domains to access specific hardware I/O memory pages. - IOMems []IOMems `json:"iomems,omitempty"` - // Allows VM to access specific physical IRQs. - Irqs []uint32 `json:"irqs,omitempty"` -} - -// VM contains information for virtual-machine-based containers. -type VM struct { - // Hypervisor specifies hypervisor-related configuration for virtual-machine-based containers. - Hypervisor VMHypervisor `json:"hypervisor,omitempty"` - // Kernel specifies kernel-related configuration for virtual-machine-based containers. 
- Kernel VMKernel `json:"kernel"` - // Image specifies guest image related configuration for virtual-machine-based containers. - Image VMImage `json:"image,omitempty"` - // Hardware configuration that should be passed to the VM. - HwConfig *HWConfig `json:"hwconfig,omitempty"` -} - -// VMHypervisor contains information about the hypervisor to use for a virtual machine. -type VMHypervisor struct { - // Path is the host path to the hypervisor used to manage the virtual machine. - Path string `json:"path"` - // Parameters specifies parameters to pass to the hypervisor. - Parameters []string `json:"parameters,omitempty"` -} - -// VMKernel contains information about the kernel to use for a virtual machine. -type VMKernel struct { - // Path is the host path to the kernel used to boot the virtual machine. - Path string `json:"path"` - // Parameters specifies parameters to pass to the kernel. - Parameters []string `json:"parameters,omitempty"` - // InitRD is the host path to an initial ramdisk to be used by the kernel. - InitRD string `json:"initrd,omitempty"` -} - -// VMImage contains information about the virtual machine root image. -type VMImage struct { - // Path is the host path to the root image that the VM kernel would boot into. - Path string `json:"path"` - // Format is the root image format type (e.g. "qcow2", "raw", "vhd", etc). - Format string `json:"format"` -} - -// LinuxSeccomp represents syscall restrictions -type LinuxSeccomp struct { - DefaultAction LinuxSeccompAction `json:"defaultAction"` - DefaultErrnoRet *uint `json:"defaultErrnoRet,omitempty"` - Architectures []Arch `json:"architectures,omitempty"` - Flags []LinuxSeccompFlag `json:"flags,omitempty"` - ListenerPath string `json:"listenerPath,omitempty"` - ListenerMetadata string `json:"listenerMetadata,omitempty"` - Syscalls []LinuxSyscall `json:"syscalls,omitempty"` -} - -// Arch used for additional architectures -type Arch string - -// LinuxSeccompFlag is a flag to pass to seccomp(2). -type LinuxSeccompFlag string - -const ( - // LinuxSeccompFlagLog is a seccomp flag to request all returned - // actions except SECCOMP_RET_ALLOW to be logged. An administrator may - // override this filter flag by preventing specific actions from being - // logged via the /proc/sys/kernel/seccomp/actions_logged file. (since - // Linux 4.14) - LinuxSeccompFlagLog LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_LOG" - - // LinuxSeccompFlagSpecAllow can be used to disable Speculative Store - // Bypass mitigation. (since Linux 4.17) - LinuxSeccompFlagSpecAllow LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_SPEC_ALLOW" - - // LinuxSeccompFlagWaitKillableRecv can be used to switch to the wait - // killable semantics. 
(since Linux 5.19)
- LinuxSeccompFlagWaitKillableRecv LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV"
-)
-
-// Additional architectures permitted to be used for system calls
-// By default only the native architecture of the kernel is permitted
-const (
- ArchX86 Arch = "SCMP_ARCH_X86"
- ArchX86_64 Arch = "SCMP_ARCH_X86_64"
- ArchX32 Arch = "SCMP_ARCH_X32"
- ArchARM Arch = "SCMP_ARCH_ARM"
- ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
- ArchMIPS Arch = "SCMP_ARCH_MIPS"
- ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
- ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
- ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
- ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
- ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
- ArchPPC Arch = "SCMP_ARCH_PPC"
- ArchPPC64 Arch = "SCMP_ARCH_PPC64"
- ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
- ArchS390 Arch = "SCMP_ARCH_S390"
- ArchS390X Arch = "SCMP_ARCH_S390X"
- ArchPARISC Arch = "SCMP_ARCH_PARISC"
- ArchPARISC64 Arch = "SCMP_ARCH_PARISC64"
- ArchRISCV64 Arch = "SCMP_ARCH_RISCV64"
- ArchLOONGARCH64 Arch = "SCMP_ARCH_LOONGARCH64"
- ArchM68K Arch = "SCMP_ARCH_M68K"
- ArchSH Arch = "SCMP_ARCH_SH"
- ArchSHEB Arch = "SCMP_ARCH_SHEB"
-)
-
-// LinuxSeccompAction taken upon Seccomp rule match
-type LinuxSeccompAction string
-
-// Define actions for Seccomp rules
-const (
- ActKill LinuxSeccompAction = "SCMP_ACT_KILL"
- ActKillProcess LinuxSeccompAction = "SCMP_ACT_KILL_PROCESS"
- ActKillThread LinuxSeccompAction = "SCMP_ACT_KILL_THREAD"
- ActTrap LinuxSeccompAction = "SCMP_ACT_TRAP"
- ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO"
- ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE"
- ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW"
- ActLog LinuxSeccompAction = "SCMP_ACT_LOG"
- ActNotify LinuxSeccompAction = "SCMP_ACT_NOTIFY"
-)
-
-// LinuxSeccompOperator used to match syscall arguments in Seccomp
-type LinuxSeccompOperator string
-
-// Define operators for syscall arguments in Seccomp
-const (
- OpNotEqual LinuxSeccompOperator = "SCMP_CMP_NE"
- OpLessThan LinuxSeccompOperator = "SCMP_CMP_LT"
- OpLessEqual LinuxSeccompOperator = "SCMP_CMP_LE"
- OpEqualTo LinuxSeccompOperator = "SCMP_CMP_EQ"
- OpGreaterEqual LinuxSeccompOperator = "SCMP_CMP_GE"
- OpGreaterThan LinuxSeccompOperator = "SCMP_CMP_GT"
- OpMaskedEqual LinuxSeccompOperator = "SCMP_CMP_MASKED_EQ"
-)
-
-// LinuxSeccompArg used for matching specific syscall arguments in Seccomp
-type LinuxSeccompArg struct {
- Index uint `json:"index"`
- Value uint64 `json:"value"`
- ValueTwo uint64 `json:"valueTwo,omitempty"`
- Op LinuxSeccompOperator `json:"op"`
-}
-
-// LinuxSyscall is used to match a syscall in Seccomp
-type LinuxSyscall struct {
- Names []string `json:"names"`
- Action LinuxSeccompAction `json:"action"`
- ErrnoRet *uint `json:"errnoRet,omitempty"`
- Args []LinuxSeccompArg `json:"args,omitempty"`
-}
-
-// LinuxIntelRdt has container runtime resource constraints for Intel RDT CAT and MBA
-// features and flags enabling Intel RDT CMT and MBM features.
-// Intel RDT features are available in Linux 4.14 and newer kernel versions.
-type LinuxIntelRdt struct {
- // The identity for RDT Class of Service
- ClosID string `json:"closID,omitempty"`
-
- // Schemata specifies the complete schemata to be written as is to the
- // schemata file in resctrl fs. Each element represents a single line in the schemata file.
- // NOTE: This will overwrite schemas specified in the L3CacheSchema and/or
- // MemBwSchema fields.
- Schemata []string `json:"schemata,omitempty"`
-
- // The schema for L3 cache id and capacity bitmask (CBM)
- // Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
- // NOTE: Should not be specified if Schemata is non-empty.
- L3CacheSchema string `json:"l3CacheSchema,omitempty"`
-
- // The schema of memory bandwidth per L3 cache id
- // Format: "MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;..."
- // The unit of memory bandwidth is specified in "percentages" by
- // default, and in "MBps" if MBA Software Controller is enabled.
- // NOTE: Should not be specified if Schemata is non-empty.
- MemBwSchema string `json:"memBwSchema,omitempty"`
-
- // EnableMonitoring enables resctrl monitoring for the container. This will
- // create a dedicated resctrl monitoring group for the container.
- EnableMonitoring bool `json:"enableMonitoring,omitempty"`
-}
-
-// LinuxMemoryPolicy represents input for the set_mempolicy syscall.
-type LinuxMemoryPolicy struct {
- // Mode for the set_mempolicy syscall.
- Mode MemoryPolicyModeType `json:"mode"`
-
- // Nodes representing the nodemask for the set_mempolicy syscall in comma separated ranges format.
- // Format: "<node0>-<node1>,<node3>,<node5>-<node7>,..."
- Nodes string `json:"nodes"`
-
- // Flags for the set_mempolicy syscall.
- Flags []MemoryPolicyFlagType `json:"flags,omitempty"`
-}
-
-// ZOS contains platform-specific configuration for z/OS based containers.
-type ZOS struct {
- // Namespaces contains the namespaces that are created and/or joined by the container
- Namespaces []ZOSNamespace `json:"namespaces,omitempty"`
-}
-
-// ZOSNamespace is the configuration for a z/OS namespace
-type ZOSNamespace struct {
- // Type is the type of namespace
- Type ZOSNamespaceType `json:"type"`
- // Path is a path to an existing namespace persisted on disk that can be joined
- // and is of the same type
- Path string `json:"path,omitempty"`
-}
-
-// ZOSNamespaceType is one of the z/OS namespaces
-type ZOSNamespaceType string
-
-const (
- // PIDNamespace for isolating process IDs
- ZOSPIDNamespace ZOSNamespaceType = "pid"
- // MountNamespace for isolating mount points
- ZOSMountNamespace ZOSNamespaceType = "mount"
- // IPCNamespace for isolating System V IPC, POSIX message queues
- ZOSIPCNamespace ZOSNamespaceType = "ipc"
- // UTSNamespace for isolating hostname and NIS domain name
- ZOSUTSNamespace ZOSNamespaceType = "uts"
-)
-
-type MemoryPolicyModeType string
-
-const (
- MpolDefault MemoryPolicyModeType = "MPOL_DEFAULT"
- MpolBind MemoryPolicyModeType = "MPOL_BIND"
- MpolInterleave MemoryPolicyModeType = "MPOL_INTERLEAVE"
- MpolWeightedInterleave MemoryPolicyModeType = "MPOL_WEIGHTED_INTERLEAVE"
- MpolPreferred MemoryPolicyModeType = "MPOL_PREFERRED"
- MpolPreferredMany MemoryPolicyModeType = "MPOL_PREFERRED_MANY"
- MpolLocal MemoryPolicyModeType = "MPOL_LOCAL"
-)
-
-type MemoryPolicyFlagType string
-
-const (
- MpolFNumaBalancing MemoryPolicyFlagType = "MPOL_F_NUMA_BALANCING"
- MpolFRelativeNodes MemoryPolicyFlagType = "MPOL_F_RELATIVE_NODES"
- MpolFStaticNodes MemoryPolicyFlagType = "MPOL_F_STATIC_NODES"
-)
-
-// LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler
-type LinuxSchedulerPolicy string
-
-const (
- // SchedOther is the default scheduling policy
- SchedOther LinuxSchedulerPolicy = "SCHED_OTHER"
- // SchedFIFO is the First-In-First-Out scheduling policy
- SchedFIFO LinuxSchedulerPolicy = "SCHED_FIFO"
- // SchedRR is the Round-Robin scheduling policy
- SchedRR LinuxSchedulerPolicy = "SCHED_RR"
- // SchedBatch is the Batch scheduling policy
- SchedBatch LinuxSchedulerPolicy = "SCHED_BATCH"
- // SchedISO is the Isolation scheduling policy
- SchedISO LinuxSchedulerPolicy = "SCHED_ISO"
- // SchedIdle is the Idle scheduling policy
- SchedIdle LinuxSchedulerPolicy = "SCHED_IDLE" - // SchedDeadline is the Deadline scheduling policy - SchedDeadline LinuxSchedulerPolicy = "SCHED_DEADLINE" -) - -// LinuxSchedulerFlag represents the flags used by the Linux Scheduler. -type LinuxSchedulerFlag string - -const ( - // SchedFlagResetOnFork represents the reset on fork scheduling flag - SchedFlagResetOnFork LinuxSchedulerFlag = "SCHED_FLAG_RESET_ON_FORK" - // SchedFlagReclaim represents the reclaim scheduling flag - SchedFlagReclaim LinuxSchedulerFlag = "SCHED_FLAG_RECLAIM" - // SchedFlagDLOverrun represents the deadline overrun scheduling flag - SchedFlagDLOverrun LinuxSchedulerFlag = "SCHED_FLAG_DL_OVERRUN" - // SchedFlagKeepPolicy represents the keep policy scheduling flag - SchedFlagKeepPolicy LinuxSchedulerFlag = "SCHED_FLAG_KEEP_POLICY" - // SchedFlagKeepParams represents the keep parameters scheduling flag - SchedFlagKeepParams LinuxSchedulerFlag = "SCHED_FLAG_KEEP_PARAMS" - // SchedFlagUtilClampMin represents the utilization clamp minimum scheduling flag - SchedFlagUtilClampMin LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MIN" - // SchedFlagUtilClampMin represents the utilization clamp maximum scheduling flag - SchedFlagUtilClampMax LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MAX" -) - -// FreeBSD contains platform-specific configuration for FreeBSD based containers. -type FreeBSD struct { - // Devices which are accessible in the container - Devices []FreeBSDDevice `json:"devices,omitempty"` - // Jail definition for this container - Jail *FreeBSDJail `json:"jail,omitempty"` -} - -type FreeBSDDevice struct { - // Path to the device, relative to /dev. - Path string `json:"path"` - // FileMode permission bits for the device. - Mode *os.FileMode `json:"mode,omitempty"` -} - -// FreeBSDJail describes how to configure the container's jail -type FreeBSDJail struct { - // Parent jail name - this can be used to share a single vnet - // across several containers - Parent string `json:"parent,omitempty"` - // Whether to use parent UTS names or override in the container - Host FreeBSDSharing `json:"host,omitempty"` - // IPv4 address sharing for the container - Ip4 FreeBSDSharing `json:"ip4,omitempty"` - // IPv4 addresses for the container - Ip4Addr []string `json:"ip4Addr,omitempty"` - // IPv6 address sharing for the container - Ip6 FreeBSDSharing `json:"ip6,omitempty"` - // IPv6 addresses for the container - Ip6Addr []string `json:"ip6Addr,omitempty"` - // Which network stack to use for the container - Vnet FreeBSDSharing `json:"vnet,omitempty"` - // If set, Ip4Addr and Ip6Addr addresses will be added to this interface - Interface string `json:"interface,omitempty"` - // List interfaces to be moved to the container's vnet - VnetInterfaces []string `json:"vnetInterfaces,omitempty"` - // SystemV IPC message sharing for the container - SysVMsg FreeBSDSharing `json:"sysvmsg,omitempty"` - // SystemV semaphore message sharing for the container - SysVSem FreeBSDSharing `json:"sysvsem,omitempty"` - // SystemV memory sharing for the container - SysVShm FreeBSDSharing `json:"sysvshm,omitempty"` - // Mount visibility (see jail(8) for details) - EnforceStatfs *int `json:"enforceStatfs,omitempty"` - // Jail capabilities - Allow *FreeBSDJailAllow `json:"allow,omitempty"` -} - -// These values are used to control access to features in the container, either -// disabling the feature, sharing state with the parent or creating new private -// state in the container. 
-type FreeBSDSharing string - -const ( - FreeBSDShareDisable FreeBSDSharing = "disable" - FreeBSDShareNew FreeBSDSharing = "new" - FreeBSDShareInherit FreeBSDSharing = "inherit" -) - -// FreeBSDJailAllow describes jail capabilities -type FreeBSDJailAllow struct { - SetHostname bool `json:"setHostname,omitempty"` - RawSockets bool `json:"rawSockets,omitempty"` - Chflags bool `json:"chflags,omitempty"` - Mount []string `json:"mount,omitempty"` - Quotas bool `json:"quotas,omitempty"` - SocketAf bool `json:"socketAf,omitempty"` - Mlock bool `json:"mlock,omitempty"` - ReservedPorts bool `json:"reservedPorts,omitempty"` - Suser bool `json:"suser,omitempty"` -} diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go deleted file mode 100644 index 7c010d4fe..000000000 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go +++ /dev/null @@ -1,56 +0,0 @@ -package specs - -// ContainerState represents the state of a container. -type ContainerState string - -const ( - // StateCreating indicates that the container is being created - StateCreating ContainerState = "creating" - - // StateCreated indicates that the runtime has finished the create operation - StateCreated ContainerState = "created" - - // StateRunning indicates that the container process has executed the - // user-specified program but has not exited - StateRunning ContainerState = "running" - - // StateStopped indicates that the container process has exited - StateStopped ContainerState = "stopped" -) - -// State holds information about the runtime state of the container. -type State struct { - // Version is the version of the specification that is supported. - Version string `json:"ociVersion"` - // ID is the container ID - ID string `json:"id"` - // Status is the runtime status of the container. - Status ContainerState `json:"status"` - // Pid is the process ID for the container process. - Pid int `json:"pid,omitempty"` - // Bundle is the path to the container's bundle directory. - Bundle string `json:"bundle"` - // Annotations are key values associated with the container. - Annotations map[string]string `json:"annotations,omitempty"` -} - -const ( - // SeccompFdName is the name of the seccomp notify file descriptor. - SeccompFdName string = "seccompFd" -) - -// ContainerProcessState holds information about the state of a container process. -type ContainerProcessState struct { - // Version is the version of the specification that is supported. - Version string `json:"ociVersion"` - // Fds is a string array containing the names of the file descriptors passed. - // The index of the name in this array corresponds to index of the file - // descriptor in the `SCM_RIGHTS` array. - Fds []string `json:"fds"` - // Pid is the process ID as seen by the runtime. - Pid int `json:"pid"` - // Opaque metadata. - Metadata string `json:"metadata,omitempty"` - // State of the container. 
- State State `json:"state"` -} diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go deleted file mode 100644 index 0257dba3e..000000000 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ /dev/null @@ -1,18 +0,0 @@ -package specs - -import "fmt" - -const ( - // VersionMajor is for an API incompatible changes - VersionMajor = 1 - // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 3 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" -) - -// Version is the specification version that the package types support. -var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/regclient/regclient/.dockerignore b/vendor/github.com/regclient/regclient/.dockerignore new file mode 100644 index 000000000..cbcf085eb --- /dev/null +++ b/vendor/github.com/regclient/regclient/.dockerignore @@ -0,0 +1,15 @@ +* +!.git/ +!build/root.tgz +!cmd/ +!config/ +!internal/ +!mod/ +!pkg/ +!regclient/ +!scheme/ +!types/ +!vendor/ +!go.* +!*.go +!Makefile \ No newline at end of file diff --git a/vendor/github.com/regclient/regclient/.gitignore b/vendor/github.com/regclient/regclient/.gitignore new file mode 100644 index 000000000..24207e4ef --- /dev/null +++ b/vendor/github.com/regclient/regclient/.gitignore @@ -0,0 +1,5 @@ +artifacts/ +bin/ +output/ +vendor/ +.regctl_conf_ci.json diff --git a/vendor/github.com/regclient/regclient/.markdownlint.yml b/vendor/github.com/regclient/regclient/.markdownlint.yml new file mode 100644 index 000000000..01f625f5c --- /dev/null +++ b/vendor/github.com/regclient/regclient/.markdownlint.yml @@ -0,0 +1,19 @@ +# all lists use a `-` +MD004: + style: dash + +# allow tabs in code blocks (for Go) +MD010: + code_blocks: false + +# disable line length, prefer one sentence per line for PRs +MD013: false + +# emphasis with underscore (`_emphasis_`) +MD049: + style: "underscore" + +# bold with asterisk (`**bold**`) +MD050: + style: "asterisk" + \ No newline at end of file diff --git a/vendor/github.com/regclient/regclient/.osv-scanner.toml b/vendor/github.com/regclient/regclient/.osv-scanner.toml new file mode 100644 index 000000000..69916f390 --- /dev/null +++ b/vendor/github.com/regclient/regclient/.osv-scanner.toml @@ -0,0 +1 @@ +GoVersionOverride = "1.26.0" diff --git a/vendor/github.com/regclient/regclient/.version-bump.lock b/vendor/github.com/regclient/regclient/.version-bump.lock new file mode 100644 index 000000000..a90946541 --- /dev/null +++ b/vendor/github.com/regclient/regclient/.version-bump.lock @@ -0,0 +1,53 @@ +{"name":"docker-arg-alpine-digest","key":"docker.io/library/alpine:3.23.3","version":"sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659"} +{"name":"docker-arg-alpine-tag","key":"docker.io/library/alpine","version":"3.23.3"} +{"name":"docker-arg-ecr","key":"https://github.com/awslabs/amazon-ecr-credential-helper.git","version":"v0.11.0"} +{"name":"docker-arg-gcr","key":"https://github.com/GoogleCloudPlatform/docker-credential-gcr.git","version":"v2.1.32"} +{"name":"docker-arg-go-digest","key":"docker.io/library/golang:1.26.0-alpine","version":"sha256:d4c4845f5d60c6a974c6000ce58ae079328d03ab7f721a0734277e69905473e5"} +{"name":"docker-arg-go-tag","key":"docker.io/library/golang","version":"1.26.0"} 
+{"name":"docker-arg-lunajson","key":"https://github.com/grafi-tt/lunajson.git:master","version":"e3a9666eb1275741e887e29926b144f8daee3bef"} +{"name":"docker-arg-semver","key":"https://github.com/kikito/semver.lua.git:master","version":"a4b708ba243208d46e575da870af969dca46a94d"} +{"name":"gha-alpine-digest","key":"docker.io/library/alpine:3.23.3","version":"sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659"} +{"name":"gha-alpine-tag-base","key":"docker.io/library/alpine","version":"3"} +{"name":"gha-alpine-tag-comment","key":"docker.io/library/alpine","version":"3.23.3"} +{"name":"gha-cosign-version","key":"https://github.com/sigstore/cosign.git","version":"v3.0.5"} +{"name":"gha-golang-matrix","key":"golang-matrix","version":"[\"1.25\", \"1.26\"]"} +{"name":"gha-golang-release","key":"golang-latest","version":"1.26"} +{"name":"gha-syft-version","key":"docker.io/anchore/syft","version":"v1.42.1"} +{"name":"gha-uses-commit","key":"https://github.com/actions/checkout.git:v6.0.2","version":"de0fac2e4500dabe0009e67214ff5f5447ce83dd"} +{"name":"gha-uses-commit","key":"https://github.com/actions/setup-go.git:v6.2.0","version":"7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5"} +{"name":"gha-uses-commit","key":"https://github.com/actions/stale.git:v10.2.0","version":"b5d41d4e1d5dceea10e7104786b73624c18a190f"} +{"name":"gha-uses-commit","key":"https://github.com/actions/upload-artifact.git:v6.0.0","version":"b7c566a772e6b6bfb58ed0dc250532a479d7789f"} +{"name":"gha-uses-commit","key":"https://github.com/anchore/sbom-action.git:v0.22.2","version":"28d71544de8eaf1b958d335707167c5f783590ad"} +{"name":"gha-uses-commit","key":"https://github.com/docker/build-push-action.git:v6.19.2","version":"10e90e3645eae34f1e60eeb005ba3a3d33f178e8"} +{"name":"gha-uses-commit","key":"https://github.com/docker/login-action.git:v3.7.0","version":"c94ce9fb468520275223c153574b00df6fe4bcc9"} +{"name":"gha-uses-commit","key":"https://github.com/docker/setup-buildx-action.git:v3.12.0","version":"8d2750c68a42422c14e847fe6c8ac0403b4cbd6f"} +{"name":"gha-uses-commit","key":"https://github.com/regclient/actions.git:main","version":"da9319db8e44e8b062b3a147e1dfb2f574d41a03"} +{"name":"gha-uses-commit","key":"https://github.com/sigstore/cosign-installer.git:v4.0.0","version":"faadad0cce49287aee09b3a48701e75088a2c6ad"} +{"name":"gha-uses-commit","key":"https://github.com/softprops/action-gh-release.git:v2.5.0","version":"a06a81a03ee405af7f2048a818ed3f03bbf83c7b"} +{"name":"gha-uses-semver","key":"https://github.com/actions/checkout.git","version":"v6.0.2"} +{"name":"gha-uses-semver","key":"https://github.com/actions/setup-go.git","version":"v6.2.0"} +{"name":"gha-uses-semver","key":"https://github.com/actions/stale.git","version":"v10.2.0"} +{"name":"gha-uses-semver","key":"https://github.com/actions/upload-artifact.git","version":"v6.0.0"} +{"name":"gha-uses-semver","key":"https://github.com/anchore/sbom-action.git","version":"v0.22.2"} +{"name":"gha-uses-semver","key":"https://github.com/docker/build-push-action.git","version":"v6.19.2"} +{"name":"gha-uses-semver","key":"https://github.com/docker/login-action.git","version":"v3.7.0"} +{"name":"gha-uses-semver","key":"https://github.com/docker/setup-buildx-action.git","version":"v3.12.0"} +{"name":"gha-uses-semver","key":"https://github.com/sigstore/cosign-installer.git","version":"v4.0.0"} +{"name":"gha-uses-semver","key":"https://github.com/softprops/action-gh-release.git","version":"v2.5.0"} 
+{"name":"go-mod-golang-release","key":"golang-oldest","version":"1.25.0"} +{"name":"makefile-ci-distribution","key":"docker.io/library/registry","version":"3.0.0"} +{"name":"makefile-ci-zot","key":"ghcr.io/project-zot/zot-linux-amd64","version":"v2.1.14"} +{"name":"makefile-go-vulncheck","key":"https://go.googlesource.com/vuln.git","version":"v1.1.4"} +{"name":"makefile-gofumpt","key":"https://github.com/mvdan/gofumpt.git","version":"v0.9.2"} +{"name":"makefile-gomajor","key":"https://github.com/icholy/gomajor.git","version":"v0.15.0"} +{"name":"makefile-gosec","key":"https://github.com/securego/gosec.git","version":"v2.23.0"} +{"name":"makefile-markdown-lint","key":"docker.io/davidanson/markdownlint-cli2","version":"v0.21.0"} +{"name":"makefile-osv-scanner","key":"https://github.com/google/osv-scanner.git","version":"v2.3.3"} +{"name":"makefile-staticcheck","key":"https://github.com/dominikh/go-tools.git","version":"v0.7.0"} +{"name":"makefile-syft-container-digest","key":"anchore/syft:v1.42.1","version":"sha256:392b65f29a410d2c1294d347bb3ad6f37608345ab6e7b43d2df03ea18bd6f5b0"} +{"name":"makefile-syft-container-tag","key":"anchore/syft","version":"v1.42.1"} +{"name":"makefile-syft-version","key":"docker.io/anchore/syft","version":"v1.42.1"} +{"name":"osv-golang-release","key":"docker.io/library/golang","version":"1.26.0"} +{"name":"shell-alpine-digest","key":"docker.io/library/alpine:3.23.3","version":"sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659"} +{"name":"shell-alpine-tag-base","key":"docker.io/library/alpine","version":"3"} +{"name":"shell-alpine-tag-comment","key":"docker.io/library/alpine","version":"3.23.3"} diff --git a/vendor/github.com/regclient/regclient/.version-bump.yml b/vendor/github.com/regclient/regclient/.version-bump.yml new file mode 100644 index 000000000..5e75d790d --- /dev/null +++ b/vendor/github.com/regclient/regclient/.version-bump.yml @@ -0,0 +1,346 @@ +files: + "build/Dockerfile*": + processors: + - docker-arg-alpine-tag + - docker-arg-alpine-digest + - docker-arg-go-tag + - docker-arg-go-digest + - docker-arg-ecr + - docker-arg-gcr + - docker-arg-lunajson + - docker-arg-semver + "build/oci-image.sh": + processors: + - shell-alpine-tag-base + - shell-alpine-tag-comment + - shell-alpine-digest + ".github/workflows/*.yml": + processors: + - gha-golang-matrix + - gha-golang-release + - gha-uses-vx + - gha-uses-semver + - gha-uses-commit + - gha-syft-version + - gha-cosign-version + - gha-alpine-tag-base + - gha-alpine-tag-comment + - gha-alpine-digest + "Makefile": + processors: + - makefile-gofumpt + - makefile-gomajor + - makefile-go-vulncheck + - makefile-markdown-lint + - makefile-gosec + - makefile-osv-scanner + - makefile-staticcheck + - makefile-syft-version + - makefile-syft-container-tag + - makefile-syft-container-digest + - makefile-ci-distribution + - makefile-ci-zot + "go.mod": + processors: + - go-mod-golang-release + ".osv-scanner.toml": + processors: + - osv-golang-release + +x-processor-tmpl: + git-commit: &git-commit + key: "{{ .SourceArgs.url }}:{{ .SourceArgs.ref }}" + scan: "regexp" + source: "git-commit" + filter: + expr: "^{{ .SourceArgs.ref }}$" + git-tag-semver: &git-tag-semver + key: "{{ .SourceArgs.url }}" + scan: "regexp" + source: "git-tag" + filter: + expr: '^v?\d+\.\d+\.\d+$' + sort: + method: "semver" + registry-digest: ®istry-digest + key: "{{ .SourceArgs.image }}" + scan: "regexp" + source: "registry-digest" + registry-tag-semver: ®istry-tag-semver + key: "{{ .SourceArgs.repo }}" + scan: "regexp" + 
source: "registry-tag" + filter: + expr: '^v?\d+\.\d+\.\d+$' + sort: + method: "semver" + +processors: + docker-arg-alpine-tag: + <<: *registry-tag-semver + scanArgs: + regexp: '^ARG ALPINE_VER=(?Pv?\d+\.\d+\.\d+)@(?Psha256:[0-9a-f]+)\s*$' + sourceArgs: + repo: "docker.io/library/alpine" + docker-arg-alpine-digest: + <<: *registry-digest + scanArgs: + regexp: '^ARG ALPINE_VER=(?Pv?\d+\.\d+\.\d+)@(?Psha256:[0-9a-f]+)\s*$' + sourceArgs: + image: "docker.io/library/alpine:{{.ScanMatch.Tag}}" + docker-arg-go-tag: + <<: *registry-tag-semver + scanArgs: + regexp: '^ARG GO_VER=(?P[a-z0-9\-\.]+)-alpine@(?Psha256:[0-9a-f]+)\s*$' + sourceArgs: + repo: "docker.io/library/golang" + docker-arg-go-digest: + <<: *registry-digest + scanArgs: + regexp: '^ARG GO_VER=(?P[a-z0-9\-\.]+)@(?Psha256:[0-9a-f]+)\s*$' + sourceArgs: + image: "docker.io/library/golang:{{.ScanMatch.Tag}}" + docker-arg-ecr: + <<: *git-tag-semver + scanArgs: + regexp: '^ARG ECR_HELPER_VER=(?Pv?\d+\.\d+\.\d+)\s*$' + sourceArgs: + url: "https://github.com/awslabs/amazon-ecr-credential-helper.git" + # get the version for the ecr-login nested package in the repo + filter: + expr: '^ecr-login/v?\d+\.\d+\.\d+$' + # sort and output only the version number without the package name prefix + sort: + method: "semver" + template: '{{ index (split . "/") 1 }}' + template: '{{ index (split .Version "/") 1 }}' + docker-arg-gcr: + <<: *git-tag-semver + scanArgs: + regexp: '^ARG GCR_HELPER_VER=(?Pv?\d+\.\d+\.\d+)\s*$' + sourceArgs: + url: "https://github.com/GoogleCloudPlatform/docker-credential-gcr.git" + docker-arg-lunajson: + <<: *git-commit + scanArgs: + regexp: '^ARG LUNAJSON_COMMIT=(?P[0-9a-f]+)\s*$' + sourceArgs: + url: "https://github.com/grafi-tt/lunajson.git" + ref: master + docker-arg-semver: + <<: *git-commit + scanArgs: + regexp: '^ARG SEMVER_COMMIT=(?P[0-9a-f]+)\s*$' + sourceArgs: + url: "https://github.com/kikito/semver.lua.git" + ref: master + + gha-alpine-digest: + <<: *registry-digest + scanArgs: + regexp: '^\s*ALPINE_DIGEST: "(?Psha256:[0-9a-f]+)"\s*#\s*(?P\d+\.\d+\.\d+)\s*$' + sourceArgs: + image: "docker.io/library/alpine:{{ .ScanMatch.Tag }}" + gha-alpine-tag-base: + <<: *registry-tag-semver + scanArgs: + regexp: '^\s*ALPINE_NAME: "alpine:(?Pv?\d+)"\s*$' + sourceArgs: + repo: "docker.io/library/alpine" + # only return the major version number in the tag to support detecting a change in the base image + template: '{{ index ( split .Version "." 
+ gha-alpine-tag-comment:
+ <<: *registry-tag-semver
+ scanArgs:
+ regexp: '^\s*ALPINE_DIGEST: "(?P<Digest>sha256:[0-9a-f]+)"\s*#\s*(?P<Version>v?\d+\.\d+\.\d+)\s*$'
+ sourceArgs:
+ repo: "docker.io/library/alpine"
+ gha-cosign-version:
+ <<: *git-tag-semver
+ scanArgs:
+ regexp: '^\s*cosign-release: "(?P<Version>v?[0-9\.]+)"\s*$'
+ sourceArgs:
+ url: "https://github.com/sigstore/cosign.git"
+ filter:
+ expr: '^v?3\.\d+\.\d+$' # pin to v3, v4 will remove support for older clients
+ gha-golang-matrix:
+ <<: *registry-tag-semver
+ key: "golang-matrix"
+ scanArgs:
+ regexp: '^\s*gover: (?P<Version>\[["0-9, \.]+\])\s*$'
+ sourceArgs:
+ repo: "docker.io/library/golang"
+ filter:
+ expr: '^v?\d+\.\d+$'
+ template: '["{{ index .VerMap ( index .VerList 1 ) }}", "{{ index .VerMap ( index .VerList 0 ) }}"]'
+ gha-golang-release:
+ <<: *registry-tag-semver
+ key: "golang-latest"
+ scanArgs:
+ regexp: '^\s*RELEASE_GO_VER: "(?P<Version>v?[0-9\.]+)"\s*$'
+ sourceArgs:
+ repo: "docker.io/library/golang"
+ filter:
+ expr: '^v?\d+\.\d+$'
+ gha-syft-version:
+ <<: *registry-tag-semver
+ scanArgs:
+ regexp: '^\s*syft-version: "(?P<Version>v?[0-9\.]+)"\s*$'
+ sourceArgs:
+ repo: "docker.io/anchore/syft"
+ gha-uses-vx:
+ <<: *git-tag-semver
+ scanArgs:
+ regexp: '^\s+-?\s+uses: (?P<Repo>[^@/]+/[^@/]+)[^@]*@(?P<Commit>[0-9a-f]+)\s+#\s+(?P<Version>v?\d+)\s*$'
+ sourceArgs:
+ url: "https://github.com/{{ .ScanMatch.Repo }}.git"
+ filter:
+ expr: '^v?\d+$'
+ gha-uses-semver:
+ <<: *git-tag-semver
+ scanArgs:
+ regexp: '^\s+-?\s+uses: (?P<Repo>[^@/]+/[^@/]+)[^@]*@(?P<Commit>[0-9a-f]+)\s+#\s+(?P<Version>v?\d+\.\d+\.\d+)\s*$'
+ sourceArgs:
+ url: "https://github.com/{{ .ScanMatch.Repo }}.git"
+ gha-uses-commit:
+ <<: *git-commit
+ scanArgs:
+ regexp: '^\s+-?\s+uses: (?P<Repo>[^@/]+/[^@/]+)[^@]*@(?P<Version>[0-9a-f]+)\s+#\s+(?P<Ref>[\w\d\.]+)\s*$'
+ sourceArgs:
+ url: "https://github.com/{{ .ScanMatch.Repo }}.git"
+ ref: "{{ .ScanMatch.Ref }}"
+
+ go-mod-golang-release:
+ <<: *registry-tag-semver
+ key: "golang-oldest"
+ scanArgs:
+ regexp: '^go (?P<Version>[0-9\.]+)\s*$'
+ sourceArgs:
+ repo: "docker.io/library/golang"
+ filter:
+ expr: '^\d+\.\d+$'
+ template: '{{ index .VerMap ( index .VerList 1 ) }}.0'
+
+ makefile-ci-distribution:
+ <<: *registry-tag-semver
+ scanArgs:
+ regexp: '^CI_DISTRIBUTION_VER\?=(?P<Version>v?[0-9\.]+)\s*$'
+ sourceArgs:
+ repo: "docker.io/library/registry"
+ makefile-ci-zot:
+ <<: *registry-tag-semver
+ scanArgs:
+ regexp: '^CI_ZOT_VER\?=(?P<Version>v?[0-9\.]+)\s*$'
+ sourceArgs:
+ repo: "ghcr.io/project-zot/zot-linux-amd64"
+ makefile-gofumpt:
+ <<: *git-tag-semver
+ scanArgs:
+ regexp: '^GOFUMPT_VER\?=(?P<Version>v?[0-9\.]+)\s*$'
+ sourceArgs:
+ url: "https://github.com/mvdan/gofumpt.git"
+ makefile-gomajor:
+ <<: *git-tag-semver
+ scanArgs:
+ regexp: '^GOMAJOR_VER\?=(?P<Version>v?[0-9\.]+)\s*$'
+ sourceArgs:
+ url: "https://github.com/icholy/gomajor.git"
+ makefile-gosec:
+ <<: *git-tag-semver
+ scanArgs:
+ regexp: '^GOSEC_VER\?=(?P<Version>v?[0-9\.]+)\s*$'
+ sourceArgs:
+ url: "https://github.com/securego/gosec.git"
+ makefile-go-vulncheck:
+ <<: *git-tag-semver
+ scanArgs:
+ regexp: '^GO_VULNCHECK_VER\?=(?P<Version>v?[0-9\.]+)\s*$'
+ sourceArgs:
+ url: "https://go.googlesource.com/vuln.git"
+ makefile-markdown-lint:
+ <<: *registry-tag-semver
+ scanArgs:
+ regexp: '^MARKDOWN_LINT_VER\?=(?P<Version>v?[0-9\.]+)\s*$'
+ sourceArgs:
+ repo: "docker.io/davidanson/markdownlint-cli2"
+ makefile-osv-scanner:
+ <<: *git-tag-semver
+ scanArgs:
+ regexp: '^OSV_SCANNER_VER\?=(?P<Version>v?[0-9\.]+)\s*$'
+ sourceArgs:
+ url: "https://github.com/google/osv-scanner.git"
+ makefile-staticcheck:
+ <<: *git-tag-semver
+ scanArgs:
+ regexp: '^STATICCHECK_VER\?=(?P<Version>v?[0-9\.]+)\s*$'
+ sourceArgs:
+ url: "https://github.com/dominikh/go-tools.git"
+ filter:
+ # repo also has dated tags, ignore versions without a preceding "v"
+ expr: '^v\d+\.\d+\.\d+$'
+ makefile-syft-container-tag:
+ <<: *registry-tag-semver
+ scanArgs:
+ regexp: '^SYFT_CONTAINER\?=(?P<Repo>[^:]*):(?P<Version>v?[0-9\.]+)@(?P<Digest>sha256:[0-9a-f]+)\s*$'
+ sourceArgs:
+ repo: "{{ .ScanMatch.Repo }}"
+ makefile-syft-container-digest:
+ <<: *registry-digest
+ scanArgs:
+ regexp: '^SYFT_CONTAINER\?=(?P<Image>[^:]*):(?P<Tag>v?[0-9\.]+)@(?P<Version>sha256:[0-9a-f]+)\s*$'
+ sourceArgs:
+ image: "{{ .ScanMatch.Image }}:{{.ScanMatch.Tag}}"
+ makefile-syft-version:
+ <<: *registry-tag-semver
+ scanArgs:
+ regexp: '^SYFT_VERSION\?=(?P<Version>v[0-9\.]+)\s*$'
+ sourceArgs:
+ repo: "docker.io/anchore/syft"
+
+ osv-golang-release:
+ <<: *registry-tag-semver
+ scanArgs:
+ regexp: '^GoVersionOverride = "(?P<Version>v?[0-9\.]+)"\s*$'
+ sourceArgs:
+ repo: "docker.io/library/golang"
+
+ shell-alpine-tag-base:
+ <<: *registry-tag-semver
+ scanArgs:
+ regexp: '^\s*ALPINE_NAME="alpine:(?P<Version>v?\d+)"\s*$'
+ sourceArgs:
+ repo: "docker.io/library/alpine"
+ # only return the major version number in the tag to support detecting a change in the base image
+ template: '{{ index ( split .Version "." ) 0 }}'
+ shell-alpine-tag-comment:
+ <<: *registry-tag-semver
+ scanArgs:
+ regexp: '^\s*ALPINE_DIGEST="(?P<Digest>sha256:[0-9a-f]+)"\s*#\s*(?P<Version>v?\d+\.\d+\.\d+)\s*$'
+ sourceArgs:
+ repo: "docker.io/library/alpine"
+ shell-alpine-digest:
+ <<: *registry-digest
+ scanArgs:
+ regexp: '^\s*ALPINE_DIGEST="(?P<Version>sha256:[0-9a-f]+)"\s*#\s*(?P<Tag>\d+\.\d+\.\d+)\s*$'
+ sourceArgs:
+ image: "docker.io/library/alpine:{{ .ScanMatch.Tag }}"
+
+scans:
+ regexp:
+ type: "regexp"
+
+sources:
+ git-commit:
+ type: "git"
+ args:
+ type: "commit"
+ git-tag:
+ type: "git"
+ args:
+ type: "tag"
+ registry-digest:
+ type: "registry"
+ registry-tag:
+ type: "registry"
+ args:
+ type: "tag"
diff --git a/vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md b/vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..5498720f1
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md
@@ -0,0 +1,134 @@
+
+# Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +- Demonstrating empathy and kindness toward other people +- Being respectful of differing opinions, viewpoints, and experiences +- Giving and gracefully accepting constructive feedback +- Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +- Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +- The use of sexualized language or imagery, and sexual attention or advances of + any kind +- Trolling, insulting or derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or email address, + without their explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at + or slack (I'm found on the CNCF, Docker, OCI, and OpenSSF +slacks). +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/vendor/github.com/regclient/regclient/CONTRIBUTING.md b/vendor/github.com/regclient/regclient/CONTRIBUTING.md new file mode 100644 index 000000000..1aa54e9ff --- /dev/null +++ b/vendor/github.com/regclient/regclient/CONTRIBUTING.md @@ -0,0 +1,80 @@ +# Contributing + +## Reporting security issues + +Please see [SECURITY.md](security.md) for the process to report security issues. + +## Reporting other issues + +Please search for similar issues and if none are seen, report an issue at [github.com/regclient/regclient/issues](https://github.com/regclient/regclient/issues). + +## Code style + +This project attempts to follow these principles: + +- Code is canonical Go, following styles and patterns commonly used by the Go community. +- Dependencies outside of the Go standard library should be minimized. +- Dependencies should be pinned to a specific digest and tracked by Go or version-check. +- Unit tests are strongly encouraged with a focus on test coverage of the successful path and common errors. +- Linters and other style formatting tools are used, please run `make all` before committing any changes. + +## LLM Policy + +This project expects all contributions to be developed by a human or created with a reproducible tool. +Developers using an AI/LLM tool to generate their contribution are expected to fully understand the entire contribution and the logic behind its design. +Contributions that appear to have been generated by an AI/LLM without a human review may result in a ban from future contributions to the project. + +## Pull requests + +PRs are welcome following the below guides: + +- For anything beyond a minor fix, opening an issue is suggested to discuss possible solutions. +- Changes should be rebased on the main branch. +- Changes should be squashed to a single commit per logical change. 
+
+All changes must be signed (`git commit -s`) to indicate you agree to the [Developer Certificate of Origin](https://developercertificate.org/):
+
+```text
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+The sign-off will include the following message in your commit:
+
+```text
+Signed-off-by: Your Name <your.email@example.com>
+```
+
+This needs to be your real name, no aliases please.
diff --git a/vendor/github.com/opencontainers/runtime-spec/LICENSE b/vendor/github.com/regclient/regclient/LICENSE
similarity index 99%
rename from vendor/github.com/opencontainers/runtime-spec/LICENSE
rename to vendor/github.com/regclient/regclient/LICENSE
index bdc403653..6e12b19d0 100644
--- a/vendor/github.com/opencontainers/runtime-spec/LICENSE
+++ b/vendor/github.com/regclient/regclient/LICENSE
@@ -176,7 +176,7 @@
 
       END OF TERMS AND CONDITIONS
 
-   Copyright 2015 The Linux Foundation.
+   Copyright 2020 The regclient Authors.
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/regclient/regclient/Makefile b/vendor/github.com/regclient/regclient/Makefile new file mode 100644 index 000000000..6ecc2e65c --- /dev/null +++ b/vendor/github.com/regclient/regclient/Makefile @@ -0,0 +1,285 @@ +COMMANDS?=regctl regsync regbot +BINARIES?=$(addprefix bin/,$(COMMANDS)) +IMAGES?=$(addprefix docker-,$(COMMANDS)) +ARTIFACT_PLATFORMS?=linux-amd64 linux-arm64 linux-ppc64le linux-s390x linux-riscv64 darwin-amd64 darwin-arm64 windows-amd64.exe freebsd-amd64 +ARTIFACTS?=$(foreach cmd,$(addprefix artifacts/,$(COMMANDS)),$(addprefix $(cmd)-,$(ARTIFACT_PLATFORMS))) +IMAGE_PLATFORMS?=linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x,linux/riscv64 +VCS_REPO?="https://github.com/regclient/regclient.git" +VCS_REF?=$(shell git rev-list -1 HEAD) +ifneq ($(shell git status --porcelain 2>/dev/null),) + VCS_REF := $(VCS_REF)-dirty +endif +VCS_VERSION?=$(shell vcs_describe="$$(git describe --all)"; \ + vcs_version="(devel)"; \ + if [ "$${vcs_describe}" != "$${vcs_describe#tags/}" ]; then \ + vcs_version="$${vcs_describe#tags/}"; \ + elif [ "$${vcs_describe}" != "$${vcs_describe#heads/}" ]; then \ + vcs_version="$${vcs_describe#heads/}"; \ + if [ "main" = "$${vcs_version}" ]; then vcs_version=edge; fi; \ + fi; \ + echo "$${vcs_version}" | sed -r 's#/+#-#g') +VCS_TAG?=$(shell git describe --tags --abbrev=0 2>/dev/null || true) +VCS_SEC?=$(shell git log -1 --format=%ct) +VCS_DATE?=$(shell date -d "@$(VCS_SEC)" +%Y-%m-%dT%H:%M:%SZ --utc) +LD_FLAGS?=-s -w -extldflags -static -buildid= -X \"github.com/regclient/regclient/internal/version.vcsTag=$(VCS_TAG)\" +GO_BUILD_FLAGS?=-trimpath -ldflags "$(LD_FLAGS)" +DOCKERFILE_EXT?=$(shell if docker build --help 2>/dev/null | grep -q -- '--progress'; then echo ".buildkit"; fi) +DOCKER_ARGS?=--build-arg "VCS_REF=$(VCS_REF)" --build-arg "VCS_VERSION=$(VCS_VERSION)" --build-arg "SOURCE_DATE_EPOCH=$(VCS_SEC)" --build-arg "BUILD_DATE=$(VCS_DATE)" +GOPATH?=$(shell go env GOPATH) +PWD:=$(shell pwd) +VER_BUMP?=$(shell command -v version-bump 2>/dev/null) +VER_BUMP_CONTAINER?=sudobmitch/version-bump:edge +ifeq "$(strip $(VER_BUMP))" '' + VER_BUMP=docker run --rm \ + -v "$(shell pwd)/:$(shell pwd)/" -w "$(shell pwd)" \ + -u "$(shell id -u):$(shell id -g)" \ + $(VER_BUMP_CONTAINER) +endif +MARKDOWN_LINT_VER?=v0.21.0 +GOFUMPT_VER?=v0.9.2 +GOMAJOR_VER?=v0.15.0 +GOSEC_VER?=v2.23.0 +GO_VULNCHECK_VER?=v1.1.4 +OSV_SCANNER_VER?=v2.3.3 +SYFT?=$(shell command -v syft 2>/dev/null) +SYFT_CMD_VER:=$(shell [ -x "$(SYFT)" ] && echo "v$$($(SYFT) version | awk '/^Version: / {print $$2}')" || echo "0") +SYFT_VERSION?=v1.42.1 +SYFT_CONTAINER?=anchore/syft:v1.42.1@sha256:392b65f29a410d2c1294d347bb3ad6f37608345ab6e7b43d2df03ea18bd6f5b0 +ifneq "$(SYFT_CMD_VER)" "$(SYFT_VERSION)" + SYFT=docker run --rm \ + -v "$(shell pwd)/:$(shell pwd)/" -w "$(shell pwd)" \ + -u "$(shell id -u):$(shell id -g)" \ + $(SYFT_CONTAINER) +endif +STATICCHECK_VER?=v0.7.0 +CI_DISTRIBUTION_VER?=3.0.0 +CI_ZOT_VER?=v2.1.14 + +.PHONY: .FORCE +.FORCE: + +.PHONY: all +all: fmt gofumpt gofix goimports vet test lint binaries ## Full build of Go binaries (including fmt, vet, test, and lint) + +.PHONY: fmt +fmt: ## go fmt + go fmt ./... + +.PHONY: gofumpt +gofumpt: $(GOPATH)/bin/gofumpt ## gofumpt is a stricter alternative to go fmt + gofumpt -l -w . + +.PHONY: gofix +gofix: ## go fix + go fix ./... + +goimports: $(GOPATH)/bin/goimports + $(GOPATH)/bin/goimports -w -format-only -local github.com/regclient . + +.PHONY: vet +vet: ## go vet + go vet ./... 
+
+.PHONY: test
+test: ## go test
+	go test -cover -race ./...
+
+.PHONY: lint
+lint: lint-go lint-goimports lint-md lint-gosec ## Run all linting
+
+.PHONY: lint-go
+lint-go: $(GOPATH)/bin/gofumpt $(GOPATH)/bin/staticcheck .FORCE ## Run linting for Go
+	$(GOPATH)/bin/staticcheck -checks all ./...
+	$(GOPATH)/bin/gofumpt -l -d .
+	errors=$$(go fix -diff ./...); if [ "$${errors}" != "" ]; then echo "$${errors}"; exit 1; fi
+
+lint-goimports: $(GOPATH)/bin/goimports
+	@if [ -n "$$($(GOPATH)/bin/goimports -l -format-only -local github.com/regclient .)" ]; then \
+		echo $(GOPATH)/bin/goimports -d -format-only -local github.com/regclient .; \
+		$(GOPATH)/bin/goimports -d -format-only -local github.com/regclient .; \
+		exit 1; \
+	fi
+
+# excluding types/platform pending resolution of https://github.com/securego/gosec/issues/1116
+.PHONY: lint-gosec
+lint-gosec: $(GOPATH)/bin/gosec .FORCE ## Run gosec
+	$(GOPATH)/bin/gosec -terse -exclude-dir types/platform ./...
+
+.PHONY: lint-md
+lint-md: .FORCE ## Run linting for markdown
+	docker run --rm -v "$(PWD):/workdir:ro" davidanson/markdownlint-cli2:$(MARKDOWN_LINT_VER) \
+	  "**/*.md" "#vendor"
+
+.PHONY: vulnerability-scan
+vulnerability-scan: osv-scanner vulncheck-go ## Run all vulnerability scanners
+
+.PHONY: osv-scanner
+osv-scanner: $(GOPATH)/bin/osv-scanner .FORCE ## Run OSV Scanner
+	$(GOPATH)/bin/osv-scanner scan --config .osv-scanner.toml -r --licenses="Apache-2.0,BSD-3-Clause,MIT,CC-BY-SA-4.0,UNKNOWN" .
+
+.PHONY: vulncheck-go
+vulncheck-go: $(GOPATH)/bin/govulncheck .FORCE ## Run govulncheck
+	$(GOPATH)/bin/govulncheck ./...
+
+.PHONY: vendor
+vendor: ## Vendor Go modules
+	go mod vendor
+
+.PHONY: binaries
+binaries: $(BINARIES) ## Build Go binaries
+
+bin/%: .FORCE
+	CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} -o bin/$* ./cmd/$*
+
+.PHONY: docker
+docker: $(IMAGES) ## Build Docker images
+
+docker-%: .FORCE
+	docker build -t regclient/$* -f build/Dockerfile.$*$(DOCKERFILE_EXT) $(DOCKER_ARGS) .
+	docker build -t regclient/$*:alpine -f build/Dockerfile.$*$(DOCKERFILE_EXT) --target release-alpine $(DOCKER_ARGS) .
+
+.PHONY: oci-image
+oci-image: $(addprefix oci-image-,$(COMMANDS)) ## Build reproducible images to an OCI Layout
+
+oci-image-%: bin/regctl .FORCE
+	PATH="$(PWD)/bin:$(PATH)" build/oci-image.sh -r scratch -i "$*" -p "$(IMAGE_PLATFORMS)"
+	PATH="$(PWD)/bin:$(PATH)" build/oci-image.sh -r alpine -i "$*" -p "$(IMAGE_PLATFORMS)" -b "alpine:3"
+
+.PHONY: test-docker
+test-docker: $(addprefix test-docker-,$(COMMANDS)) ## Build multi-platform docker images (but do not tag)
+
+test-docker-%:
+	docker buildx build --platform="$(IMAGE_PLATFORMS)" -f build/Dockerfile.$*.buildkit .
+	docker buildx build --platform="$(IMAGE_PLATFORMS)" -f build/Dockerfile.$*.buildkit --target release-alpine .
+ +.PHONY: ci +ci: ci-distribution ci-zot ## Run CI tests against self hosted registries + +.PHONY: ci-distribution +ci-distribution: + docker run --rm -d -p 5000 \ + --label regclient-ci=true --name regclient-ci-distribution \ + -e "REGISTRY_STORAGE_DELETE_ENABLED=true" \ + docker.io/library/registry:$(CI_DISTRIBUTION_VER) + ./build/ci-test.sh -t localhost:$$(docker port regclient-ci-distribution 5000 | head -1 | cut -f2 -d:)/test-ci + docker stop regclient-ci-distribution + +.PHONY: ci-zot +ci-zot: + docker run --rm -d -p 5000 \ + --label regclient-ci=true --name regclient-ci-zot \ + -v "$$(pwd)/build/zot-config.json:/etc/zot/config.json:ro" \ + ghcr.io/project-zot/zot-linux-amd64:$(CI_ZOT_VER) + ./build/ci-test.sh -t localhost:$$(docker port regclient-ci-zot 5000 | head -1 | cut -f2 -d:)/test-ci + docker stop regclient-ci-zot + +.PHONY: artifacts +artifacts: $(ARTIFACTS) ## Generate artifacts + +.PHONY: artifact-pre +artifact-pre: + mkdir -p artifacts + +artifacts/%: artifact-pre .FORCE + @set -e; \ + target="$*"; \ + command="$${target%%-*}"; \ + platform_ext="$${target#*-}"; \ + platform="$${platform_ext%.*}"; \ + export GOOS="$${platform%%-*}"; \ + export GOARCH="$${platform#*-}"; \ + echo export GOOS=$${GOOS}; \ + echo export GOARCH=$${GOARCH}; \ + echo go build ${GO_BUILD_FLAGS} -o "$@" ./cmd/$${command}/; \ + CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} -o "$@" ./cmd/$${command}/; \ + $(SYFT) scan -q "file:$@" --source-name "$${command}" -o cyclonedx-json >"artifacts/$${command}-$${platform}.cyclonedx.json"; \ + $(SYFT) scan -q "file:$@" --source-name "$${command}" -o spdx-json >"artifacts/$${command}-$${platform}.spdx.json" + +.PHONY: clean +clean: ## delete generated content + [ ! -d artifacts ] || rm -r artifacts + [ ! -d bin ] || rm -r bin + [ ! -d output ] || rm -r output + [ ! -d vendor ] || rm -r vendor + +.PHONY: plugin-user +plugin-user: + mkdir -p ${HOME}/.docker/cli-plugins/ + cp docker-plugin/docker-regclient ${HOME}/.docker/cli-plugins/docker-regctl + +.PHONY: plugin-host +plugin-host: + sudo cp docker-plugin/docker-regclient /usr/libexec/docker/cli-plugins/docker-regctl + +.PHONY: util-golang-major +util-golang-major: $(GOPATH)/bin/gomajor ## check for major dependency updates + $(GOPATH)/bin/gomajor list + +.PHONY: util-golang-update +util-golang-update: ## update go module versions + go get -u -t ./... + go mod tidy + [ ! 
-d vendor ] || go mod vendor + +.PHONY: util-release-preview +util-release-preview: $(GOPATH)/bin/gorelease ## preview changes for next release + git checkout main + ./.github/release.sh -d + gorelease + +.PHONY: util-release-run +util-release-run: ## generate a new release + git checkout main + ./.github/release.sh + +.PHONY: util-version-check +util-version-check: ## check all dependencies for updates + $(VER_BUMP) check + +.PHONY: util-version-update +util-version-update: ## update versions on all dependencies + $(VER_BUMP) update + +$(GOPATH)/bin/gofumpt: .FORCE + @[ -f "$(GOPATH)/bin/gofumpt" ] \ + && [ "$$($(GOPATH)/bin/gofumpt -version | cut -f 1 -d ' ')" = "$(GOFUMPT_VER)" ] \ + || go install mvdan.cc/gofumpt@$(GOFUMPT_VER) + +$(GOPATH)/bin/gomajor: .FORCE + @[ -f "$(GOPATH)/bin/gomajor" ] \ + && [ "$$($(GOPATH)/bin/gomajor version | grep '^version' | cut -f 2 -d ' ')" = "$(GOMAJOR_VER)" ] \ + || go install github.com/icholy/gomajor@$(GOMAJOR_VER) + +$(GOPATH)/bin/goimports: .FORCE + @[ -f "$(GOPATH)/bin/goimports" ] && [ "$$(go version | cut -f3 -d' ')" = "$$(go version $(GOPATH)/bin/goimports | cut -f2 -d' ')" ] \ + || go install golang.org/x/tools/cmd/goimports@latest + +$(GOPATH)/bin/gorelease: .FORCE + @[ -f "$(GOPATH)/bin/gorelease" ] && [ "$$(go version | cut -f3 -d' ')" = "$$(go version $(GOPATH)/bin/gorelease | cut -f2 -d' ')" ] \ + || go install golang.org/x/exp/cmd/gorelease@latest + +$(GOPATH)/bin/gosec: .FORCE + @[ -f $(GOPATH)/bin/gosec ] \ + && [ "$$($(GOPATH)/bin/gosec -version | grep '^Version' | cut -f 2 -d ' ')" = "$(GOSEC_VER)" ] \ + || go install -ldflags '-X main.Version=$(GOSEC_VER) -X main.GitTag=$(GOSEC_VER)' \ + github.com/securego/gosec/v2/cmd/gosec@$(GOSEC_VER) + +$(GOPATH)/bin/staticcheck: .FORCE + @[ -f $(GOPATH)/bin/staticcheck ] \ + && [ "$$($(GOPATH)/bin/staticcheck -version | cut -f 3 -d ' ' | tr -d '()')" = "$(STATICCHECK_VER)" ] \ + || go install "honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VER)" + +$(GOPATH)/bin/govulncheck: .FORCE + @[ -f $(GOPATH)/bin/govulncheck ] \ + && [ $$(go version -m $(GOPATH)/bin/govulncheck | \ + awk -F ' ' '{ if ($$1 == "mod" && $$2 == "golang.org/x/vuln") { printf "%s\n", $$3 } }') = "$(GO_VULNCHECK_VER)" ] \ + || CGO_ENABLED=0 go install "golang.org/x/vuln/cmd/govulncheck@$(GO_VULNCHECK_VER)" + +$(GOPATH)/bin/osv-scanner: .FORCE + @[ -f $(GOPATH)/bin/osv-scanner ] \ + && [ "$$(osv-scanner --version | awk -F ': ' '{ if ($$1 == "osv-scanner version") { printf "%s\n", $$2 } }')" = "$(OSV_SCANNER_VER)" ] \ + || CGO_ENABLED=0 go install "github.com/google/osv-scanner/v2/cmd/osv-scanner@$(OSV_SCANNER_VER)" + +.PHONY: help +help: # Display help + @awk -F ':|##' '/^[^\t].+?:.*?##/ { printf "\033[36m%-30s\033[0m %s\n", $$1, $$NF }' $(MAKEFILE_LIST) diff --git a/vendor/github.com/regclient/regclient/README.md b/vendor/github.com/regclient/regclient/README.md new file mode 100644 index 000000000..59db82fe8 --- /dev/null +++ b/vendor/github.com/regclient/regclient/README.md @@ -0,0 +1,116 @@ +# regclient + +[![Go Workflow Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/go.yml?branch=main&label=Go%20build)](https://github.com/regclient/regclient/actions/workflows/go.yml) +[![Docker Workflow Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/docker.yml?branch=main&label=Docker%20build)](https://github.com/regclient/regclient/actions/workflows/docker.yml) +[![Dependency Workflow 
Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/version-check.yml?branch=main&label=Dependency%20check)](https://github.com/regclient/regclient/actions/workflows/version-check.yml)
+[![Vulnerability Workflow Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/vulnscans.yml?branch=main&label=Vulnerability%20check)](https://github.com/regclient/regclient/actions/workflows/vulnscans.yml)
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/regclient/regclient.svg)](https://pkg.go.dev/github.com/regclient/regclient)
+![License](https://img.shields.io/github/license/regclient/regclient)
+[![Go Report Card](https://goreportcard.com/badge/github.com/regclient/regclient)](https://goreportcard.com/report/github.com/regclient/regclient)
+[![GitHub Downloads](https://img.shields.io/github/downloads/regclient/regclient/total?label=GitHub%20downloads)](https://github.com/regclient/regclient/releases)
+
+regclient is a client interface to OCI conformant registries and content shipped with the OCI Image Layout.
+It includes a Go library and several CLI commands.
+
+## regclient Go Library Features
+
+- Runs without a container runtime and without privileged access to the local host.
+- Querying for a tag listing, repository listing, and remotely inspecting the contents of images.
+- Efficiently copying and retagging images, only pulling layers when required, and without changing the image digest.
+- Support for multi-platform images.
+- Support for querying, creating, and copying OCI Artifacts, allowing arbitrary data to be stored in an OCI registry.
+- Support for packaging OCI Artifacts with an Index of multiple artifacts, which can be used for platform specific artifacts.
+- Support for querying OCI referrers, copying referrers, and pushing content with an OCI subject field, associating artifacts with other content on the registry.
+- Support for the “digest tags” used by projects like sigstore/cosign, allowing the content to be included when copying images.
+- Efficiently query for an image digest.
+- Efficiently query for pull rate limits used by Docker Hub.
+- Import and export content into OCI Layouts and Docker formatted tar files.
+- Support OCI Layouts in all commands as a local disk equivalent of a repository.
+- Support for deleting tags, manifests, and blobs.
+- Ability to mutate existing images, including:
+  - Setting annotations or labels
+  - Deleting content from layers
+  - Changing timestamps for reproducibility
+  - Converting between Docker and OCI media types
+  - Replacing the base image layers
+  - Adding or removing volumes and exposed ports
+  - Changing digest algorithms
+- Support for registry warning headers, which may be used to notify users of issues with the server or content they are using.
+- Automatically import logins from the docker CLI, and registry certificates from the docker engine.
+- Automatic retry, and fallback to a chunked blob push, when network issues are encountered.
+
+The full Go reference is available on [pkg.go.dev](https://pkg.go.dev/github.com/regclient/regclient).
+
+## regctl Features
+
+`regctl` is a CLI interface to the `regclient` library.
+In addition to the features listed for `regclient`, `regctl` adds the following abilities:
+
+- Generating multi-platform manifests from multiple images that may have been separately built.
+- Repackage a multi-platform image with only the requested platforms.
+- Push and pull arbitrary OCI artifacts.
+- Recursively list all content associated with an image. +- Extract files from a layer or image. +- Compare images, showing the differences between manifests, the config, and layers. +- Formatted output using Go templates. + +The project website includes [usage instructions](https://regclient.org/usage/regctl/) and a [CLI reference](https://regclient.org/cli/regctl/). + +## regsync features + +`regsync` is an image mirroring tool. +It will copy images between two locations with the following additional features: + +- Ability to run on a cron schedule, one time synchronization, or only report stale images. +- Uses a yaml configuration. +- Each source may be an entire registry (not recommended), a repository, or a single image, with the ability to filter repositories and tags. +- Support for multi-platform images, OCI referrers, “digest tags”, and copying to or from an OCI Layout (for maintaining a mirror over an air-gap). +- Ability to mirror multiple images concurrently. +- Support for copying a single platform from multi-platform images. +- Ability to backup an existing image before overwriting the tag. +- Ability to postpone mirror step when rate limit (used by Docker Hub) is below a threshold. +- Can use user’s docker configuration for user credentials and registry certificates. + +The project website includes [usage instructions](https://regclient.org/usage/regsync/) and a [CLI reference](https://regclient.org/cli/regsync/). + +## regbot features + +`regbot` is a scripting tool on top of the `regclient` API with the following features: + +- Ability to run on a cron schedule, one time execution, or test with a dry-run mode. +- Uses a yaml configuration. +- Scripts are written in Lua and executed directly in Go. +- Built-in functions include: + - Repository list + - Tag list + - Image manifest (either head or get, and optional resolving multi-platform reference) + - Image config (this includes the creation time, labels, and other details shown in a docker image inspect) + - Image rate limit and a wait function to delay the script when rate limit remaining is below a threshold + - Image copy + - Manifest delete + - Tag delete + +The project website includes [usage instructions](https://regclient.org/usage/regbot/) and a [CLI reference](https://regclient.org/cli/regbot/). + +## Development Status + +This project is using v0 version numbers due to Go's backwards compatibility requirements of a v1 release. +The library and commands are stable for external use. +Minor version updates may contain breaking changes, however effort is made to first deprecate and provide warnings to give users time to move off of older APIs and commands. + +## Installing + +See the [installation instructions](https://regclient.org/install/) on the project website for the various ways to download or build CLI binaries. + +## Usage + +See the [project documentation](https://regclient.org/usage/). + +## Contributors + + + contributor list + + + diff --git a/vendor/github.com/regclient/regclient/SECURITY.md b/vendor/github.com/regclient/regclient/SECURITY.md new file mode 100644 index 000000000..50508cb45 --- /dev/null +++ b/vendor/github.com/regclient/regclient/SECURITY.md @@ -0,0 +1,5 @@ +# Reporting security issues + +Please report security issues directly in GitHub at or alternatively email . + +We will typically respond within 7 working days of your report. If the issue is confirmed as a vulnerability, we will open a Security Advisory and acknowledge your contributions as part of it. 
This project follows a 90-day disclosure timeline.
diff --git a/vendor/github.com/regclient/regclient/blob.go b/vendor/github.com/regclient/regclient/blob.go
new file mode 100644
index 000000000..a55d281b0
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/blob.go
@@ -0,0 +1,283 @@
+package regclient
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"log/slog"
+	"time"
+
+	"github.com/regclient/regclient/internal/pqueue"
+	"github.com/regclient/regclient/internal/reqmeta"
+	"github.com/regclient/regclient/scheme"
+	"github.com/regclient/regclient/types"
+	"github.com/regclient/regclient/types/blob"
+	"github.com/regclient/regclient/types/descriptor"
+	"github.com/regclient/regclient/types/errs"
+	"github.com/regclient/regclient/types/ref"
+	"github.com/regclient/regclient/types/warning"
+)
+
+const blobCBFreq = time.Millisecond * 100
+
+type blobOpt struct {
+	callback   func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64)
+	readerHook func(*blob.BReader) (*blob.BReader, error)
+}
+
+// BlobOpts define options for the Blob* commands.
+type BlobOpts func(*blobOpt)
+
+// BlobWithCallback provides progress data to a callback function.
+func BlobWithCallback(callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64)) BlobOpts {
+	return func(opts *blobOpt) {
+		opts.callback = callback
+	}
+}
+
+// BlobWithReaderHook is called in [RegClient.BlobCopy] with the blob source.
+// The returned [blob.BReader] is pushed to the target.
+// If the hook returns an error, the copy will fail.
+func BlobWithReaderHook(hook func(*blob.BReader) (*blob.BReader, error)) BlobOpts {
+	return func(opts *blobOpt) {
+		opts.readerHook = hook
+	}
+}
+
+// BlobCopy copies a blob between two locations.
+// If the blob already exists in the target, the copy is skipped.
+// A server side cross repository blob mount is attempted.
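+//
+// A minimal usage sketch (ctx, refSrc, refTgt, and desc are assumed to be
+// resolved by the caller; this is illustrative only, not part of the API docs):
+//
+//	rc := regclient.New()
+//	if err := rc.BlobCopy(ctx, refSrc, refTgt, desc); err != nil {
+//		// handle the failed copy
+//	}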
+func (rc *RegClient) BlobCopy(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor, opts ...BlobOpts) error { + if !refSrc.IsSetRepo() { + return fmt.Errorf("refSrc is not set: %s%.0w", refSrc.CommonName(), errs.ErrInvalidReference) + } + if !refTgt.IsSetRepo() { + return fmt.Errorf("refTgt is not set: %s%.0w", refTgt.CommonName(), errs.ErrInvalidReference) + } + var opt blobOpt + for _, optFn := range opts { + optFn(&opt) + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + tDesc := d + tDesc.URLs = []string{} // ignore URLs when pushing to target + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackStarted, 0, d.Size) + } + // for the same repository, there's nothing to copy + if ref.EqualRepository(refSrc, refTgt) { + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackSkipped, 0, d.Size) + } + rc.slog.Debug("Blob copy skipped, same repo", + slog.String("src", refSrc.Reference), + slog.String("tgt", refTgt.Reference), + slog.String("digest", string(d.Digest))) + return nil + } + // check if layer already exists + if _, err := rc.BlobHead(ctx, refTgt, tDesc); err == nil { + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackSkipped, 0, d.Size) + } + rc.slog.Debug("Blob copy skipped, already exists", + slog.String("src", refSrc.Reference), + slog.String("tgt", refTgt.Reference), + slog.String("digest", string(d.Digest))) + return nil + } + // acquire throttle for both src and tgt to avoid deadlocks + tList := []*pqueue.Queue[reqmeta.Data]{} + schemeSrcAPI, err := rc.schemeGet(refSrc.Scheme) + if err != nil { + return err + } + schemeTgtAPI, err := rc.schemeGet(refTgt.Scheme) + if err != nil { + return err + } + if tSrc, ok := schemeSrcAPI.(scheme.Throttler); ok { + tList = append(tList, tSrc.Throttle(refSrc, false)...) + } + if tTgt, ok := schemeTgtAPI.(scheme.Throttler); ok { + tList = append(tList, tTgt.Throttle(refTgt, true)...) + } + if len(tList) > 0 { + ctxMulti, done, err := pqueue.AcquireMulti[reqmeta.Data](ctx, reqmeta.Data{Kind: reqmeta.Blob, Size: d.Size}, tList...) 
+		if err != nil {
+			return err
+		}
+		if done != nil {
+			defer done()
+		}
+		ctx = ctxMulti
+	}
+
+	// try mounting blob from the source repo if the registry is the same
+	if ref.EqualRegistry(refSrc, refTgt) {
+		err := rc.BlobMount(ctx, refSrc, refTgt, d)
+		if err == nil {
+			if opt.callback != nil {
+				opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackSkipped, 0, d.Size)
+			}
+			rc.slog.Debug("Blob copy performed server side with registry mount",
+				slog.String("src", refSrc.Reference),
+				slog.String("tgt", refTgt.Reference),
+				slog.String("digest", string(d.Digest)))
+			return nil
+		}
+		rc.slog.Warn("Failed to mount blob",
+			slog.String("src", refSrc.Reference),
+			slog.String("tgt", refTgt.Reference),
+			slog.String("err", err.Error()))
+	}
+	// fast options failed, download layer from source and push to target
+	blobIO, err := rc.BlobGet(ctx, refSrc, d)
+	if err != nil {
+		if !errors.Is(err, context.Canceled) {
+			rc.slog.Warn("Failed to retrieve blob",
+				slog.String("src", refSrc.Reference),
+				slog.String("digest", string(d.Digest)),
+				slog.String("err", err.Error()))
+		}
+		return err
+	}
+	if opt.callback != nil {
+		opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackStarted, 0, d.Size)
+		ticker := time.NewTicker(blobCBFreq)
+		done := make(chan bool)
+		defer func() {
+			close(done)
+			ticker.Stop()
+			if ctx.Err() == nil {
+				opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackFinished, d.Size, d.Size)
+			}
+		}()
+		go func() {
+			for {
+				select {
+				case <-done:
+					return
+				case <-ticker.C:
+					offset, err := blobIO.Seek(0, io.SeekCurrent)
+					if err == nil && offset > 0 {
+						opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackActive, offset, d.Size)
+					}
+				}
+			}
+		}()
+	}
+	if opt.readerHook != nil {
+		blobIO, err = opt.readerHook(blobIO)
+		if err != nil {
+			rc.slog.Warn("Failed to apply reader hook to blob",
+				slog.String("src", refSrc.Reference),
+				slog.String("err", err.Error()))
+			return err
+		}
+	}
+	defer blobIO.Close()
+	if _, err := rc.BlobPut(ctx, refTgt, blobIO.GetDescriptor(), blobIO); err != nil {
+		if !errors.Is(err, context.Canceled) {
+			rc.slog.Warn("Failed to push blob",
+				slog.String("src", refSrc.Reference),
+				slog.String("tgt", refTgt.Reference),
+				slog.String("err", err.Error()))
+		}
+		return err
+	}
+	return nil
+}
+
+// BlobDelete removes a blob from the registry.
+// This method should only be used to repair a damaged registry.
+// Typically a server side garbage collection should be used to purge unused blobs.
+func (rc *RegClient) BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error {
+	if !r.IsSetRepo() {
+		return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference)
+	}
+	schemeAPI, err := rc.schemeGet(r.Scheme)
+	if err != nil {
+		return err
+	}
+	return schemeAPI.BlobDelete(ctx, r, d)
+}
+
+// BlobGet retrieves a blob, returning a reader.
+// This reader must be closed to free up resources that limit concurrent pulls.
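+//
+// A usage sketch (r and d are assumed to be resolved by the caller):
+//
+//	rdr, err := rc.BlobGet(ctx, r, d)
+//	if err != nil {
+//		return err
+//	}
+//	defer rdr.Close()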
+func (rc *RegClient) BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + data, err := d.GetData() + if err == nil { + return blob.NewReader(blob.WithDesc(d), blob.WithRef(r), blob.WithReader(bytes.NewReader(data))), nil + } + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return nil, err + } + return schemeAPI.BlobGet(ctx, r, d) +} + +// BlobGetOCIConfig retrieves an OCI config from a blob, automatically extracting the JSON. +func (rc *RegClient) BlobGetOCIConfig(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.OCIConfig, error) { + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + b, err := rc.BlobGet(ctx, r, d) + if err != nil { + return nil, err + } + return b.ToOCIConfig() +} + +// BlobHead is used to verify if a blob exists and is accessible. +func (rc *RegClient) BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return nil, err + } + return schemeAPI.BlobHead(ctx, r, d) +} + +// BlobMount attempts to perform a server side copy/mount of the blob between repositories. +func (rc *RegClient) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor) error { + if !refSrc.IsSetRepo() { + return fmt.Errorf("ref is not set: %s%.0w", refSrc.CommonName(), errs.ErrInvalidReference) + } + if !refTgt.IsSetRepo() { + return fmt.Errorf("ref is not set: %s%.0w", refTgt.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(refSrc.Scheme) + if err != nil { + return err + } + return schemeAPI.BlobMount(ctx, refSrc, refTgt, d) +} + +// BlobPut uploads a blob to a repository. +// Descriptor is optional, leave size and digest to zero value if unknown. +// Reader must also be an [io.Seeker] to support chunked upload fallback. +// +// This will attempt an anonymous blob mount first which some registries may support. +// It will then try doing a full put of the blob without chunking (most widely supported). +// If the full put fails, it will fall back to a chunked upload (useful for flaky networks). +func (rc *RegClient) BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) { + if !r.IsSetRepo() { + return descriptor.Descriptor{}, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return descriptor.Descriptor{}, err + } + return schemeAPI.BlobPut(ctx, r, d, rdr) +} diff --git a/vendor/github.com/regclient/regclient/config/credhelper.go b/vendor/github.com/regclient/regclient/config/credhelper.go new file mode 100644 index 000000000..413e70404 --- /dev/null +++ b/vendor/github.com/regclient/regclient/config/credhelper.go @@ -0,0 +1,100 @@ +package config + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "strings" +) + +// credHelper wraps a command that manages user credentials. 
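+// The wrapped program follows the docker credential helper convention:
+// it is invoked with an action argument (e.g. "get" or "list"), reads the
+// registry hostname from stdin, and writes a JSON response to stdout (see
+// credStore below for the shape of the "get" response).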
+type credHelper struct { + prog string + env map[string]string +} + +func newCredHelper(prog string, env map[string]string) *credHelper { + return &credHelper{prog: prog, env: env} +} + +func (ch *credHelper) run(arg string, input io.Reader) ([]byte, error) { + //#nosec G204 only untrusted arg is a hostname which the executed command should not trust + cmd := exec.Command(ch.prog, arg) + cmd.Env = os.Environ() + if ch.env != nil { + for k, v := range ch.env { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + } + cmd.Stderr = os.Stderr + cmd.Stdin = input + return cmd.Output() +} + +type credStore struct { + ServerURL string `json:"ServerURL"` + Username string `json:"Username"` + Secret string `json:"Secret"` //#nosec G117 exported struct intentionally holds secrets +} + +// get requests a credential from the helper for a given host. +func (ch *credHelper) get(host *Host) error { + hostname := host.Hostname + if host.CredHost != "" { + hostname = host.CredHost + } + hostIn := strings.NewReader(hostname) + credOut := credStore{ + Username: host.User, + Secret: host.Pass, + } + outB, err := ch.run("get", hostIn) + if err != nil { + outS := strings.TrimSpace(string(outB)) + return fmt.Errorf("error getting credentials, output: %s, error: %w", outS, err) + } + err = json.NewDecoder(bytes.NewReader(outB)).Decode(&credOut) + if err != nil { + return fmt.Errorf("error reading credentials: %w", err) + } + if credOut.Username == tokenUser { + host.User = "" + host.Pass = "" + host.Token = credOut.Secret + } else { + host.User = credOut.Username + host.Pass = credOut.Secret + host.Token = "" + } + return nil +} + +// list returns a list of hosts supported by the credential helper. +func (ch *credHelper) list() ([]Host, error) { + credList := map[string]string{} + outB, err := ch.run("list", bytes.NewReader([]byte{})) + if err != nil { + outS := strings.TrimSpace(string(outB)) + return nil, fmt.Errorf("error getting credential list, output: %s, error: %w", outS, err) + } + err = json.NewDecoder(bytes.NewReader(outB)).Decode(&credList) + if err != nil { + return nil, fmt.Errorf("error reading credential list: %w", err) + } + hostList := []Host{} + for host, user := range credList { + if !HostValidate(host) { + continue + } + h := HostNewName(host) + h.User = user + h.CredHelper = ch.prog + hostList = append(hostList, *h) + } + return hostList, nil +} + +// TODO: store method not implemented diff --git a/vendor/github.com/regclient/regclient/config/docker.go b/vendor/github.com/regclient/regclient/config/docker.go new file mode 100644 index 000000000..cb73a60e6 --- /dev/null +++ b/vendor/github.com/regclient/regclient/config/docker.go @@ -0,0 +1,210 @@ +package config + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "os" + "strings" + + "github.com/regclient/regclient/internal/conffile" + "github.com/regclient/regclient/types/errs" +) + +const ( + // dockerEnv is the environment variable used to look for Docker's config.json. + dockerEnv = "DOCKER_CONFIG" + // dockerEnvConfig is used to inject the config as an environment variable. + dockerEnvConfig = "DOCKER_AUTH_CONFIG" + // dockerDir is the directory name for Docker's config (inside the users home directory). + dockerDir = ".docker" + // dockerConfFile is the name of Docker's config file. + dockerConfFile = "config.json" + // dockerHelperPre is the prefix of docker credential helpers. 
+ dockerHelperPre = "docker-credential-" +) + +// dockerConfig is used to parse the ~/.docker/config.json +type dockerConfig struct { + AuthConfigs map[string]dockerAuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Proxies map[string]dockerProxyConfig `json:"proxies,omitempty"` +} + +// dockerProxyConfig contains proxy configuration settings +type dockerProxyConfig struct { + HTTPProxy string `json:"httpProxy,omitempty"` + HTTPSProxy string `json:"httpsProxy,omitempty"` + NoProxy string `json:"noProxy,omitempty"` + FTPProxy string `json:"ftpProxy,omitempty"` + AllProxy string `json:"allProxy,omitempty"` +} + +// dockerAuthConfig contains the auths +type dockerAuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` //#nosec G117 exported struct intentionally holds secrets + Auth string `json:"auth,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} + +// DockerLoad returns a slice of hosts from the users docker config. +// This will search for the config.json in either the DOCKER_CONFIG identified directory or the default .docker directory. +// It also includes hosts extracted from the DOCKER_AUTH_CONFIG variable. +// If the config file is missing and no value is injected using an environment variable, an empty list is returned. +func DockerLoad() ([]Host, error) { + hosts := []Host{} + errList := []error{} + // load from a file + cf := conffile.New( + conffile.WithHomeDir(dockerDir, dockerConfFile, true), + conffile.WithEnvDir(dockerEnv, dockerConfFile), + ) + rdr, err := cf.Open() + if err != nil && !errors.Is(err, fs.ErrNotExist) { + errList = append(errList, err) + } else if err == nil { + defer rdr.Close() + hostsFile, err := dockerParse(rdr) + if err != nil { + errList = append(errList, err) + } else { + hosts = append(hosts, hostsFile...) + } + } + // load from an env var + hostsEnv, err := DockerLoadEnv(dockerEnvConfig) + if err != nil && !errors.Is(err, errs.ErrNotFound) { + errList = append(errList, err) + } else if err == nil { + hosts = append(hosts, hostsEnv...) + } + // return the concatenated result, only wrapping an error list if necessary + if len(errList) == 1 { + return hosts, errList[0] + } else { + return hosts, errors.Join(errList...) + } +} + +// DockerLoadFile returns a slice of hosts from a named docker config file. +func DockerLoadFile(fname string) ([]Host, error) { + //#nosec G304 scoping file operations to a directory is not yet a feature of regclient. + rdr, err := os.Open(fname) + if err != nil && errors.Is(err, fs.ErrNotExist) { + return []Host{}, nil + } else if err != nil { + return nil, err + } + defer rdr.Close() + return dockerParse(rdr) +} + +// DockerLoadEnv returns a slice of hosts extracted from the config injected in an environment variable. 
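+// The value is expected to hold the same JSON used in a config.json file,
+// for example (hypothetical registry, base64 encoded "user:pass"):
+//
+//	{"auths": {"registry.example.com": {"auth": "dXNlcjpwYXNz"}}}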
+func DockerLoadEnv(envName string) ([]Host, error) { + envVal := os.Getenv(envName) + if envVal == "" { + return []Host{}, errs.ErrNotFound + } + return dockerParse(strings.NewReader(envVal)) +} + +// dockerParse parses a docker config into a slice of Hosts. +func dockerParse(rdr io.Reader) ([]Host, error) { + dc := dockerConfig{} + if err := json.NewDecoder(rdr).Decode(&dc); err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + hosts := []Host{} + for name, auth := range dc.AuthConfigs { + if !HostValidate(name) { + continue + } + h, err := dockerAuthToHost(name, dc, auth) + if err != nil { + continue + } + hosts = append(hosts, h) + } + // also include default entries for credential helpers + for name, helper := range dc.CredentialHelpers { + if !HostValidate(name) { + continue + } + h := HostNewName(name) + h.CredHelper = dockerHelperPre + helper + if _, ok := dc.AuthConfigs[name]; ok { + continue // skip fields with auth config + } + hosts = append(hosts, *h) + } + // add credStore entries + if dc.CredentialsStore != "" { + ch := newCredHelper(dockerHelperPre+dc.CredentialsStore, map[string]string{}) + csHosts, err := ch.list() + if err == nil { + hosts = append(hosts, csHosts...) + } + } + return hosts, nil +} + +// dockerAuthToHost parses an auth entry from a docker config into a Host. +func dockerAuthToHost(name string, conf dockerConfig, auth dockerAuthConfig) (Host, error) { + helper := "" + if conf.CredentialHelpers != nil && conf.CredentialHelpers[name] != "" { + helper = dockerHelperPre + conf.CredentialHelpers[name] + } + // parse base64 auth into user/pass + if auth.Auth != "" { + var err error + auth.Username, auth.Password, err = decodeAuth(auth.Auth) + if err != nil { + return Host{}, err + } + } + if (auth.Username == "" || auth.Password == "") && auth.IdentityToken == "" && helper == "" { + return Host{}, fmt.Errorf("no credentials found for %s", name) + } + + h := HostNewName(name) + // ignore unknown names + if h.Name != DockerRegistry && !strings.HasSuffix(strings.TrimSuffix(name, "/"), h.Name) { + return Host{}, fmt.Errorf("rejecting entry with repository: %s", name) + } + h.User = auth.Username + h.Pass = auth.Password + h.Token = auth.IdentityToken + h.CredHelper = helper + return *h, nil +} + +// decodeAuth extracts a base64 encoded user:pass into the username and password. +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + decoded, err := base64.StdEncoding.DecodeString(authStr) + if err != nil { + return "", "", err + } + userPass := strings.SplitN(string(decoded), ":", 2) + if len(userPass) != 2 { + return "", "", fmt.Errorf("invalid auth configuration file") + } + return userPass[0], strings.Trim(userPass[1], "\x00"), nil +} diff --git a/vendor/github.com/regclient/regclient/config/host.go b/vendor/github.com/regclient/regclient/config/host.go new file mode 100644 index 000000000..8d751482e --- /dev/null +++ b/vendor/github.com/regclient/regclient/config/host.go @@ -0,0 +1,521 @@ +// Package config is used for all regclient configuration settings. +package config + +import ( + "encoding/json" + "fmt" + "io" + "log/slog" + "maps" + "slices" + "strings" + "time" + + "github.com/regclient/regclient/internal/timejson" +) + +// TLSConf specifies whether TLS is enabled and verified for a host. +type TLSConf int + +const ( + // TLSUndefined indicates TLS is not passed, defaults to Enabled. + TLSUndefined TLSConf = iota + // TLSEnabled uses TLS (https) for the connection. 
+	TLSEnabled
+	// TLSInsecure uses TLS but does not verify CA.
+	TLSInsecure
+	// TLSDisabled does not use TLS (http).
+	TLSDisabled
+)
+
+const (
+	// DockerRegistry is the name resolved in docker images on Hub.
+	DockerRegistry = "docker.io"
+	// DockerRegistryAuth is the name provided in docker's config for Hub.
+	DockerRegistryAuth = "https://index.docker.io/v1/"
+	// DockerRegistryDNS is the host to connect to for Hub.
+	DockerRegistryDNS = "registry-1.docker.io"
+	// defaultExpire is the default time to expire a credential and force re-authentication.
+	defaultExpire = time.Hour * 1
+	// defaultCredHelperRetry is the time to refresh a credential from a failed credential helper command.
+	defaultCredHelperRetry = time.Second * 5
+	// defaultConcurrent is the default number of concurrent registry connections.
+	defaultConcurrent = 3
+	// defaultReqPerSec is the default maximum frequency to send requests to a registry.
+	defaultReqPerSec = 0
+	// tokenUser is the username returned by credential helpers that indicates the password is an identity token.
+	tokenUser = "<token>"
+)
+
+// MarshalJSON converts TLSConf to a json string using MarshalText.
+func (t TLSConf) MarshalJSON() ([]byte, error) {
+	s, err := t.MarshalText()
+	if err != nil {
+		return []byte(""), err
+	}
+	return json.Marshal(string(s))
+}
+
+// MarshalText converts TLSConf to a string.
+func (t TLSConf) MarshalText() ([]byte, error) {
+	var s string
+	switch t {
+	default:
+		s = ""
+	case TLSEnabled:
+		s = "enabled"
+	case TLSInsecure:
+		s = "insecure"
+	case TLSDisabled:
+		s = "disabled"
+	}
+	return []byte(s), nil
+}
+
+// UnmarshalJSON converts TLSConf from a json string.
+func (t *TLSConf) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	return t.UnmarshalText([]byte(s))
+}
+
+// UnmarshalText converts TLSConf from a string.
+func (t *TLSConf) UnmarshalText(b []byte) error {
+	switch strings.ToLower(string(b)) {
+	default:
+		return fmt.Errorf("unknown TLS value \"%s\"", b)
+	case "":
+		*t = TLSUndefined
+	case "enabled":
+		*t = TLSEnabled
+	case "insecure":
+		*t = TLSInsecure
+	case "disabled":
+		*t = TLSDisabled
+	}
+	return nil
+}
+
+// Host defines settings for connecting to a registry.
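+// A minimal YAML example (field names follow the yaml tags below, values are
+// hypothetical):
+//
+//	registry: registry.example.com
+//	tls: insecure
+//	user: alice
+//	pass: secret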
+type Host struct { + Name string `json:"-" yaml:"registry,omitempty"` // Name of the registry (required) (yaml configs pass this as a field, json provides this from the object key) + TLS TLSConf `json:"tls,omitempty" yaml:"tls"` // TLS setting: enabled (default), disabled, insecure + RegCert string `json:"regcert,omitempty" yaml:"regcert"` // public pem cert of registry + ClientCert string `json:"clientCert,omitempty" yaml:"clientCert"` // public pem cert for client (mTLS) + ClientKey string `json:"clientKey,omitempty" yaml:"clientKey"` //#nosec G117 private pem cert for client (mTLS) + Hostname string `json:"hostname,omitempty" yaml:"hostname"` // hostname of registry, default is the registry name + User string `json:"user,omitempty" yaml:"user"` // username, not used with credHelper + Pass string `json:"pass,omitempty" yaml:"pass"` //#nosec G117 password, not used with credHelper + Token string `json:"token,omitempty" yaml:"token"` // token, experimental for specific APIs + CredHelper string `json:"credHelper,omitempty" yaml:"credHelper"` // credential helper command for requesting logins + CredExpire timejson.Duration `json:"credExpire,omitempty" yaml:"credExpire"` // time until credential expires + CredHost string `json:"credHost,omitempty" yaml:"credHost"` // used when a helper hostname doesn't match Hostname + PathPrefix string `json:"pathPrefix,omitempty" yaml:"pathPrefix"` // used for mirrors defined within a repository namespace + Mirrors []string `json:"mirrors,omitempty" yaml:"mirrors"` // list of other Host Names to use as mirrors + Priority uint `json:"priority,omitempty" yaml:"priority"` // priority when sorting mirrors, higher priority attempted first + RepoAuth bool `json:"repoAuth,omitempty" yaml:"repoAuth"` // tracks a separate auth per repo + API string `json:"api,omitempty" yaml:"api"` // Deprecated: registry API to use + APIOpts map[string]string `json:"apiOpts,omitempty" yaml:"apiOpts"` // options for APIs + BlobChunk int64 `json:"blobChunk,omitempty" yaml:"blobChunk"` // size of each blob chunk + BlobMax int64 `json:"blobMax,omitempty" yaml:"blobMax"` // threshold to switch to chunked upload, -1 to disable, 0 for regclient.blobMaxPut + ReqPerSec float64 `json:"reqPerSec,omitempty" yaml:"reqPerSec"` // requests per second + ReqConcurrent int64 `json:"reqConcurrent,omitempty" yaml:"reqConcurrent"` // concurrent requests, default is defaultConcurrent(3) + Scheme string `json:"scheme,omitempty" yaml:"scheme"` // Deprecated: use TLS instead + credRefresh time.Time `json:"-" yaml:"-"` // internal use, when to refresh credentials +} + +// Cred defines a user credential for accessing a registry. +type Cred struct { + User, Password, Token string //#nosec G117 exported struct intentionally holds secrets +} + +// HostNew creates a default Host entry. +func HostNew() *Host { + h := Host{ + TLS: TLSEnabled, + APIOpts: map[string]string{}, + ReqConcurrent: int64(defaultConcurrent), + ReqPerSec: float64(defaultReqPerSec), + } + return &h +} + +// HostNewName creates a default Host with a hostname. +func HostNewName(name string) *Host { + return HostNewDefName(nil, name) +} + +// HostNewDefName creates a host using provided defaults and hostname. 
+func HostNewDefName(def *Host, name string) *Host { + var h Host + if def == nil { + h = *HostNew() + } else { + h = *def + // configure required defaults + if h.TLS == TLSUndefined { + h.TLS = TLSEnabled + } + if h.APIOpts == nil { + h.APIOpts = map[string]string{} + } + if h.ReqConcurrent == 0 { + h.ReqConcurrent = int64(defaultConcurrent) + } + if h.ReqPerSec == 0 { + h.ReqPerSec = float64(defaultReqPerSec) + } + // copy any fields that are not passed by value + if len(h.APIOpts) > 0 { + orig := h.APIOpts + h.APIOpts = map[string]string{} + maps.Copy(h.APIOpts, orig) + } + if h.Mirrors != nil { + orig := h.Mirrors + h.Mirrors = make([]string, len(orig)) + copy(h.Mirrors, orig) + } + } + // configure host + scheme, registry, _ := parseName(name) + if scheme == "http" { + h.TLS = TLSDisabled + } + // Docker Hub is a special case + if registry == DockerRegistry { + h.Name = DockerRegistry + h.Hostname = DockerRegistryDNS + h.CredHost = DockerRegistryAuth + return &h + } + h.Name = registry + h.Hostname = registry + if name != registry { + h.CredHost = name + } + return &h +} + +// HostValidate returns true if the scheme is missing or a known value, and the path is not set. +func HostValidate(name string) bool { + scheme, _, path := parseName(name) + return path == "" && (scheme == "https" || scheme == "http") +} + +// GetCred returns the credential, fetching from a credential helper if needed. +func (host *Host) GetCred() Cred { + // refresh from credHelper if needed + if host.CredHelper != "" && (host.credRefresh.IsZero() || time.Now().After(host.credRefresh)) { + host.refreshHelper() + } + return Cred{User: host.User, Password: host.Pass, Token: host.Token} +} + +func (host *Host) refreshHelper() { + if host.CredHelper == "" { + return + } + if host.CredExpire <= 0 { + host.CredExpire = timejson.Duration(defaultExpire) + } + // run a cred helper, calling get method + ch := newCredHelper(host.CredHelper, map[string]string{}) + err := ch.get(host) + if err != nil { + host.credRefresh = time.Now().Add(defaultCredHelperRetry) + } else { + host.credRefresh = time.Now().Add(time.Duration(host.CredExpire)) + } +} + +// IsZero returns true if the struct is set to the zero value or the result of [HostNew]. +func (host Host) IsZero() bool { + if (host.TLS != TLSUndefined && host.TLS != TLSEnabled) || + host.RegCert != "" || + host.ClientCert != "" || + host.ClientKey != "" || + (host.Hostname != "" && host.Hostname != host.Name) || + host.User != "" || + host.Pass != "" || + host.Token != "" || + host.CredHelper != "" || + host.CredExpire != 0 || + host.CredHost != "" || + host.PathPrefix != "" || + len(host.Mirrors) != 0 || + host.Priority != 0 || + host.RepoAuth || + len(host.APIOpts) != 0 || + host.BlobChunk != 0 || + host.BlobMax != 0 || + (host.ReqPerSec != 0 && host.ReqPerSec != float64(defaultReqPerSec)) || + (host.ReqConcurrent != 0 && host.ReqConcurrent != int64(defaultConcurrent)) || + !host.credRefresh.IsZero() { + return false + } + return true +} + +// Merge adds fields from a new config host entry. 
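+// Fields that are set in newHost override the existing values, and a warning
+// is logged whenever a previously configured value changes. A nil log
+// discards those warnings.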
+func (host *Host) Merge(newHost Host, log *slog.Logger) error {
+	name := newHost.Name
+	if name == "" {
+		name = host.Name
+	}
+	if log == nil {
+		log = slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{}))
+	}
+
+	// merge the existing and new config host
+	if host.Name == "" {
+		// only set the name if it's not initialized, this shouldn't normally change
+		host.Name = newHost.Name
+	}
+
+	if newHost.CredHelper == "" && (newHost.Pass != "" || newHost.Token != "") {
+		// unset existing cred helper for user/pass or token
+		host.CredHelper = ""
+		host.CredExpire = 0
+	}
+	if newHost.CredHelper != "" && newHost.User == "" && newHost.Pass == "" && newHost.Token == "" {
+		// unset existing user/pass/token for cred helper
+		host.User = ""
+		host.Pass = ""
+		host.Token = ""
+	}
+
+	if newHost.User != "" {
+		if host.User != "" && host.User != newHost.User {
+			log.Warn("Changing login user for registry",
+				slog.String("orig", host.User),
+				slog.String("new", newHost.User),
+				slog.String("host", name))
+		}
+		host.User = newHost.User
+	}
+
+	if newHost.Pass != "" {
+		if host.Pass != "" && host.Pass != newHost.Pass {
+			log.Warn("Changing login password for registry",
+				slog.String("host", name))
+		}
+		host.Pass = newHost.Pass
+	}
+
+	if newHost.Token != "" {
+		if host.Token != "" && host.Token != newHost.Token {
+			log.Warn("Changing login token for registry",
+				slog.String("host", name))
+		}
+		host.Token = newHost.Token
+	}
+
+	if newHost.CredHelper != "" {
+		if host.CredHelper != "" && host.CredHelper != newHost.CredHelper {
+			log.Warn("Changing credential helper for registry",
+				slog.String("host", name),
+				slog.String("orig", host.CredHelper),
+				slog.String("new", newHost.CredHelper))
+		}
+		host.CredHelper = newHost.CredHelper
+	}
+
+	if newHost.CredExpire != 0 {
+		if host.CredExpire != 0 && host.CredExpire != newHost.CredExpire {
+			log.Warn("Changing credential expire for registry",
+				slog.String("host", name),
+				slog.Any("orig", host.CredExpire),
+				slog.Any("new", newHost.CredExpire))
+		}
+		host.CredExpire = newHost.CredExpire
+	}
+
+	if newHost.CredHost != "" {
+		if host.CredHost != "" && host.CredHost != newHost.CredHost {
+			log.Warn("Changing credential host for registry",
+				slog.String("host", name),
+				slog.String("orig", host.CredHost),
+				slog.String("new", newHost.CredHost))
+		}
+		host.CredHost = newHost.CredHost
+	}
+
+	if newHost.TLS != TLSUndefined {
+		if host.TLS != TLSUndefined && host.TLS != newHost.TLS {
+			tlsOrig, _ := host.TLS.MarshalText()
+			tlsNew, _ := newHost.TLS.MarshalText()
+			log.Warn("Changing TLS settings for registry",
+				slog.String("orig", string(tlsOrig)),
+				slog.String("new", string(tlsNew)),
+				slog.String("host", name))
+		}
+		host.TLS = newHost.TLS
+	}
+
+	if newHost.RegCert != "" {
+		if host.RegCert != "" && host.RegCert != newHost.RegCert {
+			log.Warn("Changing certificate settings for registry",
+				slog.String("orig", host.RegCert),
+				slog.String("new", newHost.RegCert),
+				slog.String("host", name))
+		}
+		host.RegCert = newHost.RegCert
+	}
+
+	if newHost.ClientCert != "" {
+		if host.ClientCert != "" && host.ClientCert != newHost.ClientCert {
+			log.Warn("Changing client certificate settings for registry",
+				slog.String("orig", host.ClientCert),
+				slog.String("new", newHost.ClientCert),
+				slog.String("host", name))
+		}
+		host.ClientCert = newHost.ClientCert
+	}
+
+	if newHost.ClientKey != "" {
+		if host.ClientKey != "" && host.ClientKey != newHost.ClientKey {
+			log.Warn("Changing client certificate key settings for registry",
+				slog.String("host", name))
+		}
+		host.ClientKey = newHost.ClientKey
+	}
+
+	if newHost.Hostname != "" {
+		if host.Hostname != "" && host.Hostname != newHost.Hostname {
+			log.Warn("Changing hostname settings for registry",
+				slog.String("orig", host.Hostname),
+				slog.String("new", newHost.Hostname),
+				slog.String("host", name))
+		}
+		host.Hostname = newHost.Hostname
+	}
+
+	if newHost.PathPrefix != "" {
+		newHost.PathPrefix = strings.Trim(newHost.PathPrefix, "/") // leading and trailing / are not needed
+		if host.PathPrefix != "" && host.PathPrefix != newHost.PathPrefix {
+			log.Warn("Changing path prefix settings for registry",
+				slog.String("orig", host.PathPrefix),
+				slog.String("new", newHost.PathPrefix),
+				slog.String("host", name))
+		}
+		host.PathPrefix = newHost.PathPrefix
+	}
+
+	if len(newHost.Mirrors) > 0 {
+		if len(host.Mirrors) > 0 && !slices.Equal(host.Mirrors, newHost.Mirrors) {
+			log.Warn("Changing mirror settings for registry",
+				slog.Any("orig", host.Mirrors),
+				slog.Any("new", newHost.Mirrors),
+				slog.String("host", name))
+		}
+		host.Mirrors = newHost.Mirrors
+	}
+
+	if newHost.Priority != 0 {
+		if host.Priority != 0 && host.Priority != newHost.Priority {
+			log.Warn("Changing priority settings for registry",
+				slog.Uint64("orig", uint64(host.Priority)),
+				slog.Uint64("new", uint64(newHost.Priority)),
+				slog.String("host", name))
+		}
+		host.Priority = newHost.Priority
+	}
+
+	if newHost.RepoAuth {
+		host.RepoAuth = newHost.RepoAuth
+	}
+
+	// TODO: eventually delete
+	if newHost.API != "" {
+		log.Warn("API field has been deprecated",
+			slog.String("api", newHost.API),
+			slog.String("host", name))
+	}
+
+	if len(newHost.APIOpts) > 0 {
+		if len(host.APIOpts) > 0 {
+			merged := maps.Clone(host.APIOpts)
+			for k, v := range newHost.APIOpts {
+				if host.APIOpts[k] != "" && host.APIOpts[k] != v {
+					log.Warn("Changing APIOpts setting for registry",
+						slog.String("orig", host.APIOpts[k]),
+						slog.String("new", newHost.APIOpts[k]),
+						slog.String("opt", k),
+						slog.String("host", name))
+				}
+				merged[k] = v
+			}
+			host.APIOpts = merged
+		} else {
+			host.APIOpts = newHost.APIOpts
+		}
+	}
+
+	if newHost.BlobChunk > 0 {
+		if host.BlobChunk != 0 && host.BlobChunk != newHost.BlobChunk {
+			log.Warn("Changing blobChunk settings for registry",
+				slog.Int64("orig", host.BlobChunk),
+				slog.Int64("new", newHost.BlobChunk),
+				slog.String("host", name))
+		}
+		host.BlobChunk = newHost.BlobChunk
+	}
+
+	if newHost.BlobMax != 0 {
+		if host.BlobMax != 0 && host.BlobMax != newHost.BlobMax {
+			log.Warn("Changing blobMax settings for registry",
+				slog.Int64("orig", host.BlobMax),
+				slog.Int64("new", newHost.BlobMax),
+				slog.String("host", name))
+		}
+		host.BlobMax = newHost.BlobMax
+	}
+
+	if newHost.ReqPerSec != 0 {
+		if host.ReqPerSec != 0 && host.ReqPerSec != newHost.ReqPerSec {
+			log.Warn("Changing reqPerSec settings for registry",
+				slog.Float64("orig", host.ReqPerSec),
+				slog.Float64("new", newHost.ReqPerSec),
+				slog.String("host", name))
+		}
+		host.ReqPerSec = newHost.ReqPerSec
+	}
+
+	if newHost.ReqConcurrent > 0 {
+		if host.ReqConcurrent != 0 && host.ReqConcurrent != newHost.ReqConcurrent {
+			log.Warn("Changing reqConcurrent settings for registry",
+				slog.Int64("orig", host.ReqConcurrent),
+				slog.Int64("new", newHost.ReqConcurrent),
+				slog.String("host", name))
+		}
+		host.ReqConcurrent = newHost.ReqConcurrent
+	}
+
+	return nil
+}
+
+// parseName splits a registry into the scheme, hostname, and repository/path.
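+// For example (hostnames are hypothetical):
+//
+//	parseName("registry.example.com")          // "https", "registry.example.com", ""
+//	parseName("http://localhost:5000/project") // "http", "localhost:5000", "project"
+//	parseName("docker.io")                     // "https", "docker.io", ""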
+func parseName(name string) (string, string, string) { + scheme := "https" + path := "" + // Docker Hub is a special case + if name == DockerRegistryAuth || name == DockerRegistryDNS || name == DockerRegistry { + return scheme, DockerRegistry, "" + } + // handle http/https prefix + i := strings.Index(name, "://") + if i > 0 { + scheme = name[:i] + name = name[i+3:] + } + // trim any repository path + i = strings.Index(name, "/") + if i > 0 { + path = name[i+1:] + name = name[:i] + } + return scheme, name, path +} diff --git a/vendor/github.com/regclient/regclient/image.go b/vendor/github.com/regclient/regclient/image.go new file mode 100644 index 000000000..6a9a0c877 --- /dev/null +++ b/vendor/github.com/regclient/regclient/image.go @@ -0,0 +1,1905 @@ +package regclient + +import ( + "archive/tar" + "cmp" + "compress/gzip" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log/slog" + "net/url" + "path/filepath" + "slices" + "strings" + "sync" + "time" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/pkg/archive" + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/platform" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/warning" +) + +const ( + dockerManifestFilename = "manifest.json" + ociLayoutVersion = "1.0.0" + ociIndexFilename = "index.json" + ociLayoutFilename = "oci-layout" + annotationRefName = "org.opencontainers.image.ref.name" + annotationImageName = "io.containerd.image.name" +) + +// used by import/export to match docker tar expected format +type dockerTarManifest struct { + Config string + RepoTags []string + Layers []string + Parent digest.Digest `json:",omitempty"` + LayerSources map[digest.Digest]descriptor.Descriptor `json:",omitempty"` +} + +type ( + tarFileHandler func(header *tar.Header, trd *tarReadData) error + tarReadData struct { + tr *tar.Reader + name string + handleAdded bool + handlers map[string]tarFileHandler + links map[string][]string + processed map[string]bool + finish []func() error + // data processed from various handlers + manifests map[digest.Digest]manifest.Manifest + ociIndex v1.Index + ociManifest manifest.Manifest + dockerManifestFound bool + dockerManifestList []dockerTarManifest + dockerManifest schema2.Manifest + } +) + +type tarWriteData struct { + tw *tar.Writer + dirs map[string]bool + files map[string]bool + // uid, gid int + mode int64 + timestamp time.Time +} + +type imageOpt struct { + callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64) + checkBaseDigest string + checkBaseRef string + checkSkipConfig bool + child bool + exportCompress bool + exportRef ref.Ref + fastCheck bool + forceRecursive bool + importName string + includeExternal bool + digestTags bool + platform string + platforms []string + referrerConfs []scheme.ReferrerConfig + referrerSrc ref.Ref + referrerTgt ref.Ref + tagList []string + mu sync.Mutex + seen map[string]*imageSeen + finalFn []func(context.Context) error + blobReaderHook 
func(*blob.BReader) (*blob.BReader, error)
+}
+
+type imageSeen struct {
+	done chan struct{}
+	err  error
+}
+
+// ImageOpts define options for the Image* commands.
+type ImageOpts func(*imageOpt)
+
+// ImageWithBlobReaderHook calls the given function on every blob copy in [RegClient.ImageCopy].
+// The hook receives a [blob.BReader] from getting the blob from the source.
+// The returned [blob.BReader] will be used for pushing the blob to the target.
+// If the hook returns an error on any blob, the image copy may fail.
+func ImageWithBlobReaderHook(fn func(*blob.BReader) (*blob.BReader, error)) ImageOpts {
+	return func(opts *imageOpt) {
+		opts.blobReaderHook = fn
+	}
+}
+
+// ImageWithCallback provides progress data to a callback function.
+func ImageWithCallback(callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64)) ImageOpts {
+	return func(opts *imageOpt) {
+		opts.callback = callback
+	}
+}
+
+// ImageWithCheckBaseDigest provides a base digest to compare in ImageCheckBase.
+func ImageWithCheckBaseDigest(d string) ImageOpts {
+	return func(opts *imageOpt) {
+		opts.checkBaseDigest = d
+	}
+}
+
+// ImageWithCheckBaseRef provides a base reference to compare in ImageCheckBase.
+func ImageWithCheckBaseRef(r string) ImageOpts {
+	return func(opts *imageOpt) {
+		opts.checkBaseRef = r
+	}
+}
+
+// ImageWithCheckSkipConfig skips the configuration check in ImageCheckBase.
+func ImageWithCheckSkipConfig() ImageOpts {
+	return func(opts *imageOpt) {
+		opts.checkSkipConfig = true
+	}
+}
+
+// ImageWithChild marks the copy as a child manifest in ImageCopy, pushing it with WithManifestChild.
+func ImageWithChild() ImageOpts {
+	return func(opts *imageOpt) {
+		opts.child = true
+	}
+}
+
+// ImageWithExportCompress adds gzip compression to tar export output in ImageExport.
+func ImageWithExportCompress() ImageOpts {
+	return func(opts *imageOpt) {
+		opts.exportCompress = true
+	}
+}
+
+// ImageWithExportRef overrides the image name embedded in the export file in ImageExport.
+func ImageWithExportRef(r ref.Ref) ImageOpts {
+	return func(opts *imageOpt) {
+		opts.exportRef = r
+	}
+}
+
+// ImageWithFastCheck skips the check for referrers when the manifest has already been copied in ImageCopy.
+func ImageWithFastCheck() ImageOpts {
+	return func(opts *imageOpt) {
+		opts.fastCheck = true
+	}
+}
+
+// ImageWithForceRecursive attempts to copy every manifest and blob even if parent manifests already exist in ImageCopy.
+func ImageWithForceRecursive() ImageOpts {
+	return func(opts *imageOpt) {
+		opts.forceRecursive = true
+	}
+}
+
+// ImageWithImportName selects the name of the image to import when multiple images are included in ImageImport.
+func ImageWithImportName(name string) ImageOpts {
+	return func(opts *imageOpt) {
+		opts.importName = name
+	}
+}
+
+// ImageWithIncludeExternal also copies external layers (those with URLs defined) in ImageCopy.
+func ImageWithIncludeExternal() ImageOpts {
+	return func(opts *imageOpt) {
+		opts.includeExternal = true
+	}
+}
+
+// ImageWithDigestTags looks for "sha-.*" tags in the repo to copy with any manifest in ImageCopy.
+// These are used by some artifact systems like sigstore/cosign.
+func ImageWithDigestTags() ImageOpts {
+	return func(opts *imageOpt) {
+		opts.digestTags = true
+	}
+}
+
+// ImageWithPlatform requests specific platforms from a manifest list in ImageCheckBase.
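+// Platform strings follow the usual OCI "os/arch" form, for example "linux/amd64",
+// and are parsed with platform.Parse.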
+func ImageWithPlatform(p string) ImageOpts {
+	return func(opts *imageOpt) {
+		opts.platform = p
+	}
+}
+
+// ImageWithPlatforms only copies specific platforms from a manifest list in ImageCopy.
+// This will result in a failure on many registries that validate manifests.
+// Use the empty string to indicate images without a platform definition should be copied.
+func ImageWithPlatforms(p []string) ImageOpts {
+	return func(opts *imageOpt) {
+		opts.platforms = p
+	}
+}
+
+// ImageWithReferrers recursively includes referrer images in ImageCopy.
+func ImageWithReferrers(rOpts ...scheme.ReferrerOpts) ImageOpts {
+	return func(opts *imageOpt) {
+		if opts.referrerConfs == nil {
+			opts.referrerConfs = []scheme.ReferrerConfig{}
+		}
+		rConf := scheme.ReferrerConfig{}
+		for _, rOpt := range rOpts {
+			rOpt(&rConf)
+		}
+		opts.referrerConfs = append(opts.referrerConfs, rConf)
+	}
+}
+
+// ImageWithReferrerSrc specifies an alternate repository to pull referrers from.
+func ImageWithReferrerSrc(src ref.Ref) ImageOpts {
+	return func(opts *imageOpt) {
+		opts.referrerSrc = src
+	}
+}
+
+// ImageWithReferrerTgt specifies an alternate repository to push referrers to.
+func ImageWithReferrerTgt(tgt ref.Ref) ImageOpts {
+	return func(opts *imageOpt) {
+		opts.referrerTgt = tgt
+	}
+}
+
+// ImageCheckBase returns nil if the base image is unchanged.
+// A base image mismatch returns an error that wraps errs.ErrMismatch.
+func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...ImageOpts) error {
+	var opt imageOpt
+	for _, optFn := range opts {
+		optFn(&opt)
+	}
+	var m manifest.Manifest
+	var err error
+
+	// dedup warnings
+	if w := warning.FromContext(ctx); w == nil {
+		ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()})
+	}
+	// if the base name is not provided, check image for base annotations
+	if opt.checkBaseRef == "" {
+		m, err = rc.ManifestGet(ctx, r)
+		if err != nil {
+			return err
+		}
+		ma, ok := m.(manifest.Annotator)
+		if !ok {
+			return fmt.Errorf("image does not support annotations, base image must be provided%.0w", errs.ErrMissingAnnotation)
+		}
+		annot, err := ma.GetAnnotations()
+		if err != nil {
+			return err
+		}
+		if baseName, ok := annot[types.AnnotationBaseImageName]; ok {
+			opt.checkBaseRef = baseName
+		} else {
+			return fmt.Errorf("image does not have a base annotation, base image must be provided%.0w", errs.ErrMissingAnnotation)
+		}
+		if baseDig, ok := annot[types.AnnotationBaseImageDigest]; ok {
+			opt.checkBaseDigest = baseDig
+		}
+	}
+	baseR, err := ref.New(opt.checkBaseRef)
+	if err != nil {
+		return err
+	}
+	defer rc.Close(ctx, baseR)
+
+	// if the digest is available, check if that matches the base name
+	if opt.checkBaseDigest != "" {
+		baseMH, err := rc.ManifestHead(ctx, baseR, WithManifestRequireDigest())
+		if err != nil {
+			return err
+		}
+		expectDig, err := digest.Parse(opt.checkBaseDigest)
+		if err != nil {
+			return err
+		}
+		if baseMH.GetDescriptor().Digest == expectDig {
+			rc.slog.Debug("base image digest matches",
+				slog.String("name", baseR.CommonName()),
+				slog.String("digest", baseMH.GetDescriptor().Digest.String()))
+			return nil
+		} else {
+			rc.slog.Debug("base image digest changed",
+				slog.String("name", baseR.CommonName()),
+				slog.String("digest", baseMH.GetDescriptor().Digest.String()),
+				slog.String("expected", expectDig.String()))
+			return fmt.Errorf("base digest changed, %s, expected %s, received %s%.0w",
+				baseR.CommonName(), expectDig.String(), baseMH.GetDescriptor().Digest.String(), errs.ErrMismatch)
+		}
+ } + + // if the digest is not available, compare layers of each manifest + if m == nil { + m, err = rc.ManifestGet(ctx, r) + if err != nil { + return err + } + } + if m.IsList() && opt.platform != "" { + p, err := platform.Parse(opt.platform) + if err != nil { + return err + } + d, err := manifest.GetPlatformDesc(m, &p) + if err != nil { + return err + } + rp := r.AddDigest(d.Digest.String()) + m, err = rc.ManifestGet(ctx, rp) + if err != nil { + return err + } + } + if m.IsList() { + // loop through each platform + ml, ok := m.(manifest.Indexer) + if !ok { + return fmt.Errorf("manifest list is not an Indexer") + } + dl, err := ml.GetManifestList() + if err != nil { + return err + } + for _, d := range dl { + rp := r.AddDigest(d.Digest.String()) + optP := append(opts, ImageWithPlatform(d.Platform.String())) + err = rc.ImageCheckBase(ctx, rp, optP...) + if err != nil { + return fmt.Errorf("platform %s mismatch: %w", d.Platform.String(), err) + } + } + return nil + } + img, ok := m.(manifest.Imager) + if !ok { + return fmt.Errorf("manifest must be an image") + } + layers, err := img.GetLayers() + if err != nil { + return err + } + baseM, err := rc.ManifestGet(ctx, baseR) + if err != nil { + return err + } + if baseM.IsList() && opt.platform != "" { + p, err := platform.Parse(opt.platform) + if err != nil { + return err + } + d, err := manifest.GetPlatformDesc(baseM, &p) + if err != nil { + return err + } + baseM, err = rc.ManifestGet(ctx, baseR, WithManifestDesc(*d)) + if err != nil { + return err + } + } + baseImg, ok := baseM.(manifest.Imager) + if !ok { + return fmt.Errorf("base image manifest must be an image") + } + baseLayers, err := baseImg.GetLayers() + if err != nil { + return err + } + if len(baseLayers) <= 0 { + return fmt.Errorf("base image has no layers") + } + for i := range baseLayers { + if i >= len(layers) { + return fmt.Errorf("image has fewer layers than base image") + } + if !layers[i].Same(baseLayers[i]) { + rc.slog.Debug("image layer changed", + slog.Int("layer", i), + slog.String("expected", layers[i].Digest.String()), + slog.String("digest", baseLayers[i].Digest.String())) + return fmt.Errorf("base layer changed, %s[%d], expected %s, received %s%.0w", + baseR.CommonName(), i, layers[i].Digest.String(), baseLayers[i].Digest.String(), errs.ErrMismatch) + } + } + + if opt.checkSkipConfig { + return nil + } + + // if the layers match, compare the config history + confDesc, err := img.GetConfig() + if err != nil { + return err + } + conf, err := rc.BlobGetOCIConfig(ctx, r, confDesc) + if err != nil { + return err + } + confOCI := conf.GetConfig() + baseConfDesc, err := baseImg.GetConfig() + if err != nil { + return err + } + baseConf, err := rc.BlobGetOCIConfig(ctx, baseR, baseConfDesc) + if err != nil { + return err + } + baseConfOCI := baseConf.GetConfig() + for i := range baseConfOCI.History { + if i >= len(confOCI.History) { + return fmt.Errorf("image has fewer history entries than base image") + } + if baseConfOCI.History[i].Author != confOCI.History[i].Author || + baseConfOCI.History[i].Comment != confOCI.History[i].Comment || + !baseConfOCI.History[i].Created.Equal(*confOCI.History[i].Created) || + baseConfOCI.History[i].CreatedBy != confOCI.History[i].CreatedBy || + baseConfOCI.History[i].EmptyLayer != confOCI.History[i].EmptyLayer { + rc.slog.Debug("image history changed", + slog.Int("index", i), + slog.Any("expected", confOCI.History[i]), + slog.Any("history", baseConfOCI.History[i])) + return fmt.Errorf("base history changed, %s[%d], expected %v, received 
%v%.0w",
+				baseR.CommonName(), i, confOCI.History[i], baseConfOCI.History[i], errs.ErrMismatch)
+		}
+	}
+
+	rc.slog.Debug("base image layers and history match",
+		slog.String("base", baseR.CommonName()))
+	return nil
+}
+
+// ImageConfig returns the OCI config of a given image.
+// Use [ImageWithPlatform] to select a platform from an Index or Manifest List.
+func (rc *RegClient) ImageConfig(ctx context.Context, r ref.Ref, opts ...ImageOpts) (*blob.BOCIConfig, error) {
+	opt := imageOpt{
+		platform: "local",
+	}
+	for _, optFn := range opts {
+		optFn(&opt)
+	}
+	// dedup warnings
+	if w := warning.FromContext(ctx); w == nil {
+		ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()})
+	}
+	p, err := platform.Parse(opt.platform)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse platform %s: %w", opt.platform, err)
+	}
+	m, err := rc.ManifestGet(ctx, r, WithManifestPlatform(p))
+	if err != nil {
+		return nil, fmt.Errorf("failed to get manifest: %w", err)
+	}
+	for m.IsList() {
+		mi, ok := m.(manifest.Indexer)
+		if !ok {
+			return nil, fmt.Errorf("unsupported manifest type: %s", m.GetDescriptor().MediaType)
+		}
+		ml, err := mi.GetManifestList()
+		if err != nil {
+			return nil, fmt.Errorf("failed to get manifest list: %w", err)
+		}
+		d, err := descriptor.DescriptorListSearch(ml, descriptor.MatchOpt{Platform: &p})
+		if err != nil {
+			return nil, fmt.Errorf("failed to find platform in manifest list: %w", err)
+		}
+		m, err = rc.ManifestGet(ctx, r, WithManifestDesc(d))
+		if err != nil {
+			return nil, fmt.Errorf("failed to get manifest: %w", err)
+		}
+	}
+	mi, ok := m.(manifest.Imager)
+	if !ok {
+		return nil, fmt.Errorf("unsupported manifest type: %s", m.GetDescriptor().MediaType)
+	}
+	d, err := mi.GetConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get image config: %w", err)
+	}
+	if d.MediaType != mediatype.OCI1ImageConfig && d.MediaType != mediatype.Docker2ImageConfig {
+		return nil, fmt.Errorf("unsupported config media type %s: %w", d.MediaType, errs.ErrUnsupportedMediaType)
+	}
+	return rc.BlobGetOCIConfig(ctx, r, d)
+}
+
+// ImageCopy copies an image.
+// This will retag an image in the same repository, only pushing and pulling the top level manifest.
+// On the same registry, it will attempt to use cross-repository blob mounts to avoid pulling blobs.
+// Blobs are only pulled when they don't exist on the target and a blob mount fails.
+// Referrers are optionally copied recursively.
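+//
+// A minimal usage sketch (hypothetical registry names, error handling elided):
+//
+//	src, _ := ref.New("registry.example.com/app:v1")
+//	tgt, _ := ref.New("mirror.example.com/app:v1")
+//	err := rc.ImageCopy(ctx, src, tgt, ImageWithDigestTags())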
+func (rc *RegClient) ImageCopy(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, opts ...ImageOpts) error { + opt := imageOpt{ + seen: map[string]*imageSeen{}, + finalFn: []func(context.Context) error{}, + } + for _, optFn := range opts { + optFn(&opt) + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + // block GC from running (in OCIDir) during the copy + schemeTgtAPI, err := rc.schemeGet(refTgt.Scheme) + if err != nil { + return err + } + if tgtGCLocker, isGCLocker := schemeTgtAPI.(scheme.GCLocker); isGCLocker { + tgtGCLocker.GCLock(refTgt) + defer tgtGCLocker.GCUnlock(refTgt) + } + // run the copy of manifests and blobs recursively + err = rc.imageCopyOpt(ctx, refSrc, refTgt, descriptor.Descriptor{}, opt.child, []digest.Digest{}, &opt) + if err != nil { + return err + } + // run any final functions, digest-tags and referrers that detected loops are retried here + for _, fn := range opt.finalFn { + err := fn(ctx) + if err != nil { + return err + } + } + return nil +} + +// imageCopyOpt is a thread safe copy of a manifest and nested content. +func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor, child bool, parents []digest.Digest, opt *imageOpt) (err error) { + var mSrc, mTgt manifest.Manifest + var sDig digest.Digest + refTgtRepo := refTgt.SetTag("").CommonName() + seenCB := func(error) {} + defer func() { + if seenCB != nil { + seenCB(err) + } + }() + // if digest is provided and we are already copying it, wait + if d.Digest != "" { + sDig = d.Digest + } else if refSrc.Digest != "" { + sDig = digest.Digest(refSrc.Digest) + } + if sDig != "" { + if seenCB, err = imageSeenOrWait(ctx, opt, refTgtRepo, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + // check target with head request + mTgt, err = rc.ManifestHead(ctx, refTgt, WithManifestRequireDigest()) + var urlError *url.Error + if err != nil && errors.As(err, &urlError) { + return fmt.Errorf("failed to access target registry: %w", err) + } + // for non-recursive copies, compare to source digest + if err == nil && (opt.fastCheck || (!opt.forceRecursive && opt.referrerConfs == nil && !opt.digestTags)) { + if sDig == "" { + mSrc, err = rc.ManifestHead(ctx, refSrc, WithManifestRequireDigest()) + if err != nil { + return fmt.Errorf("copy failed, error getting source: %w", err) + } + sDig = mSrc.GetDescriptor().Digest + if seenCB, err = imageSeenOrWait(ctx, opt, refTgtRepo, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + if sDig == mTgt.GetDescriptor().Digest { + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackSkipped, mTgt.GetDescriptor().Size, mTgt.GetDescriptor().Size) + } + return nil + } + } + // when copying/updating digest tags or referrers, only the source digest is needed for an image + if mTgt != nil && mSrc == nil && !opt.forceRecursive && sDig == "" { + mSrc, err = rc.ManifestHead(ctx, refSrc, WithManifestRequireDigest()) + if err != nil { + return fmt.Errorf("copy failed, error getting source: %w", err) + } + sDig = mSrc.GetDescriptor().Digest + if seenCB, err = imageSeenOrWait(ctx, opt, refTgtRepo, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + // get the source manifest when a copy is needed or recursion into the content is needed + if sDig == "" || mTgt == nil || sDig != mTgt.GetDescriptor().Digest || opt.forceRecursive || mTgt.IsList() { + mSrc, err = 
rc.ManifestGet(ctx, refSrc, WithManifestDesc(d)) + if err != nil { + return fmt.Errorf("copy failed, error getting source: %w", err) + } + if sDig == "" { + sDig = mSrc.GetDescriptor().Digest + if seenCB, err = imageSeenOrWait(ctx, opt, refTgtRepo, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + } + // setup vars for a copy + mOpts := []ManifestOpts{} + if child { + mOpts = append(mOpts, WithManifestChild()) + } + bOpt := []BlobOpts{} + if opt.callback != nil { + bOpt = append(bOpt, BlobWithCallback(opt.callback)) + } + if opt.blobReaderHook != nil { + bOpt = append(bOpt, BlobWithReaderHook(opt.blobReaderHook)) + } + waitCh := make(chan error) + waitCount := 0 + ctx, cancel := context.WithCancel(ctx) + defer cancel() + parentsNew := make([]digest.Digest, len(parents)+1) + copy(parentsNew, parents) + parentsNew[len(parentsNew)-1] = sDig + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackStarted, 0, d.Size) + } + // process entries in an index + if mSrcIndex, ok := mSrc.(manifest.Indexer); ok && mSrc.IsSet() && !ref.EqualRepository(refSrc, refTgt) { + // manifest lists need to recursively copy nested images by digest + dList, err := mSrcIndex.GetManifestList() + if err != nil { + return err + } + for _, dEntry := range dList { + // skip copy of platforms not specifically included + if len(opt.platforms) > 0 { + match, err := imagePlatformInList(dEntry.Platform, opt.platforms) + if err != nil { + return err + } + if !match { + rc.slog.Debug("Platform excluded from copy", + slog.Any("platform", dEntry.Platform)) + continue + } + } + waitCount++ + go func() { + var err error + rc.slog.Debug("Copy platform", + slog.Any("platform", dEntry.Platform), + slog.String("digest", dEntry.Digest.String())) + entrySrc := refSrc.SetDigest(dEntry.Digest.String()) + entryTgt := refTgt.SetDigest(dEntry.Digest.String()) + switch dEntry.MediaType { + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, + mediatype.Docker2Manifest, mediatype.Docker2ManifestList, + mediatype.OCI1Manifest, mediatype.OCI1ManifestList: + // known manifest media type + err = rc.imageCopyOpt(ctx, entrySrc, entryTgt, dEntry, true, parentsNew, opt) + case mediatype.Docker2ImageConfig, mediatype.OCI1ImageConfig, + mediatype.Docker2Layer, mediatype.Docker2LayerGzip, mediatype.Docker2LayerZstd, + mediatype.OCI1Layer, mediatype.OCI1LayerGzip, mediatype.OCI1LayerZstd, + mediatype.BuildkitCacheConfig: + // known blob media type + err = rc.imageCopyBlob(ctx, entrySrc, entryTgt, dEntry, opt, bOpt...) + default: + // unknown media type, first try an image copy + err = rc.imageCopyOpt(ctx, entrySrc, entryTgt, dEntry, true, parentsNew, opt) + if err != nil { + // fall back to trying to copy a blob + err = rc.imageCopyBlob(ctx, entrySrc, entryTgt, dEntry, opt, bOpt...) 
+ } + } + waitCh <- err + }() + } + } + + // If source is image, copy blobs + if mSrcImg, ok := mSrc.(manifest.Imager); ok && mSrc.IsSet() && !ref.EqualRepository(refSrc, refTgt) { + // copy the config + cd, err := mSrcImg.GetConfig() + if err != nil { + // docker schema v1 does not have a config object, ignore if it's missing + if !errors.Is(err, errs.ErrUnsupportedMediaType) { + rc.slog.Warn("Failed to get config digest from manifest", + slog.String("ref", refSrc.Reference), + slog.String("err", err.Error())) + return fmt.Errorf("failed to get config digest for %s: %w", refSrc.CommonName(), err) + } + } else { + waitCount++ + go func() { + rc.slog.Info("Copy config", + slog.String("source", refSrc.Reference), + slog.String("target", refTgt.Reference), + slog.String("digest", cd.Digest.String())) + err := rc.imageCopyBlob(ctx, refSrc, refTgt, cd, opt, bOpt...) + if err != nil && !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to copy config", + slog.String("source", refSrc.Reference), + slog.String("target", refTgt.Reference), + slog.String("digest", cd.Digest.String()), + slog.String("err", err.Error())) + } + waitCh <- err + }() + } + + // copy filesystem layers + l, err := mSrcImg.GetLayers() + if err != nil { + return err + } + for _, layerSrc := range l { + if len(layerSrc.URLs) > 0 && !opt.includeExternal { + // skip blobs where the URLs are defined, these aren't hosted and won't be pulled from the source + rc.slog.Debug("Skipping external layer", + slog.String("source", refSrc.Reference), + slog.String("target", refTgt.Reference), + slog.String("layer", layerSrc.Digest.String()), + slog.Any("external-urls", layerSrc.URLs)) + continue + } + waitCount++ + go func() { + rc.slog.Info("Copy layer", + slog.String("source", refSrc.Reference), + slog.String("target", refTgt.Reference), + slog.String("layer", layerSrc.Digest.String())) + err := rc.imageCopyBlob(ctx, refSrc, refTgt, layerSrc, opt, bOpt...) + if err != nil && !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to copy layer", + slog.String("source", refSrc.Reference), + slog.String("target", refTgt.Reference), + slog.String("layer", layerSrc.Digest.String()), + slog.String("err", err.Error())) + } + waitCh <- err + }() + } + } + + // check for any errors and abort early if found + err = nil + done := false + for !done && waitCount > 0 { + if err == nil { + select { + case err = <-waitCh: + if err != nil { + cancel() + } + default: + done = true // happy path + } + } else { + if errors.Is(err, context.Canceled) { + // try to find a better error message than context canceled + err = <-waitCh + } else { + <-waitCh + } + } + if !done { + waitCount-- + } + } + if err != nil { + rc.slog.Debug("child manifest copy failed", + slog.String("err", err.Error()), + slog.String("sDig", sDig.String())) + return err + } + + // copy referrers + referrerTags := []string{} + if opt.referrerConfs != nil { + referrerOpts := []scheme.ReferrerOpts{} + rSubject := refSrc + referrerSrc := refSrc + referrerTgt := refTgt + if opt.referrerSrc.IsSet() { + referrerOpts = append(referrerOpts, scheme.WithReferrerSource(opt.referrerSrc)) + referrerSrc = opt.referrerSrc + } + if opt.referrerTgt.IsSet() { + referrerTgt = opt.referrerTgt + } + if sDig != "" { + rSubject = rSubject.SetDigest(sDig.String()) + } + rl, err := rc.ReferrerList(ctx, rSubject, referrerOpts...) + if err != nil { + return err + } + if !rl.Source.IsSet() || ref.EqualRepository(refSrc, rl.Source) { + referrerTags = append(referrerTags, rl.Tags...) 
+ } + descList := []descriptor.Descriptor{} + if len(opt.referrerConfs) == 0 { + descList = rl.Descriptors + } else { + for _, rConf := range opt.referrerConfs { + rlFilter := scheme.ReferrerFilter(rConf, rl) + descList = append(descList, rlFilter.Descriptors...) + } + } + for _, rDesc := range descList { + opt.mu.Lock() + seen := opt.seen[":"+rDesc.Digest.String()] + opt.mu.Unlock() + if seen != nil { + continue // skip referrers that have been seen + } + referrerSrc := referrerSrc.SetDigest(rDesc.Digest.String()) + referrerTgt := referrerTgt.SetDigest(rDesc.Digest.String()) + waitCount++ + go func() { + err := rc.imageCopyOpt(ctx, referrerSrc, referrerTgt, rDesc, true, parentsNew, opt) + if errors.Is(err, errs.ErrLoopDetected) { + // if a loop is detected, push the referrers copy to the end + opt.mu.Lock() + opt.finalFn = append(opt.finalFn, func(ctx context.Context) error { + return rc.imageCopyOpt(ctx, referrerSrc, referrerTgt, rDesc, true, []digest.Digest{}, opt) + }) + opt.mu.Unlock() + waitCh <- nil + } else { + if err != nil && !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to copy referrer", + slog.String("digest", rDesc.Digest.String()), + slog.String("src", referrerSrc.CommonName()), + slog.String("tgt", referrerTgt.CommonName())) + } + waitCh <- err + } + }() + } + } + + // lookup digest tags to include artifacts with image + if opt.digestTags { + // load tag listing for digest tag copy + opt.mu.Lock() + if opt.tagList == nil { + tl, err := rc.TagList(ctx, refSrc) + if err != nil { + opt.mu.Unlock() + rc.slog.Warn("Failed to list tags for digest-tag copy", + slog.String("source", refSrc.Reference), + slog.String("err", err.Error())) + return err + } + tags, err := tl.GetTags() + if err != nil { + opt.mu.Unlock() + rc.slog.Warn("Failed to list tags for digest-tag copy", + slog.String("source", refSrc.Reference), + slog.String("err", err.Error())) + return err + } + if tags == nil { + tags = []string{} + } + opt.tagList = tags + } + opt.mu.Unlock() + prefix := fmt.Sprintf("%s-%s", sDig.Algorithm(), sDig.Encoded()) + for _, tag := range opt.tagList { + if strings.HasPrefix(tag, prefix) { + // skip referrers that were copied above + if slices.Contains(referrerTags, tag) { + continue + } + refTagSrc := refSrc.SetTag(tag) + refTagTgt := refTgt.SetTag(tag) + waitCount++ + go func() { + err := rc.imageCopyOpt(ctx, refTagSrc, refTagTgt, descriptor.Descriptor{}, false, parentsNew, opt) + if errors.Is(err, errs.ErrLoopDetected) { + // if a loop is detected, push the digest tag copy back to the end + opt.mu.Lock() + opt.finalFn = append(opt.finalFn, func(ctx context.Context) error { + return rc.imageCopyOpt(ctx, refTagSrc, refTagTgt, descriptor.Descriptor{}, false, []digest.Digest{}, opt) + }) + opt.mu.Unlock() + waitCh <- nil + } else { + if err != nil && !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to copy digest-tag", + slog.String("tag", tag), + slog.String("src", refTagSrc.CommonName()), + slog.String("tgt", refTagTgt.CommonName())) + } + waitCh <- err + } + }() + } + } + } + + // wait for background tasks to finish + err = nil + for waitCount > 0 { + if err == nil { + err = <-waitCh + if err != nil { + cancel() + } + } else { + if errors.Is(err, context.Canceled) { + // try to find a better error message than context canceled + err = <-waitCh + } else { + <-waitCh + } + } + waitCount-- + } + if err != nil { + return err + } + + // push manifest + if mTgt == nil || sDig != mTgt.GetDescriptor().Digest || opt.forceRecursive { + err = rc.ManifestPut(ctx, refTgt, 
mSrc, mOpts...) + if err != nil { + if !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to push manifest", + slog.String("target", refTgt.Reference), + slog.String("err", err.Error())) + } + return err + } + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackFinished, d.Size, d.Size) + } + } else { + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackSkipped, d.Size, d.Size) + } + } + if seenCB != nil { + seenCB(nil) + seenCB = nil + } + + return nil +} + +func (rc *RegClient) imageCopyBlob(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor, opt *imageOpt, bOpt ...BlobOpts) error { + seenCB, err := imageSeenOrWait(ctx, opt, refTgt.SetTag("").CommonName(), "", d.Digest, []digest.Digest{}) + if seenCB == nil { + return err + } + err = rc.BlobCopy(ctx, refSrc, refTgt, d, bOpt...) + seenCB(err) + return err +} + +// imageSeenOrWait returns either a callback to report the error when the digest hasn't been seen before +// or it will wait for the previous copy to run and return the error from that copy +func imageSeenOrWait(ctx context.Context, opt *imageOpt, repo, tag string, dig digest.Digest, parents []digest.Digest) (func(error), error) { + var seenNew *imageSeen + key := repo + "/" + tag + ":" + dig.String() + opt.mu.Lock() + seen := opt.seen[key] + if seen == nil { + seenNew = &imageSeen{ + done: make(chan struct{}), + } + opt.seen[key] = seenNew + } + opt.mu.Unlock() + if seen != nil { + // quick check for the previous copy already done + select { + case <-seen.done: + return nil, seen.err + default: + } + // look for loops in parents + for _, p := range parents { + if key == repo+"/"+tag+":"+p.String() { + return nil, errs.ErrLoopDetected + } + } + // wait for copy to finish or context to cancel + done := ctx.Done() + select { + case <-seen.done: + return nil, seen.err + case <-done: + return nil, ctx.Err() + } + } else { + return func(err error) { + seenNew.err = err + close(seenNew.done) + // on failures, delete the history to allow a retry + if err != nil { + opt.mu.Lock() + delete(opt.seen, key) + opt.mu.Unlock() + } + }, nil + } +} + +// ImageExport exports an image to an output stream. +// The format is compatible with "docker load" if a single image is selected and not a manifest list. +// The ref must include a tag for exporting to docker (defaults to latest), and may also include a digest. +// The export is also formatted according to [OCI Layout] which supports multi-platform images. +// A tar file will be sent to outStream. 
+// +// Resulting filesystem: +// - oci-layout: created at top level, can be done at the start +// - index.json: created at top level, single descriptor with org.opencontainers.image.ref.name annotation pointing to the tag +// - manifest.json: created at top level, based on every layer added, only works for a single arch image +// - blobs/$algo/$hash: each content addressable object (manifest, config, or layer), created recursively +// +// [OCI Layout]: https://github.com/opencontainers/image-spec/blob/master/image-layout.md +func (rc *RegClient) ImageExport(ctx context.Context, r ref.Ref, outStream io.Writer, opts ...ImageOpts) error { + if !r.IsSet() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + var ociIndex v1.Index + + var opt imageOpt + for _, optFn := range opts { + optFn(&opt) + } + if opt.exportRef.IsZero() { + opt.exportRef = r + } + + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + // create tar writer object + out := outStream + if opt.exportCompress { + gzOut := gzip.NewWriter(out) + defer gzOut.Close() + out = gzOut + } + tw := tar.NewWriter(out) + defer tw.Close() + twd := &tarWriteData{ + tw: tw, + dirs: map[string]bool{}, + files: map[string]bool{}, + mode: 0o644, + } + + // retrieve image manifest + m, err := rc.ManifestGet(ctx, r) + if err != nil { + rc.slog.Warn("Failed to get manifest", + slog.String("ref", r.CommonName()), + slog.String("err", err.Error())) + return err + } + + // build/write oci-layout + ociLayout := v1.ImageLayout{Version: ociLayoutVersion} + err = twd.tarWriteFileJSON(ociLayoutFilename, ociLayout) + if err != nil { + return err + } + + // create a manifest descriptor + mDesc := m.GetDescriptor() + if mDesc.Annotations == nil { + mDesc.Annotations = map[string]string{} + } + mDesc.Annotations[annotationImageName] = opt.exportRef.CommonName() + mDesc.Annotations[annotationRefName] = opt.exportRef.Tag + + // generate/write an OCI index + ociIndex.Versioned = v1.IndexSchemaVersion + ociIndex.Manifests = []descriptor.Descriptor{mDesc} // initialize with the descriptor to the manifest list + err = twd.tarWriteFileJSON(ociIndexFilename, ociIndex) + if err != nil { + return err + } + + // append to docker manifest with tag, config filename, each layer filename, and layer descriptors + if mi, ok := m.(manifest.Imager); ok { + conf, err := mi.GetConfig() + if err != nil { + return err + } + if err = conf.Digest.Validate(); err != nil { + return err + } + refTag := opt.exportRef.ToReg() + refTag = refTag.SetTag(cmp.Or(refTag.Tag, "latest")) + dockerManifest := dockerTarManifest{ + RepoTags: []string{refTag.CommonName()}, + Config: tarOCILayoutDescPath(conf), + Layers: []string{}, + LayerSources: map[digest.Digest]descriptor.Descriptor{}, + } + dl, err := mi.GetLayers() + if err != nil { + return err + } + for _, d := range dl { + if err = d.Digest.Validate(); err != nil { + return err + } + dockerManifest.Layers = append(dockerManifest.Layers, tarOCILayoutDescPath(d)) + dockerManifest.LayerSources[d.Digest] = d + } + + // marshal manifest and write manifest.json + err = twd.tarWriteFileJSON(dockerManifestFilename, []dockerTarManifest{dockerManifest}) + if err != nil { + return err + } + } + + // recursively include manifests and nested blobs + err = rc.imageExportDescriptor(ctx, r, mDesc, twd) + if err != nil { + return err + } + + return nil +} + +// imageExportDescriptor pulls a manifest or blob, outputs to a 
tar file, and recursively processes any nested manifests or blobs +func (rc *RegClient) imageExportDescriptor(ctx context.Context, r ref.Ref, desc descriptor.Descriptor, twd *tarWriteData) error { + if err := desc.Digest.Validate(); err != nil { + return err + } + tarFilename := tarOCILayoutDescPath(desc) + if twd.files[tarFilename] { + // blob has already been imported into tar, skip + return nil + } + switch desc.MediaType { + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, mediatype.Docker2Manifest, mediatype.OCI1Manifest: + // Handle single platform manifests + // retrieve manifest + m, err := rc.ManifestGet(ctx, r, WithManifestDesc(desc)) + if err != nil { + return err + } + mi, ok := m.(manifest.Imager) + if !ok { + return fmt.Errorf("manifest doesn't support image methods%.0w", errs.ErrUnsupportedMediaType) + } + // write manifest body by digest + mBody, err := m.RawBody() + if err != nil { + return err + } + err = twd.tarWriteHeader(tarFilename, int64(len(mBody))) + if err != nil { + return err + } + _, err = twd.tw.Write(mBody) + if err != nil { + return err + } + + // add config + confD, err := mi.GetConfig() + // ignore unsupported media type errors + if err != nil && !errors.Is(err, errs.ErrUnsupportedMediaType) { + return err + } + if err == nil { + err = rc.imageExportDescriptor(ctx, r, confD, twd) + if err != nil { + return err + } + } + + // loop over layers + layerDL, err := mi.GetLayers() + // ignore unsupported media type errors + if err != nil && !errors.Is(err, errs.ErrUnsupportedMediaType) { + return err + } + if err == nil { + for _, layerD := range layerDL { + err = rc.imageExportDescriptor(ctx, r, layerD, twd) + if err != nil { + return err + } + } + } + + case mediatype.Docker2ManifestList, mediatype.OCI1ManifestList: + // handle OCI index and Docker manifest list + // retrieve manifest + m, err := rc.ManifestGet(ctx, r, WithManifestDesc(desc)) + if err != nil { + return err + } + mi, ok := m.(manifest.Indexer) + if !ok { + return fmt.Errorf("manifest doesn't support index methods%.0w", errs.ErrUnsupportedMediaType) + } + // write manifest body by digest + mBody, err := m.RawBody() + if err != nil { + return err + } + err = twd.tarWriteHeader(tarFilename, int64(len(mBody))) + if err != nil { + return err + } + _, err = twd.tw.Write(mBody) + if err != nil { + return err + } + // recurse over entries in the list/index + mdl, err := mi.GetManifestList() + if err != nil { + return err + } + for _, md := range mdl { + err = rc.imageExportDescriptor(ctx, r, md, twd) + if err != nil { + return err + } + } + + default: + // get blob + blobR, err := rc.BlobGet(ctx, r, desc) + if err != nil { + return err + } + defer blobR.Close() + // write blob by digest + err = twd.tarWriteHeader(tarFilename, int64(desc.Size)) + if err != nil { + return err + } + size, err := io.Copy(twd.tw, blobR) + if err != nil { + return fmt.Errorf("failed to export blob %s: %w", desc.Digest.String(), err) + } + if size != desc.Size { + return fmt.Errorf("blob size mismatch, descriptor %d, received %d", desc.Size, size) + } + } + + return nil +} + +// ImageImport pushes an image from a tar file (ImageExport) to a registry. 
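+//
+// A minimal usage sketch (hypothetical file and reference, error handling elided):
+//
+//	f, _ := os.Open("app.tar") // any io.ReadSeeker works
+//	defer f.Close()
+//	r, _ := ref.New("registry.example.com/app:v1")
+//	err := rc.ImageImport(ctx, r, f)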
+func (rc *RegClient) ImageImport(ctx context.Context, r ref.Ref, rs io.ReadSeeker, opts ...ImageOpts) error {
+	if !r.IsSetRepo() {
+		return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference)
+	}
+	var opt imageOpt
+	for _, optFn := range opts {
+		optFn(&opt)
+	}
+
+	// dedup warnings
+	if w := warning.FromContext(ctx); w == nil {
+		ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()})
+	}
+	trd := &tarReadData{
+		name:      opt.importName,
+		handlers:  map[string]tarFileHandler{},
+		links:     map[string][]string{},
+		processed: map[string]bool{},
+		finish:    []func() error{},
+		manifests: map[digest.Digest]manifest.Manifest{},
+	}
+
+	// add handler for oci-layout, index.json, and manifest.json
+	rc.imageImportOCIAddHandler(ctx, r, trd)
+	rc.imageImportDockerAddHandler(trd)
+
+	// process tar file looking for oci-layout and index.json, load manifests/blobs on success
+	err := trd.tarReadAll(rs)
+
+	if err != nil && errors.Is(err, errs.ErrNotFound) && trd.dockerManifestFound {
+		// import failed but manifest.json found, fall back to manifest.json processing
+		// add handlers for the docker manifest layers
+		rc.imageImportDockerAddLayerHandlers(ctx, r, trd)
+		// reprocess the tar looking for manifest.json files
+		err = trd.tarReadAll(rs)
+		if err != nil {
+			return fmt.Errorf("failed to import layers from docker tar: %w", err)
+		}
+		// push docker manifest
+		m, err := manifest.New(manifest.WithOrig(trd.dockerManifest))
+		if err != nil {
+			return err
+		}
+		err = rc.ManifestPut(ctx, r, m)
+		if err != nil {
+			return err
+		}
+	} else if err != nil {
+		// unhandled error from tar read
+		return err
+	} else {
+		// successful load of OCI blobs, now push manifest and tag
+		err = rc.imageImportOCIPushManifests(ctx, r, trd)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (rc *RegClient) imageImportBlob(ctx context.Context, r ref.Ref, desc descriptor.Descriptor, trd *tarReadData) error {
+	// skip if blob already exists
+	_, err := rc.BlobHead(ctx, r, desc)
+	if err == nil {
+		return nil
+	}
+	// upload blob
+	_, err = rc.BlobPut(ctx, r, desc, trd.tr)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// imageImportDockerAddHandler processes tar files generated by docker.
+func (rc *RegClient) imageImportDockerAddHandler(trd *tarReadData) {
+	trd.handlers[dockerManifestFilename] = func(header *tar.Header, trd *tarReadData) error {
+		err := trd.tarReadFileJSON(&trd.dockerManifestList)
+		if err != nil {
+			return err
+		}
+		trd.dockerManifestFound = true
+		return nil
+	}
+}
+
+// imageImportDockerAddLayerHandlers imports the docker layers when the OCI import fails and a docker manifest is found.
+func (rc *RegClient) imageImportDockerAddLayerHandlers(ctx context.Context, r ref.Ref, trd *tarReadData) {
+	// remove handlers for OCI
+	delete(trd.handlers, ociLayoutFilename)
+	delete(trd.handlers, ociIndexFilename)
+
+	index := 0
+	if trd.name != "" {
+		found := false
+		tags := []string{}
+		for i, entry := range trd.dockerManifestList {
+			tags = append(tags, entry.RepoTags...)
+ if slices.Contains(entry.RepoTags, trd.name) { + index = i + found = true + break + } + } + if !found { + rc.slog.Warn("Could not find requested name", + slog.Any("tags", tags), + slog.String("name", trd.name)) + return + } + } + + // make a docker v2 manifest from first json array entry (can only tag one image) + trd.dockerManifest.SchemaVersion = 2 + trd.dockerManifest.MediaType = mediatype.Docker2Manifest + trd.dockerManifest.Layers = make([]descriptor.Descriptor, len(trd.dockerManifestList[index].Layers)) + + // add handler for config + trd.handlers[filepath.ToSlash(filepath.Clean(trd.dockerManifestList[index].Config))] = func(header *tar.Header, trd *tarReadData) error { + // upload blob, digest is unknown + d, err := rc.BlobPut(ctx, r, descriptor.Descriptor{Size: header.Size}, trd.tr) + if err != nil { + return err + } + // save the resulting descriptor to the manifest + if od, ok := trd.dockerManifestList[index].LayerSources[d.Digest]; ok { + trd.dockerManifest.Config = od + } else { + d.MediaType = mediatype.Docker2ImageConfig + trd.dockerManifest.Config = d + } + return nil + } + // add handlers for each layer + for i, layerFile := range trd.dockerManifestList[index].Layers { + func(i int) { + trd.handlers[filepath.ToSlash(filepath.Clean(layerFile))] = func(header *tar.Header, trd *tarReadData) error { + // ensure blob is compressed + rdrUC, err := archive.Decompress(trd.tr) + if err != nil { + return err + } + gzipR, err := archive.Compress(rdrUC, archive.CompressGzip) + if err != nil { + return err + } + defer gzipR.Close() + // upload blob, digest and size is unknown + d, err := rc.BlobPut(ctx, r, descriptor.Descriptor{}, gzipR) + if err != nil { + return err + } + // save the resulting descriptor in the appropriate layer + if od, ok := trd.dockerManifestList[index].LayerSources[d.Digest]; ok { + trd.dockerManifest.Layers[i] = od + } else { + d.MediaType = mediatype.Docker2LayerGzip + trd.dockerManifest.Layers[i] = d + } + return nil + } + }(i) + } + trd.handleAdded = true +} + +// imageImportOCIAddHandler adds handlers for oci-layout and index.json found in OCI layout tar files. 
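+// Processing waits until both the oci-layout and index.json handlers have fired;
+// once a valid OCI layout is seen, the docker manifest.json handler is dropped.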
+func (rc *RegClient) imageImportOCIAddHandler(ctx context.Context, r ref.Ref, trd *tarReadData) { + // add handler for oci-layout, index.json, and manifest.json + var err error + var foundLayout, foundIndex bool + + // common handler code when both oci-layout and index.json have been processed + ociHandler := func(trd *tarReadData) error { + // no need to process docker manifest.json when OCI layout is available + delete(trd.handlers, dockerManifestFilename) + // create a manifest from the index + trd.ociManifest, err = manifest.New(manifest.WithOrig(trd.ociIndex)) + if err != nil { + return err + } + // start recursively processing manifests starting with the index + // there's no need to push the index.json by digest, it will be pushed by tag if needed + err = rc.imageImportOCIHandleManifest(ctx, r, trd.ociManifest, trd, false, false) + if err != nil { + return err + } + return nil + } + trd.handlers[ociLayoutFilename] = func(header *tar.Header, trd *tarReadData) error { + var ociLayout v1.ImageLayout + err := trd.tarReadFileJSON(&ociLayout) + if err != nil { + return err + } + if ociLayout.Version != ociLayoutVersion { + // unknown version, ignore + rc.slog.Warn("Unsupported oci-layout version", + slog.String("version", ociLayout.Version)) + return nil + } + foundLayout = true + if foundIndex { + err = ociHandler(trd) + if err != nil { + return err + } + } + return nil + } + trd.handlers[ociIndexFilename] = func(header *tar.Header, trd *tarReadData) error { + err := trd.tarReadFileJSON(&trd.ociIndex) + if err != nil { + return err + } + foundIndex = true + if foundLayout { + err = ociHandler(trd) + if err != nil { + return err + } + } + return nil + } +} + +// imageImportOCIHandleManifest recursively processes index and manifest entries from an OCI layout tar. 
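+// The root index is handled with push=false: the entry matching the requested digest,
+// name, or tag is selected and tagged in a finish step, while nested manifests are
+// pushed by digest.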
+func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, r ref.Ref, m manifest.Manifest, trd *tarReadData, push bool, child bool) error { + // cache the manifest to avoid needing to pull again later, this is used if index.json is a wrapper around some other manifest + trd.manifests[m.GetDescriptor().Digest] = m + + handleManifest := func(d descriptor.Descriptor, child bool) error { + if err := d.Digest.Validate(); err != nil { + return err + } + filename := tarOCILayoutDescPath(d) + if !trd.processed[filename] && trd.handlers[filename] == nil { + trd.handlers[filename] = func(header *tar.Header, trd *tarReadData) error { + b, err := io.ReadAll(trd.tr) + if err != nil { + return err + } + switch d.MediaType { + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, + mediatype.Docker2Manifest, mediatype.Docker2ManifestList, + mediatype.OCI1Manifest, mediatype.OCI1ManifestList: + // known manifest media types + md, err := manifest.New(manifest.WithDesc(d), manifest.WithRaw(b)) + if err != nil { + return err + } + return rc.imageImportOCIHandleManifest(ctx, r, md, trd, true, child) + case mediatype.Docker2ImageConfig, mediatype.OCI1ImageConfig, + mediatype.Docker2Layer, mediatype.Docker2LayerGzip, mediatype.Docker2LayerZstd, + mediatype.OCI1Layer, mediatype.OCI1LayerGzip, mediatype.OCI1LayerZstd, + mediatype.BuildkitCacheConfig: + // known blob media types + return rc.imageImportBlob(ctx, r, d, trd) + default: + // attempt manifest import, fall back to blob import + md, err := manifest.New(manifest.WithDesc(d), manifest.WithRaw(b)) + if err == nil { + return rc.imageImportOCIHandleManifest(ctx, r, md, trd, true, child) + } + return rc.imageImportBlob(ctx, r, d, trd) + } + } + } + return nil + } + + if !push { + mi, ok := m.(manifest.Indexer) + if !ok { + return fmt.Errorf("manifest doesn't support image methods%.0w", errs.ErrUnsupportedMediaType) + } + // for root index, add handler for matching reference (or only reference) + dl, err := mi.GetManifestList() + if err != nil { + return err + } + // locate the digest in the index + var d descriptor.Descriptor + if len(dl) == 1 { + d = dl[0] + } else if r.Digest != "" { + d.Digest = digest.Digest(r.Digest) + } else if trd.name != "" { + for _, cur := range dl { + if cur.Annotations[annotationRefName] == trd.name { + d = cur + break + } + } + if d.Digest.String() == "" { + return fmt.Errorf("could not find requested tag in index.json, %s", trd.name) + } + } else { + if r.Tag == "" { + r.Tag = "latest" + } + // if more than one digest is in the index, use the first matching tag + for _, cur := range dl { + if cur.Annotations[annotationRefName] == r.Tag { + d = cur + break + } + } + if d.Digest.String() == "" { + return fmt.Errorf("could not find requested tag in index.json, %s", r.Tag) + } + } + err = handleManifest(d, false) + if err != nil { + return err + } + // add a finish step to tag the selected digest + trd.finish = append(trd.finish, func() error { + mRef, ok := trd.manifests[d.Digest] + if !ok { + return fmt.Errorf("could not find manifest to tag, ref: %s, digest: %s", r.CommonName(), d.Digest) + } + return rc.ManifestPut(ctx, r, mRef) + }) + } else if m.IsList() { + // for index/manifest lists, add handlers for each embedded manifest + mi, ok := m.(manifest.Indexer) + if !ok { + return fmt.Errorf("manifest doesn't support index methods%.0w", errs.ErrUnsupportedMediaType) + } + dl, err := mi.GetManifestList() + if err != nil { + return err + } + for _, d := range dl { + err = handleManifest(d, true) + if err != 
nil { + return err + } + } + } else { + // else if a single image/manifest + mi, ok := m.(manifest.Imager) + if !ok { + return fmt.Errorf("manifest doesn't support image methods%.0w", errs.ErrUnsupportedMediaType) + } + // add handler for the config descriptor if it's defined + cd, err := mi.GetConfig() + if err == nil { + if err = cd.Digest.Validate(); err != nil { + return err + } + filename := tarOCILayoutDescPath(cd) + if !trd.processed[filename] && trd.handlers[filename] == nil { + func(cd descriptor.Descriptor) { + trd.handlers[filename] = func(header *tar.Header, trd *tarReadData) error { + return rc.imageImportBlob(ctx, r, cd, trd) + } + }(cd) + } + } + // add handlers for each layer + layers, err := mi.GetLayers() + if err != nil { + return err + } + for _, d := range layers { + if err = d.Digest.Validate(); err != nil { + return err + } + filename := tarOCILayoutDescPath(d) + if !trd.processed[filename] && trd.handlers[filename] == nil { + func(d descriptor.Descriptor) { + trd.handlers[filename] = func(header *tar.Header, trd *tarReadData) error { + return rc.imageImportBlob(ctx, r, d, trd) + } + }(d) + } + } + } + // add a finish func to push the manifest, this gets skipped for the index.json + if push { + trd.finish = append(trd.finish, func() error { + mRef := r.SetDigest(m.GetDescriptor().Digest.String()) + _, err := rc.ManifestHead(ctx, mRef) + if err == nil { + return nil + } + opts := []ManifestOpts{} + if child { + opts = append(opts, WithManifestChild()) + } + return rc.ManifestPut(ctx, mRef, m, opts...) + }) + } + trd.handleAdded = true + return nil +} + +// imageImportOCIPushManifests uploads manifests after OCI blobs were successfully loaded. +func (rc *RegClient) imageImportOCIPushManifests(_ context.Context, _ ref.Ref, trd *tarReadData) error { + // run finish handlers in reverse order to upload nested manifests + for i := len(trd.finish) - 1; i >= 0; i-- { + err := trd.finish[i]() + if err != nil { + return err + } + } + return nil +} + +func imagePlatformInList(target *platform.Platform, list []string) (bool, error) { + // special case for an unset platform + if target == nil || target.OS == "" { + if slices.Contains(list, "") { + return true, nil + } + return false, nil + } + for _, entry := range list { + if entry == "" { + continue + } + plat, err := platform.Parse(entry) + if err != nil { + return false, err + } + if platform.Match(*target, plat) { + return true, nil + } + } + return false, nil +} + +// tarReadAll processes the tar file in a loop looking for matching filenames in the list of handlers. +// Handlers for filenames are added at the top level, and by manifest imports. 
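+// The tar stream may be scanned repeatedly: handlers for nested content only become
+// known after a parent manifest is read, so the reader seeks back to the start until
+// every handler has run or a full pass adds no new handlers (errs.ErrNotFound).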
+func (trd *tarReadData) tarReadAll(rs io.ReadSeeker) error { + // return immediately if nothing to do + if len(trd.handlers) == 0 { + return nil + } + for { + // reset back to beginning of tar file + _, err := rs.Seek(0, 0) + if err != nil { + return err + } + dr, err := archive.Decompress(rs) + if err != nil { + return err + } + trd.tr = tar.NewReader(dr) + trd.handleAdded = false + // loop over each entry of the tar file + for { + header, err := trd.tr.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + name := filepath.ToSlash(filepath.Clean(header.Name)) + // track symlinks + if header.Typeflag == tar.TypeSymlink || header.Typeflag == tar.TypeLink { + // normalize target relative to root of tar + target := header.Linkname + if !filepath.IsAbs(target) { + target, err = filepath.Rel(filepath.Dir(name), target) + if err != nil { + return err + } + } + target = filepath.ToSlash(filepath.Clean("/" + target)[1:]) + // track and set handleAdded if an existing handler points to the target + if trd.linkAdd(name, target) && !trd.handleAdded { + list, err := trd.linkList(target) + if err != nil { + return err + } + for _, src := range append(list, name) { + if trd.handlers[src] != nil { + trd.handleAdded = true + } + } + } + } else { + // loop through filename and symlinks to file in search of handlers + list, err := trd.linkList(name) + if err != nil { + return err + } + list = append(list, name) + trdUsed := false + for _, entry := range list { + if trd.handlers[entry] != nil { + // trd cannot be reused, force the loop to run again + if trdUsed { + trd.handleAdded = true + break + } + trdUsed = true + // run handler + err = trd.handlers[entry](header, trd) + if err != nil { + return err + } + delete(trd.handlers, entry) + trd.processed[entry] = true + // return if last handler processed + if len(trd.handlers) == 0 { + return nil + } + } + } + } + } + // if entire file read without adding a new handler, fail + if !trd.handleAdded { + return fmt.Errorf("unable to read all files from tar: %w", errs.ErrNotFound) + } + } +} + +func (trd *tarReadData) linkAdd(src, tgt string) bool { + if slices.Contains(trd.links[tgt], src) { + return false + } + trd.links[tgt] = append(trd.links[tgt], src) + return true +} + +func (trd *tarReadData) linkList(tgt string) ([]string, error) { + list := trd.links[tgt] + for _, entry := range list { + if entry == tgt { + return nil, fmt.Errorf("symlink loop encountered for %s", tgt) + } + list = append(list, trd.links[entry]...) + } + return list, nil +} + +// tarReadFileJSON reads the current tar entry and unmarshals json into provided interface. +func (trd *tarReadData) tarReadFileJSON(data any) error { + b, err := io.ReadAll(trd.tr) + if err != nil { + return err + } + err = json.Unmarshal(b, data) + if err != nil { + return err + } + return nil +} + +var errTarFileExists = errors.New("tar file already exists") + +func (td *tarWriteData) tarWriteHeader(filename string, size int64) error { + dirName := filepath.ToSlash(filepath.Dir(filename)) + if !td.dirs[dirName] && dirName != "." 
{ + dirSplit := strings.Split(dirName, "/") + for i := range dirSplit { + dirJoin := strings.Join(dirSplit[:i+1], "/") + if !td.dirs[dirJoin] && dirJoin != "" { + header := tar.Header{ + Format: tar.FormatPAX, + Typeflag: tar.TypeDir, + Name: dirJoin + "/", + Size: 0, + Mode: td.mode | 0o511, + ModTime: td.timestamp, + AccessTime: td.timestamp, + ChangeTime: td.timestamp, + } + err := td.tw.WriteHeader(&header) + if err != nil { + return err + } + td.dirs[dirJoin] = true + } + } + } + if td.files[filename] { + return fmt.Errorf("%w: %s", errTarFileExists, filename) + } + td.files[filename] = true + header := tar.Header{ + Format: tar.FormatPAX, + Typeflag: tar.TypeReg, + Name: filename, + Size: size, + Mode: td.mode | 0o400, + ModTime: td.timestamp, + AccessTime: td.timestamp, + ChangeTime: td.timestamp, + } + return td.tw.WriteHeader(&header) +} + +func (td *tarWriteData) tarWriteFileJSON(filename string, data any) error { + dataJSON, err := json.Marshal(data) + if err != nil { + return err + } + err = td.tarWriteHeader(filename, int64(len(dataJSON))) + if err != nil { + return err + } + _, err = td.tw.Write(dataJSON) + if err != nil { + return err + } + return nil +} + +func tarOCILayoutDescPath(d descriptor.Descriptor) string { + return fmt.Sprintf("blobs/%s/%s", d.Digest.Algorithm(), d.Digest.Encoded()) +} diff --git a/vendor/github.com/regclient/regclient/internal/auth/auth.go b/vendor/github.com/regclient/regclient/internal/auth/auth.go new file mode 100644 index 000000000..cbe7885e8 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/auth/auth.go @@ -0,0 +1,916 @@ +// Package auth is used for HTTP authentication +package auth + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "slices" + "strings" + "sync" + "time" + + "github.com/regclient/regclient/types/errs" +) + +type charLU byte + +var charLUs [256]charLU + +var defaultClientID = "regclient" + +// minTokenLife tokens are required to last at least 60 seconds to support older docker clients +var minTokenLife = 60 + +// tokenBuffer is used to renew a token before it expires to account for time to process requests on the server +var tokenBuffer = time.Second * 5 + +const ( + isSpace charLU = 1 << iota + isToken +) + +func init() { + for c := range 256 { + charLUs[c] = 0 + if strings.ContainsRune(" \t\r\n", rune(c)) { + charLUs[c] |= isSpace + } + if (rune('a') <= rune(c) && rune(c) <= rune('z')) || (rune('A') <= rune(c) && rune(c) <= rune('Z') || (rune('0') <= rune(c) && rune(c) <= rune('9')) || strings.ContainsRune("-._~+/", rune(c))) { + charLUs[c] |= isToken + } + } +} + +// CredsFn is passed to lookup credentials for a given hostname, response is a username and password or empty strings +type CredsFn func(host string) Cred + +// Cred is returned by the CredsFn. +// If Token is provided and auth method is bearer, it will attempt to use it as a refresh token. +// Else if user and password are provided, they are attempted with all auth methods. +// Else if neither are provided and auth method is bearer, an anonymous login is attempted. +type Cred struct { + //#nosec G117 exported struct intentionally holds secrets + User, Password string // clear text username and password + Token string // refresh token only used for bearer auth +} + +// challenge is the extracted contents of the WWW-Authenticate header. 
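+// For example, a header such as:
+//
+//	Bearer realm="https://auth.example.com/token",service="registry.example.com"
+//
+// (hypothetical URLs) would be parsed to authType "bearer" with realm and service in params.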
+type challenge struct {
+ authType string
+ params map[string]string
+}
+
+// handler handles a challenge for a host to return an auth header
+type handler interface {
+ AddScope(scope string) error
+ ProcessChallenge(challenge) error
+ UpdateRequest(*http.Request) error
+}
+
+// handlerBuild is used to make a new handler for a specific authType and URL
+type handlerBuild func(client *http.Client, clientID, host string, credFn CredsFn, slog *slog.Logger) handler
+
+// Opts configures options for NewAuth
+type Opts func(*Auth)
+
+// Auth is used to handle authentication requests.
+type Auth struct {
+ httpClient *http.Client
+ clientID string
+ credsFn CredsFn
+ hbs map[string]handlerBuild // handler builders based on authType
+ hs map[string]map[string]handler // handlers based on url and authType
+ authTypes []string
+ slog *slog.Logger
+ mu sync.Mutex
+}
+
+// NewAuth creates a new Auth
+func NewAuth(opts ...Opts) *Auth {
+ a := &Auth{
+ httpClient: &http.Client{},
+ clientID: defaultClientID,
+ credsFn: DefaultCredsFn,
+ hbs: map[string]handlerBuild{},
+ hs: map[string]map[string]handler{},
+ authTypes: []string{},
+ slog: slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{})),
+ }
+
+ for _, opt := range opts {
+ opt(a)
+ }
+
+ if len(a.authTypes) == 0 {
+ a.addDefaultHandlers()
+ }
+
+ return a
+}
+
+// WithCreds provides a user/pass lookup for a URL
+func WithCreds(f CredsFn) Opts {
+ return func(a *Auth) {
+ if f != nil {
+ a.credsFn = f
+ }
+ }
+}
+
+// WithHTTPClient uses a specific http client with requests
+func WithHTTPClient(h *http.Client) Opts {
+ return func(a *Auth) {
+ if h != nil {
+ a.httpClient = h
+ }
+ }
+}
+
+// WithClientID uses a client ID with request headers
+func WithClientID(clientID string) Opts {
+ return func(a *Auth) {
+ a.clientID = clientID
+ }
+}
+
+// WithHandler includes a handler for a specific auth type
+func WithHandler(authType string, hb handlerBuild) Opts {
+ return func(a *Auth) {
+ lcat := strings.ToLower(authType)
+ a.hbs[lcat] = hb
+ a.authTypes = append(a.authTypes, lcat)
+ }
+}
+
+// WithDefaultHandlers includes a Basic and Bearer handler, these are automatically added when "WithHandler" is not called
+func WithDefaultHandlers() Opts {
+ return func(a *Auth) {
+ a.addDefaultHandlers()
+ }
+}
+
+// WithLog injects a Logger
+func WithLog(slog *slog.Logger) Opts {
+ return func(a *Auth) {
+ a.slog = slog
+ }
+}
+
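+// As a sketch of how these options compose (illustrative values only, not
+// part of this patch):
+//
+//	a := NewAuth(
+//		WithClientID("my-client"),
+//		WithCreds(func(host string) Cred {
+//			if host == "registry.example.com" {
+//				return Cred{User: "user", Password: "secret"}
+//			}
+//			return Cred{}
+//		}),
+//	)
+//
+// Since no WithHandler option is given, the Basic and Bearer handlers are
+// registered automatically.
+
+// AddScope extends an existing auth with additional scopes.
+// This is used to pre-populate scopes with the Docker convention rather than
+// depend on the registry to respond with the correct http status and headers.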
+func (a *Auth) AddScope(host, scope string) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ success := false
+ if a.hs[host] == nil {
+ return errs.ErrNoNewChallenge
+ }
+ for _, at := range a.authTypes {
+ if a.hs[host][at] != nil {
+ err := a.hs[host][at].AddScope(scope)
+ if err == nil {
+ success = true
+ } else if err != errs.ErrNoNewChallenge {
+ return err
+ }
+ }
+ }
+ if !success {
+ return errs.ErrNoNewChallenge
+ }
+ a.slog.Debug("Auth scope added",
+ slog.String("host", host),
+ slog.String("scope", scope))
+ return nil
+}
+
+// HandleResponse parses the 401 response, extracting the WWW-Authenticate
+// header and verifying the requirement is different from what was included in
+// the last request
+func (a *Auth) HandleResponse(resp *http.Response) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ // verify the response is a 401 Unauthorized
+ if resp.StatusCode != http.StatusUnauthorized {
+ return errs.ErrUnsupported
+ }
+
+ // extract host and auth header
+ host := resp.Request.URL.Host
+ cl, err := ParseAuthHeaders(resp.Header.Values("WWW-Authenticate"))
+ if err != nil {
+ return err
+ }
+ a.slog.Debug("Auth request parsed",
+ slog.Any("challenge", cl))
+ if len(cl) < 1 {
+ return errs.ErrEmptyChallenge
+ }
+ goodChallenge := false
+ // loop over the received challenge(s)
+ for _, c := range cl {
+ if _, ok := a.hbs[c.authType]; !ok {
+ a.slog.Warn("Unsupported auth type",
+ slog.String("authtype", c.authType))
+ continue
+ }
+ // set up a handler for the host and auth type
+ if _, ok := a.hs[host]; !ok {
+ a.hs[host] = map[string]handler{}
+ }
+ if _, ok := a.hs[host][c.authType]; !ok {
+ h := a.hbs[c.authType](a.httpClient, a.clientID, host, a.credsFn, a.slog)
+ if h == nil {
+ continue
+ }
+ a.hs[host][c.authType] = h
+ }
+ // process the challenge with that handler
+ err := a.hs[host][c.authType].ProcessChallenge(c)
+ if err == nil {
+ goodChallenge = true
+ } else if err == errs.ErrNoNewChallenge {
+ // handle a race condition when another request updates the challenge:
+ // detect it by checking whether the current auth header changed
+ prevAH := resp.Request.Header.Get("Authorization")
+ err := a.hs[host][c.authType].UpdateRequest(resp.Request)
+ if err == nil && prevAH != resp.Request.Header.Get("Authorization") {
+ goodChallenge = true
+ }
+ } else {
+ return err
+ }
+ }
+ if !goodChallenge {
+ return errs.ErrHTTPUnauthorized
+ }
+
+ return nil
+}
+
+// UpdateRequest adds Authorization headers to a request
+func (a *Auth) UpdateRequest(req *http.Request) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ host := req.URL.Host
+ if a.hs[host] == nil {
+ return nil
+ }
+ var err error
+ for _, at := range a.authTypes {
+ if a.hs[host][at] != nil {
+ err = a.hs[host][at].UpdateRequest(req)
+ if err != nil {
+ a.slog.Debug("Failed to generate auth",
+ slog.String("err", err.Error()),
+ slog.String("host", host),
+ slog.String("authtype", at))
+ continue
+ }
+ break
+ }
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (a *Auth) addDefaultHandlers() {
+ if _, ok := a.hbs["basic"]; !ok {
+ a.hbs["basic"] = NewBasicHandler
+ a.authTypes = append(a.authTypes, "basic")
+ }
+ if _, ok := a.hbs["bearer"]; !ok {
+ a.hbs["bearer"] = NewBearerHandler
+ a.authTypes = append(a.authTypes, "bearer")
+ }
+}
+
+// DefaultCredsFn is used to return no credentials when auth is not configured with a CredsFn
+// This avoids the need to check for nil pointers
+func DefaultCredsFn(h string) Cred {
+ return Cred{}
+}
+
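+// Given a configured *Auth (here "a"), the intended request loop is roughly
+// the following (an illustrative sketch with error handling elided; the URL
+// is hypothetical): add headers with UpdateRequest, and on a 401 feed the
+// response back through HandleResponse before retrying.
+//
+//	req, _ := http.NewRequest(http.MethodGet, "https://registry.example.com/v2/", nil)
+//	_ = a.UpdateRequest(req)
+//	resp, _ := http.DefaultClient.Do(req)
+//	if resp.StatusCode == http.StatusUnauthorized {
+//		if err := a.HandleResponse(resp); err == nil {
+//			_ = a.UpdateRequest(req) // retry with the new Authorization header
+//		}
+//	}
+
+// ParseAuthHeaders extracts the scheme and realm from WWW-Authenticate headers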
+func ParseAuthHeaders(ahl []string) ([]challenge, error) { + var cl []challenge + for _, ah := range ahl { + c, err := parseAuthHeader(ah) + if err != nil { + return nil, fmt.Errorf("failed to parse challenge header: %s, %w", ah, err) + } + cl = append(cl, c...) + } + return cl, nil +} + +// parseAuthHeader parses a single header line for WWW-Authenticate +// Example values: +// Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" +// Basic realm="GitHub Package Registry" +func parseAuthHeader(ah string) ([]challenge, error) { + var cl []challenge + var c *challenge + curElement := []byte{} + curKey := "" + stateElement := "string" + stateSyntax := "start" + + for _, b := range []byte(ah) { + switch stateElement { + case "string": + // string: ignore leading space, enter quote only if first character, handle escapes, handle valid tokens, else end + if charLUs[b]&isToken != 0 { + // add valid tokens to element + curElement = append(curElement, b) + } else if charLUs[b]&isSpace != 0 && len(curElement) == 0 { + // ignore leading spaces + } else if b == '"' && len(curElement) == 0 { + stateElement = "quote" + } else if b == '\\' { + stateElement = "escape_string" + } else { + stateElement = "end" + } + case "quote": + // quote: handle escapes, handle closing quote (quote_end to read next character), all other tokens are valid + if b == '\\' { + stateElement = "escape_quote" + } else if b == '"' { + stateElement = "quote_end" + } else { + curElement = append(curElement, b) + } + case "quote_end": + // any character after the close quote is the end of the element + stateElement = "end" + case "escape_string": + // escape_string: handle any character and return to string state + curElement = append(curElement, b) + stateElement = "string" + case "escape_quote": + // escape_quote: handle any character and return to quote state + curElement = append(curElement, b) + stateElement = "quote" + case "end": + // finished parsing element, continue to processing element according to the current state + default: + return nil, fmt.Errorf("unhandled element case: %w", errs.ErrParsingFailed) + } + if stateElement != "end" { + // continue parsing the element until it ends + continue + } + + // syntax looks at each string within the overall challenge syntax + switch stateSyntax { + case "start": + // start: (start of auth_type) read auth_type and space (end_auth_type) or auth_type and comma (start) + if charLUs[b]&isSpace != 0 && len(curElement) > 0 { + stateSyntax = "end_auth_type" + } else if b == ',' && len(curElement) > 0 { + // state remains at start + } else { + return nil, fmt.Errorf("start element did not end with a space or comma: %w", errs.ErrParsingFailed) + } + c = &challenge{authType: strings.ToLower(string(curElement)), params: map[string]string{}} + cl = append(cl, *c) + case "start_or_param": + // start_or_param: (after param_value) read auth_type and space (end_auth_type) or param_key and equals (param_value) + if charLUs[b]&isSpace != 0 && len(curElement) > 0 { + c = &challenge{authType: strings.ToLower(string(curElement)), params: map[string]string{}} + cl = append(cl, *c) + stateSyntax = "end_auth_type" + } else if b == '=' && len(curElement) > 0 { + curKey = strings.ToLower((string(curElement))) + stateSyntax = "param_value" + } else { + return nil, fmt.Errorf("expected auth type or param: %w", errs.ErrParsingFailed) + } + case "end_auth_type": + // end_auth_type: (after reading auth_type) read param_key and equals 
(param_value) or just a comma (start) + if b == '=' && len(curElement) > 0 { + curKey = strings.ToLower((string(curElement))) + stateSyntax = "param_value" + } else if b == ',' && len(curElement) == 0 { + // ignore white space between end of auth_type and comma + stateSyntax = "start" + } else { + return nil, fmt.Errorf("expected param or comma: %w", errs.ErrParsingFailed) + } + case "param_value": + // param_value: (after param_key) read param_value and comma (start_or_param) + if b == ',' { + c.params[curKey] = string(curElement) + stateSyntax = "start_or_param" + curKey = "" + } else { + return nil, fmt.Errorf("expected param value: %w", errs.ErrParsingFailed) + } + default: + return nil, fmt.Errorf("unhandled syntax case: %w", errs.ErrParsingFailed) + } + // reset element state + stateElement = "string" + curElement = []byte{} + } + // at end of parsing, if the element is not empty, process according to syntax state: + if len(curElement) > 0 { + // ensure this is not within an unclosed quote or partial escape + if stateElement != "string" && stateElement != "quote_end" { + return nil, fmt.Errorf("eol element in state %s: %w", stateElement, errs.ErrParsingFailed) + } + switch stateSyntax { + case "start", "start_or_param": + // add a new auth type if a string is seen at the start, before any equals + c = &challenge{authType: strings.ToLower(string(curElement)), params: map[string]string{}} + cl = append(cl, *c) + case "param_value": + // add the last param key=val + c.params[curKey] = string(curElement) + case "end_auth_type": + // missing equals for param + return nil, fmt.Errorf("eol at param without value: %w", errs.ErrParsingFailed) + } + } + + return cl, nil +} + +// basicHandler supports Basic auth type requests +type basicHandler struct { + realm string + host string + credsFn CredsFn +} + +// NewBasicHandler creates a new BasicHandler +func NewBasicHandler(client *http.Client, clientID, host string, credsFn CredsFn, slog *slog.Logger) handler { + return &basicHandler{ + realm: "", + host: host, + credsFn: credsFn, + } +} + +// AddScope is not valid for BasicHandler +func (b *basicHandler) AddScope(scope string) error { + return errs.ErrNoNewChallenge +} + +// ProcessChallenge for BasicHandler is a noop +func (b *basicHandler) ProcessChallenge(c challenge) error { + if _, ok := c.params["realm"]; !ok { + return errs.ErrInvalidChallenge + } + if b.realm != c.params["realm"] { + b.realm = c.params["realm"] + return nil + } + return errs.ErrNoNewChallenge +} + +// UpdateRequest for BasicHandler generates base64 encoded user/pass for a host +func (b *basicHandler) UpdateRequest(req *http.Request) error { + cred := b.credsFn(b.host) + if cred.User == "" || cred.Password == "" { + return fmt.Errorf("no credentials available: %w", errs.ErrHTTPUnauthorized) + } + req.Header.Set("Authorization", fmt.Sprintf("Basic %s", + base64.StdEncoding.EncodeToString([]byte(cred.User+":"+cred.Password)))) + return nil +} + +// bearerHandler supports Bearer auth type requests +type bearerHandler struct { + client *http.Client + clientID string + realm, service string + host string + credsFn CredsFn + scopes []string + tokenURL *url.URL + token bearerToken + slog *slog.Logger +} + +// bearerToken is the json response to the Bearer request +type bearerToken struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` //#nosec G117 exported struct intentionally holds secrets + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string 
`json:"refresh_token"` //#nosec G117 exported struct intentionally holds secrets + Scope string `json:"scope"` +} + +// NewBearerHandler creates a new BearerHandler +func NewBearerHandler(client *http.Client, clientID, host string, credsFn CredsFn, slog *slog.Logger) handler { + return &bearerHandler{ + client: client, + clientID: clientID, + host: host, + credsFn: credsFn, + realm: "", + service: "", + scopes: []string{}, + slog: slog, + } +} + +// AddScope appends a new scope if it doesn't already exist +func (b *bearerHandler) AddScope(scope string) error { + if b.scopeExists(scope) { + if b.token.Token == "" || !b.isExpired() { + return errs.ErrNoNewChallenge + } + return nil + } + b.addScope(scope) + return nil +} + +func (b *bearerHandler) addScope(scope string) { + if !b.tryExtendExistingScope(scope) { + b.scopes = append(b.scopes, scope) + } + // delete old token + b.token.Token = "" +} + +var knownActions = []string{"pull", "push", "delete"} + +// tryExtendExistingScope extends an existing scope if both the new scope and the current scope contain only knownActions. +// It returns true if actions are added or are already present. Otherwise, it returns false, +// indicating that the new scope should be appended to b.scopes instead. +func (b *bearerHandler) tryExtendExistingScope(scope string) bool { + repo, actions, ok := parseScope(scope) + if !ok { + return false + } + scopePrefix := "repository:" + repo + ":" + for i, cur := range b.scopes { + if !strings.HasPrefix(cur, scopePrefix) { + continue + } + _, curActions, curOk := parseScope(cur) + if !curOk { + continue + } + + for _, a := range actions { + if !slices.Contains(curActions, a) { + curActions = append(curActions, a) + } + } + b.scopes[i] = scopePrefix + strings.Join(curActions, ",") + return true + } + return false +} + +// parseScope splits a scope into the repo and slice of actions. +// Unknown actions in the scope will set bool to false. +func parseScope(scope string) (string, []string, bool) { + scopeSplit := strings.SplitN(scope, ":", 3) + if scopeSplit[0] != "repository" || len(scopeSplit) < 3 { + return "", nil, false + } + actionSplit := strings.Split(scopeSplit[2], ",") + for _, a := range actionSplit { + if !slices.Contains(knownActions, a) { + return "", nil, false + } + } + return scopeSplit[1], actionSplit, true +} + +// ProcessChallenge handles WWW-Authenticate header for bearer tokens +// Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" +func (b *bearerHandler) ProcessChallenge(c challenge) error { + if _, ok := c.params["realm"]; !ok { + return errs.ErrInvalidChallenge + } + if _, ok := c.params["service"]; !ok { + c.params["service"] = "" + } + if _, ok := c.params["scope"]; !ok { + c.params["scope"] = "" + } + + existingScope := b.scopeExists(c.params["scope"]) + + if b.realm == c.params["realm"] && b.service == c.params["service"] && existingScope && (b.token.Token == "" || !b.isExpired()) { + return errs.ErrNoNewChallenge + } + + if b.realm == "" { + b.realm = c.params["realm"] + } else if b.realm != c.params["realm"] { + return errs.ErrInvalidChallenge + } + if b.service == "" { + b.service = c.params["service"] + } else if b.service != c.params["service"] { + return errs.ErrInvalidChallenge + } + if !existingScope { + b.addScope(c.params["scope"]) + } + return nil +} + +// UpdateRequest for BearerHandler adds a bearer token to the request. 
+func (b *bearerHandler) UpdateRequest(req *http.Request) error { + // handle relative realm values + if b.tokenURL == nil { + u, err := req.URL.Parse(b.realm) + if err != nil { + return err + } + b.tokenURL = u + } + // if unexpired token already exists, return it + if b.token.Token != "" && !b.isExpired() { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", b.token.Token)) + return nil + } + // attempt to post if a refresh token is available or token auth is being used + cred := b.credsFn(b.host) + if b.token.RefreshToken != "" || cred.Token != "" { + if err := b.tryPost(cred); err == nil { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", b.token.Token)) + return nil + } else if err != errs.ErrHTTPUnauthorized { + return fmt.Errorf("failed to request auth token (post): %w%.0w", err, errs.ErrHTTPUnauthorized) + } + } + // attempt a get (with basic auth if user/pass available) + if err := b.tryGet(cred); err == nil { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", b.token.Token)) + return nil + } else if err != errs.ErrHTTPUnauthorized { + return fmt.Errorf("failed to request auth token (get): %w%.0w", err, errs.ErrHTTPUnauthorized) + } + return errs.ErrHTTPUnauthorized +} + +// isExpired returns true when token issue date is either 0, token has expired, +// or will expire within buffer time +func (b *bearerHandler) isExpired() bool { + if b.token.IssuedAt.IsZero() { + return true + } + expireSec := b.token.IssuedAt.Add(time.Duration(b.token.ExpiresIn) * time.Second) + expireSec = expireSec.Add(tokenBuffer * -1) + return time.Now().After(expireSec) +} + +// tryGet requests a new token with a GET request +func (b *bearerHandler) tryGet(cred Cred) error { + req, err := http.NewRequest("GET", b.tokenURL.String(), nil) + if err != nil { + return err + } + + reqParams := req.URL.Query() + reqParams.Add("client_id", b.clientID) + // Note, an offline_token should not be requested by default due to broken OAuth2 implementations returning an invalid token + if b.service != "" { + reqParams.Add("service", b.service) + } + + for _, s := range b.scopes { + reqParams.Add("scope", s) + } + + if cred.User != "" && cred.Password != "" { + reqParams.Add("account", cred.User) + req.SetBasicAuth(cred.User, cred.Password) + } + + req.Header.Add("User-Agent", b.clientID) + req.URL.RawQuery = reqParams.Encode() + + //#nosec G704 inputs are user controlled or follow specification + resp, err := b.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return b.validateResponse(resp) +} + +// tryPost requests a new token via a POST request +func (b *bearerHandler) tryPost(cred Cred) error { + form := url.Values{} + if len(b.scopes) > 0 { + form.Set("scope", strings.Join(b.scopes, " ")) + } + if b.service != "" { + form.Set("service", b.service) + } + form.Set("client_id", b.clientID) + if b.token.RefreshToken != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", b.token.RefreshToken) + } else if cred.Token != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", cred.Token) + } else if cred.User != "" && cred.Password != "" { + form.Set("grant_type", "password") + form.Set("username", cred.User) + form.Set("password", cred.Password) + } + + req, err := http.NewRequest("POST", b.tokenURL.String(), strings.NewReader(form.Encode())) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + req.Header.Add("User-Agent", b.clientID) + + //#nosec G704 inputs 
are user controlled or follow specification
+ resp, err := b.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return b.validateResponse(resp)
+}
+
+// scopeExists checks if the scope already exists within the list of scopes
+func (b *bearerHandler) scopeExists(search string) bool {
+ if search == "" {
+ return true
+ }
+ searchRepo, searchActions, searchOk := parseScope(search)
+ if !searchOk {
+ return slices.Contains(b.scopes, search)
+ }
+ scopePrefix := "repository:" + searchRepo + ":"
+ for _, scope := range b.scopes {
+ if scope == search {
+ return true
+ }
+ if !strings.HasPrefix(scope, scopePrefix) {
+ continue
+ }
+ _, actions, ok := parseScope(scope)
+ if !ok {
+ continue
+ }
+
+ for _, sa := range searchActions {
+ if !slices.Contains(actions, sa) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// validateResponse extracts the returned token
+func (b *bearerHandler) validateResponse(resp *http.Response) error {
+ if resp.StatusCode != 200 {
+ return errs.ErrHTTPUnauthorized
+ }
+
+ // decode response and if successful, update token
+ decoder := json.NewDecoder(resp.Body)
+ decoded := bearerToken{}
+ if err := decoder.Decode(&decoded); err != nil {
+ return err
+ }
+ b.token = decoded
+
+ if b.token.ExpiresIn < minTokenLife {
+ b.token.ExpiresIn = minTokenLife
+ }
+
+ // If token is already expired, it was sent with a zero value or
+ // there may be a clock skew between the client and auth server.
+ // Also handle cases of remote time in the future.
+ // But if remote time is slightly in the past, leave as is so token
+ // expires here before the server.
+ if b.isExpired() || b.token.IssuedAt.After(time.Now()) {
+ b.token.IssuedAt = time.Now().UTC()
+ }
+
+ // AccessToken and Token should be the same and we use Token elsewhere
+ if b.token.AccessToken != "" {
+ b.token.Token = b.token.AccessToken
+ }
+
+ return nil
+}
+
+// jwtHubHandler supports JWT auth type requests.
+type jwtHubHandler struct {
+ client *http.Client
+ clientID string
+ realm string
+ host string
+ credsFn CredsFn
+ jwt string
+}
+
+type jwtHubPost struct {
+ User string `json:"username"`
+ Pass string `json:"password"` //#nosec G117 exported struct intentionally holds secrets
+}
+type jwtHubResp struct {
+ Detail string `json:"detail"`
+ Token string `json:"token"`
+ RefreshToken string `json:"refresh_token"` //#nosec G117 exported struct intentionally holds secrets
+}
+
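+// For intuition on the scope handling above (hypothetical values):
+//
+//	repo, actions, ok := parseScope("repository:library/alpine:pull,push")
+//	// repo == "library/alpine", actions == []string{"pull", "push"}, ok == true
+//
+//	_, _, ok = parseScope("repository:library/alpine:catalog")
+//	// ok == false: "catalog" is not a known action, so scopeExists falls back
+//	// to an exact string comparison for this scope
+
+// NewJWTHubHandler creates a new JWTHandler for Docker Hub.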
+func NewJWTHubHandler(client *http.Client, clientID, host string, credsFn CredsFn, slog *slog.Logger) handler {
+ // JWT handler is only tested against Hub, and the API is Hub specific
+ if host == "hub.docker.com" {
+ return &jwtHubHandler{
+ client: client,
+ clientID: clientID,
+ host: host,
+ credsFn: credsFn,
+ realm: "https://hub.docker.com/v2/users/login",
+ }
+ }
+ return nil
+}
+
+// AddScope is not valid for JWTHubHandler
+func (j *jwtHubHandler) AddScope(scope string) error {
+ return errs.ErrNoNewChallenge
+}
+
+// ProcessChallenge handles WWW-Authenticate header for JWT auth on Docker Hub
+func (j *jwtHubHandler) ProcessChallenge(c challenge) error {
+ cred := j.credsFn(j.host)
+ // use token if provided
+ if cred.Token != "" {
+ j.jwt = cred.Token
+ return nil
+ }
+
+ // send a login request to hub
+ bodyBytes, err := json.Marshal(jwtHubPost{
+ User: cred.User,
+ Pass: cred.Password,
+ })
+ if err != nil {
+ return err
+ }
+
+ req, err := http.NewRequest("POST", j.realm, bytes.NewReader(bodyBytes))
+ if err != nil {
+ return err
+ }
+ req.Header.Add("Content-Type", "application/json")
+ req.Header.Add("Accept", "application/json")
+ req.Header.Add("User-Agent", j.clientID)
+
+ //#nosec G704 inputs are user controlled or follow specification requirements
+ resp, err := j.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ body, _ := io.ReadAll(resp.Body)
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ return errs.ErrHTTPUnauthorized
+ }
+
+ var bodyParsed jwtHubResp
+ err = json.Unmarshal(body, &bodyParsed)
+ if err != nil {
+ return err
+ }
+ j.jwt = bodyParsed.Token
+
+ return nil
+}
+
+// UpdateRequest for JWTHubHandler adds JWT header
+func (j *jwtHubHandler) UpdateRequest(req *http.Request) error {
+ if len(j.jwt) > 0 {
+ req.Header.Set("Authorization", fmt.Sprintf("JWT %s", j.jwt))
+ return nil
+ }
+ return errs.ErrHTTPUnauthorized
+}
diff --git a/vendor/github.com/regclient/regclient/internal/auth/error.go b/vendor/github.com/regclient/regclient/internal/auth/error.go
new file mode 100644
index 000000000..7b04198a6
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/auth/error.go
@@ -0,0 +1,48 @@
+package auth
+
+import (
+ "github.com/regclient/regclient/types/errs"
+)
+
+var (
+ // ErrEmptyChallenge indicates an issue with the received challenge in the WWW-Authenticate header
+ //
+ // Deprecated: replace with [errs.ErrEmptyChallenge].
+ //go:fix inline
+ ErrEmptyChallenge = errs.ErrEmptyChallenge
+ // ErrInvalidChallenge indicates an issue with the received challenge in the WWW-Authenticate header
+ //
+ // Deprecated: replace with [errs.ErrInvalidChallenge].
+ //go:fix inline
+ ErrInvalidChallenge = errs.ErrInvalidChallenge
+ // ErrNoNewChallenge indicates a challenge update did not result in any change
+ //
+ // Deprecated: replace with [errs.ErrNoNewChallenge].
+ //go:fix inline
+ ErrNoNewChallenge = errs.ErrNoNewChallenge
+ // ErrNotFound indicates no credentials found for basic auth
+ //
+ // Deprecated: replace with [errs.ErrNotFound].
+ //go:fix inline
+ ErrNotFound = errs.ErrNotFound
+ // ErrNotImplemented returned when method has not been implemented yet
+ //
+ // Deprecated: replace with [errs.ErrNotImplemented].
+ //go:fix inline
+ ErrNotImplemented = errs.ErrNotImplemented
+ // ErrParseFailure indicates the WWW-Authenticate header could not be parsed
+ //
+ // Deprecated: replace with [errs.ErrParsingFailed].
+ //go:fix inline
+ ErrParseFailure = errs.ErrParsingFailed
+ // ErrUnauthorized request was not authorized
+ //
+ // Deprecated: replace with [errs.ErrHTTPUnauthorized].
+ //go:fix inline
+ ErrUnauthorized = errs.ErrHTTPUnauthorized
+ // ErrUnsupported indicates the request was unsupported
+ //
+ // Deprecated: replace with [errs.ErrUnsupported].
+ //go:fix inline
+ ErrUnsupported = errs.ErrUnsupported
+)
diff --git a/vendor/github.com/regclient/regclient/internal/cache/cache.go b/vendor/github.com/regclient/regclient/internal/cache/cache.go
new file mode 100644
index 000000000..a08def82d
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/cache/cache.go
@@ -0,0 +1,181 @@
+//go:build go1.18
+
+// Package cache is used to store values with limits.
+// Items are automatically pruned when too many entries are stored, or values become stale.
+package cache
+
+import (
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/regclient/regclient/types/errs"
+)
+
+type Cache[k comparable, v any] struct {
+ mu sync.Mutex
+ minAge time.Duration
+ maxAge time.Duration
+ minCount int
+ maxCount int
+ timer *time.Timer
+ entries map[k]*Entry[v]
+}
+
+type Entry[v any] struct {
+ used time.Time
+ value v
+}
+
+type sortKeys[k comparable] struct {
+ keys []k
+ lessFn func(a, b k) bool
+}
+
+type conf struct {
+ minAge time.Duration
+ maxCount int
+}
+
+type cacheOpts func(*conf)
+
+func WithAge(age time.Duration) cacheOpts {
+ return func(c *conf) {
+ c.minAge = age
+ }
+}
+
+func WithCount(count int) cacheOpts {
+ return func(c *conf) {
+ c.maxCount = count
+ }
+}
+
+func New[k comparable, v any](opts ...cacheOpts) Cache[k, v] {
+ c := conf{}
+ for _, opt := range opts {
+ opt(&c)
+ }
+ maxAge := c.minAge + (c.minAge / 10)
+ minCount := 0
+ if c.maxCount > 0 {
+ minCount = int(float64(c.maxCount) * 0.9)
+ }
+ return Cache[k, v]{
+ minAge: c.minAge,
+ maxAge: maxAge,
+ minCount: minCount,
+ maxCount: c.maxCount,
+ entries: map[k]*Entry[v]{},
+ }
+}
+
+func (c *Cache[k, v]) Delete(key k) {
+ if c == nil {
+ return
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ delete(c.entries, key)
+ if len(c.entries) == 0 && c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+}
+
+func (c *Cache[k, v]) Set(key k, val v) {
+ if c == nil {
+ return
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.entries[key] = &Entry[v]{
+ used: time.Now(),
+ value: val,
+ }
+ if len(c.entries) > c.maxCount {
+ c.pruneLocked()
+ } else if c.timer == nil {
+ // prune resets the timer, so this is only needed if the prune wasn't triggered
+ c.timer = time.AfterFunc(c.maxAge, c.prune)
+ }
+}
+
+func (c *Cache[k, v]) Get(key k) (v, error) {
+ if c == nil {
+ var val v
+ return val, errs.ErrNotFound
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if e, ok := c.entries[key]; ok {
+ if e.used.Add(c.minAge).Before(time.Now()) {
+ // entry expired
+ go c.prune()
+ } else {
+ c.entries[key].used = time.Now()
+ return e.value, nil
+ }
+ }
+ var val v
+ return val, errs.ErrNotFound
+}
+
+func (c *Cache[k, v]) prune() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.pruneLocked()
+}
+
+func (c *Cache[k, v]) pruneLocked() {
+ // sort key list by last used date
+ keyList := make([]k, 0, len(c.entries))
+ for key := range c.entries {
+ keyList = append(keyList, key)
+ }
+ sk := sortKeys[k]{
+ keys: keyList,
+ lessFn: func(a, b k) bool {
+ return c.entries[a].used.Before(c.entries[b].used)
+ },
+ }
+ sort.Sort(&sk)
+ // prune entries
+ now := time.Now()
+ cutoff := now.Add(c.minAge * -1)
+ nextTime := now
+ delCount := len(keyList) - c.minCount
+ for i, key := range keyList {
+ if i < delCount || c.entries[key].used.Before(cutoff) { + delete(c.entries, key) + } else { + nextTime = c.entries[key].used + break + } + } + // set next timer + if len(c.entries) > 0 { + dur := nextTime.Sub(now) + c.maxAge + if c.timer == nil { + // this shouldn't be possible + c.timer = time.AfterFunc(dur, c.prune) + } else { + c.timer.Reset(dur) + } + } else if c.timer != nil { + c.timer.Stop() + c.timer = nil + } +} + +func (sk *sortKeys[k]) Len() int { + return len(sk.keys) +} + +func (sk *sortKeys[k]) Less(i, j int) bool { + return sk.lessFn(sk.keys[i], sk.keys[j]) +} + +func (sk *sortKeys[k]) Swap(i, j int) { + sk.keys[i], sk.keys[j] = sk.keys[j], sk.keys[i] +} diff --git a/vendor/github.com/regclient/regclient/internal/conffile/conffile.go b/vendor/github.com/regclient/regclient/internal/conffile/conffile.go new file mode 100644 index 000000000..ba8f7aa85 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/conffile/conffile.go @@ -0,0 +1,188 @@ +// Package conffile wraps the read and write of configuration files +package conffile + +import ( + "errors" + "fmt" + "io" + "io/fs" + "os" + "os/user" + "path/filepath" +) + +type File struct { + perms int + fullname string +} + +type Opt func(*File) + +// New returns a new File. +// The last successful option determines the filename. +func New(opts ...Opt) *File { + f := File{perms: 0o600} + for _, fn := range opts { + fn(&f) + } + if f.fullname == "" { + return nil + } + return &f +} + +// WithAppDir determines the filename from the XDG or Windows specification. +// By default, this is based in $HOME/.config on Linux and %APPDATA% on Windows. +// If the file does not exist, this will set the filename only if "force" is true. +func WithAppDir(unixDir, winDir, name string, force bool) Opt { + var dir string + if winDir == "" { + dir = unixDir + } else { + dir = osString(unixDir, winDir) + } + return func(f *File) { + fullname := filepath.Join(appDir(), dir, name) + if force || exists(fullname) { + f.fullname = fullname + } + } +} + +// WithDirName determines the filename from a subdirectory in the user's HOME. +// +// Deprecated: Replace with [WithHomeDir] +// +//go:fix inline +func WithDirName(dir, name string) Opt { + return WithHomeDir(dir, name, true) +} + +// WithEnvFile sets the fullname to the environment value if defined. +func WithEnvFile(envVar string) Opt { + return func(f *File) { + val := os.Getenv(envVar) + if val != "" { + f.fullname = val + } + } +} + +// WithEnvDir sets the fullname to the environment value + filename if the environment variable is defined. +func WithEnvDir(envVar, name string) Opt { + return func(f *File) { + val := os.Getenv(envVar) + if val != "" { + f.fullname = filepath.Join(val, name) + } + } +} + +// WithFullname specifies the filename. +// This will always set the filename even if the file does not exist. +func WithFullname(fullname string) Opt { + return func(f *File) { + f.fullname = fullname + } +} + +// WithHomeDir determines the filename from a subdirectory in the user's HOME +// e.g. dir=".app", name="config.json", sets the fullname to "$HOME/.app/config.json". +// If the file does not exist, this will set the filename only if "force" is true. +func WithHomeDir(dir, name string, force bool) Opt { + return func(f *File) { + filename := filepath.Join(homeDir(), dir, name) + if force || exists(filename) { + f.fullname = filename + } + } +} + +// WithPerms specifies the permissions to create a file with (default 0600). 
+func WithPerms(perms int) Opt { + return func(f *File) { + f.perms = perms + } +} + +func (f *File) Name() string { + return f.fullname +} + +func (f *File) Open() (io.ReadCloser, error) { + return os.Open(f.fullname) +} + +func (f *File) Write(rdr io.Reader) error { + // create temp file/open + dir := filepath.Dir(f.fullname) + if err := os.MkdirAll(dir, 0o700); err != nil { + return err + } + tmp, err := os.CreateTemp(dir, filepath.Base(f.fullname)) + if err != nil { + return err + } + tmpStat, err := tmp.Stat() + if err != nil { + return err + } + tmpName := tmpStat.Name() + tmpFullname := filepath.Join(dir, tmpName) + defer os.Remove(tmpFullname) + + // copy from rdr to temp file + _, err = io.Copy(tmp, rdr) + errC := tmp.Close() + if err != nil { + return fmt.Errorf("failed to write config: %w", err) + } + if errC != nil { + return fmt.Errorf("failed to close config: %w", errC) + } + + // adjust file ownership/permissions + mode := os.FileMode(0o600) + uid := os.Getuid() + gid := os.Getgid() + // adjust defaults based on existing file if available + stat, err := os.Stat(f.fullname) + if err == nil { + // adjust mode to existing file + if stat.Mode().IsRegular() { + mode = stat.Mode() + } + uid, gid, _ = getFileOwner(stat) + } else if !errors.Is(err, fs.ErrNotExist) { + return err + } + + // update mode and owner of temp file + //#nosec G703 tempfile location is user controlled + if err := os.Chmod(tmpFullname, mode); err != nil { + return err + } + if uid > 0 && gid > 0 { + //#nosec G703 tempfile location is user controlled + _ = os.Chown(tmpFullname, uid, gid) + } + // move temp file to target filename + //#nosec G703 tempfile location is user controlled + return os.Rename(tmpFullname, f.fullname) +} + +func exists(name string) bool { + _, err := os.Stat(name) + return err == nil +} + +func homeDir() string { + home := os.Getenv(homeEnv) + if home == "" { + u, err := user.Current() + if err == nil { + home = u.HomeDir + } + } + return home +} diff --git a/vendor/github.com/regclient/regclient/internal/conffile/conffile_unix.go b/vendor/github.com/regclient/regclient/internal/conffile/conffile_unix.go new file mode 100644 index 000000000..ffb1fd1b9 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/conffile/conffile_unix.go @@ -0,0 +1,37 @@ +//go:build !windows + +package conffile + +import ( + "io/fs" + "os" + "path/filepath" + "syscall" +) + +const ( + appDirEnv = "XDG_CONFIG_HOME" + homeEnv = "HOME" +) + +func appDir() string { + appDir := os.Getenv(appDirEnv) + if appDir == "" { + home := homeDir() + appDir = filepath.Join(home, ".config") + } + return appDir +} + +func getFileOwner(stat fs.FileInfo) (int, int, error) { + var uid, gid int + if sysstat, ok := stat.Sys().(*syscall.Stat_t); ok { + uid = int(sysstat.Uid) + gid = int(sysstat.Gid) + } + return uid, gid, nil +} + +func osString(unix, _ string) string { + return unix +} diff --git a/vendor/github.com/regclient/regclient/internal/conffile/conffile_windows.go b/vendor/github.com/regclient/regclient/internal/conffile/conffile_windows.go new file mode 100644 index 000000000..5008dd477 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/conffile/conffile_windows.go @@ -0,0 +1,31 @@ +//go:build windows + +package conffile + +import ( + "io/fs" + "os" + "path/filepath" +) + +const ( + appDirEnv = "APPDATA" + homeEnv = "USERPROFILE" +) + +func appDir() string { + appDir := os.Getenv(appDirEnv) + if appDir == "" { + home := homeDir() + appDir = filepath.Join(home, "AppData") + } + return appDir +} + 
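+// Stepping back from the per-OS helpers: a caller inside this module might
+// resolve a config file like this (a hypothetical sketch; the directory and
+// environment variable names are illustrative):
+//
+//	f := New(
+//		WithHomeDir(".myapp", "config.json", true),
+//		WithEnvFile("MYAPP_CONFIG"),
+//	)
+//	// the last successful option determines the filename, so MYAPP_CONFIG
+//	// overrides the home directory default whenever it is set
+//	rc, err := f.Open()
+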
+func getFileOwner(_ fs.FileInfo) (int, int, error) { + return 0, 0, nil +} + +func osString(_, win string) string { + return win +} diff --git a/vendor/github.com/regclient/regclient/internal/httplink/httplink.go b/vendor/github.com/regclient/regclient/internal/httplink/httplink.go new file mode 100644 index 000000000..5ccd4f91d --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/httplink/httplink.go @@ -0,0 +1,198 @@ +// Package httplink parses the Link header from HTTP responses according to RFC5988 +package httplink + +import ( + "fmt" + "strings" + + "github.com/regclient/regclient/types/errs" +) + +type ( + Links []Link + Link struct { + URI string + Param map[string]string + } +) + +type charLU byte + +var charLUs [256]charLU + +const ( + isSpace charLU = 1 << iota + isToken + isAlphaNum +) + +func init() { + for c := range 256 { + charLUs[c] = 0 + if strings.ContainsRune(" \t\r\n", rune(c)) { + charLUs[c] |= isSpace + } + if (rune('a') <= rune(c) && rune(c) <= rune('z')) || (rune('A') <= rune(c) && rune(c) <= rune('Z') || (rune('0') <= rune(c) && rune(c) <= rune('9'))) { + charLUs[c] |= isAlphaNum | isToken + } + if strings.ContainsRune("!#$%&'()*+-./:<=>?@[]^_`{|}~", rune(c)) { + charLUs[c] |= isToken + } + } +} + +// Parse reads "Link" http headers into an array of Link structs. +// Header array should be the output of resp.Header.Values("link"). +func Parse(headers []string) (Links, error) { + links := []Link{} + for _, h := range headers { + state := "init" + var ub, pnb, pvb []byte + parms := map[string]string{} + endLink := func() { + links = append(links, Link{ + URI: string(ub), + Param: parms, + }) + // reset state + ub, pnb, pvb = []byte{}, []byte{}, []byte{} + parms = map[string]string{} + } + endParm := func() { + if _, ok := parms[string(pnb)]; !ok { + parms[string(pnb)] = string(pvb) + } + // reset parm + pnb, pvb = []byte{}, []byte{} + } + for i, b := range []byte(h) { + switch state { + case "init": + if b == '<' { + state = "uriQuoted" + } else if charLUs[b]&isToken != 0 { + state = "uri" + ub = append(ub, b) + } else if charLUs[b]&isSpace != 0 || b == ',' { + // noop + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + case "uri": + // parse tokens until space or comma + if charLUs[b]&isToken != 0 { + ub = append(ub, b) + } else if charLUs[b]&isSpace != 0 { + state = "fieldSep" + } else if b == ';' { + state = "parmName" + } else if b == ',' { + state = "init" + endLink() + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + case "uriQuoted": + // parse tokens until quote + if b == '>' { + state = "fieldSep" + } else { + ub = append(ub, b) + } + case "fieldSep": + if b == ';' { + state = "parmName" + } else if b == ',' { + state = "init" + endLink() + } else if charLUs[b]&isSpace != 0 { + // noop + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + case "parmName": + if len(pnb) > 0 && b == '=' { + state = "parmValue" + } else if len(pnb) > 0 && b == '*' { + state = "parmNameStar" + } else if charLUs[b]&isAlphaNum != 0 { + pnb = append(pnb, b) + } else if len(pnb) == 0 && charLUs[b]&isSpace != 0 { + // noop + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + case "parmNameStar": + if b == '=' { + state = 
"parmValue" + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + case "parmValue": + if len(pvb) == 0 { + if charLUs[b]&isToken != 0 { + pvb = append(pvb, b) + } else if b == '"' { + state = "parmValueQuoted" + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + } else { + if charLUs[b]&isToken != 0 { + pvb = append(pvb, b) + } else if charLUs[b]&isSpace != 0 { + state = "fieldSep" + endParm() + } else if b == ';' { + state = "parmName" + endParm() + } else if b == ',' { + state = "init" + endParm() + endLink() + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + } + case "parmValueQuoted": + if b == '"' { + state = "fieldSep" + endParm() + } else { + pvb = append(pvb, b) + } + } + } + // check for valid state at end of header + switch state { + case "parmValue": + endParm() + endLink() + case "uri", "fieldSep": + endLink() + case "init": + // noop + default: + return nil, fmt.Errorf("unexpected end state %s for header %s: %w", state, h, errs.ErrParsingFailed) + } + } + + return links, nil +} + +// Get returns a link with a specific parm value, e.g. rel="next" +func (links Links) Get(parm, val string) (Link, error) { + for _, link := range links { + if link.Param != nil && link.Param[parm] == val { + return link, nil + } + } + return Link{}, errs.ErrNotFound +} diff --git a/vendor/github.com/regclient/regclient/internal/limitread/limitread.go b/vendor/github.com/regclient/regclient/internal/limitread/limitread.go new file mode 100644 index 000000000..77115ef0c --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/limitread/limitread.go @@ -0,0 +1,29 @@ +// Package limitread provides a reader that will error if the limit is ever exceeded +package limitread + +import ( + "fmt" + "io" + + "github.com/regclient/regclient/types/errs" +) + +type LimitRead struct { + Reader io.Reader + Limit int64 +} + +func (lr *LimitRead) Read(p []byte) (int, error) { + if lr.Limit < 0 { + return 0, fmt.Errorf("read limit exceeded%.0w", errs.ErrSizeLimitExceeded) + } + if int64(len(p)) > lr.Limit+1 { + p = p[0 : lr.Limit+1] + } + n, err := lr.Reader.Read(p) + lr.Limit -= int64(n) + if lr.Limit < 0 { + return n, fmt.Errorf("read limit exceeded%.0w", errs.ErrSizeLimitExceeded) + } + return n, err +} diff --git a/vendor/github.com/regclient/regclient/internal/pqueue/pqueue.go b/vendor/github.com/regclient/regclient/internal/pqueue/pqueue.go new file mode 100644 index 000000000..2eea09601 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/pqueue/pqueue.go @@ -0,0 +1,257 @@ +// Package pqueue implements a priority queue. +package pqueue + +import ( + "context" + "fmt" + "slices" + "sync" +) + +type Queue[T any] struct { + mu sync.Mutex + max int + next func(queued, active []*T) int + active []*T + queued []*T + wait []*chan struct{} +} + +// Opts is used to configure a new priority queue. +type Opts[T any] struct { + Max int // maximum concurrent entries, defaults to 1. + Next func(queued, active []*T) int // function to lookup index of next queued entry to release, defaults to oldest entry. +} + +// New creates a new priority queue. 
+func New[T any](opts Opts[T]) *Queue[T] {
+ if opts.Max <= 0 {
+ opts.Max = 1
+ }
+ return &Queue[T]{
+ max: opts.Max,
+ next: opts.Next,
+ }
+}
+
+// Acquire adds a new entry to the queue and returns once it is ready.
+// The returned function must be called when the queued job completes to release the next entry.
+// If there is any error, the returned function will be nil.
+func (q *Queue[T]) Acquire(ctx context.Context, e T) (func(), error) {
+ if q == nil {
+ return func() {}, nil
+ }
+ found, err := q.checkContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if found {
+ return func() {}, nil
+ }
+ q.mu.Lock()
+ if len(q.active)+len(q.queued) < q.max {
+ q.active = append(q.active, &e)
+ q.mu.Unlock()
+ return q.releaseFn(&e), nil
+ }
+ // limit reached, add to queue and wait
+ w := make(chan struct{}, 1)
+ q.queued = append(q.queued, &e)
+ q.wait = append(q.wait, &w)
+ q.mu.Unlock()
+ // wait on both context and queue
+ select {
+ case <-ctx.Done():
+ // context abort, remove queued entry
+ q.mu.Lock()
+ if i := slices.Index(q.queued, &e); i >= 0 {
+ q.queued = slices.Delete(q.queued, i, i+1)
+ q.wait = slices.Delete(q.wait, i, i+1)
+ q.mu.Unlock()
+ return nil, ctx.Err()
+ }
+ q.mu.Unlock()
+ // queued entry not found, assume a race between the context being canceled and the entry being released, so release the next entry
+ q.release(&e)
+ return nil, ctx.Err()
+ case <-w:
+ return q.releaseFn(&e), nil
+ }
+}
+
+// TryAcquire attempts to add an entry to the list of active entries.
+// If the returned function is nil, the queue was not available.
+// If the returned function is not nil, it must be called when the job is complete to release the next entry.
+func (q *Queue[T]) TryAcquire(ctx context.Context, e T) (func(), error) {
+ if q == nil {
+ return func() {}, nil
+ }
+ found, err := q.checkContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if found {
+ return func() {}, nil
+ }
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ if len(q.active)+len(q.queued) < q.max {
+ q.active = append(q.active, &e)
+ return q.releaseFn(&e), nil
+ }
+ return nil, nil
+}
+
+// release next entry or noop.
+func (q *Queue[T]) release(prev *T) {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ // remove prev entry from active list
+ if i := slices.Index(q.active, prev); i >= 0 {
+ q.active = slices.Delete(q.active, i, i+1)
+ }
+ // skip checks when at limit or nothing queued
+ if len(q.queued) == 0 {
+ if len(q.active) == 0 {
+ // free up slices if this was the last active entry
+ q.active = nil
+ q.queued = nil
+ q.wait = nil
+ }
+ return
+ }
+ if len(q.active) >= q.max {
+ return
+ }
+ i := 0
+ if q.next != nil && len(q.queued) > 1 {
+ i = q.next(q.queued, q.active)
+ // validate response
+ i = max(min(i, len(q.queued)-1), 0)
+ }
+ // release queued entry, move to active list, and remove from queued/wait lists
+ close(*q.wait[i])
+ q.active = append(q.active, q.queued[i])
+ q.queued = slices.Delete(q.queued, i, i+1)
+ q.wait = slices.Delete(q.wait, i, i+1)
+}
+
+// releaseFn is a convenience wrapper around [release].
+func (q *Queue[T]) releaseFn(prev *T) func() {
+ return func() {
+ q.release(prev)
+ }
+}
+
+// TODO: is there a way to make a different context key for each generic type?
+type ctxType int
+
+var ctxKey ctxType
+
+type valMulti[T any] struct {
+ qList []*Queue[T]
+}
+
+// AcquireMulti is used to simultaneously lock multiple queues without the risk of deadlock.
+// The returned context needs to be used on calls to [Acquire] or [TryAcquire] which will immediately succeed since the resource is already acquired.
+// Attempting to acquire other resources with [Acquire], [TryAcquire], or [AcquireMulti] using the returned context will fail for being outside of the transaction.
+// The returned function must be called to release the resources.
+// The returned function is not thread safe, ensure no other simultaneous calls to [Acquire] or [TryAcquire] using the returned context have finished before it is called.
+func AcquireMulti[T any](ctx context.Context, e T, qList ...*Queue[T]) (context.Context, func(), error) {
+ // verify context not already holding locks
+ qCtx := ctx.Value(ctxKey)
+ if qCtx != nil {
+ if qCtxVal, ok := qCtx.(*valMulti[T]); !ok || qCtxVal.qList != nil {
+ return ctx, nil, fmt.Errorf("context already used by another AcquireMulti request")
+ }
+ }
+ // delete nil entries
+ for i := len(qList) - 1; i >= 0; i-- {
+ if qList[i] == nil {
+ qList = slices.Delete(qList, i, i+1)
+ }
+ }
+ // empty/nil list is a noop
+ if len(qList) == 0 {
+ return ctx, func() {}, nil
+ }
+ // dedup entries from the list
+ for i := len(qList) - 2; i >= 0; i-- {
+ for j := len(qList) - 1; j > i; j-- {
+ if qList[i] == qList[j] {
+ qList[j] = qList[len(qList)-1]
+ qList = qList[:len(qList)-1]
+ }
+ }
+ }
+ // Loop through queues to acquire, waiting on the first, and attempting the remaining.
+ // If any of the remaining entries cannot be immediately acquired, reset and make it the new queue to wait on.
+ lockI := 0
+ doneList := make([]func(), len(qList))
+ for {
+ acquired := true
+ i := 0
+ done, err := qList[lockI].Acquire(ctx, e)
+ if err != nil {
+ return ctx, nil, err
+ }
+ doneList[lockI] = done
+ for i < len(qList) {
+ if i != lockI {
+ doneList[i], err = qList[i].TryAcquire(ctx, e)
+ if doneList[i] == nil || err != nil {
+ acquired = false
+ break
+ }
+ }
+ i++
+ }
+ if err == nil && acquired {
+ break
+ }
+ // cleanup on failed attempt
+ if lockI > i {
+ doneList[lockI]()
+ }
+ // track blocking index for a retry
+ lockI = i
+ for i > 0 {
+ i--
+ doneList[i]()
+ }
+ // abort on errors
+ if err != nil {
+ return ctx, nil, err
+ }
+ }
+ // success, update context
+ ctxVal := valMulti[T]{qList: qList}
+ newCtx := context.WithValue(ctx, ctxKey, &ctxVal)
+ cleanup := func() {
+ ctxVal.qList = nil
+ // dequeue in reverse order to minimize chance of another AcquireMulti being freed and immediately blocking on the next queue
+ for i := len(doneList) - 1; i >= 0; i-- {
+ doneList[i]()
+ }
+ }
+ return newCtx, cleanup, nil
+}
+
+func (q *Queue[T]) checkContext(ctx context.Context) (bool, error) {
+ qCtx := ctx.Value(ctxKey)
+ if qCtx == nil {
+ return false, nil
+ }
+ qCtxVal, ok := qCtx.(*valMulti[T])
+ if !ok {
+ return false, nil // another type is using the context, treat it as unset
+ }
+ if qCtxVal.qList == nil {
+ return false, nil
+ }
+ if slices.Contains(qCtxVal.qList, q) {
+ // instance already locked
+ return true, nil
+ }
+ return true, fmt.Errorf("cannot acquire new locks during a transaction")
+}
diff --git a/vendor/github.com/regclient/regclient/internal/reghttp/http.go b/vendor/github.com/regclient/regclient/internal/reghttp/http.go
new file mode 100644
index 000000000..7f857881c
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/reghttp/http.go
@@ -0,0 +1,975 @@
+// Package reghttp is used for HTTP requests to a registry
+package reghttp
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "regexp"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/regclient/regclient/config" + "github.com/regclient/regclient/internal/auth" + "github.com/regclient/regclient/internal/pqueue" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/warning" +) + +var ( + defaultDelayInit, _ = time.ParseDuration("0.1s") + defaultDelayMax, _ = time.ParseDuration("30s") + warnRegexp = regexp.MustCompile(`^299\s+-\s+"([^"]+)"`) +) + +const ( + DefaultRetryLimit = 5 // number of times a request will be retried + backoffResetCount = 5 // number of successful requests needed to reduce the backoff +) + +// Client is an HTTP client wrapper. +// It handles features like authentication, retries, backoff delays, TLS settings. +type Client struct { + httpClient *http.Client // upstream [http.Client], this is wrapped per repository for an auth handler on redirects + getConfigHost func(string) *config.Host // call-back to get the [config.Host] for a specific registry + host map[string]*clientHost // host specific settings, wrap access with a mutex lock + rootCAPool [][]byte // list of root CAs for configuring the http.Client transport + rootCADirs []string // list of directories for additional root CAs + retryLimit int // number of retries before failing a request, this applies to each host, and each request + delayInit time.Duration // how long to initially delay requests on a failure + delayMax time.Duration // maximum time to delay a request + slog *slog.Logger // logging for tracing and failures + userAgent string // user agent to specify in http request headers + mu sync.Mutex // mutex to prevent data races +} + +type clientHost struct { + config *config.Host // config entry + httpClient *http.Client // modified http client for registry specific settings + userAgent string // user agent to specify in http request headers + slog *slog.Logger // logging for tracing and failures + auth map[string]*auth.Auth // map of auth handlers by repository + backoffCur int // current count of backoffs for this host + backoffLast time.Time // time the last request was released, this may be in the future if there is a queue, or zero if no delay is needed + backoffReset int // count of successful requests when a backoff is experienced, once [backoffResetCount] is reached, [backoffCur] is reduced by one and this is reset to 0 + reqFreq time.Duration // how long between submitting requests for this host + reqNext time.Time // time to release the next request + throttle *pqueue.Queue[reqmeta.Data] // limit concurrent requests to the host + mu sync.Mutex // mutex to prevent data races +} + +// Req is a request to send to a registry. 
+type Req struct { + MetaKind reqmeta.Kind // kind of request for the priority queue + Host string // registry name, hostname and mirrors will be looked up from host configuration + Method string // http method to call + DirectURL *url.URL // url to query, overrides repository, path, and query + Repository string // repository to scope the request + Path string // path of the request within a repository + Query url.Values // url query parameters + BodyLen int64 // length of body to send + BodyBytes []byte // bytes of the body, overridden by BodyFunc + BodyFunc func() (io.ReadCloser, error) // function to return a new body + Headers http.Header // headers to send in the request + NoPrefix bool // do not include the repository prefix + NoMirrors bool // do not send request to a mirror + ExpectLen int64 // expected size of the returned body + TransactLen int64 // size of an overall transaction for the priority queue + IgnoreErr bool // ignore http errors and do not trigger backoffs +} + +// Resp is used to handle the result of a request. +type Resp struct { + ctx context.Context + client *Client + req *Req + resp *http.Response + mirror string + done bool + reader io.Reader + readCur, readMax int64 + retryCount int + throttleDone func() +} + +// Opts is used to configure client options. +type Opts func(*Client) + +// NewClient returns a client for handling requests. +func NewClient(opts ...Opts) *Client { + c := Client{ + httpClient: &http.Client{}, + host: map[string]*clientHost{}, + retryLimit: DefaultRetryLimit, + delayInit: defaultDelayInit, + delayMax: defaultDelayMax, + slog: slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{})), + rootCAPool: [][]byte{}, + rootCADirs: []string{}, + } + for _, opt := range opts { + opt(&c) + } + return &c +} + +// WithCerts adds certificates. +func WithCerts(certs [][]byte) Opts { + return func(c *Client) { + c.rootCAPool = append(c.rootCAPool, certs...) + } +} + +// WithCertDirs adds directories to check for host specific certs. +func WithCertDirs(dirs []string) Opts { + return func(c *Client) { + c.rootCADirs = append(c.rootCADirs, dirs...) + } +} + +// WithCertFiles adds certificates by filename. +func WithCertFiles(files []string) Opts { + return func(c *Client) { + for _, f := range files { + //#nosec G304 command is run by a user accessing their own files + cert, err := os.ReadFile(f) + if err != nil { + c.slog.Warn("Failed to read certificate", + slog.String("err", err.Error()), + slog.String("file", f)) + } else { + c.rootCAPool = append(c.rootCAPool, cert) + } + } + } +} + +// WithConfigHostFn adds the callback to request a [config.Host] struct. +// The function must normalize the hostname for Docker Hub support. +func WithConfigHostFn(gch func(string) *config.Host) Opts { + return func(c *Client) { + c.getConfigHost = gch + } +} + +// WithDelay initial time to wait between retries (increased with exponential backoff). +func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts { + return func(c *Client) { + if delayInit > 0 { + c.delayInit = delayInit + } + // delayMax must be at least delayInit, if 0 initialize to 30x delayInit + if delayMax > c.delayInit { + c.delayMax = delayMax + } else if delayMax > 0 { + c.delayMax = c.delayInit + } else { + c.delayMax = c.delayInit * 30 + } + } +} + +// WithHTTPClient uses a specific http client with retryable requests. 
+func WithHTTPClient(hc *http.Client) Opts { + return func(c *Client) { + c.httpClient = hc + } +} + +// WithRetryLimit restricts the number of retries (defaults to 5). +func WithRetryLimit(rl int) Opts { + return func(c *Client) { + if rl > 0 { + c.retryLimit = rl + } + } +} + +// WithLog injects a slog Logger configuration. +func WithLog(slog *slog.Logger) Opts { + return func(c *Client) { + c.slog = slog + } +} + +// WithTransport uses a specific http transport with retryable requests. +func WithTransport(t *http.Transport) Opts { + return func(c *Client) { + c.httpClient = &http.Client{Transport: t} + } +} + +// WithUserAgent sets a user agent header. +func WithUserAgent(ua string) Opts { + return func(c *Client) { + c.userAgent = ua + } +} + +// Do runs a request, returning the response result. +func (c *Client) Do(ctx context.Context, req *Req) (*Resp, error) { + resp := &Resp{ + ctx: ctx, + client: c, + req: req, + readCur: 0, + readMax: req.ExpectLen, + } + err := resp.next() + return resp, err +} + +// next sends requests until a mirror responds or all requests fail. +func (resp *Resp) next() error { + var err error + c := resp.client + req := resp.req + // lookup reqHost entry + reqHost := c.getHost(req.Host) + // create sorted list of mirrors, based on backoffs, upstream, and priority + hosts := make([]*clientHost, 0, 1+len(reqHost.config.Mirrors)) + if !req.NoMirrors { + for _, m := range reqHost.config.Mirrors { + hosts = append(hosts, c.getHost(m)) + } + } + hosts = append(hosts, reqHost) + sort.Slice(hosts, sortHostsCmp(hosts, reqHost.config.Name)) + // loop over requests to mirrors and retries + curHost := 0 + for { + backoff := false + dropHost := false + retryHost := false + if len(hosts) == 0 { + if err != nil { + return err + } + return errs.ErrAllRequestsFailed + } + if curHost >= len(hosts) { + curHost = 0 + } + h := hosts[curHost] + resp.mirror = h.config.Name + // there is an intentional extra retry in this check to allow for auth requests + if resp.retryCount > c.retryLimit { + return errs.ErrRetryLimitExceeded + } + resp.retryCount++ + + // check that context isn't canceled/done + ctxErr := resp.ctx.Err() + if ctxErr != nil { + return ctxErr + } + // wait for other concurrent requests to this host + throttleDone, throttleErr := h.throttle.Acquire(resp.ctx, reqmeta.Data{ + Kind: req.MetaKind, + Size: req.BodyLen + req.ExpectLen + req.TransactLen, + }) + if throttleErr != nil { + return throttleErr + } + + // try each host in a closure to handle all the backoff/dropHost from one place + loopErr := func() error { + var err error + if req.Method == "HEAD" && h.config.APIOpts != nil { + var disableHead bool + disableHead, err = strconv.ParseBool(h.config.APIOpts["disableHead"]) + if err == nil && disableHead { + dropHost = true + return fmt.Errorf("head requests disabled for host \"%s\": %w", h.config.Name, errs.ErrUnsupportedAPI) + } + } + + // build the url + var u url.URL + if req.DirectURL != nil { + u = *req.DirectURL + } else { + u = url.URL{ + Host: h.config.Hostname, + Scheme: "https", + } + path := strings.Builder{} + path.WriteString("/v2") + if h.config.PathPrefix != "" && !req.NoPrefix { + path.WriteString("/" + h.config.PathPrefix) + } + if req.Repository != "" { + path.WriteString("/" + req.Repository) + } + path.WriteString("/" + req.Path) + u.Path = path.String() + if h.config.TLS == config.TLSDisabled { + u.Scheme = "http" + } + query := url.Values{} + if req.Query != nil { + query = req.Query + } + if h.config.Hostname != reqHost.config.Hostname { + 
query.Set("ns", reqHost.config.Hostname) + } + u.RawQuery = query.Encode() + } + // close previous response + if resp.resp != nil && resp.resp.Body != nil { + _ = resp.resp.Body.Close() + } + // delay for backoff if needed + bu := resp.backoffGet() + if !bu.IsZero() && bu.After(time.Now()) { + sleepTime := time.Until(bu) + c.slog.Debug("Sleeping for backoff", + slog.String("Host", h.config.Name), + slog.Duration("Duration", sleepTime)) + select { + case <-resp.ctx.Done(): + return errs.ErrCanceled + case <-time.After(sleepTime): + } + } + var httpReq *http.Request + httpReq, err = http.NewRequestWithContext(resp.ctx, req.Method, u.String(), nil) + if err != nil { + dropHost = true + return err + } + if req.BodyFunc != nil { + body, err := req.BodyFunc() + if err != nil { + dropHost = true + return err + } + httpReq.Body = body + httpReq.GetBody = req.BodyFunc + httpReq.ContentLength = req.BodyLen + } else if len(req.BodyBytes) > 0 { + body := io.NopCloser(bytes.NewReader(req.BodyBytes)) + httpReq.Body = body + httpReq.GetBody = func() (io.ReadCloser, error) { return body, nil } + httpReq.ContentLength = req.BodyLen + } + if len(req.Headers) > 0 { + httpReq.Header = req.Headers.Clone() + } + if c.userAgent != "" && httpReq.Header.Get("User-Agent") == "" { + httpReq.Header.Add("User-Agent", c.userAgent) + } + if resp.readCur > 0 && resp.readMax > 0 { + if req.Headers.Get("Range") == "" { + httpReq.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", resp.readCur, resp.readMax)) + } else { + // TODO: support Seek within a range request + dropHost = true + return fmt.Errorf("unable to resume a connection within a range request") + } + } + + hAuth := h.getAuth(req.Repository) + if hAuth != nil { + // include docker generated scope to emulate docker clients + if req.Repository != "" { + scope := "repository:" + req.Repository + ":pull" + if req.Method != "HEAD" && req.Method != "GET" { + scope = scope + ",push" + } + _ = hAuth.AddScope(h.config.Hostname, scope) + } + // add auth headers + err = hAuth.UpdateRequest(httpReq) + if err != nil { + if errors.Is(err, errs.ErrHTTPUnauthorized) { + dropHost = true + } else { + backoff = true + } + return err + } + } + + // delay for the rate limit + if h.reqFreq > 0 { + sleep := time.Duration(0) + h.mu.Lock() + if time.Now().Before(h.reqNext) { + sleep = time.Until(h.reqNext) + h.reqNext = h.reqNext.Add(h.reqFreq) + } else { + h.reqNext = time.Now().Add(h.reqFreq) + } + h.mu.Unlock() + if sleep > 0 { + time.Sleep(sleep) + } + } + + // send request + hc := h.getHTTPClient(req.Repository) + //#nosec G704 inputs are user controlled and sanitized + resp.resp, err = hc.Do(httpReq) + if err != nil { + c.slog.Debug("Request failed", + slog.String("URL", u.String()), + slog.String("err", err.Error())) + backoff = true + return err + } + + statusCode := resp.resp.StatusCode + if statusCode < 200 || statusCode >= 300 { + switch statusCode { + case http.StatusUnauthorized: + // if auth can be done, retry same host without delay, otherwise drop/backoff + if hAuth != nil { + err = hAuth.HandleResponse(resp.resp) + } else { + err = fmt.Errorf("authentication handler unavailable") + } + if err != nil { + if errors.Is(err, errs.ErrEmptyChallenge) || errors.Is(err, errs.ErrNoNewChallenge) || errors.Is(err, errs.ErrHTTPUnauthorized) { + c.slog.Debug("Failed to handle auth request", + slog.String("URL", u.String()), + slog.String("Err", err.Error())) + } else { + c.slog.Warn("Failed to handle auth request", + slog.String("URL", u.String()), + slog.String("Err", err.Error())) + } 
+ dropHost = true + } else { + err = fmt.Errorf("authentication required") + retryHost = true + } + return err + case http.StatusNotFound: + // if not found, drop mirror for this req, but other requests don't need backoff + dropHost = true + case http.StatusRequestedRangeNotSatisfiable: + // if range request error (blob push), drop mirror for this req, but other requests don't need backoff + dropHost = true + case http.StatusTooManyRequests, http.StatusRequestTimeout, http.StatusGatewayTimeout, http.StatusBadGateway, http.StatusInternalServerError: + // server is likely overloaded, backoff but still retry + backoff = true + default: + // all other errors indicate a bigger issue, don't retry and set backoff + backoff = true + dropHost = true + } + errHTTP := HTTPError(resp.resp.StatusCode) + errBody, _ := io.ReadAll(resp.resp.Body) + _ = resp.resp.Body.Close() + return fmt.Errorf("request failed: %w: %s", errHTTP, errBody) + } + + resp.reader = resp.resp.Body + resp.done = false + // set variables from headers if found + clHeader := resp.resp.Header.Get("Content-Length") + if resp.readCur == 0 && clHeader != "" { + cl, parseErr := strconv.ParseInt(clHeader, 10, 64) + if parseErr != nil { + c.slog.Debug("failed to parse content-length header", + slog.String("err", parseErr.Error()), + slog.String("header", clHeader)) + } else if resp.readMax > 0 { + if resp.readMax != cl { + return fmt.Errorf("unexpected content-length, expected %d, received %d", resp.readMax, cl) + } + } else { + resp.readMax = cl + } + } + // verify Content-Range header when range request used, fail if missing + if httpReq.Header.Get("Range") != "" && resp.resp.Header.Get("Content-Range") == "" { + dropHost = true + _ = resp.resp.Body.Close() + return fmt.Errorf("range request not supported by server") + } + return nil + }() + // return on success + if loopErr == nil { + resp.throttleDone = throttleDone + return nil + } + // backoff, dropHost, and/or go to next host in the list + if backoff { + if req.IgnoreErr { + // don't set a backoff, immediately drop the host when errors ignored + dropHost = true + } else { + boErr := resp.backoffSet() + if boErr != nil { + // reached backoff limit + dropHost = true + } + } + } + throttleDone() + // when error does not allow retries, abort with the last known err value + if err != nil && errors.Is(loopErr, errs.ErrNotRetryable) { + return err + } + err = loopErr + if dropHost { + hosts = slices.Delete(hosts, curHost, curHost+1) + } else if !retryHost { + curHost++ + } + } +} + +// GetThrottle returns the current [pqueue.Queue] for a host used to throttle connections. +// This can be used to acquire multiple throttles before performing a request across multiple hosts. +func (c *Client) GetThrottle(host string) *pqueue.Queue[reqmeta.Data] { + ch := c.getHost(host) + return ch.throttle +} + +// HTTPResponse returns the [http.Response] from the last request. +func (resp *Resp) HTTPResponse() *http.Response { + return resp.resp +} + +// Read provides a retryable read from the body of the response. +func (resp *Resp) Read(b []byte) (int, error) { + if resp.done { + return 0, io.EOF + } + if resp.resp == nil { + return 0, errs.ErrNotFound + } + // perform the read + i, err := resp.reader.Read(b) + resp.readCur += int64(i) + if err == io.EOF || err == io.ErrUnexpectedEOF { + if resp.resp.Request.Method == "HEAD" || resp.readCur >= resp.readMax { + resp.backoffReset() + resp.done = true + } else { + // short read, retry? 
+			resp.client.slog.Debug("EOF before reading all content, retrying",
+				slog.Int64("curRead", resp.readCur),
+				slog.Int64("contentLen", resp.readMax))
+			// retry
+			respErr := resp.backoffSet()
+			if respErr == nil {
+				respErr = resp.next()
+			}
+			// unrecoverable EOF
+			if respErr != nil {
+				resp.client.slog.Warn("Failed to recover from short read",
+					slog.String("err", respErr.Error()))
+				resp.done = true
+				return i, err
+			}
+			// retry successful, no EOF
+			return i, nil
+		}
+	}
+
+	if err == nil {
+		return i, nil
+	}
+	return i, err
+}
+
+// Close frees up resources from the request.
+func (resp *Resp) Close() error {
+	if resp.throttleDone != nil {
+		resp.throttleDone()
+		resp.throttleDone = nil
+	}
+	if resp.resp == nil {
+		return errs.ErrNotFound
+	}
+	if !resp.done {
+		resp.backoffReset()
+	}
+	resp.done = true
+	return resp.resp.Body.Close()
+}
+
+// Seek provides a limited ability to seek within the request response.
+func (resp *Resp) Seek(offset int64, whence int) (int64, error) {
+	newOffset := resp.readCur
+	switch whence {
+	case io.SeekStart:
+		newOffset = offset
+	case io.SeekCurrent:
+		newOffset += offset
+	case io.SeekEnd:
+		if resp.readMax <= 0 {
+			return resp.readCur, fmt.Errorf("seek from end is not supported")
+		} else if resp.readMax+offset < 0 {
+			return resp.readCur, fmt.Errorf("seek past beginning of the file is not supported")
+		}
+		newOffset = resp.readMax + offset
+	default:
+		return resp.readCur, fmt.Errorf("unknown value of whence: %d", whence)
+	}
+	if newOffset != resp.readCur {
+		resp.readCur = newOffset
+		// rerun the request to restart
+		resp.retryCount-- // do not count a seek as a retry
+		err := resp.next()
+		if err != nil {
+			return resp.readCur, err
+		}
+	}
+	return resp.readCur, nil
+}
+
+func (resp *Resp) backoffGet() time.Time {
+	c := resp.client
+	ch := c.getHost(resp.mirror)
+	ch.mu.Lock()
+	defer ch.mu.Unlock()
+	if ch.backoffCur > 0 {
+		delay := c.delayInit << ch.backoffCur
+		delay = min(delay, c.delayMax)
+		next := ch.backoffLast.Add(delay)
+		now := time.Now()
+		if now.After(next) {
+			next = now
+		}
+		ch.backoffLast = next
+		return next
+	}
+	// reset a stale "retry-after" time
+	if !ch.backoffLast.IsZero() && ch.backoffLast.Before(time.Now()) {
+		ch.backoffLast = time.Time{}
+	}
+	return ch.backoffLast
+}
+
+func (resp *Resp) backoffSet() error {
+	c := resp.client
+	ch := c.getHost(resp.mirror)
+	ch.mu.Lock()
+	defer ch.mu.Unlock()
+	// check rate limit header and use that directly if possible
+	if resp.resp != nil && resp.resp.Header.Get("Retry-After") != "" {
+		ras := resp.resp.Header.Get("Retry-After")
+		ra, _ := time.ParseDuration(ras + "s")
+		if ra > 0 {
+			next := time.Now().Add(ra)
+			if ch.backoffLast.Before(next) {
+				ch.backoffLast = next
+			}
+			return nil
+		}
+	}
+	// Else track the number of backoffs and fail when the limit is exceeded.
+	// New requests always get at least one try, but fail fast if the server has been throwing errors.
+	ch.backoffCur++
+	if ch.backoffLast.IsZero() {
+		ch.backoffLast = time.Now()
+	}
+	if ch.backoffCur >= c.retryLimit {
+		return fmt.Errorf("%w: backoffs %d", errs.ErrBackoffLimit, ch.backoffCur)
+	}
+
+	return nil
+}
+
+func (resp *Resp) backoffReset() {
+	c := resp.client
+	ch := c.getHost(resp.mirror)
+	ch.mu.Lock()
+	defer ch.mu.Unlock()
+	if ch.backoffCur > 0 {
+		ch.backoffReset++
+		// If enough successful requests are seen, lower the backoffCur count.
+		// This requires multiple successful requests of a flaky server, but quickly drops when above the retry limit.
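+		// Worked example under the package defaults above: with backoffResetCount = 5,
+		// the sixth consecutive successful request lowers backoffCur by one.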
+ if ch.backoffReset > backoffResetCount || ch.backoffCur > c.retryLimit { + ch.backoffReset = 0 + ch.backoffCur-- + if ch.backoffCur == 0 { + // reset the last time to the zero value + ch.backoffLast = time.Time{} + } + } + } +} + +// getHost looks up or creates a clientHost for a given registry. +func (c *Client) getHost(host string) *clientHost { + c.mu.Lock() + defer c.mu.Unlock() + if h, ok := c.host[host]; ok { + return h + } + var conf *config.Host + if c.getConfigHost != nil { + conf = c.getConfigHost(host) + } else { + conf = config.HostNewName(host) + } + if conf.Name != host { + if h, ok := c.host[conf.Name]; ok { + return h + } + } + h := &clientHost{ + config: conf, + userAgent: c.userAgent, + slog: c.slog, + auth: map[string]*auth.Auth{}, + } + if h.config.ReqPerSec > 0 { + h.reqFreq = time.Duration(float64(time.Second) / h.config.ReqPerSec) + } + if h.config.ReqConcurrent > 0 { + h.throttle = pqueue.New(pqueue.Opts[reqmeta.Data]{Max: int(h.config.ReqConcurrent), Next: reqmeta.DataNext}) + } + // copy the http client and configure registry specific settings + hc := *c.httpClient + h.httpClient = &hc + if h.httpClient.Transport == nil { + h.httpClient.Transport = http.DefaultTransport.(*http.Transport).Clone() + } + // configure transport for insecure requests and root certs + if h.config.TLS == config.TLSInsecure || len(c.rootCAPool) > 0 || len(c.rootCADirs) > 0 || h.config.RegCert != "" || (h.config.ClientCert != "" && h.config.ClientKey != "") { + t, ok := h.httpClient.Transport.(*http.Transport) + if ok { + var tlsc *tls.Config + if t.TLSClientConfig != nil { + tlsc = t.TLSClientConfig.Clone() + } else { + //#nosec G402 the default TLS 1.2 minimum version is allowed to support older registries + tlsc = &tls.Config{} + } + if h.config.TLS == config.TLSInsecure { + tlsc.InsecureSkipVerify = true + } else { + rootPool, err := makeRootPool(c.rootCAPool, c.rootCADirs, h.config.Hostname, h.config.RegCert) + if err != nil { + c.slog.Warn("failed to setup CA pool", + slog.String("err", err.Error())) + } else { + tlsc.RootCAs = rootPool + } + } + if h.config.ClientCert != "" && h.config.ClientKey != "" { + cert, err := tls.X509KeyPair([]byte(h.config.ClientCert), []byte(h.config.ClientKey)) + if err != nil { + c.slog.Warn("failed to configure client certs", + slog.String("err", err.Error())) + } else { + tlsc.Certificates = []tls.Certificate{cert} + } + } + t.TLSClientConfig = tlsc + h.httpClient.Transport = t + } + } + // wrap the transport for logging and to handle warning headers + h.httpClient.Transport = &wrapTransport{c: c, orig: h.httpClient.Transport} + + c.host[conf.Name] = h + if conf.Name != host { + // save another reference for faster lookups + c.host[host] = h + } + return h +} + +// getHTTPClient returns a client specific to the repo being queried. +// Repository specific authentication needs a dedicated CheckRedirect handler. 
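+// The copy below is shallow by design; for example (repository names are
+// hypothetical):
+//
+//	a := ch.getHTTPClient("library/alpine")
+//	b := ch.getHTTPClient("library/debian")
+//	// a and b share the same Transport; only CheckRedirect differs.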
+func (ch *clientHost) getHTTPClient(repo string) *http.Client { + hc := *ch.httpClient + hc.CheckRedirect = ch.checkRedirect(repo, hc.CheckRedirect) + return &hc +} + +// checkRedirect wraps http.CheckRedirect to inject auth headers to specific hosts in the redirect chain +func (ch *clientHost) checkRedirect(repo string, orig func(req *http.Request, via []*http.Request) error) func(req *http.Request, via []*http.Request) error { + return func(req *http.Request, via []*http.Request) error { + // fail on too many redirects + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + // add auth headers if appropriate for the target host + hAuth := ch.getAuth(repo) + err := hAuth.UpdateRequest(req) + if err != nil { + return err + } + // wrap original redirect check + if orig != nil { + return orig(req, via) + } + return nil + } +} + +// getAuth returns an auth, which may be repository specific. +func (ch *clientHost) getAuth(repo string) *auth.Auth { + ch.mu.Lock() + defer ch.mu.Unlock() + if !ch.config.RepoAuth { + repo = "" // without RepoAuth, unset the provided repo + } + if _, ok := ch.auth[repo]; !ok { + ch.auth[repo] = auth.NewAuth( + auth.WithLog(ch.slog), + auth.WithHTTPClient(ch.httpClient), + auth.WithCreds(ch.AuthCreds()), + auth.WithClientID(ch.userAgent), + ) + } + return ch.auth[repo] +} + +func (ch *clientHost) AuthCreds() func(h string) auth.Cred { + if ch == nil || ch.config == nil { + return auth.DefaultCredsFn + } + return func(h string) auth.Cred { + hCred := ch.config.GetCred() + return auth.Cred{User: hCred.User, Password: hCred.Password, Token: hCred.Token} + } +} + +type wrapTransport struct { + c *Client + orig http.RoundTripper +} + +func (wt *wrapTransport) RoundTrip(req *http.Request) (*http.Response, error) { + resp, err := wt.orig.RoundTrip(req) + // copy headers to censor auth field + reqHead := req.Header.Clone() + if reqHead.Get("Authorization") != "" { + reqHead.Set("Authorization", "[censored]") + } + if err != nil { + wt.c.slog.Debug("reg http request", + slog.String("req-method", req.Method), + slog.String("req-url", req.URL.String()), + slog.Any("req-headers", reqHead), + slog.String("err", err.Error())) + } else { + // extract any warnings + for _, wh := range resp.Header.Values("Warning") { + if match := warnRegexp.FindStringSubmatch(wh); len(match) == 2 { + // TODO(bmitch): pass other fields (registry hostname) with structured logging + warning.Handle(req.Context(), wt.c.slog, match[1]) + } + } + wt.c.slog.Log(req.Context(), types.LevelTrace, "reg http request", + slog.String("req-method", req.Method), + slog.String("req-url", req.URL.String()), + slog.Any("req-headers", reqHead), + slog.String("resp-status", resp.Status), + slog.Any("resp-headers", resp.Header)) + } + return resp, err +} + +// HTTPError returns an error based on the status code. 
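+// Callers can match the wrapped sentinel errors, e.g.:
+//
+//	if errors.Is(HTTPError(resp.StatusCode), errs.ErrNotFound) {
+//		// treat as a missing manifest or blob
+//	}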
+func HTTPError(statusCode int) error {
+	switch statusCode {
+	case 401:
+		return fmt.Errorf("%w [http %d]", errs.ErrHTTPUnauthorized, statusCode)
+	case 403:
+		return fmt.Errorf("%w [http %d]", errs.ErrHTTPUnauthorized, statusCode)
+	case 404:
+		return fmt.Errorf("%w [http %d]", errs.ErrNotFound, statusCode)
+	case 429:
+		return fmt.Errorf("%w [http %d]", errs.ErrHTTPRateLimit, statusCode)
+	default:
+		return fmt.Errorf("%w: %s [http %d]", errs.ErrHTTPStatus, http.StatusText(statusCode), statusCode)
+	}
+}
+
+func makeRootPool(rootCAPool [][]byte, rootCADirs []string, hostname string, hostcert string) (*x509.CertPool, error) {
+	pool, err := x509.SystemCertPool()
+	if err != nil {
+		return nil, err
+	}
+	for _, ca := range rootCAPool {
+		if ok := pool.AppendCertsFromPEM(ca); !ok {
+			return nil, fmt.Errorf("failed to load ca: %s", ca)
+		}
+	}
+	for _, dir := range rootCADirs {
+		hostDir := filepath.Join(dir, hostname)
+		files, err := os.ReadDir(hostDir)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return nil, fmt.Errorf("failed to read directory %s: %w", hostDir, err)
+			}
+			continue
+		}
+		for _, f := range files {
+			if f.IsDir() {
+				continue
+			}
+			if strings.HasSuffix(f.Name(), ".crt") {
+				f := filepath.Join(hostDir, f.Name())
+				//#nosec G304 file from a known directory and extension read by the user running the command on their own host
+				cert, err := os.ReadFile(f)
+				if err != nil {
+					return nil, fmt.Errorf("failed to read %s: %w", f, err)
+				}
+				if ok := pool.AppendCertsFromPEM(cert); !ok {
+					return nil, fmt.Errorf("failed to import cert from %s", f)
+				}
+			}
+		}
+	}
+	if hostcert != "" {
+		if ok := pool.AppendCertsFromPEM([]byte(hostcert)); !ok {
+			// try to parse the certificate and generate a useful error
+			block, _ := pem.Decode([]byte(hostcert))
+			if block == nil {
+				err = fmt.Errorf("pem.Decode is nil")
+			} else {
+				_, err = x509.ParseCertificate(block.Bytes)
+			}
+			return nil, fmt.Errorf("failed to load host specific ca (registry: %s): %w: %s", hostname, err, hostcert)
+		}
+	}
+	return pool, nil
+}
+
+// sortHostsCmp returns a comparison function to sort the host list of mirrors.
+func sortHostsCmp(hosts []*clientHost, upstream string) func(i, j int) bool {
+	now := time.Now()
+	// sort by backoff first, then by priority value ascending, with the upstream registry last
+	return func(i, j int) bool {
+		if now.Before(hosts[i].backoffLast) || now.Before(hosts[j].backoffLast) {
+			return hosts[i].backoffLast.Before(hosts[j].backoffLast)
+		}
+		if hosts[i].config.Priority != hosts[j].config.Priority {
+			return hosts[i].config.Priority < hosts[j].config.Priority
+		}
+		return hosts[i].config.Name != upstream
+	}
+}
diff --git a/vendor/github.com/regclient/regclient/internal/reqmeta/data.go b/vendor/github.com/regclient/regclient/internal/reqmeta/data.go
new file mode 100644
index 000000000..2a7b94a90
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/reqmeta/data.go
@@ -0,0 +1,88 @@
+// Package reqmeta provides metadata on requests for prioritizing with a pqueue.
+package reqmeta

+type Data struct {
+	Kind Kind
+	Size int64
+}
+
+type Kind int
+
+const (
+	Unknown Kind = iota
+	Head
+	Manifest
+	Query
+	Blob
+)
+
+const (
+	smallLimit = 4194304 // 4MiB
+	largePct   = 0.9     // anything above 90% of largest queued entry size is large
+)
+
+func DataNext(queued, active []*Data) int {
+	if len(queued) == 0 {
+		return -1
+	}
+	// Reserve one slot for a small request, then split the remaining concurrency 50/50 between large and old requests (integer division rounds down).
+	// e.g. with len(active) = 2 this call is the 3rd concurrent request: one slot reserved for a small entry leaves two, split between large and old, for a large goal of len(active)/2 = 1.
+	largeGoal := len(active) / 2
+	largeI := 0
+	var largeSize int64
+	if largeGoal > 0 {
+		// find the largest queued blob requests
+		for i, cur := range queued {
+			if cur.Kind == Blob && cur.Size > largeSize {
+				largeI = i
+				largeSize = cur.Size
+			}
+		}
+	}
+	largeCutoff := int64(float64(largeSize) * largePct)
+	// count active requests by type
+	small := 0
+	large := 0
+	old := 0
+	for _, cur := range active {
+		if cur.Kind != Blob && cur.Size <= smallLimit {
+			small++
+		} else if cur.Kind == Blob && largeSize > 0 && cur.Size >= largeCutoff {
+			large++
+		} else {
+			old++
+		}
+	}
+	// if there is at least one active, and none are small, return the best small entry if available.
+	if len(active) > 0 && small == 0 {
+		var sizeI int64
+		bestI := -1
+		kindI := Unknown
+		for i, cur := range queued {
+			// the small search skips blobs and large requests
+			if cur.Kind == Blob || cur.Size > smallLimit {
+				continue
+			}
+			// the best small entry is the:
+			// - first one found if no other matches
+			// - one with a better Kind (Head > Manifest > Query)
+			// - one with the same kind but smaller request
+			if bestI < 0 ||
+				(cur.Kind != Unknown && (kindI == Unknown || cur.Kind < kindI)) ||
+				(cur.Kind == kindI && cur.Size > 0 && (cur.Size < sizeI || sizeI <= 0)) {
+				bestI = i
+				kindI = cur.Kind
+				sizeI = cur.Size
+			}
+		}
+		if bestI >= 0 {
+			return bestI
+		}
+	}
+	// If another large request is wanted, prefer the biggest queued blob to minimize the size of the last running blob.
+	if largeGoal > 0 && large < largeGoal && largeSize > 0 {
+		return largeI
+	}
+	// enough small and large, or none available, so return the oldest queued entry to avoid starvation.
+	return 0
+}
diff --git a/vendor/github.com/regclient/regclient/internal/sloghandle/logrus.go b/vendor/github.com/regclient/regclient/internal/sloghandle/logrus.go
new file mode 100644
index 000000000..ac549d18a
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/sloghandle/logrus.go
@@ -0,0 +1,125 @@
+//go:build !wasm
+
+// Package sloghandle provides a transition handler for migrating from logrus to slog.
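+//
+// A usage sketch, assuming an existing *logrus.Logger named logrusLogger:
+//
+//	logger := slog.New(sloghandle.Logrus(logrusLogger))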
+package sloghandle + +import ( + "context" + "log/slog" + "strings" + + "github.com/sirupsen/logrus" + + "github.com/regclient/regclient/types" +) + +func Logrus(logger *logrus.Logger) *logrusHandler { + return &logrusHandler{ + logger: logger, + } +} + +type logrusHandler struct { + logger *logrus.Logger + attrs []slog.Attr + groups []string +} + +func (h *logrusHandler) Enabled(_ context.Context, level slog.Level) bool { + ll := h.logger.GetLevel() + if curLevel, ok := logrusToSlog[ll]; ok { + return level >= curLevel + } + return true +} + +func (h *logrusHandler) Handle(ctx context.Context, r slog.Record) error { + log := logrus.NewEntry(h.logger).WithContext(ctx) + if !r.Time.IsZero() { + log = log.WithTime(r.Time) + } + fields := logrus.Fields{} + for _, a := range h.attrs { + if a.Key != "" { + fields[a.Key] = a.Value + } + } + r.Attrs(func(a slog.Attr) bool { + if a.Key != "" { + fields[a.Key] = a.Value + } + return true + }) + if len(fields) > 0 { + log = log.WithFields(fields) + } + log.Log(slogToLogrus(r.Level), r.Message) + return nil +} + +func (h *logrusHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + ret := h.clone() + prefix := "" + if len(h.groups) > 0 { + prefix = strings.Join(h.groups, ":") + ":" + } + for _, a := range attrs { + if a.Key == "" { + continue + } + ret.attrs = append(ret.attrs, slog.Attr{ + Key: prefix + a.Key, + Value: a.Value, + }) + } + return ret +} + +func (h *logrusHandler) WithGroup(name string) slog.Handler { + if name == "" { + return h + } + ret := h.clone() + ret.groups = append(ret.groups, name) + return ret +} + +func (h *logrusHandler) clone() *logrusHandler { + attrs := make([]slog.Attr, len(h.attrs)) + copy(attrs, h.attrs) + groups := make([]string, len(h.groups)) + copy(groups, h.groups) + return &logrusHandler{ + logger: h.logger, + attrs: attrs, + groups: groups, + } +} + +var logrusToSlog = map[logrus.Level]slog.Level{ + logrus.TraceLevel: types.LevelTrace, + logrus.DebugLevel: slog.LevelDebug, + logrus.InfoLevel: slog.LevelInfo, + logrus.WarnLevel: slog.LevelWarn, + logrus.ErrorLevel: slog.LevelError, + logrus.FatalLevel: slog.LevelError + 4, + logrus.PanicLevel: slog.LevelError + 8, +} + +func slogToLogrus(level slog.Level) logrus.Level { + if level <= types.LevelTrace { + return logrus.TraceLevel + } else if level <= slog.LevelDebug { + return logrus.DebugLevel + } else if level <= slog.LevelInfo { + return logrus.InfoLevel + } else if level <= slog.LevelWarn { + return logrus.WarnLevel + } else if level <= slog.LevelError { + return logrus.ErrorLevel + } else if level <= slog.LevelError+4 { + return logrus.FatalLevel + } else { + return logrus.PanicLevel + } +} diff --git a/vendor/github.com/regclient/regclient/internal/strparse/strparse.go b/vendor/github.com/regclient/regclient/internal/strparse/strparse.go new file mode 100644 index 000000000..f67fdafe9 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/strparse/strparse.go @@ -0,0 +1,91 @@ +// Package strparse is used to parse strings +package strparse + +import ( + "fmt" + + "github.com/regclient/regclient/types/errs" +) + +// SplitCSKV splits a comma separated key=value list into a map +func SplitCSKV(s string) (map[string]string, error) { + state := "key" + key := "" + val := "" + result := map[string]string{} + procKV := func() { + if key != "" { + result[key] = val + } + state = "key" + key = "" + val = "" + } + for _, c := range s { + switch state { + case "key": + switch c { + case '"': + state = "keyQuote" + case '\\': + state = "keyEscape" + case 
'=': + state = "val" + case ',': + procKV() + default: + key = key + string(c) + } + case "keyQuote": + switch c { + case '"': + state = "key" + case '\\': + state = "keyEscapeQuote" + default: + key = key + string(c) + } + case "keyEscape": + key = key + string(c) + state = "key" + case "keyEscapeQuote": + key = key + string(c) + state = "keyQuote" + case "val": + switch c { + case '"': + state = "valQuote" + case ',': + procKV() + case '\\': + state = "valEscape" + default: + val = val + string(c) + } + case "valQuote": + switch c { + case '"': + state = "val" + case '\\': + state = "valEscapeQuote" + default: + val = val + string(c) + } + case "valEscape": + val = val + string(c) + state = "val" + case "valEscapeQuote": + val = val + string(c) + state = "valQuote" + default: + return nil, fmt.Errorf("unhandled state: %s", state) + } + } + switch state { + case "val", "key": + procKV() + default: + return nil, fmt.Errorf("string parsing failed, end state: %s%.0w", state, errs.ErrParsingFailed) + } + return result, nil +} diff --git a/vendor/github.com/regclient/regclient/internal/timejson/timejson.go b/vendor/github.com/regclient/regclient/internal/timejson/timejson.go new file mode 100644 index 000000000..c5aad642d --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/timejson/timejson.go @@ -0,0 +1,41 @@ +// Package timejson extends time methods with marshal/unmarshal for json +package timejson + +import ( + "encoding/json" + "errors" + "time" +) + +var errInvalid = errors.New("invalid duration") + +// Duration is an alias to time.Duration +// Implementation taken from https://stackoverflow.com/questions/48050945/how-to-unmarshal-json-into-durations +type Duration time.Duration + +// MarshalJSON converts a duration to json +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Duration(d).String()) +} + +// UnmarshalJSON converts json to a duration +func (d *Duration) UnmarshalJSON(b []byte) error { + var v any + if err := json.Unmarshal(b, &v); err != nil { + return err + } + switch value := v.(type) { + case float64: + *d = Duration(time.Duration(value)) + return nil + case string: + timeDur, err := time.ParseDuration(value) + if err != nil { + return err + } + *d = Duration(timeDur) + return nil + default: + return errInvalid + } +} diff --git a/vendor/github.com/regclient/regclient/internal/units/size.go b/vendor/github.com/regclient/regclient/internal/units/size.go new file mode 100644 index 000000000..b1893c9f6 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/units/size.go @@ -0,0 +1,59 @@ +// Package units is taken from https://github.com/docker/go-units +package units + +// Copyright 2015 Docker, Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// https://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+import (
+	"fmt"
+)
+
+var (
+	decimalAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+	binaryAbbrs  = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+)
+
+func getSizeAndUnit(size float64, base float64, unitList []string) (float64, string) {
+	i := 0
+	unitsLimit := len(unitList) - 1
+	for size >= base && i < unitsLimit {
+		size = size / base
+		i++
+	}
+	return size, unitList[i]
+}
+
+// CustomSize returns a human-readable approximation of a size using custom format.
+func CustomSize(format string, size float64, base float64, unitList []string) string {
+	size, unit := getSizeAndUnit(size, base, unitList)
+	return fmt.Sprintf(format, size, unit)
+}
+
+// HumanSizeWithPrecision allows the size to be in any precision.
+func HumanSizeWithPrecision(size float64, width, precision int) string {
+	size, unit := getSizeAndUnit(size, 1000.0, decimalAbbrs)
+	return fmt.Sprintf("%*.*f%s", width, precision, size, unit)
+}
+
+// HumanSize returns a human-readable approximation of a size
+// with a width of 5 (e.g. "2.746MB", "796.0KB").
+func HumanSize(size float64) string {
+	return HumanSizeWithPrecision(size, 5, 3)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (e.g. "44.2kiB", "17.6MiB").
+func BytesSize(size float64) string {
+	return CustomSize("%5.3f%s", size, 1024.0, binaryAbbrs)
+}
diff --git a/vendor/github.com/regclient/regclient/internal/version/version.go b/vendor/github.com/regclient/regclient/internal/version/version.go
new file mode 100644
index 000000000..5e5715e58
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/version/version.go
@@ -0,0 +1,32 @@
+// Package version returns details on the Go and Git repo used in the build
+package version
+
+import (
+	"bytes"
+	"fmt"
+	"text/tabwriter"
+)
+
+const (
+	stateClean    = "clean"
+	stateDirty    = "dirty"
+	unknown       = "unknown"
+	biVCSDate     = "vcs.time"
+	biVCSCommit   = "vcs.revision"
+	biVCSModified = "vcs.modified"
+)
+
+func (i Info) MarshalPretty() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
+	fmt.Fprintf(tw, "VCSTag:\t%s\n", i.VCSTag)
+	fmt.Fprintf(tw, "VCSRef:\t%s\n", i.VCSRef)
+	fmt.Fprintf(tw, "VCSCommit:\t%s\n", i.VCSCommit)
+	fmt.Fprintf(tw, "VCSState:\t%s\n", i.VCSState)
+	fmt.Fprintf(tw, "VCSDate:\t%s\n", i.VCSDate)
+	fmt.Fprintf(tw, "Platform:\t%s\n", i.Platform)
+	fmt.Fprintf(tw, "GoVer:\t%s\n", i.GoVer)
+	fmt.Fprintf(tw, "GoCompiler:\t%s\n", i.GoCompiler)
+	err := tw.Flush()
+	return buf.Bytes(), err
+}
diff --git a/vendor/github.com/regclient/regclient/internal/version/version_buildinfo.go b/vendor/github.com/regclient/regclient/internal/version/version_buildinfo.go
new file mode 100644
index 000000000..cd7ce60c2
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/version/version_buildinfo.go
@@ -0,0 +1,74 @@
+//go:build go1.18
+
+package version
+
+import (
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"time"
+)
+
+var vcsTag = ""
+
+type Info struct {
+	GoVer      string `json:"goVersion"`  // go version
+	GoCompiler string `json:"goCompiler"` // go compiler
+	Platform   string `json:"platform"`   // os/arch
+	VCSCommit  string `json:"vcsCommit"`  // commit sha
+	VCSDate    string `json:"vcsDate"`    // commit date in RFC3339 format
+	VCSRef     string `json:"vcsRef"`     // commit sha + dirty if state is not clean
+	VCSState   string `json:"vcsState"`   // clean or dirty
+	VCSTag     string `json:"vcsTag"`     // tag is not available from Go
+	Debug      *debug.BuildInfo
`json:"debug,omitempty"` // build info debugging data +} + +func GetInfo() Info { + i := Info{ + GoVer: unknown, + Platform: unknown, + VCSCommit: unknown, + VCSDate: unknown, + VCSRef: unknown, + VCSState: unknown, + VCSTag: vcsTag, + } + + i.GoVer = runtime.Version() + i.GoCompiler = runtime.Compiler + i.Platform = fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH) + + if bi, ok := debug.ReadBuildInfo(); ok && bi != nil { + i.Debug = bi + if i.VCSTag == "" { + i.VCSTag = bi.Main.Version + } + date := biSetting(bi, biVCSDate) + if t, err := time.Parse(time.RFC3339, date); err == nil { + i.VCSDate = t.UTC().Format(time.RFC3339) + } + i.VCSCommit = biSetting(bi, biVCSCommit) + i.VCSRef = i.VCSCommit + modified := biSetting(bi, biVCSModified) + if modified == "true" { + i.VCSState = stateDirty + i.VCSRef += "-" + stateDirty + } else if modified == "false" { + i.VCSState = stateClean + } + } + + return i +} + +func biSetting(bi *debug.BuildInfo, key string) string { + if bi == nil { + return unknown + } + for _, setting := range bi.Settings { + if setting.Key == key { + return setting.Value + } + } + return unknown +} diff --git a/vendor/github.com/regclient/regclient/internal/version/version_old.go b/vendor/github.com/regclient/regclient/internal/version/version_old.go new file mode 100644 index 000000000..beb1c42ab --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/version/version_old.go @@ -0,0 +1,37 @@ +//go:build !go1.18 + +package version + +import ( + "fmt" + "runtime" +) + +type Info struct { + GoVer string `json:"goVersion"` // go version + GoCompiler string `json:"goCompiler"` // go compiler + Platform string `json:"platform"` // os/arch + VCSCommit string `json:"vcsCommit"` // commit sha + VCSDate string `json:"vcsDate"` // commit date in RFC3339 format + VCSRef string `json:"vcsRef"` // commit sha + dirty if state is not clean + VCSState string `json:"vcsState"` // clean or dirty + VCSTag string `json:"vcsTag"` // tag +} + +func GetInfo() Info { + i := Info{ + GoVer: unknown, + Platform: unknown, + VCSCommit: unknown, + VCSDate: unknown, + VCSRef: unknown, + VCSState: unknown, + VCSTag: "", + } + + i.GoVer = runtime.Version() + i.GoCompiler = runtime.Compiler + i.Platform = fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH) + + return i +} diff --git a/vendor/github.com/regclient/regclient/manifest.go b/vendor/github.com/regclient/regclient/manifest.go new file mode 100644 index 000000000..ef871ebf1 --- /dev/null +++ b/vendor/github.com/regclient/regclient/manifest.go @@ -0,0 +1,206 @@ +package regclient + +import ( + "context" + "fmt" + "log/slog" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/platform" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/warning" +) + +type manifestOpt struct { + d descriptor.Descriptor + platform *platform.Platform + schemeOpts []scheme.ManifestOpts + requireDigest bool +} + +// ManifestOpts define options for the Manifest* commands. +type ManifestOpts func(*manifestOpt) + +// WithManifest passes a manifest to ManifestDelete. +func WithManifest(m manifest.Manifest) ManifestOpts { + return func(opts *manifestOpt) { + opts.schemeOpts = append(opts.schemeOpts, scheme.WithManifest(m)) + } +} + +// WithManifestCheckReferrers checks for referrers field on ManifestDelete. 
+// This will update the client managed referrer listing.
+func WithManifestCheckReferrers() ManifestOpts {
+	return func(opts *manifestOpt) {
+		opts.schemeOpts = append(opts.schemeOpts, scheme.WithManifestCheckReferrers())
+	}
+}
+
+// WithManifestChild for ManifestPut indicates the manifest is not the top level manifest being copied.
+// This is used by the ocidir scheme to determine what entries to include in the index.json.
+func WithManifestChild() ManifestOpts {
+	return func(opts *manifestOpt) {
+		opts.schemeOpts = append(opts.schemeOpts, scheme.WithManifestChild())
+	}
+}
+
+// WithManifestDesc includes the descriptor for ManifestGet.
+// This is used to automatically extract a Data field if available.
+func WithManifestDesc(d descriptor.Descriptor) ManifestOpts {
+	return func(opts *manifestOpt) {
+		opts.d = d
+	}
+}
+
+// WithManifestPlatform resolves the platform specific manifest on Get and Head requests.
+// This causes an additional GET query to a registry when an Index or Manifest List is encountered.
+// This option is ignored if the retrieved manifest is not an Index or Manifest List.
+func WithManifestPlatform(p platform.Platform) ManifestOpts {
+	return func(opts *manifestOpt) {
+		opts.platform = &p
+	}
+}
+
+// WithManifestRequireDigest falls back from a HEAD to a GET request when digest headers aren't received.
+func WithManifestRequireDigest() ManifestOpts {
+	return func(opts *manifestOpt) {
+		opts.requireDigest = true
+	}
+}
+
+// ManifestDelete removes a manifest, including all tags pointing to that manifest.
+// The reference must include the digest to delete (see TagDelete for deleting a tag).
+func (rc *RegClient) ManifestDelete(ctx context.Context, r ref.Ref, opts ...ManifestOpts) error {
+	if !r.IsSet() {
+		return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference)
+	}
+	opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}}
+	for _, fn := range opts {
+		fn(&opt)
+	}
+	schemeAPI, err := rc.schemeGet(r.Scheme)
+	if err != nil {
+		return err
+	}
+	return schemeAPI.ManifestDelete(ctx, r, opt.schemeOpts...)
+}
+
+// ManifestGet retrieves a manifest.
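+// A hedged usage sketch (the reference string is an example):
+//
+//	r, err := ref.New("registry.example.org/library/alpine:latest")
+//	if err != nil {
+//		return err
+//	}
+//	m, err := rc.ManifestGet(ctx, r)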
+func (rc *RegClient) ManifestGet(ctx context.Context, r ref.Ref, opts ...ManifestOpts) (manifest.Manifest, error) {
+	if !r.IsSet() {
+		return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference)
+	}
+	opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}}
+	for _, fn := range opts {
+		fn(&opt)
+	}
+	if opt.d.Digest != "" {
+		r = r.AddDigest(opt.d.Digest.String())
+		data, err := opt.d.GetData()
+		if err == nil {
+			return manifest.New(
+				manifest.WithDesc(opt.d),
+				manifest.WithRaw(data),
+				manifest.WithRef(r),
+			)
+		}
+	}
+	// dedup warnings
+	if w := warning.FromContext(ctx); w == nil {
+		ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()})
+	}
+	schemeAPI, err := rc.schemeGet(r.Scheme)
+	if err != nil {
+		return nil, err
+	}
+	m, err := schemeAPI.ManifestGet(ctx, r)
+	if err != nil {
+		return m, err
+	}
+	if opt.platform != nil && !m.IsList() {
+		rc.slog.Debug("ignoring platform option, image is not an index",
+			slog.String("platform", opt.platform.String()),
+			slog.String("ref", r.CommonName()))
+	}
+	// this will loop to handle a nested index
+	for opt.platform != nil && m.IsList() {
+		d, err := manifest.GetPlatformDesc(m, opt.platform)
+		if err != nil {
+			return m, err
+		}
+		r = r.SetDigest(d.Digest.String())
+		m, err = schemeAPI.ManifestGet(ctx, r)
+		if err != nil {
+			return m, err
+		}
+	}
+	return m, err
+}
+
+// ManifestHead queries for the existence of a manifest and returns metadata (digest, media-type, size).
+func (rc *RegClient) ManifestHead(ctx context.Context, r ref.Ref, opts ...ManifestOpts) (manifest.Manifest, error) {
+	if !r.IsSet() {
+		return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference)
+	}
+	opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}}
+	for _, fn := range opts {
+		fn(&opt)
+	}
+	// dedup warnings
+	if w := warning.FromContext(ctx); w == nil {
+		ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()})
+	}
+	schemeAPI, err := rc.schemeGet(r.Scheme)
+	if err != nil {
+		return nil, err
+	}
+	m, err := schemeAPI.ManifestHead(ctx, r)
+	if err != nil {
+		return m, err
+	}
+	if opt.platform != nil && !m.IsList() {
+		rc.slog.Debug("ignoring platform option, image is not an index",
+			slog.String("platform", opt.platform.String()),
+			slog.String("ref", r.CommonName()))
+	}
+	// this will loop to handle a nested index
+	for opt.platform != nil && m.IsList() {
+		if !m.IsSet() {
+			m, err = schemeAPI.ManifestGet(ctx, r)
+			if err != nil {
+				return m, err
+			}
+		}
+		d, err := manifest.GetPlatformDesc(m, opt.platform)
+		if err != nil {
+			return m, err
+		}
+		r = r.SetDigest(d.Digest.String())
+		m, err = schemeAPI.ManifestHead(ctx, r)
+		if err != nil {
+			return m, err
+		}
+	}
+	if opt.requireDigest && m.GetDescriptor().Digest.String() == "" {
+		m, err = schemeAPI.ManifestGet(ctx, r)
+	}
+	return m, err
+}
+
+// ManifestPut pushes a manifest.
+// Any descriptors referenced by the manifest typically need to be pushed first.
+func (rc *RegClient) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...ManifestOpts) error {
+	if !r.IsSetRepo() {
+		return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference)
+	}
+	opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}}
+	for _, fn := range opts {
+		fn(&opt)
+	}
+	schemeAPI, err := rc.schemeGet(r.Scheme)
+	if err != nil {
+		return err
+	}
+	return schemeAPI.ManifestPut(ctx, r, m, opt.schemeOpts...)
+} diff --git a/vendor/github.com/regclient/regclient/ping.go b/vendor/github.com/regclient/regclient/ping.go new file mode 100644 index 000000000..4298f7ac3 --- /dev/null +++ b/vendor/github.com/regclient/regclient/ping.go @@ -0,0 +1,18 @@ +package regclient + +import ( + "context" + + "github.com/regclient/regclient/types/ping" + "github.com/regclient/regclient/types/ref" +) + +// Ping verifies access to a registry or equivalent. +func (rc *RegClient) Ping(ctx context.Context, r ref.Ref) (ping.Result, error) { + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return ping.Result{}, err + } + + return schemeAPI.Ping(ctx, r) +} diff --git a/vendor/github.com/regclient/regclient/pkg/archive/archive.go b/vendor/github.com/regclient/regclient/pkg/archive/archive.go new file mode 100644 index 000000000..d183afd4e --- /dev/null +++ b/vendor/github.com/regclient/regclient/pkg/archive/archive.go @@ -0,0 +1,2 @@ +// Package archive is used to read and write tar files +package archive diff --git a/vendor/github.com/regclient/regclient/pkg/archive/compress.go b/vendor/github.com/regclient/regclient/pkg/archive/compress.go new file mode 100644 index 000000000..84199943b --- /dev/null +++ b/vendor/github.com/regclient/regclient/pkg/archive/compress.go @@ -0,0 +1,160 @@ +package archive + +import ( + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/zstd" + "github.com/ulikunitz/xz" +) + +// CompressType identifies the detected compression type +type CompressType int + +const ( + CompressNone CompressType = iota // uncompressed or unable to detect compression + CompressBzip2 // bzip2 + CompressGzip // gzip + CompressXz // xz + CompressZstd // zstd +) + +// compressHeaders are used to detect the compression type +var compressHeaders = map[CompressType][]byte{ + CompressBzip2: []byte("\x42\x5A\x68"), + CompressGzip: []byte("\x1F\x8B\x08"), + CompressXz: []byte("\xFD\x37\x7A\x58\x5A\x00"), + CompressZstd: []byte("\x28\xB5\x2F\xFD"), +} + +func Compress(r io.Reader, oComp CompressType) (io.ReadCloser, error) { + switch oComp { + // note, bzip2 compression is not supported + case CompressGzip: + return writeToRead(r, newGzipWriter) + case CompressXz: + return writeToRead(r, xz.NewWriter) + case CompressZstd: + return writeToRead(r, newZstdWriter) + case CompressNone: + return io.NopCloser(r), nil + default: + return nil, ErrUnknownType + } +} + +// newGzipWriter generates a writer and an always nil error. +func newGzipWriter(w io.Writer) (io.WriteCloser, error) { + return gzip.NewWriter(w), nil +} + +// newZstdWriter generates a writer with the default options. +func newZstdWriter(w io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(w) +} + +// writeToRead uses a pipe + goroutine + copy to switch from a writer to a reader. 
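+// Any error from the writer side is propagated through pw.CloseWithError, so
+// it surfaces on the next Read of the returned pipe. For example, Compress
+// wraps an uncompressed reader this way (the input is illustrative):
+//
+//	zr, _ := Compress(bytes.NewReader(data), CompressGzip)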
+func writeToRead[wc io.WriteCloser](src io.Reader, newWriterFn func(io.Writer) (wc, error)) (io.ReadCloser, error) {
+	pr, pw := io.Pipe()
+	go func() {
+		// buffer output to avoid lots of small reads
+		bw := bufio.NewWriterSize(pw, 2<<16)
+		dest, err := newWriterFn(bw)
+		if err != nil {
+			_ = pw.CloseWithError(err)
+			return
+		}
+		if _, err := io.Copy(dest, src); err != nil {
+			_ = pw.CloseWithError(err)
+		}
+		if err := dest.Close(); err != nil {
+			_ = pw.CloseWithError(err)
+		}
+		if err := bw.Flush(); err != nil {
+			_ = pw.CloseWithError(err)
+		}
+		_ = pw.Close()
+	}()
+	return pr, nil
+}
+
+// Decompress detects and decompresses bzip2, gzip, xz, and zstd streams.
+func Decompress(r io.Reader) (io.Reader, error) {
+	// create bufio to peek at the first few bytes
+	br := bufio.NewReader(r)
+	head, err := br.Peek(10)
+	if err != nil && !errors.Is(err, io.EOF) {
+		return br, fmt.Errorf("failed to detect compression: %w", err)
+	}
+
+	// compare peeked data against known compression types
+	switch DetectCompression(head) {
+	case CompressBzip2:
+		return bzip2.NewReader(br), nil
+	case CompressGzip:
+		return gzip.NewReader(br)
+	case CompressXz:
+		return xz.NewReader(br)
+	case CompressZstd:
+		return zstd.NewReader(br)
+	default:
+		return br, nil
+	}
+}
+
+// DetectCompression identifies the compression type based on the first few bytes
+func DetectCompression(head []byte) CompressType {
+	for c, b := range compressHeaders {
+		if bytes.HasPrefix(head, b) {
+			return c
+		}
+	}
+	return CompressNone
+}
+
+func (ct CompressType) String() string {
+	mt, err := ct.MarshalText()
+	if err != nil {
+		return "unknown"
+	}
+	return string(mt)
+}
+
+func (ct CompressType) MarshalText() ([]byte, error) {
+	switch ct {
+	case CompressNone:
+		return []byte("none"), nil
+	case CompressBzip2:
+		return []byte("bzip2"), nil
+	case CompressGzip:
+		return []byte("gzip"), nil
+	case CompressXz:
+		return []byte("xz"), nil
+	case CompressZstd:
+		return []byte("zstd"), nil
+	}
+	return nil, fmt.Errorf("unknown compression type")
+}
+
+func (ct *CompressType) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "none":
+		*ct = CompressNone
+	case "bzip2":
+		*ct = CompressBzip2
+	case "gzip":
+		*ct = CompressGzip
+	case "xz":
+		*ct = CompressXz
+	case "zstd":
+		*ct = CompressZstd
+	default:
+		return fmt.Errorf("unknown compression type %s", string(text))
+	}
+	return nil
+}
diff --git a/vendor/github.com/regclient/regclient/pkg/archive/errors.go b/vendor/github.com/regclient/regclient/pkg/archive/errors.go
new file mode 100644
index 000000000..b31989005
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/pkg/archive/errors.go
@@ -0,0 +1,13 @@
+package archive
+
+import "errors"
+
+var (
+	// ErrNotImplemented used for routines that need to be developed still
+	ErrNotImplemented = errors.New("this archive routine is not implemented yet")
+	// ErrUnknownType used for unknown compression types
+	ErrUnknownType = errors.New("unknown compression type")
+	// ErrXzUnsupported because there isn't a Go package for this and I'm
+	// avoiding dependencies on external binaries
+	ErrXzUnsupported = errors.New("xz compression is currently unsupported")
+)
diff --git a/vendor/github.com/regclient/regclient/pkg/archive/tar.go b/vendor/github.com/regclient/regclient/pkg/archive/tar.go
new file mode 100644
index 000000000..b6a18ce3b
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/pkg/archive/tar.go
@@ -0,0 +1,170 @@
+package archive
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"context"
+	"fmt"
+	"io"
+	"io/fs"
+	"math"
+	"os"
"path/filepath" + "time" +) + +// TarOpts configures options for Create/Extract tar +type TarOpts func(*tarOpts) + +// TODO: add support for compressed files with bzip +type tarOpts struct { + // allowRelative bool // allow relative paths outside of target folder + compress string +} + +// TarCompressGzip option to use gzip compression on tar files +func TarCompressGzip(to *tarOpts) { + to.compress = "gzip" +} + +// TarUncompressed option to tar (noop) +func TarUncompressed(to *tarOpts) { +} + +// TODO: add option for full path or to adjust the relative path + +// Tar creation +func Tar(ctx context.Context, path string, w io.Writer, opts ...TarOpts) error { + to := tarOpts{} + for _, opt := range opts { + opt(&to) + } + + twOut := w + if to.compress == "gzip" { + gw := gzip.NewWriter(w) + defer gw.Close() + twOut = gw + } + + tw := tar.NewWriter(twOut) + defer tw.Close() + + // walk the path performing a recursive tar + err := filepath.Walk(path, func(file string, fi os.FileInfo, err error) error { + // return any errors filepath encounters accessing the file + if err != nil { + return err + } + + // TODO: handle symlinks, security attributes, hard links + // TODO: add options for file owner and timestamps + // TODO: add options to override time, or disable access/change stamps + + // adjust for relative path + relPath, err := filepath.Rel(path, file) + if err != nil || relPath == "." { + return nil + } + + header, err := tar.FileInfoHeader(fi, relPath) + if err != nil { + return err + } + + header.Format = tar.FormatPAX + header.Name = filepath.ToSlash(relPath) + header.AccessTime = time.Time{} + header.ChangeTime = time.Time{} + header.ModTime = header.ModTime.Truncate(time.Second) + + if err = tw.WriteHeader(header); err != nil { + return err + } + + // open file and copy contents into tar writer + if header.Typeflag == tar.TypeReg && header.Size > 0 { + //#nosec G304 filename is limited to provided path directory + f, err := os.Open(file) + if err != nil { + return err + } + if _, err = io.Copy(tw, f); err != nil { + return err + } + err = f.Close() + if err != nil { + return fmt.Errorf("failed to close file: %w", err) + } + } + return nil + }) + return err +} + +// Extract Tar +func Extract(ctx context.Context, path string, r io.Reader, opts ...TarOpts) error { + to := tarOpts{} + for _, opt := range opts { + opt(&to) + } + + // verify path exists + fi, err := os.Stat(path) + if err != nil { + return err + } + if !fi.IsDir() { + return fmt.Errorf("extract path must be a directory: \"%s\"", path) + } + + // decompress + rd, err := Decompress(r) + if err != nil { + return err + } + + rt := tar.NewReader(rd) + for { + hdr, err := rt.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + // join a cleaned version of the filename with the path + fn := filepath.Join(path, filepath.Clean("/"+hdr.Name)) + switch hdr.Typeflag { + case tar.TypeDir: + if hdr.Mode < 0 || hdr.Mode > math.MaxUint32 { + return fmt.Errorf("integer conversion overflow/underflow (file mode = %d)", hdr.Mode) + } + err = os.MkdirAll(fn, fs.FileMode(hdr.Mode)) + if err != nil { + return err + } + case tar.TypeReg: + // TODO: configure file mode, creation timestamp, etc + //#nosec G304 filename is limited to provided path directory + fh, err := os.Create(fn) + if err != nil { + return err + } + n, err := io.CopyN(fh, rt, hdr.Size) + errC := fh.Close() + if err != nil { + return err + } + if errC != nil { + return fmt.Errorf("failed to close file: %w", errC) + } + if n != hdr.Size { + return 
fmt.Errorf("size mismatch extracting \"%s\", expected %d, extracted %d", hdr.Name, hdr.Size, n) + } + // TODO: handle other tar types (symlinks, etc) + } + } + + return nil +} diff --git a/vendor/github.com/regclient/regclient/referrer.go b/vendor/github.com/regclient/regclient/referrer.go new file mode 100644 index 000000000..2c168124b --- /dev/null +++ b/vendor/github.com/regclient/regclient/referrer.go @@ -0,0 +1,57 @@ +package regclient + +import ( + "context" + "fmt" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/platform" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/referrer" + "github.com/regclient/regclient/types/warning" +) + +// ReferrerList retrieves a list of referrers to a manifest. +// The descriptor list should contain manifests that each have a subject field matching the requested ref. +func (rc *RegClient) ReferrerList(ctx context.Context, rSubject ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { + if !rSubject.IsSet() { + return referrer.ReferrerList{}, fmt.Errorf("ref is not set: %s%.0w", rSubject.CommonName(), errs.ErrInvalidReference) + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + // set the digest on the subject reference + config := scheme.ReferrerConfig{} + for _, opt := range opts { + opt(&config) + } + if rSubject.Digest == "" || config.Platform != "" { + mo := []ManifestOpts{WithManifestRequireDigest()} + if config.Platform != "" { + p, err := platform.Parse(config.Platform) + if err != nil { + return referrer.ReferrerList{}, fmt.Errorf("failed to lookup referrer platform: %w", err) + } + mo = append(mo, WithManifestPlatform(p)) + } + m, err := rc.ManifestHead(ctx, rSubject, mo...) + if err != nil { + return referrer.ReferrerList{}, fmt.Errorf("failed to get digest for subject: %w", err) + } + rSubject = rSubject.SetDigest(m.GetDescriptor().Digest.String()) + } + // lookup the scheme for the appropriate ref + var r ref.Ref + if config.SrcRepo.IsSet() { + r = config.SrcRepo + } else { + r = rSubject + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return referrer.ReferrerList{}, err + } + return schemeAPI.ReferrerList(ctx, rSubject, opts...) +} diff --git a/vendor/github.com/regclient/regclient/regclient.go b/vendor/github.com/regclient/regclient/regclient.go new file mode 100644 index 000000000..60808605f --- /dev/null +++ b/vendor/github.com/regclient/regclient/regclient.go @@ -0,0 +1,274 @@ +// Package regclient is used to access OCI registries. +package regclient + +import ( + "fmt" + "io" + "log/slog" + "time" + + "github.com/regclient/regclient/config" + "github.com/regclient/regclient/internal/version" + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/scheme/ocidir" + "github.com/regclient/regclient/scheme/reg" +) + +const ( + // DefaultUserAgent sets the header on http requests. + DefaultUserAgent = "regclient/regclient" + // DockerCertDir default location for docker certs. + DockerCertDir = "/etc/docker/certs.d" + // DockerRegistry is the well known name of Docker Hub, "docker.io". + DockerRegistry = config.DockerRegistry + // DockerRegistryAuth is the name of Docker Hub seen in docker's config.json. + DockerRegistryAuth = config.DockerRegistryAuth + // DockerRegistryDNS is the actual registry DNS name for Docker Hub. 
+	DockerRegistryDNS = config.DockerRegistryDNS
+)
+
+// RegClient is used to access OCI distribution-spec registries.
+type RegClient struct {
+	hosts       map[string]*config.Host
+	hostDefault *config.Host
+	regOpts     []reg.Opts
+	schemes     map[string]scheme.API
+	slog        *slog.Logger
+	userAgent   string
+}
+
+// Opt functions are used by [New] to create a [*RegClient].
+type Opt func(*RegClient)
+
+// New returns a registry client.
+func New(opts ...Opt) *RegClient {
+	rc := RegClient{
+		hosts:     map[string]*config.Host{},
+		userAgent: DefaultUserAgent,
+		regOpts:   []reg.Opts{},
+		schemes:   map[string]scheme.API{},
+		slog:      slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{})),
+	}
+
+	info := version.GetInfo()
+	if info.VCSTag != "" {
+		rc.userAgent = fmt.Sprintf("%s (%s)", rc.userAgent, info.VCSTag)
+	} else {
+		rc.userAgent = fmt.Sprintf("%s (%s)", rc.userAgent, info.VCSRef)
+	}
+
+	// inject Docker Hub settings
+	_ = rc.hostSet(*config.HostNewName(config.DockerRegistryAuth))
+
+	for _, opt := range opts {
+		opt(&rc)
+	}
+
+	// configure regOpts
+	hostList := []*config.Host{}
+	for _, h := range rc.hosts {
+		hostList = append(hostList, h)
+	}
+	rc.regOpts = append(rc.regOpts,
+		reg.WithConfigHosts(hostList),
+		reg.WithConfigHostDefault(rc.hostDefault),
+		reg.WithSlog(rc.slog),
+		reg.WithUserAgent(rc.userAgent),
+	)
+
+	// set up schemes
+	rc.schemes["reg"] = reg.New(rc.regOpts...)
+	rc.schemes["ocidir"] = ocidir.New(
+		ocidir.WithSlog(rc.slog),
+	)
+
+	rc.slog.Debug("regclient initialized",
+		slog.String("VCSRef", info.VCSRef),
+		slog.String("VCSTag", info.VCSTag))
+
+	return &rc
+}
+
+// WithBlobLimit sets the max size for chunked blob uploads which get stored in memory.
+//
+// Deprecated: replace with WithRegOpts(reg.WithBlobLimit(limit)), see [WithRegOpts] and [reg.WithBlobLimit].
+//
+//go:fix inline
+func WithBlobLimit(limit int64) Opt {
+	return WithRegOpts(reg.WithBlobLimit(limit))
+}
+
+// WithBlobSize overrides default blob sizes.
+//
+// Deprecated: replace with WithRegOpts(reg.WithBlobSize(chunk, max)), see [WithRegOpts] and [reg.WithBlobSize].
+//
+//go:fix inline
+func WithBlobSize(chunk, max int64) Opt {
+	return WithRegOpts(reg.WithBlobSize(chunk, max))
+}
+
+// WithCertDir adds a path of certificates to trust similar to Docker's /etc/docker/certs.d.
+//
+// Deprecated: replace with WithRegOpts(reg.WithCertDirs(path)), see [WithRegOpts] and [reg.WithCertDirs].
+//
+//go:fix inline
+func WithCertDir(path ...string) Opt {
+	return WithRegOpts(reg.WithCertDirs(path))
+}
+
+// WithConfigHost adds a list of config host settings.
+func WithConfigHost(configHost ...config.Host) Opt {
+	return func(rc *RegClient) {
+		rc.hostLoad("host", configHost)
+	}
+}
+
+// WithConfigHostDefault adds default settings for new hosts.
+func WithConfigHostDefault(configHost config.Host) Opt {
+	return func(rc *RegClient) {
+		rc.hostDefault = &configHost
+	}
+}
+
+// WithConfigHosts adds a list of config host settings.
+//
+// Deprecated: replace with [WithConfigHost].
+//
+//go:fix inline
+func WithConfigHosts(configHosts []config.Host) Opt {
+	return WithConfigHost(configHosts...)
+}
+
+// WithDockerCerts adds certificates trusted by docker in /etc/docker/certs.d.
+func WithDockerCerts() Opt {
+	return WithRegOpts(reg.WithCertDirs([]string{DockerCertDir}))
+}
+
+// WithDockerCreds adds configuration from the user's docker config with registry logins.
+// This changes the default value from the config file, and should be added after the config file is loaded.
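+//
+// A minimal usage sketch, not from the upstream docs (the option names are
+// those defined in this vendored copy):
+//
+//	rc := regclient.New(
+//		regclient.WithDockerCreds(),
+//	)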
+func WithDockerCreds() Opt { + return func(rc *RegClient) { + configHosts, err := config.DockerLoad() + if err != nil { + rc.slog.Warn("Failed to load docker creds", + slog.String("err", err.Error())) + return + } + rc.hostLoad("docker", configHosts) + } +} + +// WithDockerCredsFile adds configuration from a named docker config file with registry logins. +// This changes the default value from the config file, and should be added after the config file is loaded. +func WithDockerCredsFile(fname string) Opt { + return func(rc *RegClient) { + configHosts, err := config.DockerLoadFile(fname) + if err != nil { + rc.slog.Warn("Failed to load docker creds", + slog.String("err", err.Error())) + return + } + rc.hostLoad("docker-file", configHosts) + } +} + +// WithRegOpts passes through opts to the reg scheme. +func WithRegOpts(opts ...reg.Opts) Opt { + return func(rc *RegClient) { + if len(opts) == 0 { + return + } + rc.regOpts = append(rc.regOpts, opts...) + } +} + +// WithRetryDelay specifies the time permitted for retry delays. +// +// Deprecated: replace with WithRegOpts(reg.WithDelay(delayInit, delayMax)), see [WithRegOpts] and [reg.WithDelay]. +// +//go:fix inline +func WithRetryDelay(delayInit, delayMax time.Duration) Opt { + return WithRegOpts(reg.WithDelay(delayInit, delayMax)) +} + +// WithRetryLimit specifies the number of retries for non-fatal errors. +// +// Deprecated: replace with WithRegOpts(reg.WithRetryLimit(retryLimit)), see [WithRegOpts] and [reg.WithRetryLimit]. +// +//go:fix inline +func WithRetryLimit(retryLimit int) Opt { + return WithRegOpts(reg.WithRetryLimit(retryLimit)) +} + +// WithSlog configures the slog Logger. +func WithSlog(slog *slog.Logger) Opt { + return func(rc *RegClient) { + rc.slog = slog + } +} + +// WithUserAgent specifies the User-Agent http header. 
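+//
+// For example, a sketch (the version string is illustrative only):
+//
+//	rc := regclient.New(regclient.WithUserAgent("diun/4"))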
+func WithUserAgent(ua string) Opt {
+	return func(rc *RegClient) {
+		rc.userAgent = ua
+	}
+}
+
+func (rc *RegClient) hostLoad(src string, hosts []config.Host) {
+	for _, configHost := range hosts {
+		if configHost.Name == "" {
+			if configHost.Pass != "" {
+				configHost.Pass = "***"
+			}
+			if configHost.Token != "" {
+				configHost.Token = "***"
+			}
+			rc.slog.Warn("Ignoring registry config without a name",
+				slog.Any("entry", configHost))
+			continue
+		}
+		if configHost.Name == DockerRegistry || configHost.Name == DockerRegistryDNS || configHost.Name == DockerRegistryAuth {
+			configHost.Name = DockerRegistry
+			if configHost.Hostname == "" || configHost.Hostname == DockerRegistry || configHost.Hostname == DockerRegistryAuth {
+				configHost.Hostname = DockerRegistryDNS
+			}
+		}
+		tls, _ := configHost.TLS.MarshalText()
+		rc.slog.Debug("Loading config",
+			slog.Int64("blobChunk", configHost.BlobChunk),
+			slog.Int64("blobMax", configHost.BlobMax),
+			slog.String("helper", configHost.CredHelper),
+			slog.String("hostname", configHost.Hostname),
+			slog.Any("mirrors", configHost.Mirrors),
+			slog.String("name", configHost.Name),
+			slog.String("pathPrefix", configHost.PathPrefix),
+			slog.Bool("repoAuth", configHost.RepoAuth),
+			slog.String("source", src),
+			slog.String("tls", string(tls)),
+			slog.String("user", configHost.User))
+		err := rc.hostSet(configHost)
+		if err != nil {
+			rc.slog.Warn("Failed to update host config",
+				slog.String("host", configHost.Name),
+				slog.String("user", configHost.User),
+				slog.String("error", err.Error()))
+		}
+	}
+}
+
+func (rc *RegClient) hostSet(newHost config.Host) error {
+	name := newHost.Name
+	var err error
+	if _, ok := rc.hosts[name]; !ok {
+		// merge newHost with default host settings
+		rc.hosts[name] = config.HostNewDefName(rc.hostDefault, name)
+		err = rc.hosts[name].Merge(newHost, nil)
+	} else {
+		// merge newHost with existing settings
+		err = rc.hosts[name].Merge(newHost, rc.slog)
+	}
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/regclient/regclient/regclient_nowasm.go b/vendor/github.com/regclient/regclient/regclient_nowasm.go
new file mode 100644
index 000000000..bde8e8e89
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/regclient_nowasm.go
@@ -0,0 +1,19 @@
+//go:build !wasm
+
+package regclient
+
+import (
+	"log/slog"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/regclient/regclient/internal/sloghandle"
+)
+
+// WithLog configures logging with a logrus Logger.
+// Note that regclient has switched to log/slog for logging and may eventually deprecate logrus support.
+func WithLog(log *logrus.Logger) Opt {
+	return func(rc *RegClient) {
+		rc.slog = slog.New(sloghandle.Logrus(log))
+	}
+}
diff --git a/vendor/github.com/regclient/regclient/release.md b/vendor/github.com/regclient/regclient/release.md
new file mode 100644
index 000000000..104aad395
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/release.md
@@ -0,0 +1,36 @@
+# Release v0.11.2
+
+Features:
+
+- Add support for regctl config in XDG and APPDATA. ([PR 1038][pr-1038])
+- Add `ImageWithBlobReaderHook` for callbacks per layer when copying an image. ([PR 1046][pr-1046])
+
+Fixes:
+
+- Do not sign released images multiple times. ([PR 1027][pr-1027])
+- regctl/action update for path fix. ([PR 1031][pr-1031])
+- Remove default values from regctl config. ([PR 1039][pr-1039])
+- Apply Go modernizations with `go fix` from 1.26.0. ([PR 1053][pr-1053])
+- Adjust test repo names to avoid races. ([PR 1054][pr-1054])
+- Automatically upgrade goimports and gorelease. ([PR 1056][pr-1056])
+
+Other Changes:
+
+- Add `REGCTL_CONFIG` to `regctl` help messages. ([PR 1037][pr-1037])
+- Go upgrade fixes CVE-2025-68121; govulncheck indicates this project is not vulnerable. ([PR 1047][pr-1047])
+
+Contributors:
+
+- @sudo-bmitch
+- @vrajashkr
+
+[pr-1027]: https://github.com/regclient/regclient/pull/1027
+[pr-1031]: https://github.com/regclient/regclient/pull/1031
+[pr-1037]: https://github.com/regclient/regclient/pull/1037
+[pr-1038]: https://github.com/regclient/regclient/pull/1038
+[pr-1039]: https://github.com/regclient/regclient/pull/1039
+[pr-1047]: https://github.com/regclient/regclient/pull/1047
+[pr-1046]: https://github.com/regclient/regclient/pull/1046
+[pr-1053]: https://github.com/regclient/regclient/pull/1053
+[pr-1054]: https://github.com/regclient/regclient/pull/1054
+[pr-1056]: https://github.com/regclient/regclient/pull/1056
diff --git a/vendor/github.com/regclient/regclient/repo.go b/vendor/github.com/regclient/regclient/repo.go
new file mode 100644
index 000000000..c2a2d59f3
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/repo.go
@@ -0,0 +1,33 @@
+package regclient
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/regclient/regclient/scheme"
+	"github.com/regclient/regclient/types/errs"
+	"github.com/regclient/regclient/types/repo"
+)
+
+type repoLister interface {
+	RepoList(ctx context.Context, hostname string, opts ...scheme.RepoOpts) (*repo.RepoList, error)
+}
+
+// RepoList returns a list of repositories on a registry.
+// Note the underlying "_catalog" API is not supported on many cloud registries.
+func (rc *RegClient) RepoList(ctx context.Context, hostname string, opts ...scheme.RepoOpts) (*repo.RepoList, error) {
+	i := strings.Index(hostname, "/")
+	if i > 0 {
+		return nil, fmt.Errorf("invalid hostname: %s%.0w", hostname, errs.ErrParsingFailed)
+	}
+	schemeAPI, err := rc.schemeGet("reg")
+	if err != nil {
+		return nil, err
+	}
+	rl, ok := schemeAPI.(repoLister)
+	if !ok {
+		return nil, errs.ErrNotImplemented
+	}
+	return rl.RepoList(ctx, hostname, opts...)
+}
diff --git a/vendor/github.com/regclient/regclient/scheme.go b/vendor/github.com/regclient/regclient/scheme.go
new file mode 100644
index 000000000..a18a7af9d
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/scheme.go
@@ -0,0 +1,33 @@
+package regclient
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/regclient/regclient/scheme"
+	"github.com/regclient/regclient/types/errs"
+	"github.com/regclient/regclient/types/ref"
+)
+
+func (rc *RegClient) schemeGet(scheme string) (scheme.API, error) {
+	s, ok := rc.schemes[scheme]
+	if !ok {
+		return nil, fmt.Errorf("%w: unknown scheme \"%s\"", errs.ErrNotImplemented, scheme)
+	}
+	return s, nil
+}
+
+// Close is used to free resources associated with a reference.
+// With ocidir, this may trigger a garbage collection process.
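+//
+// A usage sketch, assuming rc is a *RegClient and the ocidir path below is
+// illustrative:
+//
+//	r, _ := ref.New("ocidir://./layout:v1")
+//	defer rc.Close(ctx, r)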
+func (rc *RegClient) Close(ctx context.Context, r ref.Ref) error { + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return err + } + // verify Closer api is defined, noop if missing + sc, ok := schemeAPI.(scheme.Closer) + if !ok { + return nil + } + return sc.Close(ctx, r) +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/blob.go b/vendor/github.com/regclient/regclient/scheme/ocidir/blob.go new file mode 100644 index 000000000..64f78b562 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/blob.go @@ -0,0 +1,160 @@ +package ocidir + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "log/slog" + "os" + "path" + + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/ref" +) + +// BlobDelete removes a blob from the repository. +// This method does not verify that blobs are unused. +// Calling the [OCIDir.Close] method to trigger the garbage collection is preferred. +func (o *OCIDir) BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error { + err := d.Digest.Validate() + if err != nil { + return fmt.Errorf("failed to validate digest %s: %w", d.Digest.String(), err) + } + file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) + return os.Remove(file) +} + +// BlobGet retrieves a blob, returning a reader +func (o *OCIDir) BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + err := d.Digest.Validate() + if err != nil { + return nil, fmt.Errorf("failed to validate digest %s: %w", d.Digest.String(), err) + } + file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) + //#nosec G304 users should validate references they attempt to open + fd, err := os.Open(file) + if err != nil { + return nil, err + } + if d.Size <= 0 { + fi, err := fd.Stat() + if err != nil { + _ = fd.Close() + return nil, err + } + d.Size = fi.Size() + } + br := blob.NewReader( + blob.WithRef(r), + blob.WithReader(fd), + blob.WithDesc(d), + ) + o.slog.Debug("retrieved blob", + slog.String("ref", r.CommonName()), + slog.String("file", file)) + return br, nil +} + +// BlobHead verifies the existence of a blob, the reader contains the headers but no body to read +func (o *OCIDir) BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + err := d.Digest.Validate() + if err != nil { + return nil, fmt.Errorf("failed to validate digest %s: %w", d.Digest.String(), err) + } + file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) + //#nosec G304 users should validate references they attempt to open + fd, err := os.Open(file) + if err != nil { + return nil, err + } + defer fd.Close() + if d.Size <= 0 { + fi, err := fd.Stat() + if err != nil { + return nil, err + } + d.Size = fi.Size() + } + br := blob.NewReader( + blob.WithRef(r), + blob.WithDesc(d), + ) + return br, nil +} + +// BlobMount attempts to perform a server side copy of the blob +func (o *OCIDir) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor) error { + return errs.ErrUnsupported +} + +// BlobPut sends a blob to the repository, returns the digest and size when successful +func (o *OCIDir) BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) { + t := o.throttleGet(r, 
false) + done, err := t.Acquire(ctx, reqmeta.Data{Kind: reqmeta.Blob, Size: d.Size}) + if err != nil { + return d, err + } + defer done() + + err = o.initIndex(r, false) + if err != nil { + return d, err + } + digester := d.DigestAlgo().Digester() + rdr = io.TeeReader(rdr, digester.Hash()) + // write the blob to a tmp file + dir := path.Join(r.Path, "blobs", d.DigestAlgo().String()) + tmpPattern := "*.tmp" + //#nosec G301 defer to user umask settings + err = os.MkdirAll(dir, 0o777) + if err != nil && !errors.Is(err, fs.ErrExist) { + return d, fmt.Errorf("failed creating %s: %w", dir, err) + } + tmpFile, err := os.CreateTemp(dir, tmpPattern) + if err != nil { + return d, fmt.Errorf("failed creating blob tmp file: %w", err) + } + fi, err := tmpFile.Stat() + if err != nil { + return d, fmt.Errorf("failed to stat blob tmpfile: %w", err) + } + tmpName := fi.Name() + i, err := io.Copy(tmpFile, rdr) + errC := tmpFile.Close() + if err != nil { + return d, err + } + if errC != nil { + return d, errC + } + // validate result matches descriptor, or update descriptor if it wasn't defined + if d.Digest.Validate() != nil { + d.Digest = digester.Digest() + } else if d.Digest != digester.Digest() { + return d, fmt.Errorf("unexpected digest, expected %s, computed %s", d.Digest, digester.Digest()) + } + if d.Size <= 0 { + d.Size = i + } else if i != d.Size { + return d, fmt.Errorf("unexpected blob length, expected %d, received %d", d.Size, i) + } + file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) + //#nosec G703 inputs are user controlled + err = os.Rename(path.Join(dir, tmpName), file) + if err != nil { + return d, fmt.Errorf("failed to write blob (rename tmp file %s to %s): %w", path.Join(dir, tmpName), file, err) + } + o.slog.Debug("pushed blob", + slog.String("ref", r.CommonName()), + slog.String("file", file)) + + o.mu.Lock() + o.refMod(r) + o.mu.Unlock() + return d, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/close.go b/vendor/github.com/regclient/regclient/scheme/ocidir/close.go new file mode 100644 index 000000000..23eae0d12 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/close.go @@ -0,0 +1,117 @@ +package ocidir + +import ( + "context" + "fmt" + "log/slog" + "os" + "path" + + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/ref" +) + +// Close triggers a garbage collection if the underlying path has been modified +func (o *OCIDir) Close(ctx context.Context, r ref.Ref) error { + if !o.gc { + return nil + } + + o.mu.Lock() + defer o.mu.Unlock() + if gc, ok := o.modRefs[r.Path]; !ok || !gc.mod || gc.locks > 0 { + // unmodified or locked, skip gc + return nil + } + + // perform GC + o.slog.Debug("running GC", + slog.String("ref", r.CommonName())) + dl := map[string]bool{} + // recurse through index, manifests, and blob lists, generating a digest list + index, err := o.readIndex(r, true) + if err != nil { + return err + } + im, err := manifest.New(manifest.WithOrig(index)) + if err != nil { + return err + } + err = o.closeProcManifest(ctx, r, im, &dl) + if err != nil { + return err + } + + // go through filesystem digest list, removing entries not seen in recursive pass + blobsPath := path.Join(r.Path, "blobs") + blobDirs, err := os.ReadDir(blobsPath) + if err != nil { + return err + } + for _, blobDir := range blobDirs { + if !blobDir.IsDir() { + // should this warn or delete unexpected files in the blobs folder? 
+ continue + } + digestFiles, err := os.ReadDir(path.Join(blobsPath, blobDir.Name())) + if err != nil { + return err + } + for _, digestFile := range digestFiles { + digest := fmt.Sprintf("%s:%s", blobDir.Name(), digestFile.Name()) + if !dl[digest] { + o.slog.Debug("ocidir garbage collect", + slog.String("digest", digest)) + // delete + err = os.Remove(path.Join(blobsPath, blobDir.Name(), digestFile.Name())) + if err != nil { + return fmt.Errorf("failed to delete %s: %w", path.Join(blobsPath, blobDir.Name(), digestFile.Name()), err) + } + } + } + } + delete(o.modRefs, r.Path) + return nil +} + +func (o *OCIDir) closeProcManifest(ctx context.Context, r ref.Ref, m manifest.Manifest, dl *map[string]bool) error { + if mi, ok := m.(manifest.Indexer); ok { + // go through manifest list, updating dl, and recursively processing nested manifests + ml, err := mi.GetManifestList() + if err != nil { + return err + } + for _, cur := range ml { + cr := r.SetDigest(cur.Digest.String()) + (*dl)[cr.Digest] = true + cm, err := o.manifestGet(ctx, cr) + if err != nil { + // ignore errors in case a manifest has been deleted or sparse copy + o.slog.Debug("could not retrieve manifest", + slog.String("ref", cr.CommonName()), + slog.String("err", err.Error())) + continue + } + err = o.closeProcManifest(ctx, cr, cm, dl) + if err != nil { + return err + } + } + } + if mi, ok := m.(manifest.Imager); ok { + // get config from manifest if it exists + cd, err := mi.GetConfig() + if err == nil { + (*dl)[cd.Digest.String()] = true + } + // finally add all layers to digest list + layers, err := mi.GetLayers() + if err != nil { + return err + } + for _, layer := range layers { + (*dl)[layer.Digest.String()] = true + } + } + return nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go b/vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go new file mode 100644 index 000000000..3ef9c40fa --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go @@ -0,0 +1,305 @@ +package ocidir + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "log/slog" + "os" + "path" + "slices" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/ref" +) + +// ManifestDelete removes a manifest, including all tags that point to that manifest +func (o *OCIDir) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.ManifestOpts) error { + o.mu.Lock() + defer o.mu.Unlock() + + if r.Digest == "" { + return fmt.Errorf("digest required to delete manifest, reference %s%.0w", r.CommonName(), errs.ErrMissingDigest) + } + + mc := scheme.ManifestConfig{} + for _, opt := range opts { + opt(&mc) + } + + // always check for refers with ocidir + if mc.Manifest == nil { + m, err := o.manifestGet(ctx, r) + if err != nil { + return fmt.Errorf("failed to pull manifest for refers: %w", err) + } + mc.Manifest = m + } + if mc.Manifest != nil { + if ms, ok := mc.Manifest.(manifest.Subjecter); ok { + sDesc, err := ms.GetSubject() + if err == nil && sDesc != nil && sDesc.Digest != "" { + // attempt to delete the referrer, but ignore if the referrer entry wasn't found + err = o.referrerDelete(ctx, r, mc.Manifest) + if err != nil && !errors.Is(err, errs.ErrNotFound) && 
!errors.Is(err, fs.ErrNotExist) { + return err + } + } + } + } + + // get index + changed := false + index, err := o.readIndex(r, true) + if err != nil { + return fmt.Errorf("failed to read index: %w", err) + } + for i := len(index.Manifests) - 1; i >= 0; i-- { + // remove matching entry from index + if r.Digest != "" && index.Manifests[i].Digest.String() == r.Digest { + changed = true + index.Manifests = slices.Delete(index.Manifests, i, i+1) + } + } + // push manifest back out + if changed { + err = o.writeIndex(r, index, true) + if err != nil { + return fmt.Errorf("failed to write index: %w", err) + } + } + + // delete from filesystem like a registry would do + d := digest.Digest(r.Digest) + file := path.Join(r.Path, "blobs", d.Algorithm().String(), d.Encoded()) + err = os.Remove(file) + if err != nil { + return fmt.Errorf("failed to delete manifest: %w", err) + } + o.refMod(r) + return nil +} + +// ManifestGet retrieves a manifest from a repository +func (o *OCIDir) ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, error) { + o.mu.Lock() + defer o.mu.Unlock() + return o.manifestGet(ctx, r) +} + +func (o *OCIDir) manifestGet(_ context.Context, r ref.Ref) (manifest.Manifest, error) { + index, err := o.readIndex(r, true) + if err != nil { + return nil, fmt.Errorf("unable to read oci index: %w", err) + } + if r.Digest == "" && r.Tag == "" { + r = r.SetTag("latest") + } + desc, err := indexGet(index, r) + if err != nil { + if r.Digest != "" { + desc.Digest = digest.Digest(r.Digest) + } else { + return nil, err + } + } + if desc.Digest == "" { + return nil, errs.ErrNotFound + } + if err = desc.Digest.Validate(); err != nil { + return nil, fmt.Errorf("invalid digest in index: %s: %w", string(desc.Digest), err) + } + file := path.Join(r.Path, "blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + //#nosec G304 users should validate references they attempt to open + fd, err := os.Open(file) + if err != nil { + return nil, fmt.Errorf("failed to open manifest: %w", err) + } + defer fd.Close() + mb, err := io.ReadAll(fd) + if err != nil { + return nil, fmt.Errorf("failed to read manifest: %w", err) + } + if desc.Size == 0 { + desc.Size = int64(len(mb)) + } + o.slog.Debug("retrieved manifest", + slog.String("ref", r.CommonName()), + slog.String("file", file)) + return manifest.New( + manifest.WithRef(r), + manifest.WithDesc(desc), + manifest.WithRaw(mb), + ) +} + +// ManifestHead gets metadata about the manifest (existence, digest, mediatype, size) +func (o *OCIDir) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest, error) { + index, err := o.readIndex(r, false) + if err != nil { + return nil, fmt.Errorf("unable to read oci index: %w", err) + } + if r.Digest == "" && r.Tag == "" { + r = r.SetTag("latest") + } + desc, err := indexGet(index, r) + if err != nil { + if r.Digest != "" { + desc.Digest = digest.Digest(r.Digest) + } else { + return nil, err + } + } + if desc.Digest == "" { + return nil, errs.ErrNotFound + } + if err = desc.Digest.Validate(); err != nil { + return nil, fmt.Errorf("invalid digest in index: %s: %w", string(desc.Digest), err) + } + // verify underlying file exists + file := path.Join(r.Path, "blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + fi, err := os.Stat(file) + if err != nil || fi.IsDir() { + return nil, errs.ErrNotFound + } + // if missing, set media type on desc + if desc.MediaType == "" { + //#nosec G304 users should validate references they attempt to open + raw, err := os.ReadFile(file) + if err != nil { + 
return nil, err
+		}
+		mt := struct {
+			MediaType     string `json:"mediaType,omitempty"`
+			SchemaVersion int    `json:"schemaVersion,omitempty"`
+			Signatures    []any  `json:"signatures,omitempty"`
+		}{}
+		err = json.Unmarshal(raw, &mt)
+		if err != nil {
+			return nil, err
+		}
+		if mt.MediaType != "" {
+			desc.MediaType = mt.MediaType
+			desc.Size = int64(len(raw))
+		} else if mt.SchemaVersion == 1 && len(mt.Signatures) > 0 {
+			desc.MediaType = mediatype.Docker1ManifestSigned
+		} else if mt.SchemaVersion == 1 {
+			desc.MediaType = mediatype.Docker1Manifest
+			desc.Size = int64(len(raw))
+		}
+	}
+	return manifest.New(
+		manifest.WithRef(r),
+		manifest.WithDesc(desc),
+	)
+}
+
+// ManifestPut sends a manifest to the repository
+func (o *OCIDir) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...scheme.ManifestOpts) error {
+	o.mu.Lock()
+	defer o.mu.Unlock()
+	return o.manifestPut(ctx, r, m, opts...)
+}
+
+func (o *OCIDir) manifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...scheme.ManifestOpts) error {
+	config := scheme.ManifestConfig{}
+	for _, opt := range opts {
+		opt(&config)
+	}
+	if !config.Child && r.Digest == "" && r.Tag == "" {
+		r = r.SetTag("latest")
+	}
+	err := o.initIndex(r, true)
+	if err != nil {
+		return err
+	}
+	desc := m.GetDescriptor()
+	if err = desc.Digest.Validate(); err != nil {
+		return fmt.Errorf("invalid digest for manifest: %s: %w", string(desc.Digest), err)
+	}
+	b, err := m.RawBody()
+	if err != nil {
+		return fmt.Errorf("could not serialize manifest: %w", err)
+	}
+	if r.Digest != "" && desc.Digest.String() != r.Digest {
+		// Digest algorithm may have changed, try recreating the manifest with the provided ref.
+		// This will fail if the ref digest does not match the manifest.
+		m, err = manifest.New(manifest.WithRef(r), manifest.WithRaw(b))
+		if err != nil {
+			return fmt.Errorf("failed to rebuild manifest with ref \"%s\": %w", r.CommonName(), err)
+		}
+	}
+	if r.Tag != "" {
+		desc.Annotations = map[string]string{
+			aOCIRefName: r.Tag,
+		}
+	}
+	// create manifest CAS file
+	dir := path.Join(r.Path, "blobs", desc.Digest.Algorithm().String())
+	//#nosec G301 defer to user umask settings
+	err = os.MkdirAll(dir, 0o777)
+	if err != nil && !errors.Is(err, fs.ErrExist) {
+		return fmt.Errorf("failed creating %s: %w", dir, err)
+	}
+	// write to a tmp file, rename after validating
+	tmpFile, err := os.CreateTemp(dir, desc.Digest.Encoded()+".*.tmp")
+	if err != nil {
+		return fmt.Errorf("failed to create manifest tmpfile: %w", err)
+	}
+	fi, err := tmpFile.Stat()
+	if err != nil {
+		return fmt.Errorf("failed to stat manifest tmpfile: %w", err)
+	}
+	tmpName := fi.Name()
+	_, err = tmpFile.Write(b)
+	errC := tmpFile.Close()
+	if err != nil {
+		return fmt.Errorf("failed to write manifest tmpfile: %w", err)
+	}
+	if errC != nil {
+		return fmt.Errorf("failed to close manifest tmpfile: %w", errC)
+	}
+	file := path.Join(dir, desc.Digest.Encoded())
+	//#nosec G703 inputs are user controlled
+	err = os.Rename(path.Join(dir, tmpName), file)
+	if err != nil {
+		return fmt.Errorf("failed to write manifest (rename tmpfile): %w", err)
+	}
+
+	// verify/update index
+	err = o.updateIndex(r, desc, config.Child, true)
+	if err != nil {
+		return err
+	}
+	o.refMod(r)
+	o.slog.Debug("pushed manifest",
+		slog.String("ref", r.CommonName()),
+		slog.String("file", file))
+
+	// update referrers if defined on this manifest
+	if ms, ok := m.(manifest.Subjecter); ok {
+		mDesc, err := ms.GetSubject()
+		if err != nil {
+			return err
+		}
+		if mDesc != nil &&
mDesc.Digest != "" { + err = o.referrerPut(ctx, r, m) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go b/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go new file mode 100644 index 000000000..91a95f840 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go @@ -0,0 +1,408 @@ +// Package ocidir implements the OCI Image Layout scheme with a directory (not packed in a tar) +package ocidir + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "log/slog" + "os" + "path" + "slices" + "strings" + "sync" + + "github.com/regclient/regclient/internal/pqueue" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/ref" +) + +const ( + imageLayoutFile = "oci-layout" + aOCIRefName = "org.opencontainers.image.ref.name" + aCtrdImageName = "io.containerd.image.name" + defThrottle = 3 +) + +// OCIDir is used for accessing OCI Image Layouts defined as a directory +type OCIDir struct { + slog *slog.Logger + gc bool + modRefs map[string]*ociGC + throttle map[string]*pqueue.Queue[reqmeta.Data] + throttleDef int + mu sync.Mutex +} + +type ociGC struct { + mod bool + locks int +} + +type ociConf struct { + gc bool + slog *slog.Logger + throttle int +} + +// Opts are used for passing options to ocidir +type Opts func(*ociConf) + +// New creates a new OCIDir with options +func New(opts ...Opts) *OCIDir { + conf := ociConf{ + slog: slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{})), + gc: true, + throttle: defThrottle, + } + for _, opt := range opts { + opt(&conf) + } + return &OCIDir{ + slog: conf.slog, + gc: conf.gc, + modRefs: map[string]*ociGC{}, + throttle: map[string]*pqueue.Queue[reqmeta.Data]{}, + throttleDef: conf.throttle, + } +} + +// WithGC configures the garbage collection setting +// This defaults to enabled +func WithGC(gc bool) Opts { + return func(c *ociConf) { + c.gc = gc + } +} + +// WithSlog provides a slog logger. +// By default logging is disabled. 
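+//
+// For example, a sketch wiring the default slog logger:
+//
+//	o := ocidir.New(ocidir.WithSlog(slog.Default()))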
+func WithSlog(slog *slog.Logger) Opts {
+	return func(c *ociConf) {
+		c.slog = slog
+	}
+}
+
+// WithThrottle provides a number of concurrent write actions (blob/manifest put)
+func WithThrottle(count int) Opts {
+	return func(c *ociConf) {
+		c.throttle = count
+	}
+}
+
+// GCLock is used to prevent GC on a ref
+func (o *OCIDir) GCLock(r ref.Ref) {
+	o.mu.Lock()
+	defer o.mu.Unlock()
+	if gc, ok := o.modRefs[r.Path]; ok && gc != nil {
+		gc.locks++
+	} else {
+		o.modRefs[r.Path] = &ociGC{locks: 1}
+	}
+}
+
+// GCUnlock removes a hold on GC of a ref; this must be done before the ref is closed.
+func (o *OCIDir) GCUnlock(r ref.Ref) {
+	o.mu.Lock()
+	defer o.mu.Unlock()
+	if gc, ok := o.modRefs[r.Path]; ok && gc != nil && gc.locks > 0 {
+		gc.locks--
+	}
+}
+
+// Throttle is used to limit concurrency
+func (o *OCIDir) Throttle(r ref.Ref, put bool) []*pqueue.Queue[reqmeta.Data] {
+	tList := []*pqueue.Queue[reqmeta.Data]{}
+	// throttle only applies to put requests
+	if !put || o.throttleDef <= 0 {
+		return tList
+	}
+	return []*pqueue.Queue[reqmeta.Data]{o.throttleGet(r, false)}
+}
+
+func (o *OCIDir) throttleGet(r ref.Ref, locked bool) *pqueue.Queue[reqmeta.Data] {
+	if !locked {
+		o.mu.Lock()
+		defer o.mu.Unlock()
+	}
+	if t, ok := o.throttle[r.Path]; ok {
+		return t
+	}
+	// init a new throttle
+	o.throttle[r.Path] = pqueue.New(pqueue.Opts[reqmeta.Data]{Max: o.throttleDef})
+	return o.throttle[r.Path]
+}
+
+func (o *OCIDir) initIndex(r ref.Ref, locked bool) error {
+	if !locked {
+		o.mu.Lock()
+		defer o.mu.Unlock()
+	}
+	layoutFile := path.Join(r.Path, imageLayoutFile)
+	_, err := os.Stat(layoutFile)
+	if err == nil {
+		return nil
+	}
+	//#nosec G301 defer to user umask settings
+	err = os.MkdirAll(r.Path, 0o777)
+	if err != nil && !errors.Is(err, fs.ErrExist) {
+		return fmt.Errorf("failed creating %s: %w", r.Path, err)
+	}
+	// create/replace oci-layout file
+	layout := v1.ImageLayout{
+		Version: "1.0.0",
+	}
+	lb, err := json.Marshal(layout)
+	if err != nil {
+		return fmt.Errorf("cannot marshal layout: %w", err)
+	}
+	//#nosec G304 users should validate references they attempt to open
+	lfh, err := os.Create(layoutFile)
+	if err != nil {
+		return fmt.Errorf("cannot create %s: %w", imageLayoutFile, err)
+	}
+	defer lfh.Close()
+	_, err = lfh.Write(lb)
+	if err != nil {
+		return fmt.Errorf("cannot write %s: %w", imageLayoutFile, err)
+	}
+	return nil
+}
+
+func (o *OCIDir) readIndex(r ref.Ref, locked bool) (v1.Index, error) {
+	if !locked {
+		o.mu.Lock()
+		defer o.mu.Unlock()
+	}
+	// validate dir
+	index := v1.Index{}
+	err := o.valid(r.Path, true)
+	if err != nil {
+		return index, err
+	}
+	indexFile := path.Join(r.Path, "index.json")
+	//#nosec G304 users should validate references they attempt to open
+	fh, err := os.Open(indexFile)
+	if err != nil {
+		return index, fmt.Errorf("%s cannot be opened: %w", indexFile, err)
+	}
+	defer fh.Close()
+	ib, err := io.ReadAll(fh)
+	if err != nil {
+		return index, fmt.Errorf("%s cannot be read: %w", indexFile, err)
+	}
+	err = json.Unmarshal(ib, &index)
+	if err != nil {
+		return index, fmt.Errorf("%s cannot be parsed: %w", indexFile, err)
+	}
+	return index, nil
+}
+
+func (o *OCIDir) updateIndex(r ref.Ref, d descriptor.Descriptor, child bool, locked bool) error {
+	if !locked {
+		o.mu.Lock()
+		defer o.mu.Unlock()
+	}
+	indexChanged := false
+	index, err := o.readIndex(r, true)
+	if err != nil {
+		index = indexCreate()
+		indexChanged = true
+	}
+	if !child {
+		err := indexSet(&index, r, d)
+		if err != nil {
+			return fmt.Errorf("failed to update index: %w", err)
+		}
+		indexChanged = true
+	}
+	if indexChanged {
+		err = o.writeIndex(r, index, true)
+		if err != nil {
+			return fmt.Errorf("failed to write index: %w", err)
+		}
+	}
+	return nil
+}
+
+func (o *OCIDir) writeIndex(r ref.Ref, i v1.Index, locked bool) error {
+	if !locked {
+		o.mu.Lock()
+		defer o.mu.Unlock()
+	}
+	//#nosec G301 defer to user umask settings
+	err := os.MkdirAll(r.Path, 0o777)
+	if err != nil && !errors.Is(err, fs.ErrExist) {
+		return fmt.Errorf("failed creating %s: %w", r.Path, err)
+	}
+	// create/replace oci-layout file
+	layout := v1.ImageLayout{
+		Version: "1.0.0",
+	}
+	lb, err := json.Marshal(layout)
+	if err != nil {
+		return fmt.Errorf("cannot marshal layout: %w", err)
+	}
+	lfh, err := os.Create(path.Join(r.Path, imageLayoutFile))
+	if err != nil {
+		return fmt.Errorf("cannot create %s: %w", imageLayoutFile, err)
+	}
+	defer lfh.Close()
+	_, err = lfh.Write(lb)
+	if err != nil {
+		return fmt.Errorf("cannot write %s: %w", imageLayoutFile, err)
+	}
+	// create/replace index.json file
+	tmpFile, err := os.CreateTemp(r.Path, "index.json.*.tmp")
+	if err != nil {
+		return fmt.Errorf("cannot create index tmpfile: %w", err)
+	}
+	fi, err := tmpFile.Stat()
+	if err != nil {
+		return fmt.Errorf("failed to stat index tmpfile: %w", err)
+	}
+	tmpName := fi.Name()
+	b, err := json.Marshal(i)
+	if err != nil {
+		return fmt.Errorf("cannot marshal index: %w", err)
+	}
+	_, err = tmpFile.Write(b)
+	errC := tmpFile.Close()
+	if err != nil {
+		return fmt.Errorf("cannot write index: %w", err)
+	}
+	if errC != nil {
+		return fmt.Errorf("cannot close index: %w", errC)
+	}
+	indexFile := path.Join(r.Path, "index.json")
+	//#nosec G703 inputs are user controlled
+	err = os.Rename(path.Join(r.Path, tmpName), indexFile)
+	if err != nil {
+		return fmt.Errorf("cannot rename tmpfile to index: %w", err)
+	}
+	return nil
+}
+
+// valid checks the `oci-layout` file in dir and verifies the layout version is supported.
+func (o *OCIDir) valid(dir string, locked bool) error {
+	if !locked {
+		o.mu.Lock()
+		defer o.mu.Unlock()
+	}
+	layout := v1.ImageLayout{}
+	reqVer := "1.0.0"
+	//#nosec G304 users should validate references they attempt to open
+	fh, err := os.Open(path.Join(dir, imageLayoutFile))
+	if err != nil {
+		return fmt.Errorf("%s cannot be opened: %w", imageLayoutFile, err)
+	}
+	defer fh.Close()
+	lb, err := io.ReadAll(fh)
+	if err != nil {
+		return fmt.Errorf("%s cannot be read: %w", imageLayoutFile, err)
+	}
+	err = json.Unmarshal(lb, &layout)
+	if err != nil {
+		return fmt.Errorf("%s cannot be parsed: %w", imageLayoutFile, err)
+	}
+	if layout.Version != reqVer {
+		return fmt.Errorf("unsupported oci layout version, expected %s, received %s", reqVer, layout.Version)
+	}
+	return nil
+}
+
+func (o *OCIDir) refMod(r ref.Ref) {
+	if gc, ok := o.modRefs[r.Path]; ok && gc != nil {
+		gc.mod = true
+	} else {
+		o.modRefs[r.Path] = &ociGC{mod: true}
+	}
+}
+
+func indexCreate() v1.Index {
+	i := v1.Index{
+		Versioned:   v1.IndexSchemaVersion,
+		MediaType:   mediatype.OCI1ManifestList,
+		Manifests:   []descriptor.Descriptor{},
+		Annotations: map[string]string{},
+	}
+	return i
+}
+
+func indexGet(index v1.Index, r ref.Ref) (descriptor.Descriptor, error) {
+	if r.Digest == "" && r.Tag == "" {
+		r = r.SetTag("latest")
+	}
+	if r.Digest != "" {
+		for _, im := range index.Manifests {
+			if im.Digest.String() == r.Digest {
+				return im, nil
+			}
+		}
+	} else if r.Tag != "" {
+		for _, im := range index.Manifests {
+			if name, ok := im.Annotations[aOCIRefName]; ok && name == r.Tag {
+				return im, nil
+			}
+		}
+
// fall back to support full image name in annotation + for _, im := range index.Manifests { + if name, ok := im.Annotations[aOCIRefName]; ok && strings.HasSuffix(name, ":"+r.Tag) { + return im, nil + } + } + } + return descriptor.Descriptor{}, errs.ErrNotFound +} + +func indexSet(index *v1.Index, r ref.Ref, d descriptor.Descriptor) error { + if index == nil { + return fmt.Errorf("index is nil") + } + if r.Tag != "" { + if d.Annotations == nil { + d.Annotations = map[string]string{} + } + d.Annotations[aOCIRefName] = r.Tag + } + if index.Manifests == nil { + index.Manifests = []descriptor.Descriptor{} + } + pos := -1 + // search for existing + for i := range index.Manifests { + var name string + if index.Manifests[i].Annotations != nil { + name = index.Manifests[i].Annotations[aOCIRefName] + } + if (name == "" && index.Manifests[i].Digest == d.Digest) || (r.Tag != "" && name == r.Tag) { + index.Manifests[i] = d + pos = i + break + } + } + if pos >= 0 { + // existing entry was replaced, remove any dup entries + for i := len(index.Manifests) - 1; i > pos; i-- { + var name string + if index.Manifests[i].Annotations != nil { + name = index.Manifests[i].Annotations[aOCIRefName] + } + // prune entries without any tag and a matching digest + // or entries with a matching tag + if (name == "" && index.Manifests[i].Digest == d.Digest) || (r.Tag != "" && name == r.Tag) { + index.Manifests = slices.Delete(index.Manifests, i, i+1) + } + } + } else { + // existing entry to replace was not found, add the descriptor + index.Manifests = append(index.Manifests, d) + } + return nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir_nowasm.go b/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir_nowasm.go new file mode 100644 index 000000000..50ec0519b --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir_nowasm.go @@ -0,0 +1,19 @@ +//go:build !wasm + +package ocidir + +import ( + "log/slog" + + "github.com/sirupsen/logrus" + + "github.com/regclient/regclient/internal/sloghandle" +) + +// WithLog provides a logrus logger. +// By default logging is disabled. +func WithLog(log *logrus.Logger) Opts { + return func(c *ociConf) { + c.slog = slog.New(sloghandle.Logrus(log)) + } +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/ping.go b/vendor/github.com/regclient/regclient/scheme/ocidir/ping.go new file mode 100644 index 000000000..d83df3436 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/ping.go @@ -0,0 +1,29 @@ +package ocidir + +import ( + "context" + "fmt" + "os" + + "github.com/regclient/regclient/types/ping" + "github.com/regclient/regclient/types/ref" +) + +// Ping for an ocidir verifies access to read the path. 
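+//
+// A usage sketch (the path-backed ref below is illustrative):
+//
+//	r, _ := ref.New("ocidir://./layout")
+//	if _, err := o.Ping(ctx, r); err != nil {
+//		// path is missing or not a directory
+//	}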
+func (o *OCIDir) Ping(ctx context.Context, r ref.Ref) (ping.Result, error) { + ret := ping.Result{} + fd, err := os.Open(r.Path) + if err != nil { + return ret, err + } + defer fd.Close() + fi, err := fd.Stat() + if err != nil { + return ret, err + } + ret.Stat = fi + if !fi.IsDir() { + return ret, fmt.Errorf("failed to access %s: not a directory", r.Path) + } + return ret, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go b/vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go new file mode 100644 index 000000000..7a89d27cf --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go @@ -0,0 +1,160 @@ +package ocidir + +import ( + "context" + "errors" + "fmt" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/referrer" +) + +// ReferrerList returns a list of referrers to a given reference. +// The reference must include the digest. Use [regclient.ReferrerList] to resolve the platform or tag. +func (o *OCIDir) ReferrerList(ctx context.Context, r ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { + o.mu.Lock() + defer o.mu.Unlock() + return o.referrerList(ctx, r, opts...) +} + +func (o *OCIDir) referrerList(ctx context.Context, rSubject ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { + config := scheme.ReferrerConfig{} + for _, opt := range opts { + opt(&config) + } + var r ref.Ref + if config.SrcRepo.IsSet() { + r = config.SrcRepo.SetDigest(rSubject.Digest) + } else { + r = rSubject.SetDigest(rSubject.Digest) + } + rl := referrer.ReferrerList{ + Tags: []string{}, + } + if rSubject.Digest == "" { + return rl, fmt.Errorf("digest required to query referrers %s", rSubject.CommonName()) + } + + // pull referrer list by tag + rlTag, err := referrer.FallbackTag(r) + if err != nil { + return rl, err + } + m, err := o.manifestGet(ctx, rlTag) + if err != nil { + if errors.Is(err, errs.ErrNotFound) { + // empty list, initialize a new manifest + rl.Manifest, err = manifest.New(manifest.WithOrig(v1.Index{ + Versioned: v1.IndexSchemaVersion, + MediaType: mediatype.OCI1ManifestList, + })) + if err != nil { + return rl, err + } + return rl, nil + } + return rl, err + } + ociML, ok := m.GetOrig().(v1.Index) + if !ok { + return rl, fmt.Errorf("manifest is not an OCI index: %s", rlTag.CommonName()) + } + // update referrer list + rl.Subject = rSubject + if config.SrcRepo.IsSet() { + rl.Source = config.SrcRepo + } + rl.Manifest = m + rl.Descriptors = ociML.Manifests + rl.Annotations = ociML.Annotations + rl.Tags = append(rl.Tags, rlTag.Tag) + rl = scheme.ReferrerFilter(config, rl) + + return rl, nil +} + +// referrerDelete deletes a referrer associated with a manifest +func (o *OCIDir) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manifest) error { + // get refers field + mSubject, ok := m.(manifest.Subjecter) + if !ok { + return fmt.Errorf("manifest does not support subject: %w", errs.ErrUnsupportedMediaType) + } + subject, err := mSubject.GetSubject() + if err != nil { + return err + } + // validate/set subject descriptor + if subject == nil || subject.Digest == "" { + return fmt.Errorf("subject is not set%.0w", errs.ErrNotFound) + } + + // get descriptor for subject + rSubject := 
r.SetDigest(subject.Digest.String()) + + // pull existing referrer list + rl, err := o.referrerList(ctx, rSubject) + if err != nil { + return err + } + err = rl.Delete(m) + if err != nil { + return err + } + + // push updated referrer list by tag + rlTag, err := referrer.FallbackTag(rSubject) + if err != nil { + return err + } + if rl.IsEmpty() { + err = o.tagDelete(ctx, rlTag) + if err == nil { + return nil + } + // if delete is not supported, fall back to pushing empty list + } + return o.manifestPut(ctx, rlTag, rl.Manifest) +} + +// referrerPut pushes a new referrer associated with a given reference +func (o *OCIDir) referrerPut(ctx context.Context, r ref.Ref, m manifest.Manifest) error { + // get subject field + mSubject, ok := m.(manifest.Subjecter) + if !ok { + return fmt.Errorf("manifest does not support subject: %w", errs.ErrUnsupportedMediaType) + } + subject, err := mSubject.GetSubject() + if err != nil { + return err + } + // validate/set subject descriptor + if subject == nil || subject.Digest == "" { + return fmt.Errorf("subject is not set%.0w", errs.ErrNotFound) + } + + // get descriptor for subject + rSubject := r.SetDigest(subject.Digest.String()) + + // pull existing referrer list + rl, err := o.referrerList(ctx, rSubject) + if err != nil { + return err + } + err = rl.Add(m) + if err != nil { + return err + } + + // push updated referrer list by tag + rlTag, err := referrer.FallbackTag(rSubject) + if err != nil { + return err + } + return o.manifestPut(ctx, rlTag, rl.Manifest) +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/tag.go b/vendor/github.com/regclient/regclient/scheme/ocidir/tag.go new file mode 100644 index 000000000..741ebdac6 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/tag.go @@ -0,0 +1,89 @@ +package ocidir + +import ( + "context" + "encoding/json" + "fmt" + "slices" + "sort" + "strings" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/tag" +) + +// TagDelete removes a tag from the repository +func (o *OCIDir) TagDelete(ctx context.Context, r ref.Ref) error { + o.mu.Lock() + defer o.mu.Unlock() + return o.tagDelete(ctx, r) +} + +func (o *OCIDir) tagDelete(_ context.Context, r ref.Ref) error { + if r.Tag == "" { + return errs.ErrMissingTag + } + // get index + index, err := o.readIndex(r, true) + if err != nil { + return fmt.Errorf("failed to read index: %w", err) + } + changed := false + for i, desc := range index.Manifests { + if t, ok := desc.Annotations[aOCIRefName]; ok && t == r.Tag { + // remove matching entry from index + index.Manifests = slices.Delete(index.Manifests, i, i+1) + changed = true + } + } + if !changed { + return fmt.Errorf("failed deleting %s: %w", r.CommonName(), errs.ErrNotFound) + } + // push manifest back out + err = o.writeIndex(r, index, true) + if err != nil { + return fmt.Errorf("failed to write index: %w", err) + } + o.refMod(r) + return nil +} + +// TagList returns a list of tags from the repository +func (o *OCIDir) TagList(ctx context.Context, r ref.Ref, opts ...scheme.TagOpts) (*tag.List, error) { + // get index + index, err := o.readIndex(r, false) + if err != nil { + return nil, err + } + tl := []string{} + for _, desc := range index.Manifests { + if t, ok := desc.Annotations[aOCIRefName]; ok { + if i := strings.LastIndex(t, ":"); i >= 0 { + t = t[i+1:] + } + if !slices.Contains(tl, t) { + tl = 
append(tl, t) + } + } + } + sort.Strings(tl) + ib, err := json.Marshal(index) + if err != nil { + return nil, err + } + // return listing from index + t, err := tag.New( + tag.WithRaw(ib), + tag.WithRef(r), + tag.WithMT(mediatype.OCI1ManifestList), + tag.WithLayoutIndex(index), + tag.WithTags(tl), + ) + if err != nil { + return nil, err + } + return t, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/blob.go b/vendor/github.com/regclient/regclient/scheme/reg/blob.go new file mode 100644 index 000000000..38c313b9c --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/blob.go @@ -0,0 +1,671 @@ +package reg + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "strconv" + "strings" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/warning" +) + +var zeroDig = digest.SHA256.FromBytes([]byte{}) + +// BlobDelete removes a blob from the repository +func (reg *Reg) BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error { + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + Method: "DELETE", + Repository: r.Repository, + Path: "blobs/" + d.Digest.String(), + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return fmt.Errorf("failed to delete blob, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), err) + } + if resp.HTTPResponse().StatusCode != 202 { + return fmt.Errorf("failed to delete blob, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + return nil +} + +// BlobGet retrieves a blob from the repository, returning a blob reader +func (reg *Reg) BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + // build/send request + req := ®http.Req{ + MetaKind: reqmeta.Blob, + Host: r.Registry, + Method: "GET", + Repository: r.Repository, + Path: "blobs/" + d.Digest.String(), + ExpectLen: d.Size, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil && len(d.URLs) > 0 { + for _, curURL := range d.URLs { + // fallback for external blobs + var u *url.URL + u, err = url.Parse(curURL) + if err != nil { + return nil, fmt.Errorf("failed to parse external url \"%s\": %w", curURL, err) + } + req = ®http.Req{ + MetaKind: reqmeta.Blob, + Host: r.Registry, + Method: "GET", + Repository: r.Repository, + DirectURL: u, + NoMirrors: true, + ExpectLen: d.Size, + } + resp, err = reg.reghttp.Do(ctx, req) + if err == nil { + break + } + } + } + if err != nil { + return nil, fmt.Errorf("failed to get blob, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), err) + } + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to get blob, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + b := blob.NewReader( + blob.WithRef(r), + blob.WithReader(resp), + blob.WithDesc(d), + blob.WithResp(resp.HTTPResponse()), + ) + return b, nil +} + +// BlobHead is used to verify if a blob exists and is accessible +func (reg *Reg) BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) 
(blob.Reader, error) { + // build/send request + req := ®http.Req{ + MetaKind: reqmeta.Head, + Host: r.Registry, + Method: "HEAD", + Repository: r.Repository, + Path: "blobs/" + d.Digest.String(), + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil && len(d.URLs) > 0 { + for _, curURL := range d.URLs { + // fallback for external blobs + var u *url.URL + u, err = url.Parse(curURL) + if err != nil { + return nil, fmt.Errorf("failed to parse external url \"%s\": %w", curURL, err) + } + req = ®http.Req{ + MetaKind: reqmeta.Head, + Host: r.Registry, + Method: "HEAD", + Repository: r.Repository, + DirectURL: u, + NoMirrors: true, + } + resp, err = reg.reghttp.Do(ctx, req) + if err == nil { + break + } + } + } + if err != nil { + return nil, fmt.Errorf("failed to request blob head, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to request blob head, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + b := blob.NewReader( + blob.WithRef(r), + blob.WithDesc(d), + blob.WithResp(resp.HTTPResponse()), + ) + return b, nil +} + +// BlobMount attempts to perform a server side copy/mount of the blob between repositories +func (reg *Reg) BlobMount(ctx context.Context, rSrc ref.Ref, rTgt ref.Ref, d descriptor.Descriptor) error { + putURL, _, err := reg.blobMount(ctx, rTgt, d, rSrc) + // if mount fails and returns an upload location, cancel that upload + if err != nil { + _ = reg.blobUploadCancel(ctx, rTgt, putURL) + } + return err +} + +// BlobPut uploads a blob to a repository. +// Descriptor is optional, leave size and digest to zero value if unknown. +// Reader must also be an [io.Seeker] to support chunked upload fallback. +// +// This will attempt an anonymous blob mount first which some registries may support. +// It will then try doing a full put of the blob without chunking (most widely supported). +// If the full put fails, it will fall back to a chunked upload (useful for flaky networks). 
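+//
+// A push sketch, assuming data, ctx, and r are in scope; bytes.NewReader
+// satisfies the io.Seeker requirement for the chunked fallback:
+//
+//	d := descriptor.Descriptor{Digest: digest.FromBytes(data), Size: int64(len(data))}
+//	d, err := reg.BlobPut(ctx, r, d, bytes.NewReader(data))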
+func (reg *Reg) BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) { + var putURL *url.URL + var err error + validDesc := (d.Size > 0 && d.Digest.Validate() == nil) || (d.Size == 0 && d.Digest == zeroDig) + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + + // attempt an anonymous blob mount + if validDesc { + putURL, _, err = reg.blobMount(ctx, r, d, ref.Ref{}) + if err == nil { + return d, nil + } + if err != errs.ErrMountReturnedLocation { + putURL = nil + } + } + // fallback to requesting upload URL + if putURL == nil { + putURL, err = reg.blobGetUploadURL(ctx, r, d) + if err != nil { + return d, err + } + } + // send upload as one-chunk + tryPut := validDesc + if tryPut { + host := reg.hostGet(r.Registry) + maxPut := host.BlobMax + if maxPut == 0 { + maxPut = reg.blobMaxPut + } + if maxPut > 0 && d.Size > maxPut { + tryPut = false + } + } + if tryPut { + err = reg.blobPutUploadFull(ctx, r, d, putURL, rdr) + if err == nil { + return d, nil + } + // on failure, attempt to seek back to start to perform a chunked upload + rdrSeek, ok := rdr.(io.ReadSeeker) + if !ok { + _ = reg.blobUploadCancel(ctx, r, putURL) + return d, err + } + offset, errR := rdrSeek.Seek(0, io.SeekStart) + if errR != nil || offset != 0 { + _ = reg.blobUploadCancel(ctx, r, putURL) + return d, err + } + } + // send a chunked upload if full upload not possible or too large + d, err = reg.blobPutUploadChunked(ctx, r, d, putURL, rdr) + if err != nil { + _ = reg.blobUploadCancel(ctx, r, putURL) + } + return d, err +} + +func (reg *Reg) blobGetUploadURL(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (*url.URL, error) { + q := url.Values{} + if d.DigestAlgo() != digest.Canonical { + // TODO(bmitch): EXPERIMENTAL parameter, registry support and OCI spec change needed + q.Add(paramBlobDigestAlgo, d.DigestAlgo().String()) + } + // request an upload location + req := ®http.Req{ + MetaKind: reqmeta.Blob, + Host: r.Registry, + NoMirrors: true, + Method: "POST", + Repository: r.Repository, + Path: "blobs/uploads/", + Query: q, + TransactLen: d.Size, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to send blob post, ref %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 202 { + return nil, fmt.Errorf("failed to send blob post, ref %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + // if min size header received, check/adjust host settings + minSizeStr := resp.HTTPResponse().Header.Get(blobChunkMinHeader) + if minSizeStr != "" { + minSize, err := strconv.ParseInt(minSizeStr, 10, 64) + if err != nil { + reg.slog.Warn("Failed to parse chunk size header", + slog.String("size", minSizeStr), + slog.String("err", err.Error())) + } else { + host := reg.hostGet(r.Registry) + reg.muHost.Lock() + if (host.BlobChunk > 0 && minSize > host.BlobChunk) || (host.BlobChunk <= 0 && minSize > reg.blobChunkSize) { + host.BlobChunk = min(minSize, reg.blobChunkLimit) + reg.slog.Debug("Registry requested min chunk size", + slog.Int64("size", host.BlobChunk), + slog.String("host", host.Name)) + } + reg.muHost.Unlock() + } + } + // Extract the location into a new putURL based on whether it's relative, fqdn with a scheme, or without a scheme. 
+ location := resp.HTTPResponse().Header.Get("Location")
+ if location == "" {
+ return nil, fmt.Errorf("failed to send blob post, ref %s: %w", r.CommonName(), errs.ErrMissingLocation)
+ }
+ reg.slog.Debug("Upload location received",
+ slog.String("location", location))
+
+ // put url may be relative to the above post URL, so parse in that context
+ postURL := resp.HTTPResponse().Request.URL
+ putURL, err := postURL.Parse(location)
+ if err != nil {
+ reg.slog.Warn("Location url failed to parse",
+ slog.String("location", location),
+ slog.String("err", err.Error()))
+ return nil, fmt.Errorf("blob upload url invalid, ref %s: %w", r.CommonName(), err)
+ }
+ return putURL, nil
+}
+
+func (reg *Reg) blobMount(ctx context.Context, rTgt ref.Ref, d descriptor.Descriptor, rSrc ref.Ref) (*url.URL, string, error) {
+ // build/send request
+ query := url.Values{}
+ query.Set("mount", d.Digest.String())
+ ignoreErr := true // ignore errors from anonymous blob mount attempts
+ if rSrc.Registry == rTgt.Registry && rSrc.Repository != "" {
+ query.Set("from", rSrc.Repository)
+ ignoreErr = false
+ }
+
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Blob,
+ Host: rTgt.Registry,
+ NoMirrors: true,
+ Method: "POST",
+ Repository: rTgt.Repository,
+ Path: "blobs/uploads/",
+ Query: query,
+ IgnoreErr: ignoreErr,
+ TransactLen: d.Size,
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil {
+ return nil, "", fmt.Errorf("failed to mount blob, digest %s, ref %s: %w", d.Digest.String(), rTgt.CommonName(), err)
+ }
+ defer resp.Close()
+
+ // if min size header received, check/adjust host settings
+ minSizeStr := resp.HTTPResponse().Header.Get(blobChunkMinHeader)
+ if minSizeStr != "" {
+ minSize, err := strconv.ParseInt(minSizeStr, 10, 64)
+ if err != nil {
+ reg.slog.Warn("Failed to parse chunk size header",
+ slog.String("size", minSizeStr),
+ slog.String("err", err.Error()))
+ } else {
+ host := reg.hostGet(rTgt.Registry)
+ reg.muHost.Lock()
+ if (host.BlobChunk > 0 && minSize > host.BlobChunk) || (host.BlobChunk <= 0 && minSize > reg.blobChunkSize) {
+ host.BlobChunk = min(minSize, reg.blobChunkLimit)
+ reg.slog.Debug("Registry requested min chunk size",
+ slog.Int64("size", host.BlobChunk),
+ slog.String("host", host.Name))
+ }
+ reg.muHost.Unlock()
+ }
+ }
+ // 201 indicates the blob mount succeeded
+ if resp.HTTPResponse().StatusCode == 201 {
+ return nil, "", nil
+ }
+ // 202 indicates blob mount failed but server ready to receive an upload at location
+ location := resp.HTTPResponse().Header.Get("Location")
+ uuid := resp.HTTPResponse().Header.Get("Docker-Upload-UUID")
+ if resp.HTTPResponse().StatusCode == 202 && location != "" {
+ postURL := resp.HTTPResponse().Request.URL
+ putURL, err := postURL.Parse(location)
+ if err != nil {
+ reg.slog.Warn("Mount location header failed to parse",
+ slog.String("digest", d.Digest.String()),
+ slog.String("target", rTgt.CommonName()),
+ slog.String("location", location),
+ slog.String("err", err.Error()))
+ } else {
+ return putURL, uuid, errs.ErrMountReturnedLocation
+ }
+ }
+ // all other responses unhandled
+ return nil, "", fmt.Errorf("failed to mount blob, digest %s, ref %s: %w", d.Digest.String(), rTgt.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+}
+
+func (reg *Reg) blobPutUploadFull(ctx context.Context, r ref.Ref, d descriptor.Descriptor, putURL *url.URL, rdr io.Reader) error {
+ // append digest to request to use the monolithic upload option
+ if putURL.RawQuery != "" {
+ putURL.RawQuery = putURL.RawQuery + "&digest=" + 
url.QueryEscape(d.Digest.String())
+ } else {
+ putURL.RawQuery = "digest=" + url.QueryEscape(d.Digest.String())
+ }
+
+ // make a reader function for the blob
+ readOnce := false
+ bodyFunc := func() (io.ReadCloser, error) {
+ // handle attempt to reuse blob reader (e.g. on a connection retry or fallback)
+ if readOnce {
+ rdrSeek, ok := rdr.(io.ReadSeeker)
+ if !ok {
+ return nil, fmt.Errorf("blob source is not a seeker%.0w", errs.ErrNotRetryable)
+ }
+ _, err := rdrSeek.Seek(0, io.SeekStart)
+ if err != nil {
+ return nil, fmt.Errorf("seek on blob source failed: %w%.0w", err, errs.ErrNotRetryable)
+ }
+ }
+ readOnce = true
+ return io.NopCloser(rdr), nil
+ }
+ // special case for the empty blob
+ if d.Size == 0 && d.Digest == zeroDig {
+ bodyFunc = nil
+ }
+
+ // build/send request
+ header := http.Header{
+ "Content-Type": {"application/octet-stream"},
+ }
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Blob,
+ Host: r.Registry,
+ Method: "PUT",
+ Repository: r.Repository,
+ DirectURL: putURL,
+ BodyFunc: bodyFunc,
+ BodyLen: d.Size,
+ Headers: header,
+ NoMirrors: true,
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil {
+ return fmt.Errorf("failed to send blob (put), digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), err)
+ }
+ defer resp.Close()
+ // 201 follows distribution-spec, 204 is listed as possible in the Docker registry spec
+ if resp.HTTPResponse().StatusCode != 201 && resp.HTTPResponse().StatusCode != 204 {
+ return fmt.Errorf("failed to send blob (put), digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+ }
+ return nil
+}
+
+func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, d descriptor.Descriptor, putURL *url.URL, rdr io.Reader) (descriptor.Descriptor, error) {
+ host := reg.hostGet(r.Registry)
+ bufSize := host.BlobChunk
+ if bufSize <= 0 {
+ bufSize = reg.blobChunkSize
+ }
+ bufBytes := make([]byte, 0, bufSize)
+ bufRdr := bytes.NewReader(bufBytes)
+ bufStart := int64(0)
+ bufChange := false
+
+ // setup buffer and digest pipe
+ digester := d.DigestAlgo().Digester()
+ digestRdr := io.TeeReader(rdr, digester.Hash())
+ finalChunk := false
+ chunkStart := int64(0)
+ chunkSize := 0
+ bodyFunc := func() (io.ReadCloser, error) {
+ // reset to the start on every new read
+ _, err := bufRdr.Seek(0, io.SeekStart)
+ if err != nil {
+ return nil, err
+ }
+ return io.NopCloser(bufRdr), nil
+ }
+ chunkURL := *putURL
+ retryLimit := 10 // TODO: pull limit from reghttp
+ retryCur := 0
+ var err error
+
+ for !finalChunk || chunkStart < bufStart+int64(len(bufBytes)) {
+ bufChange = false
+ for chunkStart >= bufStart+int64(len(bufBytes)) && !finalChunk {
+ bufStart += int64(len(bufBytes))
+ // reset length if previous read was short
+ if cap(bufBytes) != len(bufBytes) {
+ bufBytes = bufBytes[:cap(bufBytes)]
+ bufChange = true
+ }
+ // read a chunk into an input buffer, computing the digest
+ chunkSize, err = io.ReadFull(digestRdr, bufBytes)
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ finalChunk = true
+ } else if err != nil {
+ return d, fmt.Errorf("failed to send blob chunk, ref %s: %w", r.CommonName(), err)
+ }
+ // update length on partial read
+ if chunkSize != len(bufBytes) {
+ bufBytes = bufBytes[:chunkSize]
+ bufChange = true
+ }
+ }
+ if chunkStart > bufStart && chunkStart < bufStart+int64(len(bufBytes)) {
+ // next chunk is inside the existing buf
+ bufBytes = bufBytes[chunkStart-bufStart:]
+ bufStart = chunkStart
+ chunkSize = len(bufBytes)
+ bufChange = true
+ }
+ if chunkSize 
> 0 && chunkStart != bufStart {
+ return d, fmt.Errorf("chunkStart (%d) != bufStart (%d)", chunkStart, bufStart)
+ }
+ if bufChange {
+ // need to recreate the reader on a change to the slice length,
+ // old reader is looking at the old slice metadata
+ bufRdr = bytes.NewReader(bufBytes)
+ }
+
+ if chunkSize > 0 {
+ // write chunk
+ header := http.Header{
+ "Content-Type": {"application/octet-stream"},
+ "Content-Range": {fmt.Sprintf("%d-%d", chunkStart, chunkStart+int64(chunkSize)-1)},
+ }
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Blob,
+ Host: r.Registry,
+ Method: "PATCH",
+ Repository: r.Repository,
+ DirectURL: &chunkURL,
+ BodyFunc: bodyFunc,
+ BodyLen: int64(chunkSize),
+ Headers: header,
+ NoMirrors: true,
+ TransactLen: d.Size - int64(chunkSize),
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil && !errors.Is(err, errs.ErrHTTPStatus) && !errors.Is(err, errs.ErrNotFound) {
+ return d, fmt.Errorf("failed to send blob (chunk), ref %s: http do: %w", r.CommonName(), err)
+ }
+ err = resp.Close()
+ if err != nil {
+ return d, fmt.Errorf("failed to close request: %w", err)
+ }
+ httpResp := resp.HTTPResponse()
+ // distribution-spec is 202, AWS ECR returns a 201 and rejects the put
+ if resp.HTTPResponse().StatusCode == 201 {
+ reg.slog.Debug("Early accept of chunk in PATCH before PUT request",
+ slog.String("ref", r.CommonName()),
+ slog.Int64("chunkStart", chunkStart),
+ slog.Int("chunkSize", chunkSize))
+ } else if resp.HTTPResponse().StatusCode >= 400 && resp.HTTPResponse().StatusCode < 500 &&
+ resp.HTTPResponse().Header.Get("Location") != "" &&
+ resp.HTTPResponse().Header.Get("Range") != "" {
+ retryCur++
+ reg.slog.Debug("Recoverable chunk upload error",
+ slog.String("ref", r.CommonName()),
+ slog.Int64("chunkStart", chunkStart),
+ slog.Int("chunkSize", chunkSize),
+ slog.String("range", resp.HTTPResponse().Header.Get("Range")))
+ } else if resp.HTTPResponse().StatusCode != 202 {
+ retryCur++
+ statusResp, statusErr := reg.blobUploadStatus(ctx, r, &chunkURL)
+ if retryCur > retryLimit || statusErr != nil {
+ return d, fmt.Errorf("failed to send blob (chunk), ref %s: http status: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+ }
+ httpResp = statusResp
+ } else {
+ // successful request
+ if retryCur > 0 {
+ retryCur--
+ }
+ }
+ rangeEnd, err := blobUploadCurBytes(httpResp)
+ if err == nil {
+ chunkStart = rangeEnd + 1
+ } else {
+ chunkStart += int64(chunkSize)
+ }
+ location := httpResp.Header.Get("Location")
+ if location != "" {
+ reg.slog.Debug("Next chunk upload location received",
+ slog.String("location", location))
+ prevURL := httpResp.Request.URL
+ parseURL, err := prevURL.Parse(location)
+ if err != nil {
+ return d, fmt.Errorf("failed to send blob (parse next chunk location), ref %s: %w", r.CommonName(), err)
+ }
+ chunkURL = *parseURL
+ }
+ }
+ }
+
+ // compute digest
+ dOut := digester.Digest()
+ if d.Digest.Validate() == nil && dOut != d.Digest {
+ return d, fmt.Errorf("%w, expected %s, computed %s", errs.ErrDigestMismatch, d.Digest.String(), dOut.String())
+ }
+ if d.Size != 0 && chunkStart != d.Size {
+ return d, fmt.Errorf("blob content size does not match descriptor, expected %d, received %d%.0w", d.Size, chunkStart, errs.ErrMismatch)
+ }
+ d.Digest = dOut
+ d.Size = chunkStart
+
+ // send the final put
+ // append digest to request to use the monolithic upload option
+ if chunkURL.RawQuery != "" {
+ chunkURL.RawQuery = chunkURL.RawQuery + "&digest=" + url.QueryEscape(dOut.String())
+ } else {
+ chunkURL.RawQuery = "digest=" 
+ url.QueryEscape(dOut.String())
+ }
+
+ header := http.Header{
+ "Content-Type": {"application/octet-stream"},
+ }
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Query,
+ Host: r.Registry,
+ Method: "PUT",
+ Repository: r.Repository,
+ DirectURL: &chunkURL,
+ BodyLen: int64(0),
+ Headers: header,
+ NoMirrors: true,
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil {
+ return d, fmt.Errorf("failed to send blob (chunk digest), digest %s, ref %s: %w", dOut, r.CommonName(), err)
+ }
+ defer resp.Close()
+ // 201 follows distribution-spec, 204 is listed as possible in the Docker registry spec
+ if resp.HTTPResponse().StatusCode != 201 && resp.HTTPResponse().StatusCode != 204 {
+ return d, fmt.Errorf("failed to send blob (chunk digest), digest %s, ref %s: %w", dOut, r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+ }
+
+ return d, nil
+}
+
+// blobUploadCancel stops an upload, releasing resources on the server.
+func (reg *Reg) blobUploadCancel(ctx context.Context, r ref.Ref, putURL *url.URL) error {
+ if putURL == nil {
+ return fmt.Errorf("failed to cancel upload %s: url undefined", r.CommonName())
+ }
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Query,
+ Host: r.Registry,
+ NoMirrors: true,
+ Method: "DELETE",
+ Repository: r.Repository,
+ DirectURL: putURL,
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil {
+ return fmt.Errorf("failed to cancel upload %s: %w", r.CommonName(), err)
+ }
+ defer resp.Close()
+ if resp.HTTPResponse().StatusCode != 202 {
+ return fmt.Errorf("failed to cancel upload %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+ }
+ return nil
+}
+
+// blobUploadStatus provides a response with headers indicating the progress of an upload
+func (reg *Reg) blobUploadStatus(ctx context.Context, r ref.Ref, putURL *url.URL) (*http.Response, error) {
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Query,
+ Host: r.Registry,
+ Method: "GET",
+ Repository: r.Repository,
+ DirectURL: putURL,
+ NoMirrors: true,
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get upload status: %w", err)
+ }
+ defer resp.Close()
+ if resp.HTTPResponse().StatusCode != 204 {
+ return resp.HTTPResponse(), fmt.Errorf("failed to get upload status: %w", reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+ }
+ return resp.HTTPResponse(), nil
+}
+
+func blobUploadCurBytes(resp *http.Response) (int64, error) {
+ if resp == nil {
+ return 0, fmt.Errorf("missing response")
+ }
+ r := resp.Header.Get("Range")
+ if r == "" {
+ return 0, fmt.Errorf("missing range header")
+ }
+ rSplit := strings.SplitN(r, "-", 2)
+ if len(rSplit) < 2 {
+ return 0, fmt.Errorf("missing offset in range header")
+ }
+ return strconv.ParseInt(rSplit[1], 10, 64)
+}
diff --git a/vendor/github.com/regclient/regclient/scheme/reg/manifest.go b/vendor/github.com/regclient/regclient/scheme/reg/manifest.go
new file mode 100644
index 000000000..c9789da7f
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/scheme/reg/manifest.go
@@ -0,0 +1,295 @@
+package reg
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/opencontainers/go-digest"
+
+ "github.com/regclient/regclient/internal/limitread"
+ "github.com/regclient/regclient/internal/reghttp"
+ "github.com/regclient/regclient/internal/reqmeta"
+ "github.com/regclient/regclient/scheme"
+ "github.com/regclient/regclient/types/errs"
+ "github.com/regclient/regclient/types/manifest"
+ "github.com/regclient/regclient/types/mediatype"
+ 
"github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/warning" +) + +// ManifestDelete removes a manifest by reference (digest) from a registry. +// This will implicitly delete all tags pointing to that manifest. +func (reg *Reg) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.ManifestOpts) error { + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + if r.Digest == "" { + return fmt.Errorf("digest required to delete manifest, reference %s%.0w", r.CommonName(), errs.ErrMissingDigest) + } + + mc := scheme.ManifestConfig{} + for _, opt := range opts { + opt(&mc) + } + + if mc.CheckReferrers && mc.Manifest == nil { + m, err := reg.ManifestGet(ctx, r) + if err != nil { + return fmt.Errorf("failed to pull manifest for refers: %w", err) + } + mc.Manifest = m + } + if mc.Manifest != nil { + if mr, ok := mc.Manifest.(manifest.Subjecter); ok { + sDesc, err := mr.GetSubject() + if err == nil && sDesc != nil && sDesc.Digest != "" { + // attempt to delete the referrer, but ignore if the referrer entry wasn't found + err = reg.referrerDelete(ctx, r, mc.Manifest) + if err != nil && !errors.Is(err, errs.ErrNotFound) { + return err + } + } + } + } + rCache := r.SetDigest(r.Digest) + reg.cacheMan.Delete(rCache) + + // build/send request + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + NoMirrors: true, + Method: "DELETE", + Repository: r.Repository, + Path: "manifests/" + r.Digest, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return fmt.Errorf("failed to delete manifest %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 202 { + return fmt.Errorf("failed to delete manifest %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + return nil +} + +// ManifestGet retrieves a manifest from the registry +func (reg *Reg) ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, error) { + var tagOrDigest string + if r.Digest != "" { + rCache := r.SetDigest(r.Digest) + if m, err := reg.cacheMan.Get(rCache); err == nil { + return m, nil + } + tagOrDigest = r.Digest + } else if r.Tag != "" { + tagOrDigest = r.Tag + } else { + return nil, fmt.Errorf("reference missing tag and digest: %s%.0w", r.CommonName(), errs.ErrMissingTagOrDigest) + } + + // build/send request + headers := http.Header{ + "Accept": []string{ + mediatype.OCI1ManifestList, + mediatype.OCI1Manifest, + mediatype.Docker2ManifestList, + mediatype.Docker2Manifest, + mediatype.Docker1ManifestSigned, + mediatype.Docker1Manifest, + mediatype.OCI1Artifact, + }, + } + req := ®http.Req{ + MetaKind: reqmeta.Manifest, + Host: r.Registry, + Method: "GET", + Repository: r.Repository, + Path: "manifests/" + tagOrDigest, + Headers: headers, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to get manifest %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to get manifest %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + // limit length + size, _ := strconv.Atoi(resp.HTTPResponse().Header.Get("Content-Length")) + if size > 0 && reg.manifestMaxPull > 0 && int64(size) > reg.manifestMaxPull { + return nil, fmt.Errorf("manifest too large, received %d, limit %d: %s%.0w", size, reg.manifestMaxPull, r.CommonName(), errs.ErrSizeLimitExceeded) + } + rdr := 
&limitread.LimitRead{
+ Reader: resp,
+ Limit: reg.manifestMaxPull,
+ }
+
+ // read manifest
+ rawBody, err := io.ReadAll(rdr)
+ if err != nil {
+ return nil, fmt.Errorf("error reading manifest for %s: %w", r.CommonName(), err)
+ }
+
+ m, err := manifest.New(
+ manifest.WithRef(r),
+ manifest.WithHeader(resp.HTTPResponse().Header),
+ manifest.WithRaw(rawBody),
+ )
+ if err != nil {
+ return nil, err
+ }
+ rCache := r.SetDigest(m.GetDescriptor().Digest.String())
+ reg.cacheMan.Set(rCache, m)
+ return m, nil
+}
+
+// ManifestHead returns metadata on the manifest from the registry
+func (reg *Reg) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest, error) {
+ // build the request
+ var tagOrDigest string
+ if r.Digest != "" {
+ rCache := r.SetDigest(r.Digest)
+ if m, err := reg.cacheMan.Get(rCache); err == nil {
+ return m, nil
+ }
+ tagOrDigest = r.Digest
+ } else if r.Tag != "" {
+ tagOrDigest = r.Tag
+ } else {
+ return nil, fmt.Errorf("reference missing tag and digest: %s%.0w", r.CommonName(), errs.ErrMissingTagOrDigest)
+ }
+
+ // build/send request
+ headers := http.Header{
+ "Accept": []string{
+ mediatype.OCI1ManifestList,
+ mediatype.OCI1Manifest,
+ mediatype.Docker2ManifestList,
+ mediatype.Docker2Manifest,
+ mediatype.Docker1ManifestSigned,
+ mediatype.Docker1Manifest,
+ mediatype.OCI1Artifact,
+ },
+ }
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Head,
+ Host: r.Registry,
+ Method: "HEAD",
+ Repository: r.Repository,
+ Path: "manifests/" + tagOrDigest,
+ Headers: headers,
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to request manifest head %s: %w", r.CommonName(), err)
+ }
+ defer resp.Close()
+ if resp.HTTPResponse().StatusCode != 200 {
+ return nil, fmt.Errorf("failed to request manifest head %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+ }
+
+ return manifest.New(
+ manifest.WithRef(r),
+ manifest.WithHeader(resp.HTTPResponse().Header),
+ )
+}
+
+// ManifestPut uploads a manifest to a registry
+func (reg *Reg) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...scheme.ManifestOpts) error {
+ var tagOrDigest string
+ if r.Digest != "" {
+ tagOrDigest = r.Digest
+ } else if r.Tag != "" {
+ tagOrDigest = r.Tag
+ } else {
+ reg.slog.Warn("Manifest put requires a tag",
+ slog.String("ref", r.Reference))
+ return errs.ErrMissingTag
+ }
+ // dedup warnings
+ if w := warning.FromContext(ctx); w == nil {
+ ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()})
+ }
+
+ // create the request body
+ mj, err := m.MarshalJSON()
+ if err != nil {
+ reg.slog.Warn("Error marshaling manifest",
+ slog.String("ref", r.Reference),
+ slog.String("err", err.Error()))
+ return fmt.Errorf("error marshalling manifest for %s: %w", r.CommonName(), err)
+ }
+
+ // limit length
+ if reg.manifestMaxPush > 0 && int64(len(mj)) > reg.manifestMaxPush {
+ return fmt.Errorf("manifest too large, calculated %d, limit %d: %s%.0w", len(mj), reg.manifestMaxPush, r.CommonName(), errs.ErrSizeLimitExceeded)
+ }
+
+ // build/send request
+ headers := http.Header{
+ "Content-Type": []string{manifest.GetMediaType(m)},
+ }
+ q := url.Values{}
+ if tagOrDigest == r.Tag && m.GetDescriptor().Digest.Algorithm() != digest.Canonical {
+ // TODO(bmitch): EXPERIMENTAL parameter, registry support and OCI spec change needed
+ q.Add(paramManifestDigest, m.GetDescriptor().Digest.String())
+ }
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Manifest,
+ Host: r.Registry,
+ NoMirrors: true,
+ Method: "PUT",
+ 
Repository: r.Repository,
+ Path: "manifests/" + tagOrDigest,
+ Query: q,
+ Headers: headers,
+ BodyLen: int64(len(mj)),
+ BodyBytes: mj,
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil {
+ return fmt.Errorf("failed to put manifest %s: %w", r.CommonName(), err)
+ }
+ err = resp.Close()
+ if err != nil {
+ return fmt.Errorf("failed to close request: %w", err)
+ }
+ if resp.HTTPResponse().StatusCode != 201 {
+ return fmt.Errorf("failed to put manifest %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+ }
+
+ rCache := r.SetDigest(m.GetDescriptor().Digest.String())
+ reg.cacheMan.Set(rCache, m)
+
+ // update referrers if defined on this manifest
+ if mr, ok := m.(manifest.Subjecter); ok {
+ mDesc, err := mr.GetSubject()
+ if err != nil {
+ return err
+ }
+ if mDesc != nil && mDesc.Digest.String() != "" {
+ rSubj := r.SetDigest(mDesc.Digest.String())
+ reg.cacheRL.Delete(rSubj)
+ if mDesc.Digest.String() != resp.HTTPResponse().Header.Get(OCISubjectHeader) {
+ err = reg.referrerPut(ctx, r, m)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/regclient/regclient/scheme/reg/ping.go b/vendor/github.com/regclient/regclient/scheme/reg/ping.go
new file mode 100644
index 000000000..e4fc0f022
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/scheme/reg/ping.go
@@ -0,0 +1,39 @@
+package reg
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/regclient/regclient/internal/reghttp"
+ "github.com/regclient/regclient/internal/reqmeta"
+ "github.com/regclient/regclient/types/ping"
+ "github.com/regclient/regclient/types/ref"
+)
+
+// Ping queries the /v2/ API of the registry to verify connectivity and access.
+func (reg *Reg) Ping(ctx context.Context, r ref.Ref) (ping.Result, error) {
+ ret := ping.Result{}
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Query,
+ Host: r.Registry,
+ NoMirrors: true,
+ Method: "GET",
+ Path: "",
+ }
+
+ resp, err := reg.reghttp.Do(ctx, req)
+ if resp != nil && resp.HTTPResponse() != nil {
+ ret.Header = resp.HTTPResponse().Header
+ }
+ if err != nil {
+ return ret, fmt.Errorf("failed to ping registry %s: %w", r.Registry, err)
+ }
+ defer resp.Close()
+
+ if resp.HTTPResponse().StatusCode != 200 {
+ return ret, fmt.Errorf("failed to ping registry %s: %w",
+ r.Registry, reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+ }
+
+ return ret, nil
+}
diff --git a/vendor/github.com/regclient/regclient/scheme/reg/referrer.go b/vendor/github.com/regclient/regclient/scheme/reg/referrer.go
new file mode 100644
index 000000000..2e9757cb4
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/scheme/reg/referrer.go
@@ -0,0 +1,366 @@
+package reg
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+
+ "github.com/regclient/regclient/internal/httplink"
+ "github.com/regclient/regclient/internal/reghttp"
+ "github.com/regclient/regclient/internal/reqmeta"
+ "github.com/regclient/regclient/scheme"
+ "github.com/regclient/regclient/types/errs"
+ "github.com/regclient/regclient/types/manifest"
+ "github.com/regclient/regclient/types/mediatype"
+ v1 "github.com/regclient/regclient/types/oci/v1"
+ "github.com/regclient/regclient/types/ref"
+ "github.com/regclient/regclient/types/referrer"
+ "github.com/regclient/regclient/types/warning"
+)
+
+const OCISubjectHeader = "OCI-Subject"
+
+// ReferrerList returns a list of referrers to a given reference.
+// The reference must include the digest. Use [regclient.ReferrerList] to resolve the platform or tag.
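+//
+// A minimal illustrative sketch (hypothetical names, not upstream code),
+// assuming rSubject already carries a digest:
+//
+//	rl, err := reg.ReferrerList(ctx, rSubject)
+//	if err == nil {
+//		for _, desc := range rl.Descriptors {
+//			fmt.Println(desc.ArtifactType, desc.Digest)
+//		}
+//	}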
+func (reg *Reg) ReferrerList(ctx context.Context, rSubject ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { + config := scheme.ReferrerConfig{} + for _, opt := range opts { + opt(&config) + } + var r ref.Ref + if config.SrcRepo.IsSet() { + r = config.SrcRepo.SetDigest(rSubject.Digest) + } else { + r = rSubject.SetDigest(rSubject.Digest) + } + rl := referrer.ReferrerList{ + Tags: []string{}, + } + if rSubject.Digest == "" { + return rl, fmt.Errorf("digest required to query referrers %s", rSubject.CommonName()) + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + + found := false + // try cache + rl, err := reg.cacheRL.Get(r) + if err == nil { + found = true + } + // try referrers API + if !found { + referrerEnabled, ok := reg.featureGet("referrer", r.Registry, r.Repository) + if !ok || referrerEnabled { + // attempt to call the referrer API + rl, err = reg.referrerListByAPI(ctx, r, config) + if !ok { + // save the referrer API state + reg.featureSet("referrer", r.Registry, r.Repository, err == nil) + } + if err == nil { + if config.MatchOpt.ArtifactType == "" { + // only cache if successful and artifactType is not filtered + reg.cacheRL.Set(r, rl) + } + found = true + } + } + } + // fall back to tag + if !found { + rl, err = reg.referrerListByTag(ctx, r) + if err == nil { + reg.cacheRL.Set(r, rl) + } + } + rl.Subject = rSubject + if config.SrcRepo.IsSet() { + rl.Source = config.SrcRepo + } + if err != nil { + return rl, err + } + + // apply client side filters and return result + rl = scheme.ReferrerFilter(config, rl) + return rl, nil +} + +func (reg *Reg) referrerListByAPI(ctx context.Context, r ref.Ref, config scheme.ReferrerConfig) (referrer.ReferrerList, error) { + rl := referrer.ReferrerList{ + Subject: r, + Tags: []string{}, + } + var link *url.URL + // loop for paging + for { + rlAdd, linkNext, err := reg.referrerListByAPIPage(ctx, r, config, link) + if err != nil { + return rl, err + } + if rl.Manifest == nil { + rl = rlAdd + } else { + rl.Descriptors = append(rl.Descriptors, rlAdd.Descriptors...) 
+ }
+ if linkNext == nil {
+ break
+ }
+ link = linkNext
+ }
+ return rl, nil
+}
+
+func (reg *Reg) referrerListByAPIPage(ctx context.Context, r ref.Ref, config scheme.ReferrerConfig, link *url.URL) (referrer.ReferrerList, *url.URL, error) {
+ rl := referrer.ReferrerList{
+ Subject: r,
+ Tags: []string{},
+ }
+ query := url.Values{}
+ if config.MatchOpt.ArtifactType != "" {
+ query.Set("artifactType", config.MatchOpt.ArtifactType)
+ }
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Query,
+ Host: r.Registry,
+ Method: "GET",
+ Repository: r.Repository,
+ }
+ if link == nil {
+ req.Path = "referrers/" + r.Digest
+ req.Query = query
+ req.IgnoreErr = true
+ }
+ if link != nil {
+ req.DirectURL = link
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil {
+ return rl, nil, fmt.Errorf("failed to get referrers %s: %w", r.CommonName(), err)
+ }
+ defer resp.Close()
+ if resp.HTTPResponse().StatusCode != 200 {
+ return rl, nil, fmt.Errorf("failed to get referrers %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+ }
+
+ // read manifest
+ rawBody, err := io.ReadAll(resp)
+ if err != nil {
+ return rl, nil, fmt.Errorf("error reading referrers for %s: %w", r.CommonName(), err)
+ }
+
+ m, err := manifest.New(
+ manifest.WithRef(r.SetDigest("")),
+ manifest.WithHeader(resp.HTTPResponse().Header),
+ manifest.WithRaw(rawBody),
+ )
+ if err != nil {
+ return rl, nil, err
+ }
+ ociML, ok := m.GetOrig().(v1.Index)
+ if !ok {
+ return rl, nil, fmt.Errorf("unexpected manifest type for referrers: %s, %w", m.GetDescriptor().MediaType, errs.ErrUnsupportedMediaType)
+ }
+ rl.Manifest = m
+ rl.Descriptors = ociML.Manifests
+ rl.Annotations = ociML.Annotations
+
+ // lookup next link
+ respHead := resp.HTTPResponse().Header
+ links, err := httplink.Parse((respHead.Values("Link")))
+ if err != nil {
+ return rl, nil, err
+ }
+ next, err := links.Get("rel", "next")
+ if err != nil {
+ // no next link
+ link = nil
+ } else {
+ link = resp.HTTPResponse().Request.URL
+ if link == nil {
+ return rl, nil, fmt.Errorf("referrers list failed to get URL of previous request")
+ }
+ link, err = link.Parse(next.URI)
+ if err != nil {
+ return rl, nil, fmt.Errorf("referrers list failed to parse Link: %w", err)
+ }
+ }
+
+ return rl, link, nil
+}
+
+func (reg *Reg) referrerListByTag(ctx context.Context, r ref.Ref) (referrer.ReferrerList, error) {
+ rl := referrer.ReferrerList{
+ Subject: r,
+ Tags: []string{},
+ }
+ rlTag, err := referrer.FallbackTag(r)
+ if err != nil {
+ return rl, err
+ }
+ m, err := reg.ManifestGet(ctx, rlTag)
+ if err != nil {
+ if errors.Is(err, errs.ErrNotFound) {
+ // empty list, initialize a new manifest
+ rl.Manifest, err = manifest.New(manifest.WithOrig(v1.Index{
+ Versioned: v1.IndexSchemaVersion,
+ MediaType: mediatype.OCI1ManifestList,
+ }))
+ if err != nil {
+ return rl, err
+ }
+ return rl, nil
+ }
+ return rl, err
+ }
+ ociML, ok := m.GetOrig().(v1.Index)
+ if !ok {
+ return rl, fmt.Errorf("manifest is not an OCI index: %s", rlTag.CommonName())
+ }
+ // return resulting index
+ rl.Manifest = m
+ rl.Descriptors = ociML.Manifests
+ rl.Annotations = ociML.Annotations
+ rl.Tags = append(rl.Tags, rlTag.Tag)
+ return rl, nil
+}
+
+// referrerDelete deletes a referrer associated with a manifest
+func (reg *Reg) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manifest) error {
+ // dedup warnings
+ if w := warning.FromContext(ctx); w == nil {
+ ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()})
+ }
+ // get subject field
+ mSubject, ok := 
m.(manifest.Subjecter)
+ if !ok {
+ return fmt.Errorf("manifest does not support the subject field: %w", errs.ErrUnsupportedMediaType)
+ }
+ subject, err := mSubject.GetSubject()
+ if err != nil {
+ return err
+ }
+ // validate/set subject descriptor
+ if subject == nil || subject.Digest == "" {
+ return fmt.Errorf("refers is not set%.0w", errs.ErrNotFound)
+ }
+
+ // remove from cache
+ rSubject := r.SetDigest(subject.Digest.String())
+ reg.cacheRL.Delete(rSubject)
+
+ // if referrer API is available, nothing to do, return
+ if reg.referrerPing(ctx, rSubject) {
+ return nil
+ }
+
+ // fallback to using tag schema for refers
+ rl, err := reg.referrerListByTag(ctx, rSubject)
+ if err != nil {
+ return err
+ }
+ err = rl.Delete(m)
+ if err != nil {
+ return err
+ }
+ // push updated referrer list by tag
+ rlTag, err := referrer.FallbackTag(rSubject)
+ if err != nil {
+ return err
+ }
+ if rl.IsEmpty() {
+ err = reg.TagDelete(ctx, rlTag)
+ if err == nil {
+ return nil
+ }
+ // if delete is not supported, fall back to pushing empty list
+ }
+ return reg.ManifestPut(ctx, rlTag, rl.Manifest)
+}
+
+// referrerPut pushes a new referrer associated with a manifest
+func (reg *Reg) referrerPut(ctx context.Context, r ref.Ref, m manifest.Manifest) error {
+ // dedup warnings
+ if w := warning.FromContext(ctx); w == nil {
+ ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()})
+ }
+ // get subject field
+ mSubject, ok := m.(manifest.Subjecter)
+ if !ok {
+ return fmt.Errorf("manifest does not support the subject field: %w", errs.ErrUnsupportedMediaType)
+ }
+ subject, err := mSubject.GetSubject()
+ if err != nil {
+ return err
+ }
+ // validate/set subject descriptor
+ if subject == nil || subject.Digest == "" {
+ return fmt.Errorf("subject is not set%.0w", errs.ErrNotFound)
+ }
+
+ // lock to avoid internal race conditions between pulling and pushing tag
+ reg.muRefTag.Lock()
+ defer reg.muRefTag.Unlock()
+ // fallback to using tag schema for refers
+ rSubject := r.SetDigest(subject.Digest.String())
+ rl, err := reg.referrerListByTag(ctx, rSubject)
+ if err != nil {
+ return err
+ }
+ err = rl.Add(m)
+ if err != nil {
+ return err
+ }
+ // ensure the referrer list does not have a subject itself (avoiding circular locks)
+ if ms, ok := rl.Manifest.(manifest.Subjecter); ok {
+ mDesc, err := ms.GetSubject()
+ if err != nil {
+ return err
+ }
+ if mDesc != nil && mDesc.Digest != "" {
+ return fmt.Errorf("fallback referrers manifest should not have a subject: %s", rSubject.CommonName())
+ }
+ }
+ // push updated referrer list by tag
+ rlTag, err := referrer.FallbackTag(rSubject)
+ if err != nil {
+ return err
+ }
+ if len(rl.Tags) == 0 {
+ rl.Tags = []string{rlTag.Tag}
+ }
+ err = reg.ManifestPut(ctx, rlTag, rl.Manifest)
+ if err == nil {
+ reg.cacheRL.Set(rSubject, rl)
+ }
+ return err
+}
+
+// referrerPing verifies the registry supports the referrers API
+func (reg *Reg) referrerPing(ctx context.Context, r ref.Ref) bool {
+ referrerEnabled, ok := reg.featureGet("referrer", r.Registry, r.Repository)
+ if ok {
+ return referrerEnabled
+ }
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Query,
+ Host: r.Registry,
+ Method: "GET",
+ Repository: r.Repository,
+ Path: "referrers/" + r.Digest,
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil {
+ reg.featureSet("referrer", r.Registry, r.Repository, false)
+ return false
+ }
+ _ = resp.Close()
+ result := resp.HTTPResponse().StatusCode == 200
+ reg.featureSet("referrer", r.Registry, r.Repository, result)
+ return result
+}
diff --git 
a/vendor/github.com/regclient/regclient/scheme/reg/reg.go b/vendor/github.com/regclient/regclient/scheme/reg/reg.go new file mode 100644 index 000000000..5f2a27e9c --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/reg.go @@ -0,0 +1,272 @@ +// Package reg implements the OCI registry scheme used by most images (host:port/repo:tag) +package reg + +import ( + "log/slog" + "net/http" + "sync" + "time" + + "github.com/regclient/regclient/config" + "github.com/regclient/regclient/internal/cache" + "github.com/regclient/regclient/internal/pqueue" + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/referrer" +) + +const ( + // blobChunkMinHeader is returned by registries requesting a minimum chunk size + blobChunkMinHeader = "OCI-Chunk-Min-Length" + // defaultBlobChunk 1M chunks, this is allocated in a memory buffer + defaultBlobChunk = 1024 * 1024 + // defaultBlobChunkLimit 1G chunks, prevents a memory exhaustion attack + defaultBlobChunkLimit = 1024 * 1024 * 1024 + // defaultBlobMax is disabled to support registries without chunked upload support + defaultBlobMax = -1 + // defaultManifestMaxPull limits the largest manifest that will be pulled + defaultManifestMaxPull = 1024 * 1024 * 8 + // defaultManifestMaxPush limits the largest manifest that will be pushed + defaultManifestMaxPush = 1024 * 1024 * 4 + // paramBlobDigestAlgo specifies the query parameter to request a specific digest algorithm. + // TODO(bmitch): EXPERIMENTAL field, registry support and OCI spec update needed + paramBlobDigestAlgo = "digest-algorithm" + // paramManifestDigest specifies the query parameter to specify the digest of a manifest pushed by tag. + // TODO(bmitch): EXPERIMENTAL field, registry support and OCI spec update needed + paramManifestDigest = "digest" +) + +// Reg is used for interacting with remote registry servers +type Reg struct { + reghttp *reghttp.Client + reghttpOpts []reghttp.Opts + slog *slog.Logger + hosts map[string]*config.Host + hostDefault *config.Host + features map[featureKey]*featureVal + blobChunkSize int64 + blobChunkLimit int64 + blobMaxPut int64 + manifestMaxPull int64 + manifestMaxPush int64 + cacheMan *cache.Cache[ref.Ref, manifest.Manifest] + cacheRL *cache.Cache[ref.Ref, referrer.ReferrerList] + muHost sync.Mutex + muRefTag sync.Mutex +} + +type featureKey struct { + kind string + reg string + repo string +} +type featureVal struct { + enabled bool + expire time.Time +} + +var featureExpire = time.Minute * time.Duration(5) + +// Opts provides options to access registries +type Opts func(*Reg) + +// New returns a Reg pointer with any provided options +func New(opts ...Opts) *Reg { + r := Reg{ + reghttpOpts: []reghttp.Opts{}, + blobChunkSize: defaultBlobChunk, + blobChunkLimit: defaultBlobChunkLimit, + blobMaxPut: defaultBlobMax, + manifestMaxPull: defaultManifestMaxPull, + manifestMaxPush: defaultManifestMaxPush, + hosts: map[string]*config.Host{}, + features: map[featureKey]*featureVal{}, + } + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithConfigHostFn(r.hostGet)) + for _, opt := range opts { + opt(&r) + } + r.reghttp = reghttp.NewClient(r.reghttpOpts...) 
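+ // at this point every option has run, so the client above was built with
+ // the final set of reghttpOpts (logger, transport, certs, delays, ...)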
+ return &r +} + +// Throttle is used to limit concurrency +func (reg *Reg) Throttle(r ref.Ref, put bool) []*pqueue.Queue[reqmeta.Data] { + tList := []*pqueue.Queue[reqmeta.Data]{} + host := reg.hostGet(r.Registry) + t := reg.reghttp.GetThrottle(r.Registry) + if t != nil { + tList = append(tList, t) + } + if !put { + for _, mirror := range host.Mirrors { + t := reg.reghttp.GetThrottle(mirror) + if t != nil { + tList = append(tList, t) + } + } + } + return tList +} + +func (reg *Reg) hostGet(hostname string) *config.Host { + reg.muHost.Lock() + defer reg.muHost.Unlock() + if _, ok := reg.hosts[hostname]; !ok { + newHost := config.HostNewDefName(reg.hostDefault, hostname) + // check for normalized hostname + if newHost.Name != hostname { + hostname = newHost.Name + if h, ok := reg.hosts[hostname]; ok { + return h + } + } + reg.hosts[hostname] = newHost + } + return reg.hosts[hostname] +} + +// featureGet returns enabled and ok +func (reg *Reg) featureGet(kind, registry, repo string) (bool, bool) { + reg.muHost.Lock() + defer reg.muHost.Unlock() + if v, ok := reg.features[featureKey{kind: kind, reg: registry, repo: repo}]; ok { + if time.Now().Before(v.expire) { + return v.enabled, true + } + } + return false, false +} + +func (reg *Reg) featureSet(kind, registry, repo string, enabled bool) { + reg.muHost.Lock() + reg.features[featureKey{kind: kind, reg: registry, repo: repo}] = &featureVal{enabled: enabled, expire: time.Now().Add(featureExpire)} + reg.muHost.Unlock() +} + +// WithBlobSize overrides default blob sizes +func WithBlobSize(size, max int64) Opts { + return func(r *Reg) { + if size > 0 { + r.blobChunkSize = size + } + if max != 0 { + r.blobMaxPut = max + } + } +} + +// WithBlobLimit overrides default blob limit +func WithBlobLimit(limit int64) Opts { + return func(r *Reg) { + if limit > 0 { + r.blobChunkLimit = limit + } + if r.blobMaxPut > 0 && r.blobMaxPut < limit { + r.blobMaxPut = limit + } + } +} + +// WithCache defines a cache used for various requests +func WithCache(timeout time.Duration, count int) Opts { + return func(r *Reg) { + cm := cache.New[ref.Ref, manifest.Manifest](cache.WithAge(timeout), cache.WithCount(count)) + r.cacheMan = &cm + crl := cache.New[ref.Ref, referrer.ReferrerList](cache.WithAge(timeout), cache.WithCount(count)) + r.cacheRL = &crl + } +} + +// WithCerts adds certificates +func WithCerts(certs [][]byte) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithCerts(certs)) + } +} + +// WithCertDirs adds certificate directories for host specific certs +func WithCertDirs(dirs []string) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithCertDirs(dirs)) + } +} + +// WithCertFiles adds certificates by filename +func WithCertFiles(files []string) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithCertFiles(files)) + } +} + +// WithConfigHostDefault provides default settings for hosts. 
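+//
+// An illustrative sketch (the values are hypothetical, not upstream defaults),
+// using the BlobChunk and BlobMax fields referenced elsewhere in this package:
+//
+//	def := &config.Host{BlobChunk: 4 * 1024 * 1024, BlobMax: -1}
+//	r := reg.New(reg.WithConfigHostDefault(def))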
+func WithConfigHostDefault(ch *config.Host) Opts { + return func(r *Reg) { + r.hostDefault = ch + } +} + +// WithConfigHosts adds host configs for credentials +func WithConfigHosts(configHosts []*config.Host) Opts { + return func(r *Reg) { + for _, host := range configHosts { + if host.Name == "" { + continue + } + r.hosts[host.Name] = host + } + } +} + +// WithDelay initial time to wait between retries (increased with exponential backoff) +func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithDelay(delayInit, delayMax)) + } +} + +// WithHTTPClient uses a specific http client with retryable requests +func WithHTTPClient(hc *http.Client) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithHTTPClient(hc)) + } +} + +// WithManifestMax sets the push and pull limits for manifests +func WithManifestMax(push, pull int64) Opts { + return func(r *Reg) { + r.manifestMaxPush = push + r.manifestMaxPull = pull + } +} + +// WithRetryLimit restricts the number of retries (defaults to 5) +func WithRetryLimit(l int) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithRetryLimit(l)) + } +} + +// WithSlog injects a slog Logger configuration +func WithSlog(slog *slog.Logger) Opts { + return func(r *Reg) { + r.slog = slog + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithLog(slog)) + } +} + +// WithTransport uses a specific http transport with retryable requests +func WithTransport(t *http.Transport) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithTransport(t)) + } +} + +// WithUserAgent sets a user agent header +func WithUserAgent(ua string) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithUserAgent(ua)) + } +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/reg_nowasm.go b/vendor/github.com/regclient/regclient/scheme/reg/reg_nowasm.go new file mode 100644 index 000000000..e01ba090a --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/reg_nowasm.go @@ -0,0 +1,20 @@ +//go:build !wasm + +package reg + +import ( + "log/slog" + + "github.com/sirupsen/logrus" + + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/sloghandle" +) + +// WithLog injects a logrus Logger configuration +func WithLog(log *logrus.Logger) Opts { + return func(r *Reg) { + r.slog = slog.New(sloghandle.Logrus(log)) + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithLog(r.slog)) + } +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/repo.go b/vendor/github.com/regclient/regclient/scheme/reg/repo.go new file mode 100644 index 000000000..b9b0307c2 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/repo.go @@ -0,0 +1,79 @@ +package reg + +import ( + "context" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "strconv" + + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/repo" +) + +// RepoList returns a list of repositories on a registry +// Note the underlying "_catalog" API is not supported on many cloud registries +func (reg *Reg) RepoList(ctx context.Context, hostname string, opts ...scheme.RepoOpts) (*repo.RepoList, error) { + config := scheme.RepoConfig{} + for _, opt := range opts { + opt(&config) + } + + query := url.Values{} 
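+ // paging follows the catalog API convention: "last" resumes listing after
+ // a given repository name and "n" caps the number of results returned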
+ if config.Last != "" { + query.Set("last", config.Last) + } + if config.Limit > 0 { + query.Set("n", strconv.Itoa(config.Limit)) + } + + headers := http.Header{ + "Accept": []string{"application/json"}, + } + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: hostname, + NoMirrors: true, + Method: "GET", + Path: "_catalog", + NoPrefix: true, + Query: query, + Headers: headers, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to list repositories for %s: %w", hostname, err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to list repositories for %s: %w", hostname, reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + respBody, err := io.ReadAll(resp) + if err != nil { + reg.slog.Warn("Failed to read repo list", + slog.String("err", err.Error()), + slog.String("host", hostname)) + return nil, fmt.Errorf("failed to read repo list for %s: %w", hostname, err) + } + mt := mediatype.Base(resp.HTTPResponse().Header.Get("Content-Type")) + rl, err := repo.New( + repo.WithMT(mt), + repo.WithRaw(respBody), + repo.WithHost(hostname), + repo.WithHeaders(resp.HTTPResponse().Header), + ) + if err != nil { + reg.slog.Warn("Failed to unmarshal repo list", + slog.String("err", err.Error()), + slog.String("body", string(respBody)), + slog.String("host", hostname)) + return nil, fmt.Errorf("failed to parse repo list for %s: %w", hostname, err) + } + return rl, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/tag.go b/vendor/github.com/regclient/regclient/scheme/reg/tag.go new file mode 100644 index 000000000..0ed66c874 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/tag.go @@ -0,0 +1,343 @@ +package reg + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "strconv" + "time" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/httplink" + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/platform" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/tag" + "github.com/regclient/regclient/types/warning" +) + +// TagDelete removes a tag from a repository. +// It first attempts the newer OCI API to delete by tag name (not widely supported). +// If the OCI API fails, it falls back to pushing a unique empty manifest and deleting that. 
+func (reg *Reg) TagDelete(ctx context.Context, r ref.Ref) error {
+ var tempManifest manifest.Manifest
+ if r.Tag == "" {
+ return errs.ErrMissingTag
+ }
+ // dedup warnings
+ if w := warning.FromContext(ctx); w == nil {
+ ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()})
+ }
+
+ // attempt to delete the tag directly, available in OCI distribution-spec, and Hub API
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Query,
+ Host: r.Registry,
+ NoMirrors: true,
+ Method: "DELETE",
+ Repository: r.Repository,
+ Path: "manifests/" + r.Tag,
+ IgnoreErr: true, // do not trigger backoffs if this fails
+ }
+
+ resp, err := reg.reghttp.Do(ctx, req)
+ if resp != nil {
+ defer resp.Close()
+ }
+ if err == nil && resp != nil && resp.HTTPResponse().StatusCode == 202 {
+ return nil
+ }
+ // ignore errors, fallback to creating a temporary manifest to replace the tag and deleting that manifest
+
+ // lookup the current manifest media type
+ curManifest, err := reg.ManifestHead(ctx, r)
+ if err != nil && errors.Is(err, errs.ErrUnsupportedAPI) {
+ curManifest, err = reg.ManifestGet(ctx, r)
+ }
+ if err != nil {
+ return err
+ }
+
+ // create empty image config with single label
+ // Note, this should be MediaType specific, but it appears that docker uses OCI for the config
+ now := time.Now()
+ conf := v1.Image{
+ Created: &now,
+ Config: v1.ImageConfig{
+ Labels: map[string]string{
+ "delete-tag": r.Tag,
+ "delete-date": now.String(),
+ },
+ },
+ Platform: platform.Platform{
+ OS: "linux",
+ Architecture: "amd64",
+ },
+ History: []v1.History{
+ {
+ Created: &now,
+ CreatedBy: "# regclient",
+ Comment: "empty JSON blob",
+ },
+ },
+ RootFS: v1.RootFS{
+ Type: "layers",
+ DiffIDs: []digest.Digest{
+ descriptor.EmptyDigest,
+ },
+ },
+ }
+ confB, err := json.Marshal(conf)
+ if err != nil {
+ return err
+ }
+ digester := digest.Canonical.Digester()
+ confBuf := bytes.NewBuffer(confB)
+ _, err = confBuf.WriteTo(digester.Hash())
+ if err != nil {
+ return err
+ }
+ confDigest := digester.Digest()
+
+ // create manifest with config, matching the original tag manifest type
+ switch manifest.GetMediaType(curManifest) {
+ case mediatype.OCI1Manifest, mediatype.OCI1ManifestList:
+ tempManifest, err = manifest.New(manifest.WithOrig(v1.Manifest{
+ Versioned: v1.ManifestSchemaVersion,
+ MediaType: mediatype.OCI1Manifest,
+ Config: descriptor.Descriptor{
+ MediaType: mediatype.OCI1ImageConfig,
+ Digest: confDigest,
+ Size: int64(len(confB)),
+ },
+ Layers: []descriptor.Descriptor{
+ {
+ MediaType: mediatype.OCI1Layer,
+ Size: int64(len(descriptor.EmptyData)),
+ Digest: descriptor.EmptyDigest,
+ },
+ },
+ }))
+ if err != nil {
+ return err
+ }
+ default: // default to the docker v2 schema
+ tempManifest, err = manifest.New(manifest.WithOrig(schema2.Manifest{
+ Versioned: schema2.ManifestSchemaVersion,
+ Config: descriptor.Descriptor{
+ MediaType: mediatype.Docker2ImageConfig,
+ Digest: confDigest,
+ Size: int64(len(confB)),
+ },
+ Layers: []descriptor.Descriptor{
+ {
+ MediaType: mediatype.Docker2LayerGzip,
+ Size: int64(len(descriptor.EmptyData)),
+ Digest: descriptor.EmptyDigest,
+ },
+ },
+ }))
+ if err != nil {
+ return err
+ }
+ }
+ reg.slog.Debug("Sending dummy manifest to replace tag",
+ slog.String("ref", r.Reference))
+
+ // push empty layer
+ _, err = reg.BlobPut(ctx, r, descriptor.Descriptor{Digest: descriptor.EmptyDigest, Size: int64(len(descriptor.EmptyData))}, bytes.NewReader(descriptor.EmptyData))
+ if err != nil {
+ return err
+ }
+
+ // push config
+ _, err = reg.BlobPut(ctx, r, 
descriptor.Descriptor{Digest: confDigest, Size: int64(len(confB))}, bytes.NewReader(confB))
+ if err != nil {
+ return fmt.Errorf("failed sending dummy config to delete %s: %w", r.CommonName(), err)
+ }
+
+ // push manifest to tag
+ err = reg.ManifestPut(ctx, r, tempManifest)
+ if err != nil {
+ return fmt.Errorf("failed sending dummy manifest to delete %s: %w", r.CommonName(), err)
+ }
+
+ // delete manifest by digest
+ r = r.AddDigest(tempManifest.GetDescriptor().Digest.String())
+ reg.slog.Debug("Deleting dummy manifest",
+ slog.String("ref", r.Reference),
+ slog.String("digest", r.Digest))
+ err = reg.ManifestDelete(ctx, r)
+ if err != nil {
+ return fmt.Errorf("failed deleting dummy manifest for %s: %w", r.CommonName(), err)
+ }
+
+ return nil
+}
+
+// TagList returns a listing of tags from the repository
+func (reg *Reg) TagList(ctx context.Context, r ref.Ref, opts ...scheme.TagOpts) (*tag.List, error) {
+ var config scheme.TagConfig
+ for _, opt := range opts {
+ opt(&config)
+ }
+
+ tl, err := reg.tagListOCI(ctx, r, config)
+ if err != nil {
+ return tl, err
+ }
+
+ for {
+ // if limit reached, stop searching
+ if config.Limit > 0 && len(tl.Tags) >= config.Limit {
+ break
+ }
+ tlHead, err := tl.RawHeaders()
+ if err != nil {
+ return tl, err
+ }
+ links, err := httplink.Parse(tlHead.Values("Link"))
+ if err != nil {
+ return tl, err
+ }
+ next, err := links.Get("rel", "next")
+ // if Link header with rel="next" is defined
+ if err == nil {
+ link := tl.GetURL()
+ if link == nil {
+ return tl, fmt.Errorf("tag list, failed to get URL of previous request")
+ }
+ link, err = link.Parse(next.URI)
+ if err != nil {
+ return tl, fmt.Errorf("tag list failed to parse Link: %w", err)
+ }
+ tlAdd, err := reg.tagListLink(ctx, r, config, link)
+ if err != nil {
+ return tl, fmt.Errorf("tag list failed to get Link: %w", err)
+ }
+ err = tl.Append(tlAdd)
+ if err != nil {
+ return tl, fmt.Errorf("tag list failed to append entries: %w", err)
+ }
+ } else {
+ // do not automatically expand tags with OCI methods,
+ // OCI registries should send all possible entries up to the specified limit
+ break
+ }
+ }
+
+ return tl, nil
+}
+
+func (reg *Reg) tagListOCI(ctx context.Context, r ref.Ref, config scheme.TagConfig) (*tag.List, error) {
+ query := url.Values{}
+ if config.Last != "" {
+ query.Set("last", config.Last)
+ }
+ if config.Limit > 0 {
+ query.Set("n", strconv.Itoa(config.Limit))
+ }
+ headers := http.Header{
+ "Accept": []string{"application/json"},
+ }
+ req := &reghttp.Req{
+ MetaKind: reqmeta.Query,
+ Host: r.Registry,
+ Method: "GET",
+ Repository: r.Repository,
+ Path: "tags/list",
+ Query: query,
+ Headers: headers,
+ }
+ resp, err := reg.reghttp.Do(ctx, req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list tags for %s: %w", r.CommonName(), err)
+ }
+ defer resp.Close()
+ if resp.HTTPResponse().StatusCode != 200 {
+ return nil, fmt.Errorf("failed to list tags for %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+ }
+ respBody, err := io.ReadAll(resp)
+ if err != nil {
+ reg.slog.Warn("Failed to read tag list",
+ slog.String("err", err.Error()),
+ slog.String("ref", r.CommonName()))
+ return nil, fmt.Errorf("failed to read tags for %s: %w", r.CommonName(), err)
+ }
+ tl, err := tag.New(
+ tag.WithRef(r),
+ tag.WithRaw(respBody),
+ tag.WithResp(resp.HTTPResponse()),
+ )
+ if err != nil {
+ reg.slog.Warn("Failed to unmarshal tag list",
+ slog.String("err", err.Error()),
+ slog.String("body", string(respBody)),
+ slog.String("ref", r.CommonName()))
+ 
return tl, fmt.Errorf("failed to unmarshal tag list for %s: %w", r.CommonName(), err) + } + + return tl, nil +} + +func (reg *Reg) tagListLink(ctx context.Context, r ref.Ref, _ scheme.TagConfig, link *url.URL) (*tag.List, error) { + headers := http.Header{ + "Accept": []string{"application/json"}, + } + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + Method: "GET", + DirectURL: link, + Repository: r.Repository, + Headers: headers, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to list tags for %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to list tags for %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + respBody, err := io.ReadAll(resp) + if err != nil { + reg.slog.Warn("Failed to read tag list", + slog.String("err", err.Error()), + slog.String("ref", r.CommonName())) + return nil, fmt.Errorf("failed to read tags for %s: %w", r.CommonName(), err) + } + tl, err := tag.New( + tag.WithRef(r), + tag.WithRaw(respBody), + tag.WithResp(resp.HTTPResponse()), + ) + if err != nil { + reg.slog.Warn("Failed to unmarshal tag list", + slog.String("err", err.Error()), + slog.String("body", string(respBody)), + slog.String("ref", r.CommonName())) + return tl, fmt.Errorf("failed to unmarshal tag list for %s: %w", r.CommonName(), err) + } + + return tl, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/scheme.go b/vendor/github.com/regclient/regclient/scheme/scheme.go new file mode 100644 index 000000000..936bcc10f --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/scheme.go @@ -0,0 +1,227 @@ +// Package scheme defines the interface for various reference schemes. +package scheme + +import ( + "context" + "io" + + "github.com/regclient/regclient/internal/pqueue" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/ping" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/referrer" + "github.com/regclient/regclient/types/tag" +) + +// API is used to interface between different methods to store images. +type API interface { + // BlobDelete removes a blob from the repository. + BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error + // BlobGet retrieves a blob, returning a reader. + BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) + // BlobHead verifies the existence of a blob, the reader contains the headers but no body to read. + BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) + // BlobMount attempts to perform a server side copy of the blob. + BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor) error + // BlobPut sends a blob to the repository, returns the digest and size when successful. + BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) + + // ManifestDelete removes a manifest, including all tags that point to that manifest. + ManifestDelete(ctx context.Context, r ref.Ref, opts ...ManifestOpts) error + // ManifestGet retrieves a manifest from a repository. 
+ ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, error) + // ManifestHead gets metadata about the manifest (existence, digest, mediatype, size). + ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest, error) + // ManifestPut sends a manifest to the repository. + ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...ManifestOpts) error + + // Ping verifies access to a registry or equivalent. + Ping(ctx context.Context, r ref.Ref) (ping.Result, error) + + // ReferrerList returns a list of referrers to a given reference. + ReferrerList(ctx context.Context, r ref.Ref, opts ...ReferrerOpts) (referrer.ReferrerList, error) + + // TagDelete removes a tag from the repository. + TagDelete(ctx context.Context, r ref.Ref) error + // TagList returns a list of tags from the repository. + TagList(ctx context.Context, r ref.Ref, opts ...TagOpts) (*tag.List, error) +} + +// Closer is used to check if a scheme implements the Close API. +type Closer interface { + Close(ctx context.Context, r ref.Ref) error +} + +// GCLocker is used to indicate locking is available for GC management. +type GCLocker interface { + // GCLock a reference to prevent GC from triggering during a put, locks are not exclusive. + GCLock(r ref.Ref) + // GCUnlock a reference to allow GC (once all locks are released). + // The reference should be closed after this step and unlock should only be called once per each Lock call. + GCUnlock(r ref.Ref) +} + +// Throttler is used to indicate the scheme implements Throttle. +type Throttler interface { + Throttle(r ref.Ref, put bool) []*pqueue.Queue[reqmeta.Data] +} + +// ManifestConfig is used by schemes to import [ManifestOpts]. +type ManifestConfig struct { + CheckReferrers bool + Child bool // used when pushing a child of a manifest list, skips indexing in ocidir + Manifest manifest.Manifest +} + +// ManifestOpts is used to set options on manifest APIs. +type ManifestOpts func(*ManifestConfig) + +// WithManifestCheckReferrers is used when deleting a manifest. +// It indicates the manifest should be fetched and referrers should be deleted if defined. +func WithManifestCheckReferrers() ManifestOpts { + return func(config *ManifestConfig) { + config.CheckReferrers = true + } +} + +// WithManifestChild indicates the API call is on a child manifest. +// This is used internally when copying multi-platform manifests. +// This bypasses tracking of an untagged digest in ocidir which is needed for garbage collection. +func WithManifestChild() ManifestOpts { + return func(config *ManifestConfig) { + config.Child = true + } +} + +// WithManifest is used to pass the manifest to a method to avoid an extra GET request. +// This is used on a delete to check for referrers. +func WithManifest(m manifest.Manifest) ManifestOpts { + return func(mc *ManifestConfig) { + mc.Manifest = m + } +} + +// ReferrerConfig is used by schemes to import [ReferrerOpts]. +type ReferrerConfig struct { + MatchOpt descriptor.MatchOpt // filter/sort results + Platform string // get referrers for a specific platform + SrcRepo ref.Ref // repo used to query referrers +} + +// ReferrerOpts is used to set options on referrer APIs. +type ReferrerOpts func(*ReferrerConfig) + +// WithReferrerMatchOpt filters results using [descriptor.MatchOpt]. +func WithReferrerMatchOpt(mo descriptor.MatchOpt) ReferrerOpts { + return func(config *ReferrerConfig) { + config.MatchOpt = config.MatchOpt.Merge(mo) + } +} + +// WithReferrerPlatform gets referrers for a single platform from a multi-platform manifest. 
+// Note that this is implemented by [regclient.ReferrerList] and not the individual scheme implementations. +func WithReferrerPlatform(p string) ReferrerOpts { + return func(config *ReferrerConfig) { + config.Platform = p + } +} + +// WithReferrerSource pulls referrers from a separate source. +// Note that this is implemented by [regclient.ReferrerList] and not the individual scheme implementations. +func WithReferrerSource(r ref.Ref) ReferrerOpts { + return func(config *ReferrerConfig) { + config.SrcRepo = r + } +} + +// WithReferrerAT filters by a specific artifactType value. +// +// Deprecated: replace with [WithReferrerMatchOpt]. +// +//go:fix inline +func WithReferrerAT(at string) ReferrerOpts { + return WithReferrerMatchOpt(descriptor.MatchOpt{ArtifactType: at}) +} + +// WithReferrerAnnotations filters by a list of annotations, all of which must match. +// +// Deprecated: replace with [WithReferrerMatchOpt]. +// +//go:fix inline +func WithReferrerAnnotations(annotations map[string]string) ReferrerOpts { + return WithReferrerMatchOpt(descriptor.MatchOpt{Annotations: annotations}) +} + +// WithReferrerSort orders the resulting referrers listing according to a specified annotation. +// +// Deprecated: replace with [WithReferrerMatchOpt]. +// +//go:fix inline +func WithReferrerSort(annotation string, desc bool) ReferrerOpts { + return WithReferrerMatchOpt(descriptor.MatchOpt{SortAnnotation: annotation, SortDesc: desc}) +} + +// ReferrerFilter filters the referrer list according to the config. +func ReferrerFilter(config ReferrerConfig, rlIn referrer.ReferrerList) referrer.ReferrerList { + return referrer.ReferrerList{ + Subject: rlIn.Subject, + Source: rlIn.Source, + Manifest: rlIn.Manifest, + Annotations: rlIn.Annotations, + Tags: rlIn.Tags, + Descriptors: descriptor.DescriptorListFilter(rlIn.Descriptors, config.MatchOpt), + } +} + +// RepoConfig is used by schemes to import [RepoOpts]. +type RepoConfig struct { + Limit int + Last string +} + +// RepoOpts is used to set options on repo APIs. +type RepoOpts func(*RepoConfig) + +// WithRepoLimit passes a maximum number of repositories to return to the repository list API. +// Registries may ignore this. +func WithRepoLimit(l int) RepoOpts { + return func(config *RepoConfig) { + config.Limit = l + } +} + +// WithRepoLast passes the last received repository for requesting the next batch of repositories. +// Registries may ignore this. +func WithRepoLast(l string) RepoOpts { + return func(config *RepoConfig) { + config.Last = l + } +} + +// TagConfig is used by schemes to import [TagOpts]. +type TagConfig struct { + Limit int + Last string +} + +// TagOpts is used to set options on tag APIs. +type TagOpts func(*TagConfig) + +// WithTagLimit passes a maximum number of tags to return to the tag list API. +// Registries may ignore this. +func WithTagLimit(limit int) TagOpts { + return func(t *TagConfig) { + t.Limit = limit + } +} + +// WithTagLast passes the last received tag for requesting the next batch of tags. +// Registries may ignore this. 
+func WithTagLast(last string) TagOpts { + return func(t *TagConfig) { + t.Last = last + } +} diff --git a/vendor/github.com/regclient/regclient/tag.go b/vendor/github.com/regclient/regclient/tag.go new file mode 100644 index 000000000..6b522804b --- /dev/null +++ b/vendor/github.com/regclient/regclient/tag.go @@ -0,0 +1,40 @@ +package regclient + +import ( + "context" + "fmt" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/tag" +) + +// TagDelete deletes a tag from the registry. Since there's no API for this, +// you'd want to normally just delete the manifest. However multiple tags may +// point to the same manifest, so instead you must: +// 1. Make a manifest, for this we put a few labels and timestamps to be unique. +// 2. Push that manifest to the tag. +// 3. Delete the digest for that new manifest that is only used by that tag. +func (rc *RegClient) TagDelete(ctx context.Context, r ref.Ref) error { + if !r.IsSet() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return err + } + return schemeAPI.TagDelete(ctx, r) +} + +// TagList returns a tag list from a repository +func (rc *RegClient) TagList(ctx context.Context, r ref.Ref, opts ...scheme.TagOpts) (*tag.List, error) { + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return nil, err + } + return schemeAPI.TagList(ctx, r, opts...) +} diff --git a/vendor/github.com/regclient/regclient/types/annotations.go b/vendor/github.com/regclient/regclient/types/annotations.go new file mode 100644 index 000000000..c989eb66f --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/annotations.go @@ -0,0 +1,72 @@ +// Content in this file comes from OCI +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. 
+ AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. + AnnotationTitle = "org.opencontainers.image.title" + + // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. + AnnotationDescription = "org.opencontainers.image.description" + + // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. + AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" + + // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. + AnnotationBaseImageName = "org.opencontainers.image.base.name" + + // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. + AnnotationArtifactCreated = "org.opencontainers.artifact.created" + + // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. + AnnotationArtifactDescription = "org.opencontainers.artifact.description" + + // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. + AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" +) diff --git a/vendor/github.com/regclient/regclient/types/blob/blob.go b/vendor/github.com/regclient/regclient/types/blob/blob.go new file mode 100644 index 000000000..86d215cba --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/blob/blob.go @@ -0,0 +1,103 @@ +// Package blob is the underlying type for pushing and pulling blobs. +package blob + +import ( + "io" + "net/http" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/descriptor" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/ref" +) + +// Blob interface is used for returning blobs. +type Blob interface { + // GetDescriptor returns the descriptor associated with the blob. + GetDescriptor() descriptor.Descriptor + // RawBody returns the raw content of the blob. + RawBody() ([]byte, error) + // RawHeaders returns the headers received from the registry. + RawHeaders() http.Header + // Response returns the response associated with the blob. + Response() *http.Response + + // Digest returns the provided or calculated digest of the blob. 
+ // + // Deprecated: Digest should be replaced by GetDescriptor().Digest. + Digest() digest.Digest + // Length returns the provided or calculated length of the blob. + // + // Deprecated: Length should be replaced by GetDescriptor().Size. + Length() int64 + // MediaType returns the Content-Type header received from the registry. + // + // Deprecated: MediaType should be replaced by GetDescriptor().MediaType. + MediaType() string +} + +type blobConfig struct { + desc descriptor.Descriptor + header http.Header + image *v1.Image + r ref.Ref + rdr io.Reader + resp *http.Response + rawBody []byte +} + +// Opts is used for options to create a new blob. +type Opts func(*blobConfig) + +// WithDesc specifies the descriptor associated with the blob. +func WithDesc(d descriptor.Descriptor) Opts { + return func(bc *blobConfig) { + bc.desc = d + } +} + +// WithHeader defines the headers received when pulling a blob. +func WithHeader(header http.Header) Opts { + return func(bc *blobConfig) { + bc.header = header + } +} + +// WithImage provides the OCI Image config needed for config blobs. +func WithImage(image v1.Image) Opts { + return func(bc *blobConfig) { + bc.image = &image + } +} + +// WithRawBody defines the raw blob contents for OCIConfig. +func WithRawBody(raw []byte) Opts { + return func(bc *blobConfig) { + bc.rawBody = raw + } +} + +// WithReader defines the reader for a new blob. +func WithReader(rc io.Reader) Opts { + return func(bc *blobConfig) { + bc.rdr = rc + } +} + +// WithRef specifies the reference where the blob was pulled from. +func WithRef(r ref.Ref) Opts { + return func(bc *blobConfig) { + bc.r = r + } +} + +// WithResp includes the http response, which is used to extract the headers and reader. +func WithResp(resp *http.Response) Opts { + return func(bc *blobConfig) { + bc.resp = resp + if bc.header == nil && resp != nil { + bc.header = resp.Header + } + } +} diff --git a/vendor/github.com/regclient/regclient/types/blob/common.go b/vendor/github.com/regclient/regclient/types/blob/common.go new file mode 100644 index 000000000..c78029c0a --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/blob/common.go @@ -0,0 +1,68 @@ +package blob + +import ( + "net/http" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/ref" +) + +// Common was previously an interface. A type alias is provided for upgrades. +type Common = *BCommon + +// BCommon is a common struct for all blobs which includes various shared methods. +type BCommon struct { + r ref.Ref + desc descriptor.Descriptor + blobSet bool + rawHeader http.Header + resp *http.Response +} + +// GetDescriptor returns the descriptor associated with the blob. +func (c *BCommon) GetDescriptor() descriptor.Descriptor { + return c.desc +} + +// Digest returns the provided or calculated digest of the blob. +// +// Deprecated: Digest should be replaced by GetDescriptor().Digest, see [GetDescriptor]. +// +//go:fix inline +func (c *BCommon) Digest() digest.Digest { + return c.desc.Digest +} + +// Length returns the provided or calculated length of the blob. +// +// Deprecated: Length should be replaced by GetDescriptor().Size, see [GetDescriptor]. +// +//go:fix inline +func (c *BCommon) Length() int64 { + return c.desc.Size +} + +// MediaType returns the Content-Type header received from the registry. 
+// +// Deprecated: MediaType should be replaced by GetDescriptor().MediaType, see [GetDescriptor]. +// +//go:fix inline +func (c *BCommon) MediaType() string { + return c.desc.MediaType +} + +// RawHeaders returns the headers received from the registry. +func (c *BCommon) RawHeaders() http.Header { + return c.rawHeader +} + +// Response returns the response associated with the blob. +func (c *BCommon) Response() *http.Response { + return c.resp +} diff --git a/vendor/github.com/regclient/regclient/types/blob/ociconfig.go b/vendor/github.com/regclient/regclient/types/blob/ociconfig.go new file mode 100644 index 000000000..619ea32c3 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/blob/ociconfig.go @@ -0,0 +1,127 @@ +package blob + +import ( + "encoding/json" + "fmt" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" +) + +// OCIConfig was previously an interface. A type alias is provided for upgrading. +type OCIConfig = *BOCIConfig + +// BOCIConfig includes an OCI Image Config struct that may be extracted from or pushed to a blob. +type BOCIConfig struct { + BCommon + rawBody []byte + image v1.Image +} + +// NewOCIConfig creates a new BOCIConfig. +// When created from an existing blob, a BOCIConfig will be created using BReader.ToOCIConfig(). +func NewOCIConfig(opts ...Opts) *BOCIConfig { + bc := blobConfig{} + for _, opt := range opts { + opt(&bc) + } + if bc.image != nil && len(bc.rawBody) == 0 { + var err error + bc.rawBody, err = json.Marshal(bc.image) + if err != nil { + bc.rawBody = []byte{} + } + } + if len(bc.rawBody) > 0 { + if bc.image == nil { + bc.image = &v1.Image{} + err := json.Unmarshal(bc.rawBody, bc.image) + if err != nil { + bc.image = nil + } + } + // force descriptor to match raw body, even if we generated the raw body + bc.desc.Digest = bc.desc.DigestAlgo().FromBytes(bc.rawBody) + bc.desc.Size = int64(len(bc.rawBody)) + if bc.desc.MediaType == "" { + bc.desc.MediaType = mediatype.OCI1ImageConfig + } + } + b := BOCIConfig{ + BCommon: BCommon{ + desc: bc.desc, + r: bc.r, + rawHeader: bc.header, + resp: bc.resp, + }, + rawBody: bc.rawBody, + } + if bc.image != nil { + b.image = *bc.image + b.blobSet = true + } + return &b +} + +// GetConfig returns OCI config. +func (oc *BOCIConfig) GetConfig() v1.Image { + return oc.image +} + +// RawBody returns the original body from the request. +func (oc *BOCIConfig) RawBody() ([]byte, error) { + var err error + if !oc.blobSet { + return []byte{}, fmt.Errorf("Blob is not defined") + } + if len(oc.rawBody) == 0 { + oc.rawBody, err = json.Marshal(oc.image) + } + return oc.rawBody, err +} + +// SetConfig updates the config, including raw body and descriptor. +func (oc *BOCIConfig) SetConfig(image v1.Image) { + oc.image = image + oc.rawBody, _ = json.Marshal(oc.image) + if oc.desc.MediaType == "" { + oc.desc.MediaType = mediatype.OCI1ImageConfig + } + oc.desc.Digest = oc.desc.DigestAlgo().FromBytes(oc.rawBody) + oc.desc.Size = int64(len(oc.rawBody)) + oc.blobSet = true +} + +// MarshalJSON passes through the marshalling to the underlying image if rawBody is not available. +func (oc *BOCIConfig) MarshalJSON() ([]byte, error) { + if !oc.blobSet { + return []byte{}, fmt.Errorf("Blob is not defined") + } + if len(oc.rawBody) > 0 { + return oc.rawBody, nil + } + return json.Marshal(oc.image) +} + +// UnmarshalJSON extracts json content and populates the content. 
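
As a sketch of how the digest bookkeeping above behaves: SetConfig rewrites the raw body and recomputes the descriptor, so a mutated config never carries a stale digest. The helper and label key here are hypothetical:

import (
	"github.com/regclient/regclient/types/blob"
	"github.com/regclient/regclient/types/descriptor"
)

// relabel mutates an image config and returns a descriptor that matches
// the rewritten raw body (digest and size are recomputed by SetConfig).
func relabel(raw []byte) (descriptor.Descriptor, []byte, error) {
	cfg := blob.NewOCIConfig(blob.WithRawBody(raw))
	img := cfg.GetConfig()
	if img.Config.Labels == nil {
		img.Config.Labels = map[string]string{}
	}
	img.Config.Labels["org.example.touched"] = "true" // hypothetical label
	cfg.SetConfig(img)
	body, err := cfg.RawBody()
	return cfg.GetDescriptor(), body, err
}
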
+func (oc *BOCIConfig) UnmarshalJSON(data []byte) error { + image := v1.Image{} + err := json.Unmarshal(data, &image) + if err != nil { + return err + } + oc.image = image + oc.rawBody = make([]byte, len(data)) + copy(oc.rawBody, data) + if oc.desc.MediaType == "" { + oc.desc.MediaType = mediatype.OCI1ImageConfig + } + oc.desc.Digest = oc.desc.DigestAlgo().FromBytes(oc.rawBody) + oc.desc.Size = int64(len(oc.rawBody)) + oc.blobSet = true + return nil +} diff --git a/vendor/github.com/regclient/regclient/types/blob/reader.go b/vendor/github.com/regclient/regclient/types/blob/reader.go new file mode 100644 index 000000000..632fac20d --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/blob/reader.go @@ -0,0 +1,210 @@ +package blob + +import ( + "fmt" + "io" + "strconv" + "sync" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/limitread" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" +) + +// Reader was previously an interface. A type alias is provided for upgrading. +type Reader = *BReader + +// BReader is used to read blobs. +type BReader struct { + BCommon + readBytes int64 + reader io.Reader + origRdr io.Reader + digester digest.Digester + mu sync.Mutex +} + +// NewReader creates a new BReader. +func NewReader(opts ...Opts) *BReader { + bc := blobConfig{} + for _, opt := range opts { + opt(&bc) + } + if bc.resp != nil { + // extract headers and reader if other fields not passed + if bc.header == nil { + bc.header = bc.resp.Header + } + if bc.rdr == nil { + bc.rdr = bc.resp.Body + } + } + if bc.header != nil { + // extract fields from header if descriptor not passed + if bc.desc.MediaType == "" { + bc.desc.MediaType = mediatype.Base(bc.header.Get("Content-Type")) + } + if bc.desc.Size == 0 { + cl, _ := strconv.Atoi(bc.header.Get("Content-Length")) + bc.desc.Size = int64(cl) + } + if bc.desc.Digest == "" { + bc.desc.Digest, _ = digest.Parse(bc.header.Get("Docker-Content-Digest")) + } + } + br := BReader{ + BCommon: BCommon{ + r: bc.r, + desc: bc.desc, + rawHeader: bc.header, + resp: bc.resp, + }, + origRdr: bc.rdr, + } + if bc.rdr != nil { + br.blobSet = true + br.digester = br.desc.DigestAlgo().Digester() + rdr := bc.rdr + if br.desc.Size > 0 { + rdr = &limitread.LimitRead{ + Reader: rdr, + Limit: br.desc.Size, + } + } + br.reader = io.TeeReader(rdr, br.digester.Hash()) + } + return &br +} + +// Close attempts to close the reader and populates/validates the digest. +func (r *BReader) Close() error { + if r == nil || r.origRdr == nil { + return nil + } + // attempt to close if available in original reader + bc, ok := r.origRdr.(io.Closer) + if !ok { + return nil + } + return bc.Close() +} + +// RawBody returns the original body from the request. +func (r *BReader) RawBody() ([]byte, error) { + return io.ReadAll(r) +} + +// Read passes through the read operation while computing the digest and tracking the size. 
+func (r *BReader) Read(p []byte) (int, error) {
+	if r == nil || r.reader == nil {
+		return 0, fmt.Errorf("blob has no reader: %w", io.ErrUnexpectedEOF)
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	size, err := r.reader.Read(p)
+	r.readBytes = r.readBytes + int64(size)
+	if err == io.EOF {
+		// check/save size
+		if r.desc.Size == 0 {
+			r.desc.Size = r.readBytes
+		} else if r.readBytes < r.desc.Size {
+			err = fmt.Errorf("%w [expected %d, received %d]: %w", errs.ErrShortRead, r.desc.Size, r.readBytes, err)
+		} else if r.readBytes > r.desc.Size {
+			err = fmt.Errorf("%w [expected %d, received %d]: %w", errs.ErrSizeLimitExceeded, r.desc.Size, r.readBytes, err)
+		}
+		// check/save digest
+		if r.desc.Digest.Validate() != nil {
+			r.desc.Digest = r.digester.Digest()
+		} else if r.desc.Digest != r.digester.Digest() {
+			err = fmt.Errorf("%w [expected %s, calculated %s]: %w", errs.ErrDigestMismatch, r.desc.Digest.String(), r.digester.Digest().String(), err)
+		}
+	}
+	return size, err
+}
+
+// Seek passes through the seek operation, resetting or invalidating the digest.
+func (r *BReader) Seek(offset int64, whence int) (int64, error) {
+	if r == nil || r.origRdr == nil {
+		return 0, fmt.Errorf("blob has no reader")
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if offset == 0 && whence == io.SeekCurrent {
+		return r.readBytes, nil
+	}
+	// cannot do an arbitrary seek and still digest without a lot more complication
+	if offset != 0 || whence != io.SeekStart {
+		return r.readBytes, fmt.Errorf("unable to seek to arbitrary position")
+	}
+	rdrSeek, ok := r.origRdr.(io.Seeker)
+	if !ok {
+		return r.readBytes, fmt.Errorf("Seek unsupported")
+	}
+	o, err := rdrSeek.Seek(offset, whence)
+	if err != nil || o != 0 {
+		return r.readBytes, err
+	}
+	// reset internal offset and digest calculation
+	rdr := r.origRdr
+	if r.desc.Size > 0 {
+		rdr = &limitread.LimitRead{
+			Reader: rdr,
+			Limit:  r.desc.Size,
+		}
+	}
+	r.digester = r.desc.DigestAlgo().Digester()
+	r.reader = io.TeeReader(rdr, r.digester.Hash())
+	r.readBytes = 0
+
+	return 0, nil
+}
+
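A sketch of how these semantics combine in practice: draining the reader forces the size and digest checks at EOF, and a seek back to zero is the only supported rewind, resetting the digester so a retry re-verifies from scratch. The helper name is hypothetical:

import (
	"io"

	"github.com/regclient/regclient/types/blob"
)

// readVerified drains a blob reader, relying on the EOF checks in Read to
// surface errs.ErrShortRead or errs.ErrDigestMismatch, and retries once
// when the underlying reader supports seeking back to the start.
func readVerified(br blob.Reader) ([]byte, error) {
	b, err := io.ReadAll(br)
	if err == nil {
		return b, nil
	}
	if _, serr := br.Seek(0, io.SeekStart); serr != nil {
		return nil, err // not seekable; report the original failure
	}
	return io.ReadAll(br) // digest is recomputed on the second pass
}

+// ToOCIConfig converts a BReader to a BOCIConfig.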
+func (r *BReader) ToOCIConfig() (*BOCIConfig, error) {
+	if r == nil || !r.blobSet {
+		return nil, fmt.Errorf("blob is not defined")
+	}
+	if r.readBytes != 0 {
+		return nil, fmt.Errorf("unable to convert after read has been performed")
+	}
+	blobBody, err := io.ReadAll(r)
+	errC := r.Close()
+	if err != nil {
+		return nil, fmt.Errorf("error reading image config for %s: %w", r.r.CommonName(), err)
+	}
+	if errC != nil {
+		return nil, fmt.Errorf("error closing blob reader: %w", errC)
+	}
+	return NewOCIConfig(
+		WithDesc(r.desc),
+		WithHeader(r.rawHeader),
+		WithRawBody(blobBody),
+		WithRef(r.r),
+		WithResp(r.resp),
+	), nil
+}
+
+// ToTarReader converts a BReader to a BTarReader.
+func (r *BReader) ToTarReader() (*BTarReader, error) {
+	if r == nil || !r.blobSet {
+		return nil, fmt.Errorf("blob is not defined")
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.readBytes != 0 {
+		return nil, fmt.Errorf("unable to convert after read has been performed")
+	}
+	return NewTarReader(
+		WithDesc(r.desc),
+		WithHeader(r.rawHeader),
+		WithRef(r.r),
+		WithResp(r.resp),
+		WithReader(r.reader),
+	), nil
+}
diff --git a/vendor/github.com/regclient/regclient/types/blob/tar.go b/vendor/github.com/regclient/regclient/types/blob/tar.go
new file mode 100644
index 000000000..6f47f4bbe
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/blob/tar.go
@@ -0,0 +1,191 @@
+package blob
+
+import (
+	"archive/tar"
+	"errors"
+	"fmt"
+	"io"
+	"path/filepath"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+
+	"github.com/regclient/regclient/internal/limitread"
+	"github.com/regclient/regclient/pkg/archive"
+	"github.com/regclient/regclient/types/errs"
+)
+
+// TarReader was previously an interface. A type alias is provided for upgrading.
+type TarReader = *BTarReader
+
+// BTarReader is used to read individual files from an image layer.
+type BTarReader struct {
+	BCommon
+	origRdr  io.Reader
+	reader   io.Reader
+	digester digest.Digester
+	tr       *tar.Reader
+}
+
+// NewTarReader creates a BTarReader.
+// Typically a BTarReader will be created using BReader.ToTarReader().
+func NewTarReader(opts ...Opts) *BTarReader {
+	bc := blobConfig{}
+	for _, opt := range opts {
+		opt(&bc)
+	}
+	tr := BTarReader{
+		BCommon: BCommon{
+			desc:      bc.desc,
+			r:         bc.r,
+			rawHeader: bc.header,
+			resp:      bc.resp,
+		},
+		origRdr: bc.rdr,
+	}
+	if bc.rdr != nil {
+		tr.blobSet = true
+		tr.digester = tr.desc.DigestAlgo().Digester()
+		rdr := bc.rdr
+		if tr.desc.Size > 0 {
+			rdr = &limitread.LimitRead{
+				Reader: rdr,
+				Limit:  tr.desc.Size,
+			}
+		}
+		tr.reader = io.TeeReader(rdr, tr.digester.Hash())
+	}
+	return &tr
+}
+
+// Close attempts to close the reader and populates/validates the digest.
+func (tr *BTarReader) Close() error {
+	// attempt to close if available in original reader
+	if trc, ok := tr.origRdr.(io.Closer); ok && trc != nil {
+		return trc.Close()
+	}
+	return nil
+}
+
+// GetTarReader returns the tar.Reader for the blob.
+func (tr *BTarReader) GetTarReader() (*tar.Reader, error) {
+	if tr.reader == nil {
+		return nil, fmt.Errorf("blob has no reader defined")
+	}
+	if tr.tr == nil {
+		dr, err := archive.Decompress(tr.reader)
+		if err != nil {
+			return nil, err
+		}
+		tr.tr = tar.NewReader(dr)
+	}
+	return tr.tr, nil
+}
+
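To illustrate the intended flow, a sketch of extracting a single file from a layer blob; ReadFile below handles tar traversal and whiteout detection, and the helper and file names here are hypothetical:

import (
	"io"

	"github.com/regclient/regclient/types/blob"
)

// layerFile converts a pulled layer blob into a tar reader and extracts one
// file; ReadFile returns errs.ErrFileNotFound or errs.ErrFileDeleted when
// the path is absent or whited out.
func layerFile(br blob.Reader, name string) ([]byte, error) {
	tr, err := br.ToTarReader()
	if err != nil {
		return nil, err
	}
	defer tr.Close()
	_, rdr, err := tr.ReadFile(name)
	if err != nil {
		return nil, err
	}
	return io.ReadAll(rdr)
}

+// RawBody returns the original body from the request.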
+func (tr *BTarReader) RawBody() ([]byte, error) { + if !tr.blobSet { + return []byte{}, fmt.Errorf("Blob is not defined") + } + if tr.tr != nil { + return []byte{}, fmt.Errorf("RawBody cannot be returned after TarReader returned") + } + b, err := io.ReadAll(tr.reader) + if err != nil { + return b, err + } + if tr.digester != nil { + dig := tr.digester.Digest() + tr.digester = nil + if tr.desc.Digest.String() != "" && dig != tr.desc.Digest { + return b, fmt.Errorf("%w, expected %s, received %s", errs.ErrDigestMismatch, tr.desc.Digest.String(), dig.String()) + } + tr.desc.Digest = dig + } + err = tr.Close() + return b, err +} + +// ReadFile parses the tar to find a file. +func (tr *BTarReader) ReadFile(filename string) (*tar.Header, io.Reader, error) { + if strings.HasPrefix(filename, ".wh.") { + return nil, nil, fmt.Errorf(".wh. prefix is reserved for whiteout files") + } + // normalize filenames, + filename = filepath.Clean(filename) + if filename[0] == '/' { + filename = filename[1:] + } + // get reader + rdr, err := tr.GetTarReader() + if err != nil { + return nil, nil, err + } + // loop through files until whiteout or target file is found + whiteout := false + for { + th, err := rdr.Next() + if err != nil { + // break on eof, everything else is an error + if errors.Is(err, io.EOF) { + break + } + return nil, nil, err + } + thFile := filepath.Clean(th.Name) + if thFile[0] == '/' { + thFile = thFile[1:] + } + // found the target file + if thFile == filename { + return th, rdr, nil + } + // check/track whiteout file + name := filepath.Base(th.Name) + if !whiteout && strings.HasPrefix(name, ".wh.") && tarCmpWhiteout(th.Name, filename) { + // continue searching after finding a whiteout file + // a new file may be created in the same layer + whiteout = true + } + } + // EOF encountered + if whiteout { + return nil, nil, errs.ErrFileDeleted + } + if tr.digester != nil { + _, _ = io.Copy(io.Discard, tr.reader) // process/digest any trailing bytes from reader + dig := tr.digester.Digest() + tr.digester = nil + if tr.desc.Digest.String() != "" && dig != tr.desc.Digest { + return nil, nil, fmt.Errorf("%w, expected %s, received %s", errs.ErrDigestMismatch, tr.desc.Digest.String(), dig.String()) + } + tr.desc.Digest = dig + } + return nil, nil, errs.ErrFileNotFound +} + +func tarCmpWhiteout(whFile, tgtFile string) bool { + whSplit := strings.Split(whFile, "/") + tgtSplit := strings.Split(tgtFile, "/") + // the -1 handles the opaque whiteout + if len(whSplit)-1 > len(tgtSplit) { + return false + } + // verify the path matches up to the whiteout + for i := range whSplit[:len(whSplit)-1] { + if whSplit[i] != tgtSplit[i] { + return false + } + } + i := len(whSplit) - 1 + // opaque whiteout of entire directory + if whSplit[i] == ".wh..wh..opq" { + return true + } + // compare whiteout name to next path entry + if i > len(tgtSplit)-1 { + return false + } + whName := strings.TrimPrefix(whSplit[i], ".wh.") + return whName == tgtSplit[i] +} diff --git a/vendor/github.com/regclient/regclient/types/callback.go b/vendor/github.com/regclient/regclient/types/callback.go new file mode 100644 index 000000000..c4c2fad9b --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/callback.go @@ -0,0 +1,29 @@ +package types + +type CallbackState int + +const ( + CallbackUndef CallbackState = iota + CallbackSkipped + CallbackStarted + CallbackActive + CallbackFinished + CallbackArchived +) + +type CallbackKind int + +const ( + CallbackManifest CallbackKind = iota + CallbackBlob +) + +func (k CallbackKind) String() 
string { + switch k { + case CallbackBlob: + return "blob" + case CallbackManifest: + return "manifest" + } + return "unknown" +} diff --git a/vendor/github.com/regclient/regclient/types/descriptor.go b/vendor/github.com/regclient/regclient/types/descriptor.go new file mode 100644 index 000000000..6cb357627 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/descriptor.go @@ -0,0 +1,40 @@ +package types + +import "github.com/regclient/regclient/types/descriptor" + +type ( + // Descriptor is used in manifests to refer to content by media type, size, and digest. + // + // Deprecated: replace with [descriptor.Descriptor]. + //go:fix inline + Descriptor = descriptor.Descriptor + // MatchOpt defines conditions for a match descriptor. + // + // Deprecated: replace with [descriptor.MatchOpt]. + //go:fix inline + MatchOpt = descriptor.MatchOpt +) + +var ( + // EmptyData is the content of the empty JSON descriptor. See [mediatype.OCI1Empty]. + // + // Deprecated: replace with [descriptor.EmptyData]. + //go:fix inline + EmptyData = descriptor.EmptyData + // EmptyDigest is the digest of the empty JSON descriptor. See [mediatype.OCI1Empty]. + // + // Deprecated: replace with [descriptor.EmptyDigest]. + //go:fix inline + EmptyDigest = descriptor.EmptyDigest + // DescriptorListFilter returns a list of descriptors from the list matching the search options. + // When opt.SortAnnotation is set, the order of descriptors with matching annotations is undefined. + // + // Deprecated: replace with [descriptor.DescriptorListFilter] + //go:fix inline + DescriptorListFilter = descriptor.DescriptorListFilter + // DescriptorListSearch returns the first descriptor from the list matching the search options. + // + // Deprecated: replace with [descriptor.DescriptorListSearch] + //go:fix inline + DescriptorListSearch = descriptor.DescriptorListSearch +) diff --git a/vendor/github.com/regclient/regclient/types/descriptor/descriptor.go b/vendor/github.com/regclient/regclient/types/descriptor/descriptor.go new file mode 100644 index 000000000..1b3e39412 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/descriptor/descriptor.go @@ -0,0 +1,349 @@ +// Package descriptor defines the OCI descriptor data structure used in manifests to reference content addressable data. +package descriptor + +import ( + "fmt" + "maps" + "sort" + "strings" + "text/tabwriter" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/units" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/platform" +) + +// Descriptor is used in manifests to refer to content by media type, size, and digest. +type Descriptor struct { + // MediaType describe the type of the content. + MediaType string `json:"mediaType"` + + // Digest uniquely identifies the content. + Digest digest.Digest `json:"digest"` + + // Size in bytes of content. + Size int64 `json:"size"` + + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Data is an embedding of the targeted content. This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. 
+ Data []byte `json:"data,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // This should only be used when referring to a manifest. + Platform *platform.Platform `json:"platform,omitempty"` + + // ArtifactType is the media type of the artifact this descriptor refers to. + ArtifactType string `json:"artifactType,omitempty"` + + // digestAlgo is the preferred digest algorithm for when the digest is unset. + digestAlgo digest.Algorithm +} + +var ( + // EmptyData is the content of the empty JSON descriptor. See [mediatype.OCI1Empty]. + EmptyData = []byte("{}") + // EmptyDigest is the digest of the empty JSON descriptor. See [mediatype.OCI1Empty]. + EmptyDigest = digest.SHA256.FromBytes(EmptyData) + mtToOCI map[string]string +) + +func init() { + mtToOCI = map[string]string{ + mediatype.Docker2ManifestList: mediatype.OCI1ManifestList, + mediatype.Docker2Manifest: mediatype.OCI1Manifest, + mediatype.Docker2ImageConfig: mediatype.OCI1ImageConfig, + mediatype.Docker2Layer: mediatype.OCI1Layer, + mediatype.Docker2LayerGzip: mediatype.OCI1LayerGzip, + mediatype.Docker2LayerZstd: mediatype.OCI1LayerZstd, + mediatype.OCI1ManifestList: mediatype.OCI1ManifestList, + mediatype.OCI1Manifest: mediatype.OCI1Manifest, + mediatype.OCI1ImageConfig: mediatype.OCI1ImageConfig, + mediatype.OCI1Layer: mediatype.OCI1Layer, + mediatype.OCI1LayerGzip: mediatype.OCI1LayerGzip, + mediatype.OCI1LayerZstd: mediatype.OCI1LayerZstd, + } +} + +// DigestAlgo returns the algorithm for computing the digest. +// This prefers the algorithm used by the digest when set, falling back to the preferred digest algorithm, and finally the canonical algorithm. +func (d Descriptor) DigestAlgo() digest.Algorithm { + if d.Digest != "" && d.Digest.Validate() == nil { + return d.Digest.Algorithm() + } + if d.digestAlgo != "" && d.digestAlgo.Available() { + return d.digestAlgo + } + return digest.Canonical +} + +// DigestAlgoPrefer sets the preferred digest algorithm for when the digest is unset. +func (d *Descriptor) DigestAlgoPrefer(algo digest.Algorithm) error { + if !algo.Available() { + return fmt.Errorf("digest algorithm is not available: %s%.0w", algo.String(), errs.ErrUnsupported) + } + d.digestAlgo = algo + return nil +} + +// GetData decodes the Data field from the descriptor if available +func (d Descriptor) GetData() ([]byte, error) { + // verify length + if int64(len(d.Data)) != d.Size { + return nil, errs.ErrParsingFailed + } + // generate and verify digest + if d.Digest != d.DigestAlgo().FromBytes(d.Data) { + return nil, errs.ErrParsingFailed + } + // return data + return d.Data, nil +} + +// Equal indicates the two descriptors are identical, effectively a DeepEqual. 
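
As a sketch of the inline-data path above: GetData only returns the embedded payload when both the size and the digest recomputed from Data agree with the descriptor, so callers can trust it without a registry round trip:

import (
	"fmt"

	"github.com/regclient/regclient/types/descriptor"
	"github.com/regclient/regclient/types/mediatype"
)

func showEmptyJSON() {
	d := descriptor.Descriptor{
		MediaType: mediatype.OCI1Empty,
		Digest:    descriptor.EmptyDigest,
		Size:      int64(len(descriptor.EmptyData)),
		Data:      descriptor.EmptyData,
	}
	if data, err := d.GetData(); err == nil {
		fmt.Println(string(data)) // prints {}
	}
}
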
+func (d Descriptor) Equal(d2 Descriptor) bool { + if !d.Same(d2) { + return false + } + if d.MediaType != d2.MediaType { + return false + } + if d.ArtifactType != d2.ArtifactType { + return false + } + if d.Platform == nil || d2.Platform == nil { + if d.Platform != nil || d2.Platform != nil { + return false + } + } else if !platform.Match(*d.Platform, *d2.Platform) { + return false + } + if d.URLs == nil || d2.URLs == nil { + if d.URLs != nil || d2.URLs != nil { + return false + } + } else if len(d.URLs) != len(d2.URLs) { + return false + } else { + for i := range d.URLs { + if d.URLs[i] != d2.URLs[i] { + return false + } + } + } + if d.Annotations == nil || d2.Annotations == nil { + if d.Annotations != nil || d2.Annotations != nil { + return false + } + } else if len(d.Annotations) != len(d2.Annotations) { + return false + } else { + for i := range d.Annotations { + if d.Annotations[i] != d2.Annotations[i] { + return false + } + } + } + return true +} + +// Same indicates two descriptors point to the same CAS object. +// This verifies the digest, media type, and size all match. +func (d Descriptor) Same(d2 Descriptor) bool { + if d.Digest != d2.Digest || d.Size != d2.Size { + return false + } + // loosen the check on media type since this can be converted from a build + if d.MediaType != d2.MediaType && (mtToOCI[d.MediaType] != mtToOCI[d2.MediaType] || mtToOCI[d.MediaType] == "") { + return false + } + return true +} + +func (d Descriptor) MarshalPrettyTW(tw *tabwriter.Writer, prefix string) error { + fmt.Fprintf(tw, "%sDigest:\t%s\n", prefix, string(d.Digest)) + fmt.Fprintf(tw, "%sMediaType:\t%s\n", prefix, d.MediaType) + if d.ArtifactType != "" { + fmt.Fprintf(tw, "%sArtifactType:\t%s\n", prefix, d.ArtifactType) + } + switch d.MediaType { + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, + mediatype.Docker2Manifest, mediatype.Docker2ManifestList, + mediatype.OCI1Manifest, mediatype.OCI1ManifestList: + // skip printing size for descriptors to manifests + default: + if d.Size > 100000 { + fmt.Fprintf(tw, "%sSize:\t%s\n", prefix, units.HumanSize(float64(d.Size))) + } else { + fmt.Fprintf(tw, "%sSize:\t%dB\n", prefix, d.Size) + } + } + if p := d.Platform; p != nil && p.OS != "" { + fmt.Fprintf(tw, "%sPlatform:\t%s\n", prefix, p.String()) + if p.OSVersion != "" { + fmt.Fprintf(tw, "%sOSVersion:\t%s\n", prefix, p.OSVersion) + } + if len(p.OSFeatures) > 0 { + fmt.Fprintf(tw, "%sOSFeatures:\t%s\n", prefix, strings.Join(p.OSFeatures, ", ")) + } + } + if len(d.URLs) > 0 { + fmt.Fprintf(tw, "%sURLs:\t%s\n", prefix, strings.Join(d.URLs, ", ")) + } + if d.Annotations != nil { + fmt.Fprintf(tw, "%sAnnotations:\t\n", prefix) + for k, v := range d.Annotations { + fmt.Fprintf(tw, "%s %s:\t%s\n", prefix, k, v) + } + } + return nil +} + +// MatchOpt defines conditions for a match descriptor. +type MatchOpt struct { + Platform *platform.Platform // Platform to match including compatible platforms (darwin/arm64 matches linux/arm64) + ArtifactType string // Match ArtifactType in the descriptor + Annotations map[string]string // Match each of the specified annotations and their value, an empty value verifies the key is set + SortAnnotation string // Sort the results by an annotation, string based comparison, descriptors without the annotation are sorted last + SortDesc bool // Set to true to sort in descending order +} + +// Merge applies changes to a MatchOpt, overwriting existing values, and returning a new MatchOpt. 
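
A sketch of the distinction between the two comparisons: descriptors that address the same bytes but carry different annotations are Same but not Equal (values are illustrative):

import (
	"fmt"

	"github.com/opencontainers/go-digest"

	"github.com/regclient/regclient/types/descriptor"
	"github.com/regclient/regclient/types/mediatype"
)

func compareDescriptors() {
	a := descriptor.Descriptor{
		MediaType: mediatype.OCI1Manifest,
		Digest:    digest.SHA256.FromString("example"),
		Size:      7,
	}
	b := a
	b.Annotations = map[string]string{"note": "copy"}
	fmt.Println(a.Same(b))  // true: digest, size, and media type match
	fmt.Println(a.Equal(b)) // false: annotations differ
}
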
+func (mo MatchOpt) Merge(changes MatchOpt) MatchOpt { + ret := MatchOpt{ + ArtifactType: changes.ArtifactType, + SortAnnotation: changes.SortAnnotation, + SortDesc: changes.SortDesc, + } + if ret.ArtifactType == "" { + ret.ArtifactType = mo.ArtifactType + } + if changes.Platform != nil { + p := *changes.Platform + ret.Platform = &p + } else if mo.Platform != nil { + p := *mo.Platform + ret.Platform = &p + } + if ret.SortAnnotation == "" { + ret.SortAnnotation = mo.SortAnnotation + } + if !ret.SortDesc { + ret.SortDesc = mo.SortDesc + } + if len(mo.Annotations) > 0 { + ret.Annotations = maps.Clone(mo.Annotations) + } + if len(changes.Annotations) > 0 { + if ret.Annotations == nil { + ret.Annotations = changes.Annotations + } else { + maps.Copy(ret.Annotations, changes.Annotations) + } + } + return ret +} + +// Match returns true if the descriptor matches the options, including compatible platforms. +func (d Descriptor) Match(opt MatchOpt) bool { + if opt.ArtifactType != "" && d.ArtifactType != opt.ArtifactType { + return false + } + if len(opt.Annotations) > 0 { + if d.Annotations == nil { + return false + } + for k, v := range opt.Annotations { + if dv, ok := d.Annotations[k]; !ok || (v != "" && v != dv) { + return false + } + } + } + if opt.Platform != nil { + if d.Platform == nil { + return false + } + if !platform.Compatible(*opt.Platform, *d.Platform) { + return false + } + } + return true +} + +// DescriptorListFilter returns a list of descriptors from the list matching the search options. +// When opt.SortAnnotation is set, the order of descriptors with matching annotations is undefined. +func DescriptorListFilter(dl []Descriptor, opt MatchOpt) []Descriptor { + ret := []Descriptor{} + for _, d := range dl { + if d.Match(opt) { + ret = append(ret, d) + } + } + if opt.SortAnnotation != "" { + sort.Slice(ret, func(i, j int) bool { + // if annotations are not defined, sort to the very end + if ret[i].Annotations == nil { + return false + } + if _, ok := ret[i].Annotations[opt.SortAnnotation]; !ok { + return false + } + if ret[j].Annotations == nil { + return true + } + if _, ok := ret[j].Annotations[opt.SortAnnotation]; !ok { + return true + } + // else sort by string + if strings.Compare(ret[i].Annotations[opt.SortAnnotation], ret[j].Annotations[opt.SortAnnotation]) < 0 { + return !opt.SortDesc + } + return opt.SortDesc + }) + } + return ret +} + +// DescriptorListSearch returns the first descriptor from the list matching the search options. 
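
For example, selecting the best per-platform manifest from an index's descriptor list can be done with the search helper that follows; this assumes platform.Parse from the vendored platform package, and the helper name is hypothetical:

import (
	"github.com/regclient/regclient/types/descriptor"
	"github.com/regclient/regclient/types/platform"
)

// pickManifest returns the descriptor that best matches the requested
// platform, considering compatible platforms as described above.
func pickManifest(dl []descriptor.Descriptor, platStr string) (descriptor.Descriptor, error) {
	p, err := platform.Parse(platStr)
	if err != nil {
		return descriptor.Descriptor{}, err
	}
	return descriptor.DescriptorListSearch(dl, descriptor.MatchOpt{Platform: &p})
}
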
+func DescriptorListSearch(dl []Descriptor, opt MatchOpt) (Descriptor, error) {
+	if opt.ArtifactType != "" || opt.SortAnnotation != "" || len(opt.Annotations) > 0 {
+		dl = DescriptorListFilter(dl, opt)
+	}
+	var ret Descriptor
+	var retPlat platform.Platform
+	if len(dl) == 0 {
+		return ret, errs.ErrNotFound
+	}
+	if opt.Platform == nil {
+		return dl[0], nil
+	}
+	found := false
+	comp := platform.NewCompare(*opt.Platform)
+	for _, d := range dl {
+		if d.Platform == nil {
+			continue
+		}
+		if comp.Better(*d.Platform, retPlat) {
+			found = true
+			ret = d
+			retPlat = *d.Platform
+		}
+	}
+	if !found {
+		return ret, errs.ErrNotFound
+	}
+	return ret, nil
+}
diff --git a/vendor/github.com/regclient/regclient/types/doc.go b/vendor/github.com/regclient/regclient/types/doc.go
new file mode 100644
index 000000000..828541d3c
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/doc.go
@@ -0,0 +1,3 @@
+// Package types defines various types that have no other internal imports
+// This allows them to be used between other packages without creating import loops
+package types
diff --git a/vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go b/vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go
new file mode 100644
index 000000000..923797305
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go
@@ -0,0 +1,135 @@
+// Package schema1 defines the manifest and json marshal/unmarshal for docker schema1
+package schema1
+
+import (
+	"encoding/json"
+	// crypto libraries included for go-digest
+	_ "crypto/sha256"
+	_ "crypto/sha512"
+
+	"github.com/docker/libtrust"
+	"github.com/opencontainers/go-digest"
+
+	"github.com/regclient/regclient/types/docker"
+	"github.com/regclient/regclient/types/mediatype"
+)
+
+var (
+	// ManifestSchemaVersion provides a pre-initialized version structure for schema1 manifests.
+	ManifestSchemaVersion = docker.Versioned{
+		SchemaVersion: 1,
+		MediaType:     mediatype.Docker1Manifest,
+	}
+	// ManifestSignedSchemaVersion provides a pre-initialized version structure for schema1 signed manifests.
+	ManifestSignedSchemaVersion = docker.Versioned{
+		SchemaVersion: 1,
+		MediaType:     mediatype.Docker1ManifestSigned,
+	}
+)
+
+// FSLayer is a container struct for BlobSums defined in an image manifest
+type FSLayer struct {
+	// BlobSum is the tarsum of the referenced filesystem image layer
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+// History stores unstructured v1 compatibility information
+type History struct {
+	// V1Compatibility is the raw v1 compatibility information
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Manifest defines the schema v1 docker manifest
+type Manifest struct {
+	docker.Versioned
+
+	// Name is the name of the image's repository
+	Name string `json:"name"`
+
+	// Tag is the tag of the image specified by this manifest
+	Tag string `json:"tag"`
+
+	// Architecture is the host architecture on which this image is intended to run
+	Architecture string `json:"architecture"`
+
+	// FSLayers is a list of filesystem layer blobSums contained in this image
+	FSLayers []FSLayer `json:"fsLayers"`
+
+	// History is a list of unstructured historical data for v1 compatibility
+	History []History `json:"history"`
+}
+
+// SignedManifest provides an envelope for a signed image manifest, including the format sensitive raw bytes.
+type SignedManifest struct {
+	Manifest
+
+	// Canonical is the canonical byte representation of the ImageManifest, without any attached signatures.
+	// The manifest byte representation cannot change or it will have to be re-signed.
+	Canonical []byte `json:"-"`
+
+	// all contains the byte representation of the Manifest including signatures and is returned by Payload()
+	all []byte
+}
+
+// UnmarshalJSON populates a new SignedManifest struct from JSON data.
+func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
+	sm.all = make([]byte, len(b))
+	// store manifest and signatures in all
+	copy(sm.all, b)
+
+	jsig, err := libtrust.ParsePrettySignature(b, "signatures")
+	if err != nil {
+		return err
+	}
+
+	// Resolve the payload in the manifest.
+	bytes, err := jsig.Payload()
+	if err != nil {
+		return err
+	}
+
+	// sm.Canonical stores the canonical manifest JSON
+	sm.Canonical = make([]byte, len(bytes))
+	copy(sm.Canonical, bytes)
+
+	// Unmarshal canonical JSON into Manifest object
+	var manifest Manifest
+	if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
+		return err
+	}
+
+	sm.Manifest = manifest
+
+	return nil
+}
+
+// MarshalJSON returns the contents of raw.
+// If Raw is nil, marshals the inner contents.
+// Applications requiring a marshaled signed manifest should simply use Raw directly, since the content produced by json.Marshal will be compacted and will fail signature checks.
+func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
+	if len(sm.all) > 0 {
+		return sm.all, nil
+	}
+
+	// If the raw data is not available, just dump the inner content.
+	return json.Marshal(&sm.Manifest)
+}
+
+// TODO: verify Payload and Signatures methods are required
+
+// Payload returns the signed content of the signed manifest.
+func (sm SignedManifest) Payload() (string, []byte, error) {
+	return mediatype.Docker1ManifestSigned, sm.all, nil
+}
+
+// Signatures returns the signatures as provided by (*libtrust.JSONSignature).Signatures.
+// The byte slices are opaque jws signatures.
+func (sm *SignedManifest) Signatures() ([][]byte, error) {
+	jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	// Resolve the payload in the manifest.
+	return jsig.Signatures()
+}
diff --git a/vendor/github.com/regclient/regclient/types/docker/schema2/doc.go b/vendor/github.com/regclient/regclient/types/docker/schema2/doc.go
new file mode 100644
index 000000000..ada3e7954
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/docker/schema2/doc.go
@@ -0,0 +1,2 @@
+// Package schema2 contains structs for Docker schema v2 manifests.
+package schema2
diff --git a/vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go b/vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go
new file mode 100644
index 000000000..16bb8cb51
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go
@@ -0,0 +1,29 @@
+package schema2
+
+import (
+	"github.com/regclient/regclient/types/descriptor"
+	"github.com/regclient/regclient/types/docker"
+	"github.com/regclient/regclient/types/mediatype"
+)
+
+// ManifestSchemaVersion is a pre-configured versioned field for manifests
+var ManifestSchemaVersion = docker.Versioned{
+	SchemaVersion: 2,
+	MediaType:     mediatype.Docker2Manifest,
+}
+
+// Manifest defines a schema2 manifest.
+type Manifest struct {
+	docker.Versioned
+
+	// Config references the image configuration as a blob.
+	Config descriptor.Descriptor `json:"config"`
+
+	// Layers lists descriptors for the layers referenced by the
+	// configuration.
+ Layers []descriptor.Descriptor `json:"layers"` + + // Annotations contains arbitrary metadata for the image index. + // Note, this is not a defined docker schema2 field. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go b/vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go new file mode 100644 index 000000000..8fa804712 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go @@ -0,0 +1,25 @@ +package schema2 + +import ( + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/docker" + "github.com/regclient/regclient/types/mediatype" +) + +// ManifestListSchemaVersion is a pre-configured versioned field for manifest lists +var ManifestListSchemaVersion = docker.Versioned{ + SchemaVersion: 2, + MediaType: mediatype.Docker2ManifestList, +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + docker.Versioned + + // Manifests lists descriptors in the manifest list + Manifests []descriptor.Descriptor `json:"manifests"` + + // Annotations contains arbitrary metadata for the image index. + // Note, this is not a defined docker schema2 field. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/docker/versioned.go b/vendor/github.com/regclient/regclient/types/docker/versioned.go new file mode 100644 index 000000000..9685f2c78 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/docker/versioned.go @@ -0,0 +1,10 @@ +// Package docker defines the common types for all docker schemas +package docker + +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/error.go b/vendor/github.com/regclient/regclient/types/error.go new file mode 100644 index 000000000..135a16005 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/error.go @@ -0,0 +1,186 @@ +package types + +import "github.com/regclient/regclient/types/errs" + +var ( + // ErrAllRequestsFailed when there are no mirrors left to try + // + // Deprecated: replace with [errs.ErrAllRequestsFailed]. + //go:fix inline + ErrAllRequestsFailed = errs.ErrAllRequestsFailed + // ErrAPINotFound if an api is not available for the host + // + // Deprecated: replace with [errs.ErrAPINotFound]. + //go:fix inline + ErrAPINotFound = errs.ErrAPINotFound + // ErrBackoffLimit maximum backoff attempts reached + // + // Deprecated: replace with [errs.ErrBackoffLimit]. + //go:fix inline + ErrBackoffLimit = errs.ErrBackoffLimit + // ErrCanceled if the context was canceled + // + // Deprecated: replace with [errs.ErrCanceled]. + //go:fix inline + ErrCanceled = errs.ErrCanceled + // ErrDigestMismatch if the expected digest wasn't received + // + // Deprecated: replace with [errs.ErrDigestMismatch]. + //go:fix inline + ErrDigestMismatch = errs.ErrDigestMismatch + // ErrEmptyChallenge indicates an issue with the received challenge in the WWW-Authenticate header + // + // Deprecated: replace with [errs.ErrEmptyChallenge]. + //go:fix inline + ErrEmptyChallenge = errs.ErrEmptyChallenge + // ErrFileDeleted indicates a requested file has been deleted + // + // Deprecated: replace with [errs.ErrFileDeleted]. 
+	//go:fix inline
+	ErrFileDeleted = errs.ErrFileDeleted
+	// ErrFileNotFound indicates a requested file is not found
+	//
+	// Deprecated: replace with [errs.ErrFileNotFound].
+	//go:fix inline
+	ErrFileNotFound = errs.ErrFileNotFound
+	// ErrHTTPStatus if the http status code was unexpected
+	//
+	// Deprecated: replace with [errs.ErrHTTPStatus].
+	//go:fix inline
+	ErrHTTPStatus = errs.ErrHTTPStatus
+	// ErrInvalidChallenge indicates an issue with the received challenge in the WWW-Authenticate header
+	//
+	// Deprecated: replace with [errs.ErrInvalidChallenge].
+	//go:fix inline
+	ErrInvalidChallenge = errs.ErrInvalidChallenge
+	// ErrInvalidReference indicates the reference to an image has an invalid syntax
+	//
+	// Deprecated: replace with [errs.ErrInvalidReference].
+	//go:fix inline
+	ErrInvalidReference = errs.ErrInvalidReference
+	// ErrLoopDetected indicates a child node points back to the parent
+	//
+	// Deprecated: replace with [errs.ErrLoopDetected].
+	//go:fix inline
+	ErrLoopDetected = errs.ErrLoopDetected
+	// ErrManifestNotSet indicates the manifest is not set, it must be pulled with a ManifestGet first
+	//
+	// Deprecated: replace with [errs.ErrManifestNotSet].
+	//go:fix inline
+	ErrManifestNotSet = errs.ErrManifestNotSet
+	// ErrMissingAnnotation returned when a needed annotation is not found
+	//
+	// Deprecated: replace with [errs.ErrMissingAnnotation].
+	//go:fix inline
+	ErrMissingAnnotation = errs.ErrMissingAnnotation
+	// ErrMissingDigest returned when image reference does not include a digest
+	//
+	// Deprecated: replace with [errs.ErrMissingDigest].
+	//go:fix inline
+	ErrMissingDigest = errs.ErrMissingDigest
+	// ErrMissingLocation returned when the location header is missing
+	//
+	// Deprecated: replace with [errs.ErrMissingLocation].
+	//go:fix inline
+	ErrMissingLocation = errs.ErrMissingLocation
+	// ErrMissingName returned when name missing for host
+	//
+	// Deprecated: replace with [errs.ErrMissingName].
+	//go:fix inline
+	ErrMissingName = errs.ErrMissingName
+	// ErrMissingTag returned when image reference does not include a tag
+	//
+	// Deprecated: replace with [errs.ErrMissingTag].
+	//go:fix inline
+	ErrMissingTag = errs.ErrMissingTag
+	// ErrMissingTagOrDigest returned when image reference does not include a tag or digest
+	//
+	// Deprecated: replace with [errs.ErrMissingTagOrDigest].
+	//go:fix inline
+	ErrMissingTagOrDigest = errs.ErrMissingTagOrDigest
+	// ErrMismatch returned when a comparison detects a difference
+	//
+	// Deprecated: replace with [errs.ErrMismatch].
+	//go:fix inline
+	ErrMismatch = errs.ErrMismatch
+	// ErrMountReturnedLocation when a blob mount fails but a location header is received
+	//
+	// Deprecated: replace with [errs.ErrMountReturnedLocation].
+	//go:fix inline
+	ErrMountReturnedLocation = errs.ErrMountReturnedLocation
+	// ErrNoNewChallenge indicates a challenge update did not result in any change
+	//
+	// Deprecated: replace with [errs.ErrNoNewChallenge].
+	//go:fix inline
+	ErrNoNewChallenge = errs.ErrNoNewChallenge
+	// ErrNotFound isn't there, search for your value elsewhere
+	//
+	// Deprecated: replace with [errs.ErrNotFound].
+	//go:fix inline
+	ErrNotFound = errs.ErrNotFound
+	// ErrNotImplemented returned when method has not been implemented yet
+	//
+	// Deprecated: replace with [errs.ErrNotImplemented].
+	//go:fix inline
+	ErrNotImplemented = errs.ErrNotImplemented
+	// ErrNotRetryable indicates the process cannot be retried
+	//
+	// Deprecated: replace with [errs.ErrNotRetryable].
+ //go:fix inline
+ ErrNotRetryable = errs.ErrNotRetryable
+ // ErrParsingFailed when a string cannot be parsed
+ //
+ // Deprecated: replace with [errs.ErrParsingFailed].
+ //go:fix inline
+ ErrParsingFailed = errs.ErrParsingFailed
+ // ErrRetryNeeded indicates a request needs to be retried
+ //
+ // Deprecated: replace with [errs.ErrRetryNeeded].
+ //go:fix inline
+ ErrRetryNeeded = errs.ErrRetryNeeded
+ // ErrShortRead if contents are less than the expected size
+ //
+ // Deprecated: replace with [errs.ErrShortRead].
+ //go:fix inline
+ ErrShortRead = errs.ErrShortRead
+ // ErrSizeLimitExceeded if contents exceed the size limit
+ //
+ // Deprecated: replace with [errs.ErrSizeLimitExceeded].
+ //go:fix inline
+ ErrSizeLimitExceeded = errs.ErrSizeLimitExceeded
+ // ErrUnavailable when a requested value is not available
+ //
+ // Deprecated: replace with [errs.ErrUnavailable].
+ //go:fix inline
+ ErrUnavailable = errs.ErrUnavailable
+ // ErrUnsupported indicates the request was unsupported
+ //
+ // Deprecated: replace with [errs.ErrUnsupported].
+ //go:fix inline
+ ErrUnsupported = errs.ErrUnsupported
+ // ErrUnsupportedAPI happens when an API is not supported on a registry
+ //
+ // Deprecated: replace with [errs.ErrUnsupportedAPI].
+ //go:fix inline
+ ErrUnsupportedAPI = errs.ErrUnsupportedAPI
+ // ErrUnsupportedConfigVersion happens when config file version is greater than this command supports
+ //
+ // Deprecated: replace with [errs.ErrUnsupportedConfigVersion].
+ //go:fix inline
+ ErrUnsupportedConfigVersion = errs.ErrUnsupportedConfigVersion
+ // ErrUnsupportedMediaType returned when media type is unknown or unsupported
+ //
+ // Deprecated: replace with [errs.ErrUnsupportedMediaType].
+ //go:fix inline
+ ErrUnsupportedMediaType = errs.ErrUnsupportedMediaType
+ // ErrHTTPRateLimit when requests exceed server rate limit
+ //
+ // Deprecated: replace with [errs.ErrHTTPRateLimit].
+ //go:fix inline
+ ErrHTTPRateLimit = errs.ErrHTTPRateLimit
+ // ErrHTTPUnauthorized when authentication fails
+ //
+ // Deprecated: replace with [errs.ErrHTTPUnauthorized].
+ //go:fix inline
+ ErrHTTPUnauthorized = errs.ErrHTTPUnauthorized
+)
diff --git a/vendor/github.com/regclient/regclient/types/errs/error.go b/vendor/github.com/regclient/regclient/types/errs/error.go
new file mode 100644
index 000000000..182155359
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/errs/error.go
@@ -0,0 +1,91 @@
+// Package errs is used for predefined error values.
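+//
+// These are sentinel errors intended to be matched with [errors.Is]. A few
+// values wrap a base error using the zero-width verb "%.0w", so, for example,
+// errors.Is(errs.ErrHTTPRateLimit, errs.ErrHTTPStatus) reports true. A
+// minimal caller-side sketch (illustrative, not part of this package):
+//
+// if errors.Is(err, errs.ErrHTTPRateLimit) {
+//  // rate limited: back off before retrying the request
+// }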
+package errs
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+)
+
+var (
+ // ErrAllRequestsFailed when there are no mirrors left to try
+ ErrAllRequestsFailed = errors.New("all requests failed")
+ // ErrAPINotFound if an api is not available for the host
+ ErrAPINotFound = errors.New("API not found")
+ // ErrBackoffLimit maximum backoff attempts reached
+ ErrBackoffLimit = errors.New("backoff limit reached")
+ // ErrCanceled if the context was canceled
+ ErrCanceled = errors.New("context was canceled")
+ // ErrDigestMismatch if the expected digest wasn't received
+ ErrDigestMismatch = errors.New("digest mismatch")
+ // ErrEmptyChallenge indicates an issue with the received challenge in the WWW-Authenticate header
+ ErrEmptyChallenge = errors.New("empty challenge header")
+ // ErrFileDeleted indicates a requested file has been deleted
+ ErrFileDeleted = errors.New("file deleted")
+ // ErrFileNotFound indicates a requested file is not found
+ ErrFileNotFound = fmt.Errorf("file not found%.0w", fs.ErrNotExist)
+ // ErrHTTPStatus if the http status code was unexpected
+ ErrHTTPStatus = errors.New("unexpected http status code")
+ // ErrInvalidChallenge indicates an issue with the received challenge in the WWW-Authenticate header
+ ErrInvalidChallenge = errors.New("invalid challenge header")
+ // ErrInvalidReference indicates the reference to an image has an invalid syntax
+ ErrInvalidReference = errors.New("invalid reference")
+ // ErrLoopDetected indicates a child node points back to the parent
+ ErrLoopDetected = errors.New("loop detected")
+ // ErrManifestNotSet indicates the manifest is not set; it must be pulled with a ManifestGet first
+ ErrManifestNotSet = errors.New("manifest not set")
+ // ErrMissingAnnotation returned when a needed annotation is not found
+ ErrMissingAnnotation = errors.New("annotation is missing")
+ // ErrMissingDigest returned when image reference does not include a digest
+ ErrMissingDigest = errors.New("digest missing from image reference")
+ // ErrMissingLocation returned when the location header is missing
+ ErrMissingLocation = errors.New("location header missing")
+ // ErrMissingName returned when name missing for host
+ ErrMissingName = errors.New("name missing")
+ // ErrMissingTag returned when image reference does not include a tag
+ ErrMissingTag = errors.New("tag missing from image reference")
+ // ErrMissingTagOrDigest returned when image reference does not include a tag or digest
+ ErrMissingTagOrDigest = errors.New("tag or digest missing from image reference")
+ // ErrMismatch returned when a comparison detects a difference
+ ErrMismatch = errors.New("content does not match")
+ // ErrMountReturnedLocation when a blob mount fails but a location header is received
+ ErrMountReturnedLocation = errors.New("blob mount returned a location to upload")
+ // ErrNoLogin indicates there is no user login defined for a registry
+ ErrNoLogin = errors.New("no login found")
+ // ErrNoNewChallenge indicates a challenge update did not result in any change
+ ErrNoNewChallenge = errors.New("no new challenge")
+ // ErrNotFound isn't there, search for your value elsewhere
+ ErrNotFound = errors.New("not found")
+ // ErrNotImplemented returned when method has not been implemented yet
+ ErrNotImplemented = errors.New("not implemented")
+ // ErrNotRetryable indicates the process cannot be retried
+ ErrNotRetryable = errors.New("not retryable")
+ // ErrParsingFailed when a string cannot be parsed
+ ErrParsingFailed = errors.New("parsing failed")
+ // ErrRetryNeeded indicates a
request needs to be retried
+ ErrRetryNeeded = errors.New("retry needed")
+ // ErrRetryLimitExceeded indicates too many retries have occurred
+ ErrRetryLimitExceeded = errors.New("retry limit exceeded")
+ // ErrShortRead if contents are less than the expected size
+ ErrShortRead = errors.New("short read")
+ // ErrSizeLimitExceeded if contents exceed the size limit
+ ErrSizeLimitExceeded = errors.New("size limit exceeded")
+ // ErrUnavailable when a requested value is not available
+ ErrUnavailable = errors.New("unavailable")
+ // ErrUnsupported indicates the request was unsupported
+ ErrUnsupported = errors.New("unsupported")
+ // ErrUnsupportedAPI happens when an API is not supported on a registry
+ ErrUnsupportedAPI = errors.New("unsupported API")
+ // ErrUnsupportedConfigVersion happens when config file version is greater than this command supports
+ ErrUnsupportedConfigVersion = errors.New("unsupported config version")
+ // ErrUnsupportedMediaType returned when media type is unknown or unsupported
+ ErrUnsupportedMediaType = errors.New("unsupported media type")
+)
+
+// custom HTTP errors extend the ErrHTTPStatus error
+var (
+ // ErrHTTPRateLimit when requests exceed server rate limit
+ ErrHTTPRateLimit = fmt.Errorf("rate limit exceeded%.0w", ErrHTTPStatus)
+ // ErrHTTPUnauthorized when authentication fails
+ ErrHTTPUnauthorized = fmt.Errorf("unauthorized%.0w", ErrHTTPStatus)
+)
diff --git a/vendor/github.com/regclient/regclient/types/manifest/common.go b/vendor/github.com/regclient/regclient/types/manifest/common.go
new file mode 100644
index 000000000..70139b786
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/manifest/common.go
@@ -0,0 +1,127 @@
+package manifest
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+
+ // crypto libraries included for go-digest
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+
+ digest "github.com/opencontainers/go-digest"
+
+ "github.com/regclient/regclient/types"
+ "github.com/regclient/regclient/types/descriptor"
+ "github.com/regclient/regclient/types/errs"
+ "github.com/regclient/regclient/types/ref"
+)
+
+type common struct {
+ r ref.Ref
+ desc descriptor.Descriptor
+ manifSet bool
+ ratelimit types.RateLimit
+ rawHeader http.Header
+ rawBody []byte
+}
+
+// GetDigest returns the digest
+func (m *common) GetDigest() digest.Digest {
+ return m.desc.Digest
+}
+
+// GetDescriptor returns the descriptor
+func (m *common) GetDescriptor() descriptor.Descriptor {
+ return m.desc
+}
+
+// GetMediaType returns the media type
+func (m *common) GetMediaType() string {
+ return m.desc.MediaType
+}
+
+// GetRateLimit returns the rate limit when the manifest was pulled from a registry.
+// This supports the headers used by Docker Hub.
+func (m *common) GetRateLimit() types.RateLimit {
+ return m.ratelimit
+}
+
+// GetRef returns the reference from the upstream registry
+func (m *common) GetRef() ref.Ref {
+ return m.r
+}
+
+// HasRateLimit indicates if the rate limit is set
+func (m *common) HasRateLimit() bool {
+ return m.ratelimit.Set
+}
+
+// IsList indicates if the manifest is a docker Manifest List or OCI Index
+func (m *common) IsList() bool {
+ switch m.desc.MediaType {
+ case MediaTypeDocker2ManifestList, MediaTypeOCI1ManifestList:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsSet indicates if the manifest is defined.
+// A false value indicates this is from a HEAD request, providing the digest, media-type, and other headers, but no body.
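+// For example (an illustrative sketch), a manifest built from a HEAD response
+// reports IsSet() == false, and RawBody() returns errs.ErrManifestNotSet until
+// the manifest is fetched again with a GET:
+//
+// if !m.IsSet() {
+//  // only the descriptor (digest, media type, size) is usable here
+// }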
+func (m *common) IsSet() bool { + return m.manifSet +} + +// RawBody returns the raw body from the manifest if available. +func (m *common) RawBody() ([]byte, error) { + if len(m.rawBody) == 0 { + return m.rawBody, errs.ErrManifestNotSet + } + return m.rawBody, nil +} + +// RawHeaders returns any headers included when manifest was pulled from a registry. +func (m *common) RawHeaders() (http.Header, error) { + return m.rawHeader, nil +} + +func (m *common) setRateLimit(header http.Header) { + // check for rate limit headers + rlLimit := header.Get("RateLimit-Limit") + rlRemain := header.Get("RateLimit-Remaining") + rlReset := header.Get("RateLimit-Reset") + if rlLimit != "" { + lpSplit := strings.Split(rlLimit, ",") + lSplit := strings.Split(lpSplit[0], ";") + rlLimitI, err := strconv.Atoi(lSplit[0]) + if err != nil { + m.ratelimit.Limit = 0 + } else { + m.ratelimit.Limit = rlLimitI + } + if len(lSplit) > 1 { + m.ratelimit.Policies = lpSplit + } else if len(lpSplit) > 1 { + m.ratelimit.Policies = lpSplit[1:] + } + } + if rlRemain != "" { + rSplit := strings.Split(rlRemain, ";") + rlRemainI, err := strconv.Atoi(rSplit[0]) + if err != nil { + m.ratelimit.Remain = 0 + } else { + m.ratelimit.Remain = rlRemainI + m.ratelimit.Set = true + } + } + if rlReset != "" { + rlResetI, err := strconv.Atoi(rlReset) + if err != nil { + m.ratelimit.Reset = 0 + } else { + m.ratelimit.Reset = rlResetI + } + } +} diff --git a/vendor/github.com/regclient/regclient/types/manifest/docker1.go b/vendor/github.com/regclient/regclient/types/manifest/docker1.go new file mode 100644 index 000000000..b45e37fe8 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/manifest/docker1.go @@ -0,0 +1,246 @@ +package manifest + +import ( + "bytes" + "encoding/json" + "fmt" + "text/tabwriter" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/docker/schema1" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/platform" +) + +const ( + // MediaTypeDocker1Manifest deprecated media type for docker schema1 manifests. + MediaTypeDocker1Manifest = "application/vnd.docker.distribution.manifest.v1+json" + // MediaTypeDocker1ManifestSigned is a deprecated schema1 manifest with jws signing. 
+ MediaTypeDocker1ManifestSigned = "application/vnd.docker.distribution.manifest.v1+prettyjws" +) + +type docker1Manifest struct { + common + schema1.Manifest +} +type docker1SignedManifest struct { + common + schema1.SignedManifest +} + +func (m *docker1Manifest) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) GetConfigDigest() (digest.Digest, error) { + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1SignedManifest) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1SignedManifest) GetConfigDigest() (digest.Digest, error) { + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1SignedManifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) GetLayers() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return []descriptor.Descriptor{}, errs.ErrManifestNotSet + } + + var dl []descriptor.Descriptor + for _, sd := range m.FSLayers { + dl = append(dl, descriptor.Descriptor{ + Digest: sd.BlobSum, + }) + } + return dl, nil +} + +func (m *docker1SignedManifest) GetLayers() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return []descriptor.Descriptor{}, errs.ErrManifestNotSet + } + + var dl []descriptor.Descriptor + for _, sd := range m.FSLayers { + dl = append(dl, descriptor.Descriptor{ + Digest: sd.BlobSum, + }) + } + return dl, nil +} + +func (m *docker1Manifest) GetOrig() any { + return m.Manifest +} + +func (m *docker1SignedManifest) GetOrig() any { + return m.SignedManifest +} + +func (m *docker1Manifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1SignedManifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) GetPlatformList() ([]*platform.Platform, error) { + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1SignedManifest) GetPlatformList() ([]*platform.Platform, error) { + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) GetSize() (int64, error) { + return 0, fmt.Errorf("GetSize is not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1SignedManifest) GetSize() (int64, error) { + return 0, fmt.Errorf("GetSize is not 
available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType)
+}
+
+func (m *docker1Manifest) MarshalJSON() ([]byte, error) {
+ if !m.manifSet {
+ return []byte{}, errs.ErrManifestNotSet
+ }
+
+ if len(m.rawBody) > 0 {
+ return m.rawBody, nil
+ }
+
+ return json.Marshal((m.Manifest))
+}
+
+func (m *docker1SignedManifest) MarshalJSON() ([]byte, error) {
+ if !m.manifSet {
+ return []byte{}, errs.ErrManifestNotSet
+ }
+
+ return m.SignedManifest.MarshalJSON()
+}
+
+func (m *docker1Manifest) MarshalPretty() ([]byte, error) {
+ if m == nil {
+ return []byte{}, nil
+ }
+ buf := &bytes.Buffer{}
+ tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
+ if m.r.Reference != "" {
+ fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference)
+ }
+ fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType)
+ fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String())
+ fmt.Fprintf(tw, "\t\n")
+ fmt.Fprintf(tw, "Layers:\t\n")
+ for _, d := range m.FSLayers {
+ fmt.Fprintf(tw, "  Digest:\t%s\n", string(d.BlobSum))
+ }
+ err := tw.Flush()
+ return buf.Bytes(), err
+}
+
+func (m *docker1SignedManifest) MarshalPretty() ([]byte, error) {
+ if m == nil {
+ return []byte{}, nil
+ }
+ buf := &bytes.Buffer{}
+ tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
+ if m.r.Reference != "" {
+ fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference)
+ }
+ fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType)
+ fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String())
+ fmt.Fprintf(tw, "\t\n")
+ fmt.Fprintf(tw, "Layers:\t\n")
+ for _, d := range m.FSLayers {
+ fmt.Fprintf(tw, "  Digest:\t%s\n", string(d.BlobSum))
+ }
+ err := tw.Flush()
+ return buf.Bytes(), err
+}
+
+func (m *docker1Manifest) SetConfig(d descriptor.Descriptor) error {
+ return fmt.Errorf("set methods not supported for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType)
+}
+
+func (m *docker1SignedManifest) SetConfig(d descriptor.Descriptor) error {
+ return fmt.Errorf("set methods not supported for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType)
+}
+
+func (m *docker1Manifest) SetLayers(dl []descriptor.Descriptor) error {
+ return fmt.Errorf("set methods not supported for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType)
+}
+
+func (m *docker1SignedManifest) SetLayers(dl []descriptor.Descriptor) error {
+ return fmt.Errorf("set methods not supported for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType)
+}
+
+func (m *docker1Manifest) SetOrig(origIn any) error {
+ orig, ok := origIn.(schema1.Manifest)
+ if !ok {
+ return errs.ErrUnsupportedMediaType
+ }
+ if orig.MediaType != mediatype.Docker1Manifest {
+ // TODO: error?
+ orig.MediaType = mediatype.Docker1Manifest
+ }
+ mj, err := json.Marshal(orig)
+ if err != nil {
+ return err
+ }
+ m.manifSet = true
+ m.rawBody = mj
+ m.desc = descriptor.Descriptor{
+ MediaType: mediatype.Docker1Manifest,
+ Digest: m.desc.DigestAlgo().FromBytes(mj),
+ Size: int64(len(mj)),
+ }
+ m.Manifest = orig
+
+ return nil
+}
+
+func (m *docker1SignedManifest) SetOrig(origIn any) error {
+ orig, ok := origIn.(schema1.SignedManifest)
+ if !ok {
+ return errs.ErrUnsupportedMediaType
+ }
+ if orig.MediaType != mediatype.Docker1ManifestSigned {
+ // TODO: error?
+ orig.MediaType = mediatype.Docker1ManifestSigned + } + mj, err := json.Marshal(orig) + if err != nil { + return err + } + m.manifSet = true + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker1ManifestSigned, + Digest: m.desc.DigestAlgo().FromBytes(orig.Canonical), + Size: int64(len(orig.Canonical)), + } + m.SignedManifest = orig + + return nil +} diff --git a/vendor/github.com/regclient/regclient/types/manifest/docker2.go b/vendor/github.com/regclient/regclient/types/manifest/docker2.go new file mode 100644 index 000000000..5fce95090 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/manifest/docker2.go @@ -0,0 +1,364 @@ +package manifest + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "text/tabwriter" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/units" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/platform" +) + +const ( + // MediaTypeDocker2Manifest is the media type when pulling manifests from a v2 registry + MediaTypeDocker2Manifest = mediatype.Docker2Manifest + // MediaTypeDocker2ManifestList is the media type when pulling a manifest list from a v2 registry + MediaTypeDocker2ManifestList = mediatype.Docker2ManifestList +) + +type docker2Manifest struct { + common + schema2.Manifest +} +type docker2ManifestList struct { + common + schema2.ManifestList +} + +func (m *docker2Manifest) GetAnnotations() (map[string]string, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Annotations, nil +} + +func (m *docker2Manifest) GetConfig() (descriptor.Descriptor, error) { + if !m.manifSet { + return descriptor.Descriptor{}, errs.ErrManifestNotSet + } + return m.Config, nil +} + +func (m *docker2Manifest) GetConfigDigest() (digest.Digest, error) { + if !m.manifSet { + return digest.Digest(""), errs.ErrManifestNotSet + } + return m.Config.Digest, nil +} + +func (m *docker2ManifestList) GetAnnotations() (map[string]string, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Annotations, nil +} + +func (m *docker2ManifestList) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker2ManifestList) GetConfigDigest() (digest.Digest, error) { + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker2Manifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker2ManifestList) GetManifestList() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return []descriptor.Descriptor{}, errs.ErrManifestNotSet + } + return m.Manifests, nil +} + +func (m *docker2Manifest) GetLayers() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return []descriptor.Descriptor{}, errs.ErrManifestNotSet + } + return m.Layers, nil +} + +func (m *docker2ManifestList) GetLayers() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("layers 
are not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker2Manifest) GetOrig() any { + return m.Manifest +} + +func (m *docker2ManifestList) GetOrig() any { + return m.ManifestList +} + +func (m *docker2Manifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker2ManifestList) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + if p == nil { + return nil, fmt.Errorf("invalid input, platform is nil%.0w", errs.ErrNotFound) + } + d, err := descriptor.DescriptorListSearch(m.Manifests, descriptor.MatchOpt{Platform: p}) + if err != nil { + return nil, fmt.Errorf("platform not found: %s%.0w", *p, err) + } + return &d, nil +} + +func (m *docker2Manifest) GetPlatformList() ([]*platform.Platform, error) { + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker2ManifestList) GetPlatformList() ([]*platform.Platform, error) { + dl, err := m.GetManifestList() + if err != nil { + return nil, err + } + return getPlatformList(dl) +} + +// GetSize returns the size in bytes of all layers +func (m *docker2Manifest) GetSize() (int64, error) { + if !m.manifSet { + return 0, errs.ErrManifestNotSet + } + var total int64 + for _, d := range m.Layers { + total += d.Size + } + return total, nil +} + +func (m *docker2Manifest) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + return json.Marshal((m.Manifest)) +} + +func (m *docker2ManifestList) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + return json.Marshal((m.ManifestList)) +} + +func (m *docker2Manifest) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + if len(m.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := m.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + var total int64 + for _, d := range m.Layers { + total += d.Size + } + fmt.Fprintf(tw, "Total Size:\t%s\n", units.HumanSize(float64(total))) + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Config:\t\n") + err := m.Config.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Layers:\t\n") + for _, d := range m.Layers { + fmt.Fprintf(tw, "\t\n") + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err = tw.Flush() + return buf.Bytes(), err +} + +func (m *docker2ManifestList) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, 
"MediaType:\t%s\n", m.desc.MediaType) + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + if len(m.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := m.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Manifests:\t\n") + for _, d := range m.Manifests { + fmt.Fprintf(tw, "\t\n") + dRef := m.r + if dRef.Reference != "" { + dRef = dRef.AddDigest(d.Digest.String()) + fmt.Fprintf(tw, " Name:\t%s\n", dRef.CommonName()) + } + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err := tw.Flush() + return buf.Bytes(), err +} + +func (m *docker2Manifest) SetAnnotation(key, val string) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } + return m.updateDesc() +} + +func (m *docker2ManifestList) SetAnnotation(key, val string) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } + return m.updateDesc() +} + +func (m *docker2Manifest) SetConfig(d descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Config = d + return m.updateDesc() +} + +func (m *docker2Manifest) SetLayers(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Layers = dl + return m.updateDesc() +} + +func (m *docker2ManifestList) SetManifestList(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Manifests = dl + return m.updateDesc() +} + +func (m *docker2Manifest) SetOrig(origIn any) error { + orig, ok := origIn.(schema2.Manifest) + if !ok { + return errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.Docker2Manifest { + // TODO: error? + orig.MediaType = mediatype.Docker2Manifest + } + m.manifSet = true + m.Manifest = orig + return m.updateDesc() +} + +func (m *docker2ManifestList) SetOrig(origIn any) error { + orig, ok := origIn.(schema2.ManifestList) + if !ok { + return errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.Docker2ManifestList { + // TODO: error? 
+ orig.MediaType = mediatype.Docker2ManifestList + } + m.manifSet = true + m.ManifestList = orig + return m.updateDesc() +} + +func (m *docker2Manifest) updateDesc() error { + mj, err := json.Marshal(m.Manifest) + if err != nil { + return err + } + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker2Manifest, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + return nil +} + +func (m *docker2ManifestList) updateDesc() error { + mj, err := json.Marshal(m.ManifestList) + if err != nil { + return err + } + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker2ManifestList, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + return nil +} diff --git a/vendor/github.com/regclient/regclient/types/manifest/manifest.go b/vendor/github.com/regclient/regclient/types/manifest/manifest.go new file mode 100644 index 000000000..1c035994e --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/manifest/manifest.go @@ -0,0 +1,594 @@ +// Package manifest abstracts the various types of supported manifests. +// Supported types include OCI index and image, and Docker manifest list and manifest. +package manifest + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + + // Crypto libraries are included for go-digest. + _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/docker/schema1" + "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/platform" + "github.com/regclient/regclient/types/ref" +) + +// Manifest interface is implemented by all supported manifests but +// many calls are only supported by certain underlying media types. +type Manifest interface { + GetDescriptor() descriptor.Descriptor + GetOrig() any + GetRef() ref.Ref + IsList() bool + IsSet() bool + MarshalJSON() ([]byte, error) + RawBody() ([]byte, error) + RawHeaders() (http.Header, error) + SetOrig(any) error + + // Deprecated: GetConfig should be accessed using [Imager] interface. + GetConfig() (descriptor.Descriptor, error) + // Deprecated: GetLayers should be accessed using [Imager] interface. + GetLayers() ([]descriptor.Descriptor, error) + + // Deprecated: GetManifestList should be accessed using [Indexer] interface. + GetManifestList() ([]descriptor.Descriptor, error) + + // Deprecated: GetConfigDigest should be replaced with [GetConfig]. + GetConfigDigest() (digest.Digest, error) + // Deprecated: GetDigest should be replaced with GetDescriptor().Digest, see [GetDescriptor]. + GetDigest() digest.Digest + // Deprecated: GetMediaType should be replaced with GetDescriptor().MediaType, see [GetDescriptor]. + GetMediaType() string + // Deprecated: GetPlatformDesc method should be replaced with [manifest.GetPlatformDesc]. + GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) + // Deprecated: GetPlatformList method should be replaced with [manifest.GetPlatformList]. + GetPlatformList() ([]*platform.Platform, error) + // Deprecated: GetRateLimit method should be replaced with [manifest.GetRateLimit]. + GetRateLimit() types.RateLimit + // Deprecated: HasRateLimit method should be replaced with [manifest.HasRateLimit]. 
+ HasRateLimit() bool +} + +// Annotator is used by manifests that support annotations. +// Note this will work for Docker manifests despite the spec not officially supporting it. +type Annotator interface { + GetAnnotations() (map[string]string, error) + SetAnnotation(key, val string) error +} + +// Indexer is used by manifests that contain a manifest list. +type Indexer interface { + GetManifestList() ([]descriptor.Descriptor, error) + SetManifestList(dl []descriptor.Descriptor) error +} + +// Imager is used by manifests packaging an image. +type Imager interface { + GetConfig() (descriptor.Descriptor, error) + GetLayers() ([]descriptor.Descriptor, error) + SetConfig(d descriptor.Descriptor) error + SetLayers(dl []descriptor.Descriptor) error + GetSize() (int64, error) +} + +// Subjecter is used by manifests that may have a subject field. +type Subjecter interface { + GetSubject() (*descriptor.Descriptor, error) + SetSubject(d *descriptor.Descriptor) error +} + +type manifestConfig struct { + r ref.Ref + desc descriptor.Descriptor + raw []byte + orig any + header http.Header +} +type Opts func(*manifestConfig) + +// New creates a new manifest based on provided options. +// The digest for the manifest will be checked against the descriptor, reference, or headers, depending on which is available first (later digests will be ignored). +func New(opts ...Opts) (Manifest, error) { + mc := manifestConfig{} + for _, opt := range opts { + opt(&mc) + } + c := common{ + r: mc.r, + desc: mc.desc, + rawBody: mc.raw, + rawHeader: mc.header, + } + if c.r.Digest != "" && c.desc.Digest == "" { + dig, err := digest.Parse(c.r.Digest) + if err != nil { + return nil, fmt.Errorf("failed to parse digest from ref: %w", err) + } + c.desc.Digest = dig + } + // extract fields from header where available + if mc.header != nil { + if c.desc.MediaType == "" { + c.desc.MediaType = mediatype.Base(mc.header.Get("Content-Type")) + } + if c.desc.Size == 0 { + cl, _ := strconv.Atoi(mc.header.Get("Content-Length")) + c.desc.Size = int64(cl) + } + if c.desc.Digest == "" { + c.desc.Digest, _ = digest.Parse(mc.header.Get("Docker-Content-Digest")) + } + c.setRateLimit(mc.header) + } + if mc.orig != nil { + return fromOrig(c, mc.orig) + } + return fromCommon(c) +} + +// WithDesc specifies the descriptor for the manifest. +func WithDesc(desc descriptor.Descriptor) Opts { + return func(mc *manifestConfig) { + mc.desc = desc + } +} + +// WithHeader provides the headers from the response when pulling the manifest. +func WithHeader(header http.Header) Opts { + return func(mc *manifestConfig) { + mc.header = header + } +} + +// WithOrig provides the original manifest variable. +func WithOrig(orig any) Opts { + return func(mc *manifestConfig) { + mc.orig = orig + } +} + +// WithRaw provides the manifest bytes or HTTP response body. +func WithRaw(raw []byte) Opts { + return func(mc *manifestConfig) { + mc.raw = raw + } +} + +// WithRef provides the reference used to get the manifest. +func WithRef(r ref.Ref) Opts { + return func(mc *manifestConfig) { + mc.r = r + } +} + +// GetDigest returns the digest from the manifest descriptor. +func GetDigest(m Manifest) digest.Digest { + d := m.GetDescriptor() + return d.Digest +} + +// GetMediaType returns the media type from the manifest descriptor. +func GetMediaType(m Manifest) string { + d := m.GetDescriptor() + return d.MediaType +} + +// GetPlatformDesc returns the descriptor for a specific platform from an index. 
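+// A minimal sketch of the intended use, assuming m is an already pulled
+// multi-platform index (the variable names here are illustrative):
+//
+// p, err := platform.Parse("linux/amd64")
+// if err != nil {
+//  return err
+// }
+// d, err := manifest.GetPlatformDesc(m, &p)
+// if err != nil {
+//  return err
+// }
+// fmt.Println(d.Digest) // digest of the linux/amd64 image manifest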
+func GetPlatformDesc(m Manifest, p *platform.Platform) (*descriptor.Descriptor, error) { + if p == nil { + return nil, fmt.Errorf("invalid input, platform is nil%.0w", errs.ErrNotFound) + } + mi, ok := m.(Indexer) + if !ok { + return nil, fmt.Errorf("unsupported manifest type: %s", m.GetDescriptor().MediaType) + } + dl, err := mi.GetManifestList() + if err != nil { + return nil, fmt.Errorf("failed to get manifest list: %w", err) + } + d, err := descriptor.DescriptorListSearch(dl, descriptor.MatchOpt{Platform: p}) + if err != nil { + return nil, fmt.Errorf("platform not found: %s%.0w", *p, err) + } + return &d, nil +} + +// GetPlatformList returns the list of platforms from an index. +func GetPlatformList(m Manifest) ([]*platform.Platform, error) { + mi, ok := m.(Indexer) + if !ok { + return nil, fmt.Errorf("unsupported manifest type: %s", m.GetDescriptor().MediaType) + } + dl, err := mi.GetManifestList() + if err != nil { + return nil, fmt.Errorf("failed to get manifest list: %w", err) + } + return getPlatformList(dl) +} + +// GetRateLimit returns the current rate limit seen in headers. +func GetRateLimit(m Manifest) types.RateLimit { + rl := types.RateLimit{} + header, err := m.RawHeaders() + if err != nil { + return rl + } + // check for rate limit headers + rlLimit := header.Get("RateLimit-Limit") + rlRemain := header.Get("RateLimit-Remaining") + rlReset := header.Get("RateLimit-Reset") + if rlLimit != "" { + lpSplit := strings.Split(rlLimit, ",") + lSplit := strings.Split(lpSplit[0], ";") + rlLimitI, err := strconv.Atoi(lSplit[0]) + if err != nil { + rl.Limit = 0 + } else { + rl.Limit = rlLimitI + } + if len(lSplit) > 1 { + rl.Policies = lpSplit + } else if len(lpSplit) > 1 { + rl.Policies = lpSplit[1:] + } + } + if rlRemain != "" { + rSplit := strings.Split(rlRemain, ";") + rlRemainI, err := strconv.Atoi(rSplit[0]) + if err != nil { + rl.Remain = 0 + } else { + rl.Remain = rlRemainI + rl.Set = true + } + } + if rlReset != "" { + rlResetI, err := strconv.Atoi(rlReset) + if err != nil { + rl.Reset = 0 + } else { + rl.Reset = rlResetI + } + } + return rl +} + +// HasRateLimit indicates whether the rate limit is set and available. +func HasRateLimit(m Manifest) bool { + rl := GetRateLimit(m) + return rl.Set +} + +// OCIIndexFromAny converts manifest lists to an OCI index. +func OCIIndexFromAny(orig any) (v1.Index, error) { + ociI := v1.Index{ + Versioned: v1.IndexSchemaVersion, + MediaType: mediatype.OCI1ManifestList, + } + switch orig := orig.(type) { + case schema2.ManifestList: + ociI.Manifests = orig.Manifests + ociI.Annotations = orig.Annotations + case v1.Index: + ociI = orig + default: + return ociI, fmt.Errorf("unable to convert %T to OCI index", orig) + } + return ociI, nil +} + +// OCIIndexToAny converts from an OCI index back to the manifest list. 
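+// For example (an illustrative sketch), converting an index back into a Docker
+// schema2 manifest list:
+//
+// var ml schema2.ManifestList
+// if err := manifest.OCIIndexToAny(ociIndex, &ml); err != nil {
+//  return err
+// }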
+func OCIIndexToAny(ociI v1.Index, origP any) error {
+ // reflect is used to handle both *interface and *Manifest
+ rv := reflect.ValueOf(origP)
+ for rv.IsValid() && rv.Type().Kind() == reflect.Pointer {
+ rv = rv.Elem()
+ }
+ if !rv.IsValid() {
+ return fmt.Errorf("invalid manifest output parameter: %T", origP)
+ }
+ if !rv.CanSet() {
+ return fmt.Errorf("manifest output must be a pointer: %T", origP)
+ }
+ origR := rv.Interface()
+ switch orig := (origR).(type) {
+ case schema2.ManifestList:
+ orig.Versioned = schema2.ManifestListSchemaVersion
+ orig.Manifests = ociI.Manifests
+ orig.Annotations = ociI.Annotations
+ rv.Set(reflect.ValueOf(orig))
+ case v1.Index:
+ rv.Set(reflect.ValueOf(ociI))
+ default:
+ return fmt.Errorf("unable to convert OCI index to %T", origR)
+ }
+ return nil
+}
+
+// OCIManifestFromAny converts an image manifest to an OCI manifest.
+func OCIManifestFromAny(orig any) (v1.Manifest, error) {
+ ociM := v1.Manifest{
+ Versioned: v1.ManifestSchemaVersion,
+ MediaType: mediatype.OCI1Manifest,
+ }
+ switch orig := orig.(type) {
+ case schema2.Manifest:
+ ociM.Config = orig.Config
+ ociM.Layers = orig.Layers
+ ociM.Annotations = orig.Annotations
+ case v1.Manifest:
+ ociM = orig
+ default:
+ // TODO: consider supporting Docker schema v1 media types
+ return ociM, fmt.Errorf("unable to convert %T to OCI image", orig)
+ }
+ return ociM, nil
+}
+
+// OCIManifestToAny converts an OCI manifest back to the image manifest.
+func OCIManifestToAny(ociM v1.Manifest, origP any) error {
+ // reflect is used to handle both *interface and *Manifest
+ rv := reflect.ValueOf(origP)
+ for rv.IsValid() && rv.Type().Kind() == reflect.Pointer {
+ rv = rv.Elem()
+ }
+ if !rv.IsValid() {
+ return fmt.Errorf("invalid manifest output parameter: %T", origP)
+ }
+ if !rv.CanSet() {
+ return fmt.Errorf("manifest output must be a pointer: %T", origP)
+ }
+ origR := rv.Interface()
+ switch orig := (origR).(type) {
+ case schema2.Manifest:
+ orig.Versioned = schema2.ManifestSchemaVersion
+ orig.Config = ociM.Config
+ orig.Layers = ociM.Layers
+ orig.Annotations = ociM.Annotations
+ rv.Set(reflect.ValueOf(orig))
+ case v1.Manifest:
+ rv.Set(reflect.ValueOf(ociM))
+ default:
+ // Docker schema v1 will not be supported; it cannot be re-signed, and there is no need for the unsigned variant
+ return fmt.Errorf("unable to convert OCI image to %T", origR)
+ }
+ return nil
+}
+
+// fromOrig creates a new manifest from the original upstream manifest type.
+// This path is used when creating a new manifest rather than pulling one from a registry.
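+// Callers typically reach this path through [New] with the WithOrig option,
+// for example (illustrative, where ociManifest is a previously built
+// v1.Manifest):
+//
+// m, err := manifest.New(manifest.WithOrig(ociManifest))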
+func fromOrig(c common, orig any) (Manifest, error) { + var mt string + var m Manifest + origDigest := c.desc.Digest + + mj, err := json.Marshal(orig) + if err != nil { + return nil, err + } + c.manifSet = true + if len(c.rawBody) == 0 { + c.rawBody = mj + } + if _, ok := orig.(schema1.SignedManifest); !ok { + c.desc.Digest = c.desc.DigestAlgo().FromBytes(mj) + } + if c.desc.Size == 0 { + c.desc.Size = int64(len(mj)) + } + // create manifest based on type + switch mOrig := orig.(type) { + case schema1.Manifest: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.Docker1Manifest + m = &docker1Manifest{ + common: c, + Manifest: mOrig, + } + case schema1.SignedManifest: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.Docker1ManifestSigned + // recompute digest on the canonical data + c.desc.Digest = c.desc.DigestAlgo().FromBytes(mOrig.Canonical) + m = &docker1SignedManifest{ + common: c, + SignedManifest: mOrig, + } + case schema2.Manifest: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.Docker2Manifest + m = &docker2Manifest{ + common: c, + Manifest: mOrig, + } + case schema2.ManifestList: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.Docker2ManifestList + m = &docker2ManifestList{ + common: c, + ManifestList: mOrig, + } + case v1.Manifest: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.OCI1Manifest + m = &oci1Manifest{ + common: c, + Manifest: mOrig, + } + case v1.Index: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.OCI1ManifestList + m = &oci1Index{ + common: c, + Index: orig.(v1.Index), + } + case v1.ArtifactManifest: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.OCI1Artifact + m = &oci1Artifact{ + common: c, + ArtifactManifest: mOrig, + } + default: + return nil, fmt.Errorf("unsupported type to convert to a manifest: %T", orig) + } + // verify media type + err = verifyMT(c.desc.MediaType, mt) + if err != nil { + return nil, err + } + // verify digest didn't change + if origDigest != "" && origDigest != c.desc.Digest { + return nil, fmt.Errorf("manifest digest mismatch, expected %s, computed %s%.0w", origDigest, c.desc.Digest, errs.ErrDigestMismatch) + } + return m, nil +} + +// fromCommon is used to create a manifest when the underlying manifest struct is not provided. 
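+// The media type is taken from the descriptor or headers when available, and
+// is otherwise inferred from the raw body (its mediaType, schemaVersion,
+// signatures, manifests, or layers fields), so, for example (illustrative),
+//
+// m, err := manifest.New(manifest.WithRaw(rawBody))
+//
+// can resolve an OCI index from its "manifests" entries even without a
+// Content-Type header.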
+func fromCommon(c common) (Manifest, error) {
+ var err error
+ var m Manifest
+ var mt string
+ origDigest := c.desc.Digest
+ // extract common data from rawBody
+ if len(c.rawBody) > 0 {
+ c.manifSet = true
+ // extract media type from body, either explicitly or with duck typing
+ if c.desc.MediaType == "" {
+ mt := struct {
+ MediaType string `json:"mediaType,omitempty"`
+ SchemaVersion int `json:"schemaVersion,omitempty"`
+ Signatures []any `json:"signatures,omitempty"`
+ Manifests []descriptor.Descriptor `json:"manifests,omitempty"`
+ Layers []descriptor.Descriptor `json:"layers,omitempty"`
+ }{}
+ err = json.Unmarshal(c.rawBody, &mt)
+ if mt.MediaType != "" {
+ c.desc.MediaType = mt.MediaType
+ } else if mt.SchemaVersion == 1 && len(mt.Signatures) > 0 {
+ c.desc.MediaType = mediatype.Docker1ManifestSigned
+ } else if mt.SchemaVersion == 1 {
+ c.desc.MediaType = mediatype.Docker1Manifest
+ } else if len(mt.Manifests) > 0 {
+ if strings.HasPrefix(mt.Manifests[0].MediaType, "application/vnd.docker.") {
+ c.desc.MediaType = mediatype.Docker2ManifestList
+ } else {
+ c.desc.MediaType = mediatype.OCI1ManifestList
+ }
+ } else if len(mt.Layers) > 0 {
+ if strings.HasPrefix(mt.Layers[0].MediaType, "application/vnd.docker.") {
+ c.desc.MediaType = mediatype.Docker2Manifest
+ } else {
+ c.desc.MediaType = mediatype.OCI1Manifest
+ }
+ }
+ }
+ // compute digest
+ if c.desc.MediaType != mediatype.Docker1ManifestSigned {
+ d := c.desc.DigestAlgo().FromBytes(c.rawBody)
+ c.desc.Digest = d
+ c.desc.Size = int64(len(c.rawBody))
+ }
+ }
+ switch c.desc.MediaType {
+ case mediatype.Docker1Manifest:
+ var mOrig schema1.Manifest
+ if len(c.rawBody) > 0 {
+ err = json.Unmarshal(c.rawBody, &mOrig)
+ mt = mOrig.MediaType
+ }
+ m = &docker1Manifest{common: c, Manifest: mOrig}
+ case mediatype.Docker1ManifestSigned:
+ var mOrig schema1.SignedManifest
+ if len(c.rawBody) > 0 {
+ err = json.Unmarshal(c.rawBody, &mOrig)
+ mt = mOrig.MediaType
+ d := c.desc.DigestAlgo().FromBytes(mOrig.Canonical)
+ c.desc.Digest = d
+ c.desc.Size = int64(len(mOrig.Canonical))
+ }
+ m = &docker1SignedManifest{common: c, SignedManifest: mOrig}
+ case mediatype.Docker2Manifest:
+ var mOrig schema2.Manifest
+ if len(c.rawBody) > 0 {
+ err = json.Unmarshal(c.rawBody, &mOrig)
+ mt = mOrig.MediaType
+ }
+ m = &docker2Manifest{common: c, Manifest: mOrig}
+ case mediatype.Docker2ManifestList:
+ var mOrig schema2.ManifestList
+ if len(c.rawBody) > 0 {
+ err = json.Unmarshal(c.rawBody, &mOrig)
+ mt = mOrig.MediaType
+ }
+ m = &docker2ManifestList{common: c, ManifestList: mOrig}
+ case mediatype.OCI1Manifest:
+ var mOrig v1.Manifest
+ if len(c.rawBody) > 0 {
+ err = json.Unmarshal(c.rawBody, &mOrig)
+ mt = mOrig.MediaType
+ }
+ m = &oci1Manifest{common: c, Manifest: mOrig}
+ case mediatype.OCI1ManifestList:
+ var mOrig v1.Index
+ if len(c.rawBody) > 0 {
+ err = json.Unmarshal(c.rawBody, &mOrig)
+ mt = mOrig.MediaType
+ }
+ m = &oci1Index{common: c, Index: mOrig}
+ case mediatype.OCI1Artifact:
+ var mOrig v1.ArtifactManifest
+ if len(c.rawBody) > 0 {
+ err = json.Unmarshal(c.rawBody, &mOrig)
+ mt = mOrig.MediaType
+ }
+ m = &oci1Artifact{common: c, ArtifactManifest: mOrig}
+ default:
+ return nil, fmt.Errorf("%w: \"%s\"", errs.ErrUnsupportedMediaType, c.desc.MediaType)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshaling manifest for %s: %w", c.r.CommonName(), err)
+ }
+ // verify media type
+ err = verifyMT(c.desc.MediaType, mt)
+ if err != nil {
+ return nil, err
+ }
+ // verify digest didn't change
+ if origDigest != ""
&& origDigest != c.desc.Digest { + return nil, fmt.Errorf("manifest digest mismatch, expected %s, computed %s%.0w", origDigest, c.desc.Digest, errs.ErrDigestMismatch) + } + return m, nil +} + +func verifyMT(expected, received string) error { + if received != "" && expected != received { + return fmt.Errorf("manifest contains an unexpected media type: expected %s, received %s", expected, received) + } + return nil +} + +func getPlatformList(dl []descriptor.Descriptor) ([]*platform.Platform, error) { + var l []*platform.Platform + for _, d := range dl { + if d.Platform != nil { + l = append(l, d.Platform) + } + } + return l, nil +} diff --git a/vendor/github.com/regclient/regclient/types/manifest/oci1.go b/vendor/github.com/regclient/regclient/types/manifest/oci1.go new file mode 100644 index 000000000..4491960f6 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/manifest/oci1.go @@ -0,0 +1,611 @@ +package manifest + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "text/tabwriter" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/units" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/platform" +) + +const ( + // MediaTypeOCI1Manifest OCI v1 manifest media type + MediaTypeOCI1Manifest = mediatype.OCI1Manifest + // MediaTypeOCI1ManifestList OCI v1 manifest list media type + MediaTypeOCI1ManifestList = mediatype.OCI1ManifestList +) + +type oci1Manifest struct { + common + v1.Manifest +} +type oci1Index struct { + common + v1.Index +} + +// oci1Artifact is EXPERIMENTAL +type oci1Artifact struct { + common + v1.ArtifactManifest +} + +func (m *oci1Manifest) GetAnnotations() (map[string]string, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Annotations, nil +} + +func (m *oci1Manifest) GetConfig() (descriptor.Descriptor, error) { + if !m.manifSet { + return descriptor.Descriptor{}, errs.ErrManifestNotSet + } + return m.Config, nil +} + +func (m *oci1Manifest) GetConfigDigest() (digest.Digest, error) { + if !m.manifSet { + return digest.Digest(""), errs.ErrManifestNotSet + } + return m.Config.Digest, nil +} + +func (m *oci1Index) GetAnnotations() (map[string]string, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Annotations, nil +} + +func (m *oci1Index) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Index) GetConfigDigest() (digest.Digest, error) { + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Artifact) GetAnnotations() (map[string]string, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Annotations, nil +} + +func (m *oci1Artifact) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Artifact) GetConfigDigest() (digest.Digest, error) { + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, 
errs.ErrUnsupportedMediaType) +} + +func (m *oci1Manifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Index) GetManifestList() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Manifests, nil +} + +func (m *oci1Artifact) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Manifest) GetLayers() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Layers, nil +} + +func (m *oci1Index) GetLayers() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("layers are not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Artifact) GetLayers() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Blobs, nil +} + +func (m *oci1Manifest) GetOrig() any { + return m.Manifest +} + +func (m *oci1Index) GetOrig() any { + return m.Index +} + +func (m *oci1Artifact) GetOrig() any { + return m.ArtifactManifest +} + +func (m *oci1Manifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Index) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + if p == nil { + return nil, fmt.Errorf("invalid input, platform is nil%.0w", errs.ErrNotFound) + } + d, err := descriptor.DescriptorListSearch(m.Manifests, descriptor.MatchOpt{Platform: p}) + if err != nil { + return nil, fmt.Errorf("platform not found: %s%.0w", *p, err) + } + return &d, nil +} + +func (m *oci1Artifact) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Manifest) GetPlatformList() ([]*platform.Platform, error) { + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Index) GetPlatformList() ([]*platform.Platform, error) { + dl, err := m.GetManifestList() + if err != nil { + return nil, err + } + return getPlatformList(dl) +} + +func (m *oci1Artifact) GetPlatformList() ([]*platform.Platform, error) { + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Manifest) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + + return json.Marshal((m.Manifest)) +} + +func (m *oci1Manifest) GetSubject() (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Manifest.Subject, nil +} + +func (m *oci1Index) GetSubject() (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Index.Subject, nil +} + +func (m *oci1Artifact) GetSubject() (*descriptor.Descriptor, error) { + if !m.manifSet { 
+ return nil, errs.ErrManifestNotSet + } + return m.ArtifactManifest.Subject, nil +} + +func (m *oci1Index) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + + return json.Marshal((m.Index)) +} + +func (m *oci1Artifact) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + + return json.Marshal((m.ArtifactManifest)) +} + +func (m *oci1Manifest) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + if m.ArtifactType != "" { + fmt.Fprintf(tw, "ArtifactType:\t%s\n", m.ArtifactType) + } + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + if len(m.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := m.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + var total int64 + for _, d := range m.Layers { + total += d.Size + } + fmt.Fprintf(tw, "Total Size:\t%s\n", units.HumanSize(float64(total))) + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Config:\t\n") + err := m.Config.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Layers:\t\n") + for _, d := range m.Layers { + fmt.Fprintf(tw, "\t\n") + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + if m.Subject != nil { + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Subject:\t\n") + err := m.Subject.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err = tw.Flush() + return buf.Bytes(), err +} + +func (m *oci1Index) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + if m.ArtifactType != "" { + fmt.Fprintf(tw, "ArtifactType:\t%s\n", m.ArtifactType) + } + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + if len(m.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := m.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Manifests:\t\n") + for _, d := range m.Manifests { + fmt.Fprintf(tw, "\t\n") + dRef := m.r + if dRef.Reference != "" { + dRef = dRef.AddDigest(d.Digest.String()) + fmt.Fprintf(tw, " Name:\t%s\n", dRef.CommonName()) + } + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + if m.Subject != nil { + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Subject:\t\n") + err := m.Subject.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err := tw.Flush() + return buf.Bytes(), err +} + +func (m *oci1Artifact) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != 
"" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + fmt.Fprintf(tw, "ArtifactType:\t%s\n", m.ArtifactType) + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + if len(m.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := m.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + var total int64 + for _, d := range m.Blobs { + total += d.Size + } + fmt.Fprintf(tw, "Total Size:\t%s\n", units.HumanSize(float64(total))) + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Blobs:\t\n") + for _, d := range m.Blobs { + fmt.Fprintf(tw, "\t\n") + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + if m.Subject != nil { + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Subject:\t\n") + err := m.Subject.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err := tw.Flush() + return buf.Bytes(), err +} + +func (m *oci1Manifest) SetAnnotation(key, val string) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } + return m.updateDesc() +} + +func (m *oci1Index) SetAnnotation(key, val string) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } + return m.updateDesc() +} + +func (m *oci1Artifact) SetAnnotation(key, val string) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } + return m.updateDesc() +} + +func (m *oci1Artifact) SetConfig(d descriptor.Descriptor) error { + return fmt.Errorf("set config not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Manifest) SetConfig(d descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Config = d + return m.updateDesc() +} + +func (m *oci1Artifact) SetLayers(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Blobs = dl + return m.updateDesc() +} + +// GetSize returns the size in bytes of all layers +func (m *oci1Manifest) GetSize() (int64, error) { + if !m.manifSet { + return 0, errs.ErrManifestNotSet + } + var total int64 + for _, d := range m.Layers { + total += d.Size + } + return total, nil +} + +// GetSize returns the size in bytes of all layers +func (m *oci1Artifact) GetSize() (int64, error) { + if !m.manifSet { + return 0, errs.ErrManifestNotSet + } + var total int64 + for _, d := range m.Blobs { + total += d.Size + } + return total, nil +} + +func (m *oci1Manifest) SetLayers(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Layers = dl + return m.updateDesc() +} + +func (m *oci1Index) SetManifestList(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Manifests = dl + return m.updateDesc() +} + +func (m *oci1Manifest) SetOrig(origIn any) error { + orig, ok := origIn.(v1.Manifest) + if !ok { + return errs.ErrUnsupportedMediaType + } + if 
orig.MediaType != mediatype.OCI1Manifest { + // TODO: error? + orig.MediaType = mediatype.OCI1Manifest + } + m.manifSet = true + m.Manifest = orig + + return m.updateDesc() +} + +func (m *oci1Index) SetOrig(origIn any) error { + orig, ok := origIn.(v1.Index) + if !ok { + return errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.OCI1ManifestList { + // TODO: error? + orig.MediaType = mediatype.OCI1ManifestList + } + m.manifSet = true + m.Index = orig + + return m.updateDesc() +} + +func (m *oci1Artifact) SetSubject(d *descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.ArtifactManifest.Subject = d + return m.updateDesc() +} + +func (m *oci1Manifest) SetSubject(d *descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Manifest.Subject = d + return m.updateDesc() +} + +func (m *oci1Index) SetSubject(d *descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Index.Subject = d + return m.updateDesc() +} + +func (m *oci1Artifact) SetOrig(origIn any) error { + orig, ok := origIn.(v1.ArtifactManifest) + if !ok { + return errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.OCI1Artifact { + // TODO: error? + orig.MediaType = mediatype.OCI1Artifact + } + m.manifSet = true + m.ArtifactManifest = orig + + return m.updateDesc() +} + +func (m *oci1Manifest) updateDesc() error { + mj, err := json.Marshal(m.Manifest) + if err != nil { + return err + } + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.OCI1Manifest, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + return nil +} + +func (m *oci1Index) updateDesc() error { + mj, err := json.Marshal(m.Index) + if err != nil { + return err + } + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.OCI1ManifestList, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + return nil +} + +func (m *oci1Artifact) updateDesc() error { + mj, err := json.Marshal(m.ArtifactManifest) + if err != nil { + return err + } + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.OCI1Artifact, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + return nil +} diff --git a/vendor/github.com/regclient/regclient/types/mediatype.go b/vendor/github.com/regclient/regclient/types/mediatype.go new file mode 100644 index 000000000..220759f8f --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/mediatype.go @@ -0,0 +1,110 @@ +package types + +import ( + "github.com/regclient/regclient/types/mediatype" +) + +const ( + // MediaTypeDocker1Manifest deprecated media type for docker schema1 manifests. + // + // Deprecated: replace with [mediatype.Docker1Manifest]. + //go:fix inline + MediaTypeDocker1Manifest = mediatype.Docker1Manifest + // MediaTypeDocker1ManifestSigned is a deprecated schema1 manifest with jws signing. + // + // Deprecated: replace with [mediatype.Docker1ManifestSigned]. + //go:fix inline + MediaTypeDocker1ManifestSigned = mediatype.Docker1ManifestSigned + // MediaTypeDocker2Manifest is the media type when pulling manifests from a v2 registry. + // + // Deprecated: replace with [mediatype.Docker2Manifest]. + //go:fix inline + MediaTypeDocker2Manifest = mediatype.Docker2Manifest + // MediaTypeDocker2ManifestList is the media type when pulling a manifest list from a v2 registry. + // + // Deprecated: replace with [mediatype.Docker2ManifestList]. 
+ //go:fix inline + MediaTypeDocker2ManifestList = mediatype.Docker2ManifestList + // MediaTypeDocker2ImageConfig is for the configuration json object media type. + // + // Deprecated: replace with [mediatype.Docker2ImageConfig]. + //go:fix inline + MediaTypeDocker2ImageConfig = mediatype.Docker2ImageConfig + // MediaTypeOCI1Artifact EXPERIMENTAL OCI v1 artifact media type. + // + // Deprecated: replace with [mediatype.OCI1Artifact]. + //go:fix inline + MediaTypeOCI1Artifact = mediatype.OCI1Artifact + // MediaTypeOCI1Manifest OCI v1 manifest media type. + // + // Deprecated: replace with [mediatype.OCI1Manifest]. + //go:fix inline + MediaTypeOCI1Manifest = mediatype.OCI1Manifest + // MediaTypeOCI1ManifestList OCI v1 manifest list media type. + // + // Deprecated: replace with [mediatype.OCI1ManifestList]. + //go:fix inline + MediaTypeOCI1ManifestList = mediatype.OCI1ManifestList + // MediaTypeOCI1ImageConfig OCI v1 configuration json object media type. + // + // Deprecated: replace with [mediatype.OCI1ImageConfig]. + //go:fix inline + MediaTypeOCI1ImageConfig = mediatype.OCI1ImageConfig + // MediaTypeDocker2LayerGzip is the default compressed layer for docker schema2. + // + // Deprecated: replace with [mediatype.Docker2LayerGzip]. + //go:fix inline + MediaTypeDocker2LayerGzip = mediatype.Docker2LayerGzip + // MediaTypeDocker2ForeignLayer is the default compressed layer for foreign layers in docker schema2. + // + // Deprecated: replace with [mediatype.Docker2ForeignLayer]. + //go:fix inline + MediaTypeDocker2ForeignLayer = mediatype.Docker2ForeignLayer + // MediaTypeOCI1Layer is the uncompressed layer for OCIv1. + // + // Deprecated: replace with [mediatype.OCI1Layer]. + //go:fix inline + MediaTypeOCI1Layer = mediatype.OCI1Layer + // MediaTypeOCI1LayerGzip is the gzip compressed layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1LayerGzip]. + //go:fix inline + MediaTypeOCI1LayerGzip = mediatype.OCI1LayerGzip + // MediaTypeOCI1LayerZstd is the zstd compressed layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1LayerZstd]. + //go:fix inline + MediaTypeOCI1LayerZstd = mediatype.OCI1LayerZstd + // MediaTypeOCI1ForeignLayer is the foreign layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1ForeignLayer]. + //go:fix inline + MediaTypeOCI1ForeignLayer = mediatype.OCI1ForeignLayer + // MediaTypeOCI1ForeignLayerGzip is the gzip compressed foreign layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1ForeignLayerGzip]. + //go:fix inline + MediaTypeOCI1ForeignLayerGzip = mediatype.OCI1ForeignLayerGzip + // MediaTypeOCI1ForeignLayerZstd is the zstd compressed foreign layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1ForeignLayerZstd]. + //go:fix inline + MediaTypeOCI1ForeignLayerZstd = mediatype.OCI1ForeignLayerZstd + // MediaTypeOCI1Empty is used for blobs containing the empty JSON data `{}`. + // + // Deprecated: replace with [mediatype.OCI1Empty]. + //go:fix inline + MediaTypeOCI1Empty = mediatype.OCI1Empty + // MediaTypeBuildkitCacheConfig is used by buildkit cache images. + // + // Deprecated: replace with [mediatype.BuildkitCacheConfig]. + //go:fix inline + MediaTypeBuildkitCacheConfig = mediatype.BuildkitCacheConfig +) + +// MediaTypeBase cleans the Content-Type header to return only the lower case base media type. +// +// Deprecated: replace with [mediatype.Base]. 
+// +//go:fix inline +var MediaTypeBase = mediatype.Base diff --git a/vendor/github.com/regclient/regclient/types/mediatype/mediatype.go b/vendor/github.com/regclient/regclient/types/mediatype/mediatype.go new file mode 100644 index 000000000..b74bb2c21 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/mediatype/mediatype.go @@ -0,0 +1,65 @@ +// Package mediatype defines well known media types. +package mediatype + +import ( + "regexp" + "strings" +) + +const ( + // Docker1Manifest deprecated media type for docker schema1 manifests. + Docker1Manifest = "application/vnd.docker.distribution.manifest.v1+json" + // Docker1ManifestSigned is a deprecated schema1 manifest with jws signing. + Docker1ManifestSigned = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // Docker2Manifest is the media type when pulling manifests from a v2 registry. + Docker2Manifest = "application/vnd.docker.distribution.manifest.v2+json" + // Docker2ManifestList is the media type when pulling a manifest list from a v2 registry. + Docker2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + // Docker2ImageConfig is for the configuration json object media type. + Docker2ImageConfig = "application/vnd.docker.container.image.v1+json" + // OCI1Artifact EXPERIMENTAL OCI v1 artifact media type. + OCI1Artifact = "application/vnd.oci.artifact.manifest.v1+json" + // OCI1Manifest OCI v1 manifest media type. + OCI1Manifest = "application/vnd.oci.image.manifest.v1+json" + // OCI1ManifestList OCI v1 manifest list media type. + OCI1ManifestList = "application/vnd.oci.image.index.v1+json" + // OCI1ImageConfig OCI v1 configuration json object media type. + OCI1ImageConfig = "application/vnd.oci.image.config.v1+json" + // Docker2Layer is the uncompressed layer for docker schema2. + Docker2Layer = "application/vnd.docker.image.rootfs.diff.tar" + // Docker2LayerGzip is the default compressed layer for docker schema2. + Docker2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" + // Docker2LayerZstd is the default compressed layer for docker schema2. + Docker2LayerZstd = "application/vnd.docker.image.rootfs.diff.tar.zstd" + // Docker2ForeignLayer is the default compressed layer for foreign layers in docker schema2. + Docker2ForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + // OCI1Layer is the uncompressed layer for OCIv1. + OCI1Layer = "application/vnd.oci.image.layer.v1.tar" + // OCI1LayerGzip is the gzip compressed layer for OCI v1. + OCI1LayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + // OCI1LayerZstd is the zstd compressed layer for OCI v1. + OCI1LayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" + // OCI1ForeignLayer is the foreign layer for OCI v1. + OCI1ForeignLayer = "application/vnd.oci.image.layer.nondistributable.v1.tar" + // OCI1ForeignLayerGzip is the gzip compressed foreign layer for OCI v1. + OCI1ForeignLayerGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + // OCI1ForeignLayerZstd is the zstd compressed foreign layer for OCI v1. + OCI1ForeignLayerZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" + // OCI1Empty is used for blobs containing the empty JSON data `{}`. + OCI1Empty = "application/vnd.oci.empty.v1+json" + // BuildkitCacheConfig is used by buildkit cache images. + BuildkitCacheConfig = "application/vnd.buildkit.cacheconfig.v0" +) + +// Base cleans the Content-Type header to return only the lower case base media type. 
+func Base(orig string) string { + base, _, _ := strings.Cut(orig, ";") + return strings.TrimSpace(strings.ToLower(base)) +} + +// Valid returns true if the media type matches the rfc6838 4.2 naming requirements. +func Valid(mt string) bool { + return validateRegexp.MatchString(mt) +} + +var validateRegexp = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9!#$&^_.+-]{0,126}/[A-Za-z0-9][A-Za-z0-9!#$&^_.+-]{0,126}$`) diff --git a/vendor/github.com/regclient/regclient/types/oci/doc.go b/vendor/github.com/regclient/regclient/types/oci/doc.go new file mode 100644 index 000000000..8903fd697 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/doc.go @@ -0,0 +1,18 @@ +// Package oci defines OCI image-spec types +package oci + +// Contents of this folder refer to types defined in the OCI image-spec with the following license: + +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/artifact.go b/vendor/github.com/regclient/regclient/types/oci/v1/artifact.go new file mode 100644 index 000000000..8effe1680 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/artifact.go @@ -0,0 +1,21 @@ +package v1 + +import "github.com/regclient/regclient/types/descriptor" + +// ArtifactManifest EXPERIMENTAL defines an OCI Artifact +type ArtifactManifest struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType"` + + // ArtifactType is the media type of the artifact this schema refers to. + ArtifactType string `json:"artifactType,omitempty"` + + // Blobs is a collection of blobs referenced by this manifest. + Blobs []descriptor.Descriptor `json:"blobs,omitempty"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *descriptor.Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the artifact manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/config.go b/vendor/github.com/regclient/regclient/types/oci/v1/config.go new file mode 100644 index 000000000..497eef897 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/config.go @@ -0,0 +1,141 @@ +package v1 + +// Docker specific content in this file is included from +// https://github.com/moby/moby/blob/master/api/types/container/config.go + +import ( + "time" + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/platform" +) + +// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. +type ImageConfig struct { + // User defines the username or UID which the process in the container should run as.
+ User string `json:"User,omitempty"` + + // ExposedPorts a set of ports to expose from a container running this image. + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + + // Env is a list of environment variables to be used in a container. + Env []string `json:"Env,omitempty"` + + // Entrypoint defines a list of arguments to use as the command to execute when the container starts. + Entrypoint []string `json:"Entrypoint,omitempty"` + + // Cmd defines the default arguments to the entrypoint of the container. + Cmd []string `json:"Cmd,omitempty"` + + // Volumes is a set of directories describing where the process is likely to write data specific to a container instance. + Volumes map[string]struct{} `json:"Volumes,omitempty"` + + // WorkingDir sets the current working directory of the entrypoint process in the container. + WorkingDir string `json:"WorkingDir,omitempty"` + + // Labels contains arbitrary metadata for the container. + Labels map[string]string `json:"Labels,omitempty"` + + // StopSignal contains the system call signal that will be sent to the container to exit. + StopSignal string `json:"StopSignal,omitempty"` + + // StopTimeout is the time in seconds to stop the container. + // This is a Docker specific extension to the config, and not part of the OCI spec. + StopTimeout *int `json:",omitempty"` + + // ArgsEscaped `[Deprecated]` - This field is present only for legacy + // compatibility with Docker and should not be used by new image builders. + // It is used by Docker for Windows images to indicate that the `Entrypoint` + // or `Cmd` (or both) contains only a single-element array that is + // pre-escaped and combined into a single string `CommandLine`. If `true`, + // the value in `Entrypoint` or `Cmd` should be used as-is to avoid double + // escaping. + ArgsEscaped bool `json:"ArgsEscaped,omitempty"` + + // Healthcheck describes how to check if the container is healthy. + // This is a Docker specific extension to the config, and not part of the OCI spec. + Healthcheck *HealthConfig `json:"Healthcheck,omitempty"` + + // OnBuild lists any ONBUILD steps defined in the Dockerfile. + // This is a Docker specific extension to the config, and not part of the OCI spec. + OnBuild []string `json:"OnBuild,omitempty"` + + // Shell for the shell-form of RUN, CMD, and ENTRYPOINT. + // This is a Docker specific extension to the config, and not part of the OCI spec. + Shell []string `json:"Shell,omitempty"` +} + +// RootFS describes the layer content addresses +type RootFS struct { + // Type is the type of the rootfs. + Type string `json:"type"` + + // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. + DiffIDs []digest.Digest `json:"diff_ids"` +} + +// History describes the history of a layer. +type History struct { + // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // CreatedBy is the command which created the layer. + CreatedBy string `json:"created_by,omitempty"` + + // Author is the author of the build point. + Author string `json:"author,omitempty"` + + // Comment is a custom message set when creating the layer. + Comment string `json:"comment,omitempty"` + + // EmptyLayer is used to mark if the history item created a filesystem diff. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Image is the JSON structure which describes some basic information about the image.
+// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. + Author string `json:"author,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + platform.Platform + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitzero"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +// This is a Docker specific extension to the config, and not part of the OCI spec. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/doc.go b/vendor/github.com/regclient/regclient/types/oci/v1/doc.go new file mode 100644 index 000000000..2d198388d --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/doc.go @@ -0,0 +1,18 @@ +// Package v1 defines version 1 of OCI image-spec types +package v1 + +// Contents of this folder refer to types defined in the OCI image-spec with the following license: + +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
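Illustration (not part of the patch): a minimal sketch of how the vendored v1.Image config type above might be decoded. The package main wrapper and the sample JSON blob are assumptions for illustration; in practice the blob would come from a manifest's config descriptor.

package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/regclient/regclient/types/oci/v1"
)

func main() {
	// Hypothetical config blob, kept to the required fields for illustration only.
	raw := []byte(`{
		"architecture": "amd64",
		"os": "linux",
		"config": {"Env": ["PATH=/usr/local/bin"], "Entrypoint": ["/diun"]},
		"rootfs": {"type": "layers", "diff_ids": []}
	}`)
	var img v1.Image
	if err := json.Unmarshal(raw, &img); err != nil {
		panic(err)
	}
	// OS and Architecture are promoted fields from the embedded platform.Platform.
	fmt.Println(img.OS, img.Architecture, img.Config.Entrypoint)
}

This relies only on the struct tags already defined in the vendored files, so the types round-trip through encoding/json without any custom marshalling code.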
diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/index.go b/vendor/github.com/regclient/regclient/types/oci/v1/index.go new file mode 100644 index 000000000..887a5c874 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/index.go @@ -0,0 +1,32 @@ +package v1 + +import ( + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/oci" +) + +// IndexSchemaVersion is a pre-configured versioned field for manifests +var IndexSchemaVersion = oci.Versioned{ + SchemaVersion: 2, +} + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. +type Index struct { + oci.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + + // Manifests references platform specific manifests. + Manifests []descriptor.Descriptor `json:"manifests"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *descriptor.Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/layout.go b/vendor/github.com/regclient/regclient/types/oci/v1/layout.go new file mode 100644 index 000000000..508db6273 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/layout.go @@ -0,0 +1,6 @@ +package v1 + +// ImageLayout is the structure in the "oci-layout" file, found in the root of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/manifest.go b/vendor/github.com/regclient/regclient/types/oci/v1/manifest.go new file mode 100644 index 000000000..d49c39ae1 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/manifest.go @@ -0,0 +1,35 @@ +package v1 + +import ( + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/oci" +) + +// ManifestSchemaVersion is a pre-configured versioned field for manifests +var ManifestSchemaVersion = oci.Versioned{ + SchemaVersion: 2, +} + +// Manifest defines an OCI image +type Manifest struct { + oci.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config descriptor.Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []descriptor.Descriptor `json:"layers"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. 
+ Subject *descriptor.Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/version.go b/vendor/github.com/regclient/regclient/types/oci/version.go new file mode 100644 index 000000000..0040374c2 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/version.go @@ -0,0 +1,8 @@ +// Package oci defines common settings for all OCI types +package oci + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/vendor/github.com/regclient/regclient/types/ping/ping.go b/vendor/github.com/regclient/regclient/types/ping/ping.go new file mode 100644 index 000000000..9417b4a51 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/ping/ping.go @@ -0,0 +1,13 @@ +// Package ping is used for data types with the Ping methods. +package ping + +import ( + "io/fs" + "net/http" +) + +// Result is the response to a ping request. +type Result struct { + Header http.Header // Header is defined for responses from a registry. + Stat fs.FileInfo // Stat is defined for responses from an ocidir. +} diff --git a/vendor/github.com/regclient/regclient/types/platform/compare.go b/vendor/github.com/regclient/regclient/types/platform/compare.go new file mode 100644 index 000000000..ac38f213b --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/compare.go @@ -0,0 +1,213 @@ +package platform + +import ( + "strconv" + "strings" +) + +type compare struct { + host Platform +} + +type CompareOpts func(*compare) + +// NewCompare is used to compare multiple target entries to a host value. +func NewCompare(host Platform, opts ...CompareOpts) *compare { + (&host).normalize() + c := compare{ + host: host, + } + for _, optFn := range opts { + optFn(&c) + } + return &c +} + +// Better returns true when the target is compatible and a better match than the previous platform. +// The previous platform value may be the zero value when no previous match has been found. +func (c *compare) Better(target, prev Platform) bool { + if !Compatible(c.host, target) { + return false + } + (&target).normalize() + (&prev).normalize() + if prev.OS != target.OS { + if target.OS == c.host.OS { + return true + } else if prev.OS == c.host.OS { + return false + } + } + if prev.Architecture != target.Architecture { + if target.Architecture == c.host.Architecture { + return true + } else if prev.Architecture == c.host.Architecture { + return false + } + } + if prev.Variant != target.Variant { + if target.Variant == c.host.Variant { + return true + } else if prev.Variant == c.host.Variant { + return false + } + pV := variantVer(prev.Variant) + tV := variantVer(target.Variant) + if tV > pV { + return true + } else if tV < pV { + return false + } + } + if prev.OSVersion != target.OSVersion { + if target.OSVersion == c.host.OSVersion { + return true + } else if prev.OSVersion == c.host.OSVersion { + return false + } + cmp := semverCmp(prev.OSVersion, target.OSVersion) + if cmp != 0 { + return cmp < 0 + } + } + return false +} + +// Compatible indicates if a host can run a specified target platform image. +// This accounts for Docker Desktop for Mac and Windows using a Linux VM. 
+func (c *compare) Compatible(target Platform) bool { + (&target).normalize() + if c.host.OS == "linux" || c.host.OS == "freebsd" { + return c.host.OS == target.OS && c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) + } else if c.host.OS == "windows" { + if target.OS == "windows" { + return c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) && + osVerCompatible(c.host.OSVersion, target.OSVersion) + } else if target.OS == "linux" { + return c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) + } + return false + } else if c.host.OS == "darwin" { + return (target.OS == "darwin" || target.OS == "linux") && + c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) + } else { + return c.host.OS == target.OS && c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) && + c.host.OSVersion == target.OSVersion && + strSliceEq(c.host.OSFeatures, target.OSFeatures) && + strSliceEq(c.host.Features, target.Features) + } +} + +// Match indicates if two platforms are the same. +func (c *compare) Match(target Platform) bool { + (&target).normalize() + if c.host.OS != target.OS { + return false + } + if c.host.OS == "linux" || c.host.OS == "freebsd" { + return c.host.Architecture == target.Architecture && c.host.Variant == target.Variant + } else if c.host.OS == "windows" { + return c.host.Architecture == target.Architecture && c.host.Variant == target.Variant && + osVerSemver(c.host.OSVersion) == osVerSemver(target.OSVersion) + } else { + return c.host.Architecture == target.Architecture && + c.host.Variant == target.Variant && + c.host.OSVersion == target.OSVersion && + strSliceEq(c.host.OSFeatures, target.OSFeatures) && + strSliceEq(c.host.Features, target.Features) + } +} + +// Compatible indicates if a host can run a specified target platform image. +// This accounts for Docker Desktop for Mac and Windows using a Linux VM. +func Compatible(host, target Platform) bool { + comp := NewCompare(host) + return comp.Compatible(target) +} + +// Match indicates if two platforms are the same. 
+func Match(a, b Platform) bool { + comp := NewCompare(a) + return comp.Match(b) +} + +func osVerCompatible(host, target string) bool { + if host == "" { + return true + } + vHost := osVerSemver(host) + vTarget := osVerSemver(target) + return vHost == vTarget +} + +func osVerSemver(platVer string) string { + verParts := strings.Split(platVer, ".") + if len(verParts) < 4 { + return platVer + } + return strings.Join(verParts[0:3], ".") +} + +// return: -1 if a<b, 0 if a=b, 1 if a>b +func semverCmp(a, b string) int { + aParts := strings.Split(a, ".") + bParts := strings.Split(b, ".") + for i := range aParts { + if len(bParts) < i+1 { + return 1 + } + aInt, aErr := strconv.Atoi(aParts[i]) + bInt, bErr := strconv.Atoi(bParts[i]) + if aErr != nil { + if bErr != nil { + return 0 + } + return -1 + } + if bErr != nil { + return 1 + } + if aInt < bInt { + return -1 + } + if aInt > bInt { + return 1 + } + } + return 0 +} + +func strSliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func variantCompatible(host, target string) bool { + vHost := variantVer(host) + vTarget := variantVer(target) + if vHost >= vTarget || (vHost == 1 && target == "") || (host == "" && vTarget == 1) { + return true + } + return false +} + +func variantVer(v string) int { + v = strings.TrimPrefix(v, "v") + ver, err := strconv.Atoi(v) + if err != nil { + return 0 + } + return ver +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo.go new file mode 100644 index 000000000..75fe335a1 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo.go @@ -0,0 +1,30 @@ +// Related implementations: +// +// +// +// +// + +package platform + +import ( + "runtime" + "sync" +) + +// cpuVariantValue is the variant of the local CPU architecture. +// For example on ARM, v7 and v8. And on AMD64, v1 - v4. +// Don't use this value directly; call cpuVariant() instead.
+var cpuVariantValue string + +var cpuVariantOnce sync.Once + +func cpuVariant() string { + cpuVariantOnce.Do(func() { + switch runtime.GOARCH { + case "amd64", "arm", "arm64": + cpuVariantValue = lookupCPUVariant() + } + }) + return cpuVariantValue +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go new file mode 100644 index 000000000..e0802608f --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go @@ -0,0 +1,82 @@ +//go:build arm || arm64 + +package platform + +import ( + "bufio" + "os" + "runtime" + "strings" +) + +func lookupCPUVariant() string { + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only support v7 for ARM32 and v8 for ARM64, so we can use + // runtime.GOARCH to determine the variants + switch runtime.GOARCH { + case "arm64": + return "v8" + case "arm": + return "v7" + } + return "" + } + + variant := getCPUInfo("Cpu architecture") + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model := getCPUInfo("model name") + if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "" + } + + return variant +} + +// For Linux, the kernel has already detected the ABI, ISA and Features. +// So we don't need to access the ARM registers to detect platform information +// by ourselves. We can just parse this information from /proc/cpuinfo. +func getCPUInfo(pattern string) (info string) { + if runtime.GOOS != "linux" { + return "" + } + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "" + } + defer cpuinfo.Close() + + // Parse /proc/cpuinfo line by line. For an SMP SoC, parsing + // the first core is enough.
+ scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]) + } + } + return "" +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go new file mode 100644 index 000000000..9623828db --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go @@ -0,0 +1,7 @@ +//go:build !386 && !amd64 && !amd64p32 && !arm && !arm64 + +package platform + +func lookupCPUVariant() string { + return "" +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go new file mode 100644 index 000000000..27e53e06c --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go @@ -0,0 +1,100 @@ +//go:build 386 || amd64 || amd64p32 + +package platform + +const ( + ecx1SSE3 = 0 + ecx1SSSE3 = 9 + ecx1FMA = 12 + ecx1CX16 = 13 + ecx1SSE4_1 = 19 + ecx1SSE4_2 = 20 + ecx1MOVBE = 22 + ecx1POPCNT = 23 + ecx1XSAVE = 26 + ecx1OSXSAVE = 27 + ecx1AVX = 28 + ecx1F16C = 29 + + ebx7BMI1 = 3 + ebx7AVX2 = 5 + ebx7BMI2 = 8 + ebx7AVX512F = 16 + ebx7AVX512DQ = 17 + ebx7AVX512CD = 28 + ebx7AVX512BW = 30 + ebx7AVX512VL = 31 + + ecxxLAHF = 0 + ecxxLZCNT = 5 + + eaxOSXMM = 1 + eaxOSYMM = 2 + eaxOSOpMask = 5 + eaxOSZMMHi16 = 6 + eaxOSZMMHi256 = 7 +) + +var ( + // GOAMD64=v1 (default): The baseline. Exclusively generates instructions that all 64-bit x86 processors can execute. + // GOAMD64=v2: all v1 instructions, plus CX16, LAHF-SAHF, POPCNT, SSE3, SSE4.1, SSE4.2, SSSE3. + // GOAMD64=v3: all v2 instructions, plus AVX, AVX2, BMI1, BMI2, F16C, FMA, LZCNT, MOVBE, OSXSAVE. + // GOAMD64=v4: all v3 instructions, plus AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL. + ecx1FeaturesV2 = bitSet(ecx1CX16) | bitSet(ecx1POPCNT) | bitSet(ecx1SSE3) | bitSet(ecx1SSE4_1) | bitSet(ecx1SSE4_2) | bitSet(ecx1SSSE3) + ecx1FeaturesV3 = ecx1FeaturesV2 | bitSet(ecx1AVX) | bitSet(ecx1F16C) | bitSet(ecx1FMA) | bitSet(ecx1MOVBE) | bitSet(ecx1OSXSAVE) + ebx7FeaturesV3 = bitSet(ebx7AVX2) | bitSet(ebx7BMI1) | bitSet(ebx7BMI2) + ebx7FeaturesV4 = ebx7FeaturesV3 | bitSet(ebx7AVX512F) | bitSet(ebx7AVX512BW) | bitSet(ebx7AVX512CD) | bitSet(ebx7AVX512DQ) | bitSet(ebx7AVX512VL) + ecxxFeaturesV2 = bitSet(ecxxLAHF) + ecxxFeaturesV3 = ecxxFeaturesV2 | bitSet(ecxxLZCNT) + eaxOSFeaturesV3 = bitSet(eaxOSXMM) | bitSet(eaxOSYMM) + eaxOSFeaturesV4 = eaxOSFeaturesV3 | bitSet(eaxOSOpMask) | bitSet(eaxOSZMMHi16) | bitSet(eaxOSZMMHi256) +) + +// cpuid is implemented in cpuinfo_x86.s. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpuinfo_x86.s. +func xgetbv() (eax, edx uint32) + +func lookupCPUVariant() string { + variant := "v1" + maxID, _, _, _ := cpuid(0, 0) + if maxID < 7 { + return variant + } + _, _, ecx1, _ := cpuid(1, 0) + _, ebx7, _, _ := cpuid(7, 0) + maxX, _, _, _ := cpuid(0x80000000, 0) + _, _, ecxx, _ := cpuid(0x80000001, 0) + + if maxX < 0x80000001 || !bitIsSet(ecx1FeaturesV2, ecx1) || !bitIsSet(ecxxFeaturesV2, ecxx) { + return variant + } + variant = "v2" + + if !bitIsSet(ecx1FeaturesV3, ecx1) || !bitIsSet(ebx7FeaturesV3, ebx7) || !bitIsSet(ecxxFeaturesV3, ecxx) { + return variant + } + // For XGETBV, OSXSAVE bit is required and verified by ecx1FeaturesV3.
+ eaxOS, _ := xgetbv() + if !bitIsSet(eaxOSFeaturesV3, eaxOS) { + return variant + } + variant = "v3" + + // Darwin support for AVX-512 appears to have issues. + if isDarwin || !bitIsSet(ebx7FeaturesV4, ebx7) || !bitIsSet(eaxOSFeaturesV4, eaxOS) { + return variant + } + variant = "v4" + + return variant +} + +func bitSet(bitpos uint) uint32 { + return 1 << bitpos +} + +func bitIsSet(bits, value uint32) bool { + return (value & bits) == bits +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s new file mode 100644 index 000000000..7d7ba33ef --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc + +#include "textflag.h" + +// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), NOSPLIT, $0-24 + MOVL eaxArg+0(FP), AX + MOVL ecxArg+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv() (eax, edx uint32) +TEXT ·xgetbv(SB),NOSPLIT,$0-8 + MOVL $0, CX + XGETBV + MOVL AX, eax+0(FP) + MOVL DX, edx+4(FP) + RET diff --git a/vendor/github.com/regclient/regclient/types/platform/os_darwin.go b/vendor/github.com/regclient/regclient/types/platform/os_darwin.go new file mode 100644 index 000000000..bcd887752 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/os_darwin.go @@ -0,0 +1,7 @@ +//go:build darwin + +package platform + +const isDarwin = true + +var _ = isDarwin diff --git a/vendor/github.com/regclient/regclient/types/platform/os_other.go b/vendor/github.com/regclient/regclient/types/platform/os_other.go new file mode 100644 index 000000000..56796a605 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/os_other.go @@ -0,0 +1,7 @@ +//go:build !darwin + +package platform + +const isDarwin = false + +var _ = isDarwin diff --git a/vendor/github.com/regclient/regclient/types/platform/platform.go b/vendor/github.com/regclient/regclient/types/platform/platform.go new file mode 100644 index 000000000..7cf746e8e --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/platform.go @@ -0,0 +1,183 @@ +// Package platform handles the parsing and comparing of the image platform (e.g. linux/amd64) +package platform + +// Some of the code in the package and all of the inspiration for this comes from the containerd project. +// Their license is included here: +/* + Copyright The containerd Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import ( + "fmt" + "path" + "regexp" + "strings" + + "github.com/regclient/regclient/internal/strparse" + "github.com/regclient/regclient/types/errs" +) + +var partRE = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) + +// Platform specifies a platform where a particular image manifest is applicable.
+type Platform struct { + // Architecture field specifies the CPU architecture, for example `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// String outputs the platform in the os/arch/variant notation +func (p Platform) String() string { + (&p).normalize() + if p.OS == "" { + return "unknown" + } else { + return path.Join(p.OS, p.Architecture, p.Variant) + } +} + +// Parse converts a platform string into a struct +func Parse(platStr string) (Platform, error) { + // args are a regclient specific way to extend the platform string + platArgs := strings.SplitN(platStr, ",", 2) + // split on slash, validate each component + platSplit := strings.Split(platArgs[0], "/") + for i, part := range platSplit { + if !partRE.MatchString(part) { + return Platform{}, fmt.Errorf("invalid platform component %s in %s%.0w", part, platStr, errs.ErrParsingFailed) + } + platSplit[i] = strings.ToLower(part) + } + plat := &Platform{} + if len(platSplit) == 1 && knownArch(platSplit[0]) { + // special case of architecture only + plat.Architecture = platSplit[0] + } else if len(platSplit) >= 1 { + plat.OS = platSplit[0] + } + if len(platSplit) >= 2 { + plat.Architecture = platSplit[1] + } + if len(platSplit) >= 3 { + plat.Variant = platSplit[2] + } + if len(platArgs) > 1 { + kvMap, err := strparse.SplitCSKV(platArgs[1]) + if err != nil { + return Platform{}, fmt.Errorf("failed to split platform args in %s: %w", platStr, err) + } + for k, v := range kvMap { + k := strings.TrimSpace(k) + v := strings.TrimSpace(v) + switch strings.ToLower(k) { + case "osver", "osversion": + plat.OSVersion = v + default: + return Platform{}, fmt.Errorf("unsupported platform arg type, %s in %s%.0w", k, platStr, errs.ErrParsingFailed) + } + } + } + // gather local platform details + platLocal := Local() + // normalize and extrapolate missing fields + if platStr == "local" { + *plat = platLocal + } else if plat.OS == "local" || plat.OS == "" { + plat.OS = platLocal.OS + } + plat.normalize() + switch plat.OS { + case "linux", "darwin", "windows": + // expand short references to local platform with architecture and variant + if Compatible(Platform{OS: platLocal.OS}, Platform{OS: plat.OS}) && len(platSplit) < 2 { + if plat.Architecture == "" { + plat.Architecture = platLocal.Architecture + } + if plat.Architecture == platLocal.Architecture && plat.Variant == "" { + plat.Variant = platLocal.Variant + } + } + } + if plat.OS == "windows" && plat.OS == platLocal.OS && plat.Architecture == platLocal.Architecture && variantCompatible(platLocal.Variant, plat.Variant) && plat.OSVersion == "" { + plat.OSVersion = platLocal.OSVersion + } + + return *plat, nil +} + +// knownArch is a list of known architectures that can be parsed without
the OS field. +// Otherwise the OS is required. +func knownArch(arch string) bool { + switch arch { + case "386", "amd64", "i386", "x86_64", "x86-64", + "arm", "armhf", "armel", "arm64", "aarch64", + "mips", "mips64", "mips64le", + "ppc", "ppc64", "ppc64le", + "loong64", + "riscv", "riscv64", + "s390", "s390x", + "sparc", "sparc64", + "wasm": + return true + } + return false +} + +func (p *Platform) normalize() { + switch p.OS { + case "macos": + p.OS = "darwin" + } + switch p.Architecture { + case "i386": + p.Architecture = "386" + p.Variant = "" + case "x86_64", "x86-64", "amd64": + p.Architecture = "amd64" + if p.Variant == "v1" { + p.Variant = "" + } + case "aarch64", "arm64": + p.Architecture = "arm64" + switch p.Variant { + case "8", "v8": + p.Variant = "" + } + case "armhf": + p.Architecture = "arm" + p.Variant = "v7" + case "armel": + p.Architecture = "arm" + p.Variant = "v6" + case "arm": + switch p.Variant { + case "", "7": + p.Variant = "v7" + case "5", "6", "8": + p.Variant = "v" + p.Variant + } + } +} diff --git a/vendor/github.com/regclient/regclient/types/platform/platform_other.go b/vendor/github.com/regclient/regclient/types/platform/platform_other.go new file mode 100644 index 000000000..1bb9b5d0b --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/platform_other.go @@ -0,0 +1,16 @@ +//go:build !windows + +package platform + +import "runtime" + +// Local retrieves the local platform details +func Local() Platform { + plat := Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + Variant: cpuVariant(), + } + plat.normalize() + return plat +} diff --git a/vendor/github.com/regclient/regclient/types/platform/platform_windows.go b/vendor/github.com/regclient/regclient/types/platform/platform_windows.go new file mode 100644 index 000000000..b2b491182 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/platform_windows.go @@ -0,0 +1,23 @@ +//go:build windows + +package platform + +import ( + "fmt" + "runtime" + + "golang.org/x/sys/windows" +) + +// Local retrieves the local platform details +func Local() Platform { + major, minor, build := windows.RtlGetNtVersionNumbers() + plat := Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + Variant: cpuVariant(), + OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), + } + plat.normalize() + return plat +} diff --git a/vendor/github.com/regclient/regclient/types/ratelimit.go b/vendor/github.com/regclient/regclient/types/ratelimit.go new file mode 100644 index 000000000..3ecbd608f --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/ratelimit.go @@ -0,0 +1,8 @@ +package types + +// RateLimit is returned from some http requests +type RateLimit struct { + Remain, Limit, Reset int + Set bool + Policies []string +} diff --git a/vendor/github.com/regclient/regclient/types/ref/ref.go b/vendor/github.com/regclient/regclient/types/ref/ref.go new file mode 100644 index 000000000..91f6f211b --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/ref/ref.go @@ -0,0 +1,316 @@ +// Package ref is used to define references. +// References default to remote registry references (registry:port/repo:tag). +// Schemes can be included in front of the reference for different reference types. +package ref + +import ( + "fmt" + "path" + "regexp" + "strings" + + "github.com/regclient/regclient/types/errs" +) + +const ( + dockerLibrary = "library" + // dockerRegistry is the name resolved in docker images on Hub. 
+ dockerRegistry = "docker.io" + // dockerRegistryLegacy is the name resolved in docker images on Hub. + dockerRegistryLegacy = "index.docker.io" + // dockerRegistryDNS is the host to connect to for Hub. + dockerRegistryDNS = "registry-1.docker.io" +) + +var ( + hostPartS = `(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?)` + portS = `(?:` + regexp.QuoteMeta(`:`) + `[0-9]+)` + ipv6PartS = `(?:[0-9a-fA-F]{1,4}:){0,7}[0-9a-fA-F]{1,4}` + ipv6S = `(?:` + regexp.QuoteMeta(`[`) + `(?:` + + ipv6PartS + `|` + // uncompressed + regexp.QuoteMeta(`::`) + ipv6PartS + `|` + // prefix compressed + ipv6PartS + regexp.QuoteMeta(`::`) + ipv6PartS + `|` + // middle compressed + ipv6PartS + regexp.QuoteMeta(`::`) + // suffix compressed + `)` + regexp.QuoteMeta(`]`) + `)` + localhostS = `localhost` + hostDomainS = `(?:` + hostPartS + `(?:(?:` + regexp.QuoteMeta(`.`) + hostPartS + `)+` + regexp.QuoteMeta(`.`) + `?|` + regexp.QuoteMeta(`.`) + `))` + hostUpperS = `(?:[a-zA-Z0-9]*[A-Z][a-zA-Z0-9-]*[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[A-Z][a-zA-Z0-9]*)` + registryS = `(?:` + + `(?:` + hostDomainS + `|` + hostUpperS + `|` + ipv6S + `|` + localhostS + `)` + portS + `?|` + // name with dotted domain, upper case, or IPv6 with optional port + hostPartS + portS + // a short name with required port + `)` + repoPartS = `[a-z0-9]+(?:(?:\.|_|__|-+)[a-z0-9]+)*` + pathS = `[/a-zA-Z0-9_\-. ~\+]+` + tagS = `[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127}` + digestS = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` + schemeRE = regexp.MustCompile(`^([a-z]+)://(.+)$`) + registryRE = regexp.MustCompile(`^(` + registryS + `)$`) + refRE = regexp.MustCompile(`^(?:(` + registryS + `)` + regexp.QuoteMeta(`/`) + `)?` + + `(` + repoPartS + `(?:` + regexp.QuoteMeta(`/`) + repoPartS + `)*)` + + `(?:` + regexp.QuoteMeta(`:`) + `(` + tagS + `))?` + + `(?:` + regexp.QuoteMeta(`@`) + `(` + digestS + `))?$`) + ocidirRE = regexp.MustCompile(`^(` + pathS + `)` + + `(?:` + regexp.QuoteMeta(`:`) + `(` + tagS + `))?` + + `(?:` + regexp.QuoteMeta(`@`) + `(` + digestS + `))?$`) +) + +// Ref is a reference to a registry/repository. +// Direct access to the contents of this struct should not be assumed. +type Ref struct { + Scheme string // Scheme is the type of reference, "reg" or "ocidir". + Reference string // Reference is the unparsed string or common name. + Registry string // Registry is the server for the "reg" scheme. + Repository string // Repository is the path on the registry for the "reg" scheme. + Tag string // Tag is a mutable tag for a reference. + Digest string // Digest is an immutable hash for a reference. + Path string // Path is the directory of the OCI Layout for "ocidir". +} + +// New returns a reference based on the scheme (defaulting to "reg"). 
+func New(parse string) (Ref, error) { + scheme := "" + tail := parse + matchScheme := schemeRE.FindStringSubmatch(parse) + if len(matchScheme) == 3 { + scheme = matchScheme[1] + tail = matchScheme[2] + } + ret := Ref{ + Scheme: scheme, + Reference: parse, + } + switch scheme { + case "": + ret.Scheme = "reg" + matchRef := refRE.FindStringSubmatch(tail) + if len(matchRef) < 5 { + if refRE.FindStringSubmatch(strings.ToLower(tail)) != nil { + return Ref{}, fmt.Errorf("%w \"%s\", repo must be lowercase", errs.ErrInvalidReference, tail) + } + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrInvalidReference, tail) + } + ret.Registry = matchRef[1] + ret.Repository = matchRef[2] + ret.Tag = matchRef[3] + ret.Digest = matchRef[4] + + // handle localhost use case since it matches the regex for a repo path entry + repoPath := strings.Split(ret.Repository, "/") + if ret.Registry == "" && repoPath[0] == "localhost" { + ret.Registry = repoPath[0] + ret.Repository = strings.Join(repoPath[1:], "/") + } + switch ret.Registry { + case "", dockerRegistryDNS, dockerRegistryLegacy: + ret.Registry = dockerRegistry + } + if ret.Registry == dockerRegistry && !strings.Contains(ret.Repository, "/") { + ret.Repository = dockerLibrary + "/" + ret.Repository + } + if ret.Tag == "" && ret.Digest == "" { + ret.Tag = "latest" + } + if ret.Repository == "" { + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrInvalidReference, tail) + } + + case "ocidir", "ocifile": + matchPath := ocidirRE.FindStringSubmatch(tail) + if len(matchPath) < 2 || matchPath[1] == "" { + return Ref{}, fmt.Errorf("%w, invalid path for scheme \"%s\": %s", errs.ErrInvalidReference, scheme, tail) + } + ret.Path = matchPath[1] + if len(matchPath) > 2 && matchPath[2] != "" { + ret.Tag = matchPath[2] + } + if len(matchPath) > 3 && matchPath[3] != "" { + ret.Digest = matchPath[3] + } + + default: + return Ref{}, fmt.Errorf("%w, unknown scheme \"%s\" in \"%s\"", errs.ErrInvalidReference, scheme, parse) + } + return ret, nil +} + +// NewHost returns a Reg for a registry hostname or equivalent. +// The ocidir schema equivalent is the path. +func NewHost(parse string) (Ref, error) { + scheme := "" + tail := parse + matchScheme := schemeRE.FindStringSubmatch(parse) + if len(matchScheme) == 3 { + scheme = matchScheme[1] + tail = matchScheme[2] + } + ret := Ref{ + Scheme: scheme, + } + + switch scheme { + case "": + ret.Scheme = "reg" + matchReg := registryRE.FindStringSubmatch(tail) + if len(matchReg) < 2 { + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrParsingFailed, tail) + } + ret.Registry = matchReg[1] + if ret.Registry == "" { + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrParsingFailed, tail) + } + + case "ocidir", "ocifile": + matchPath := ocidirRE.FindStringSubmatch(tail) + if len(matchPath) < 2 || matchPath[1] == "" { + return Ref{}, fmt.Errorf("%w, invalid path for scheme \"%s\": %s", errs.ErrParsingFailed, scheme, tail) + } + ret.Path = matchPath[1] + + default: + return Ref{}, fmt.Errorf("%w, unknown scheme \"%s\" in \"%s\"", errs.ErrParsingFailed, scheme, parse) + } + return ret, nil +} + +// AddDigest returns a ref with the requested digest set. +// The tag will NOT be unset and the reference value will be reset. +func (r Ref) AddDigest(digest string) Ref { + r.Digest = digest + r.Reference = r.CommonName() + return r +} + +// CommonName outputs a parsable name from a reference. 
+func (r Ref) CommonName() string { + cn := "" + switch r.Scheme { + case "reg": + if r.Registry != "" { + cn = r.Registry + "/" + } + if r.Repository == "" { + return "" + } + cn = cn + r.Repository + if r.Tag != "" { + cn = cn + ":" + r.Tag + } + if r.Digest != "" { + cn = cn + "@" + r.Digest + } + case "ocidir": + cn = fmt.Sprintf("ocidir://%s", r.Path) + if r.Tag != "" { + cn = cn + ":" + r.Tag + } + if r.Digest != "" { + cn = cn + "@" + r.Digest + } + } + return cn +} + +// IsSet returns true if needed values are defined for a specific reference. +func (r Ref) IsSet() bool { + if !r.IsSetRepo() { + return false + } + // Registry requires a tag or digest, OCI Layout doesn't require these. + if r.Scheme == "reg" && r.Tag == "" && r.Digest == "" { + return false + } + return true +} + +// IsSetRepo returns true when the ref includes values for a specific repository. +func (r Ref) IsSetRepo() bool { + switch r.Scheme { + case "reg": + if r.Registry != "" && r.Repository != "" { + return true + } + case "ocidir": + if r.Path != "" { + return true + } + } + return false +} + +// IsZero returns true if ref is unset. +func (r Ref) IsZero() bool { + if r.Scheme == "" && r.Registry == "" && r.Repository == "" && r.Path == "" && r.Tag == "" && r.Digest == "" { + return true + } + return false +} + +// SetDigest returns a ref with the requested digest set. +// The tag will be unset and the reference value will be reset. +func (r Ref) SetDigest(digest string) Ref { + r.Digest = digest + r.Tag = "" + r.Reference = r.CommonName() + return r +} + +// SetTag returns a ref with the requested tag set. +// The digest will be unset and the reference value will be reset. +func (r Ref) SetTag(tag string) Ref { + r.Tag = tag + r.Digest = "" + r.Reference = r.CommonName() + return r +} + +// ToReg converts a reference to a registry like syntax. +func (r Ref) ToReg() Ref { + switch r.Scheme { + case "ocidir": + r.Scheme = "reg" + r.Registry = "localhost" + // clean the path to strip leading ".." + r.Repository = path.Clean("/" + r.Path)[1:] + r.Repository = strings.ToLower(r.Repository) + // convert any unsupported characters to "-" in the path + re := regexp.MustCompile(`[^/a-z0-9]+`) + r.Repository = string(re.ReplaceAll([]byte(r.Repository), []byte("-"))) + } + return r +} + +// EqualRegistry compares the registry between two references. +func EqualRegistry(a, b Ref) bool { + if a.Scheme != b.Scheme { + return false + } + switch a.Scheme { + case "reg": + return a.Registry == b.Registry + case "ocidir": + return a.Path == b.Path + case "": + // both undefined + return true + default: + return false + } +} + +// EqualRepository compares the repository between two references. 
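+//
+// For example (editor's note), refs for "alpine:3.20" and "alpine:3.21"
+// compare equal here: registry and repository match, only the tags differ.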
+func EqualRepository(a, b Ref) bool { + if a.Scheme != b.Scheme { + return false + } + switch a.Scheme { + case "reg": + return a.Registry == b.Registry && a.Repository == b.Repository + case "ocidir": + return a.Path == b.Path + case "": + // both undefined + return true + default: + return false + } +} diff --git a/vendor/github.com/regclient/regclient/types/referrer/referrer.go b/vendor/github.com/regclient/regclient/types/referrer/referrer.go new file mode 100644 index 000000000..03a6be430 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/referrer/referrer.go @@ -0,0 +1,160 @@ +// Package referrer is used for responses to the referrers to a manifest +package referrer + +import ( + "bytes" + "fmt" + "regexp" + "slices" + "sort" + "text/tabwriter" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/ref" +) + +// ReferrerList contains the response to a request for referrers to a subject +type ReferrerList struct { + Subject ref.Ref `json:"subject"` // subject queried + Source ref.Ref `json:"source"` // source for referrers, if different from subject + Descriptors []descriptor.Descriptor `json:"descriptors"` // descriptors found in Index + Annotations map[string]string `json:"annotations,omitempty"` // annotations extracted from Index + Manifest manifest.Manifest `json:"-"` // returned OCI Index + Tags []string `json:"-"` // tags matched when fetching referrers +} + +// Add appends an entry to rl.Manifest, used to modify the client managed Index +func (rl *ReferrerList) Add(m manifest.Manifest) error { + rlM, ok := rl.Manifest.GetOrig().(v1.Index) + if !ok { + return fmt.Errorf("referrer list manifest is not an OCI index for %s", rl.Subject.CommonName()) + } + // if entry already exists, return + mDesc := m.GetDescriptor() + for _, d := range rlM.Manifests { + if d.Digest == mDesc.Digest { + return nil + } + } + // update descriptor, pulling up artifact type and annotations + switch mOrig := m.GetOrig().(type) { + case v1.ArtifactManifest: + mDesc.Annotations = mOrig.Annotations + mDesc.ArtifactType = mOrig.ArtifactType + case v1.Manifest: + mDesc.Annotations = mOrig.Annotations + if mOrig.ArtifactType != "" { + mDesc.ArtifactType = mOrig.ArtifactType + } else { + mDesc.ArtifactType = mOrig.Config.MediaType + } + case v1.Index: + mDesc.Annotations = mOrig.Annotations + mDesc.ArtifactType = mOrig.ArtifactType + default: + // other types are not supported + return fmt.Errorf("invalid manifest for referrer \"%t\": %w", m.GetOrig(), errs.ErrUnsupportedMediaType) + } + // append descriptor to index + rlM.Manifests = append(rlM.Manifests, mDesc) + rl.Descriptors = rlM.Manifests + err := rl.Manifest.SetOrig(rlM) + if err != nil { + return err + } + return nil +} + +// Delete removes an entry from rl.Manifest, used to modify the client managed Index +func (rl *ReferrerList) Delete(m manifest.Manifest) error { + rlM, ok := rl.Manifest.GetOrig().(v1.Index) + if !ok { + return fmt.Errorf("referrer list manifest is not an OCI index for %s", rl.Subject.CommonName()) + } + // delete matching entries from the list + mDesc := m.GetDescriptor() + found := false + for i := len(rlM.Manifests) - 1; i >= 0; i-- { + if rlM.Manifests[i].Digest == mDesc.Digest { + rlM.Manifests = slices.Delete(rlM.Manifests, i, i+1) + found = true + } + } + if !found { + return 
fmt.Errorf("subject not found in referrer list%.0w", errs.ErrNotFound) + } + rl.Descriptors = rlM.Manifests + err := rl.Manifest.SetOrig(rlM) + if err != nil { + return err + } + return nil +} + +// IsEmpty reports if the returned Index contains no manifests +func (rl ReferrerList) IsEmpty() bool { + rlM, ok := rl.Manifest.GetOrig().(v1.Index) + if !ok || len(rlM.Manifests) == 0 { + return true + } + return false +} + +// MarshalPretty is used for printPretty template formatting +func (rl ReferrerList) MarshalPretty() ([]byte, error) { + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + var rRef ref.Ref + if rl.Subject.IsSet() { + rRef = rl.Subject + fmt.Fprintf(tw, "Subject:\t%s\n", rl.Subject.CommonName()) + } + if rl.Source.IsSet() { + rRef = rl.Source + fmt.Fprintf(tw, "Source:\t%s\n", rl.Source.CommonName()) + } + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Referrers:\t\n") + for _, d := range rl.Descriptors { + fmt.Fprintf(tw, "\t\n") + if rRef.IsSet() { + fmt.Fprintf(tw, " Name:\t%s\n", rRef.SetDigest(d.Digest.String()).CommonName()) + } + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + if len(rl.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(rl.Annotations)) + for k := range rl.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := rl.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + err := tw.Flush() + return buf.Bytes(), err +} + +// FallbackTag returns the ref that should be used when the registry does not support the referrers API +func FallbackTag(r ref.Ref) (ref.Ref, error) { + dig, err := digest.Parse(r.Digest) + if err != nil { + return r, fmt.Errorf("failed to parse digest for referrers: %w", err) + } + replaceRE := regexp.MustCompile(`[^a-zA-Z0-9._-]`) + algo := replaceRE.ReplaceAllString(string(dig.Algorithm()), "-") + hash := replaceRE.ReplaceAllString(string(dig.Hex()), "-") + rOut := r.SetTag(fmt.Sprintf("%.32s-%.64s", algo, hash)) + return rOut, nil +} diff --git a/vendor/github.com/regclient/regclient/types/repo/repolist.go b/vendor/github.com/regclient/regclient/types/repo/repolist.go new file mode 100644 index 000000000..0bdbb5ce4 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/repo/repolist.go @@ -0,0 +1,136 @@ +// Package repo handles a list of repositories from a registry +package repo + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "sort" + "strings" + + "github.com/regclient/regclient/types/errs" +) + +// RepoList is the response for a repository listing. +type RepoList struct { + repoCommon + RepoRegistryList +} + +type repoCommon struct { + host string + mt string + orig any + rawHeader http.Header + rawBody []byte +} + +type repoConfig struct { + host string + mt string + raw []byte + header http.Header +} + +type Opts func(*repoConfig) + +// New is used to create a repository listing. 
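+//
+// A construction sketch (editor's note, not upstream code), where raw is
+// assumed to hold a JSON _catalog response from host:
+//
+//	rl, err := repo.New(repo.WithHost(host), repo.WithRaw(raw))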
+func New(opts ...Opts) (*RepoList, error) { + conf := repoConfig{ + mt: "application/json", + } + for _, opt := range opts { + opt(&conf) + } + rl := RepoList{} + rc := repoCommon{ + mt: conf.mt, + rawHeader: conf.header, + rawBody: conf.raw, + host: conf.host, + } + + mt := strings.Split(conf.mt, ";")[0] // "application/json; charset=utf-8" -> "application/json" + switch mt { + case "application/json", "text/plain": + err := json.Unmarshal(conf.raw, &rl) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("%w: media type: %s, hostname: %s", errs.ErrUnsupportedMediaType, conf.mt, conf.host) + } + + rl.repoCommon = rc + return &rl, nil +} + +func WithHeaders(header http.Header) Opts { + return func(c *repoConfig) { + c.header = header + } +} + +func WithHost(host string) Opts { + return func(c *repoConfig) { + c.host = host + } +} + +func WithMT(mt string) Opts { + return func(c *repoConfig) { + c.mt = mt + } +} + +func WithRaw(raw []byte) Opts { + return func(c *repoConfig) { + c.raw = raw + } +} + +// RepoRegistryList is a list of repositories from the _catalog API +type RepoRegistryList struct { + Repositories []string `json:"repositories"` +} + +func (r repoCommon) GetOrig() any { + return r.orig +} + +func (r repoCommon) MarshalJSON() ([]byte, error) { + if len(r.rawBody) > 0 { + return r.rawBody, nil + } + + if r.orig != nil { + return json.Marshal((r.orig)) + } + return []byte{}, fmt.Errorf("JSON marshalling failed: %w", errs.ErrNotFound) +} + +func (r repoCommon) RawBody() ([]byte, error) { + return r.rawBody, nil +} + +func (r repoCommon) RawHeaders() (http.Header, error) { + return r.rawHeader, nil +} + +// GetRepos returns the repositories +func (rl RepoRegistryList) GetRepos() ([]string, error) { + return rl.Repositories, nil +} + +// MarshalPretty is used for printPretty template formatting +func (rl RepoRegistryList) MarshalPretty() ([]byte, error) { + sort.Slice(rl.Repositories, func(i, j int) bool { + return strings.Compare(rl.Repositories[i], rl.Repositories[j]) < 0 + }) + buf := &bytes.Buffer{} + for _, tag := range rl.Repositories { + fmt.Fprintf(buf, "%s\n", tag) + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/regclient/regclient/types/slog.go b/vendor/github.com/regclient/regclient/types/slog.go new file mode 100644 index 000000000..99a4f5206 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/slog.go @@ -0,0 +1,8 @@ +package types + +import "log/slog" + +const ( + // LevelTrace is used for tracing network requests. + LevelTrace = slog.LevelDebug - 4 +) diff --git a/vendor/github.com/regclient/regclient/types/tag/gcrlist.go b/vendor/github.com/regclient/regclient/types/tag/gcrlist.go new file mode 100644 index 000000000..beec3bd93 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/tag/gcrlist.go @@ -0,0 +1,98 @@ +// Contents in this file are from github.com/google/go-containerregistry + +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package tag
+
+import (
+	"encoding/json"
+	"strconv"
+	"time"
+)
+
+type gcrRawManifestInfo struct {
+	Size      string   `json:"imageSizeBytes"`
+	MediaType string   `json:"mediaType"`
+	Created   string   `json:"timeCreatedMs"`
+	Uploaded  string   `json:"timeUploadedMs"`
+	Tags      []string `json:"tag"`
+}
+
+// GCRManifestInfo is a Manifests entry in the output of List and Walk.
+type GCRManifestInfo struct {
+	Size      uint64    `json:"imageSizeBytes"`
+	MediaType string    `json:"mediaType"`
+	Created   time.Time `json:"timeCreatedMs"`
+	Uploaded  time.Time `json:"timeUploadedMs"`
+	Tags      []string  `json:"tag"`
+}
+
+func fromUnixMs(ms int64) time.Time {
+	sec := ms / 1000
+	ns := (ms % 1000) * 1000000
+	return time.Unix(sec, ns)
+}
+
+func toUnixMs(t time.Time) string {
+	return strconv.FormatInt(t.UnixNano()/1000000, 10)
+}
+
+// MarshalJSON implements json.Marshaler
+func (m GCRManifestInfo) MarshalJSON() ([]byte, error) {
+	return json.Marshal(gcrRawManifestInfo{
+		Size:      strconv.FormatUint(m.Size, 10),
+		MediaType: m.MediaType,
+		Created:   toUnixMs(m.Created),
+		Uploaded:  toUnixMs(m.Uploaded),
+		Tags:      m.Tags,
+	})
+}
+
+// UnmarshalJSON implements json.Unmarshaler
+func (m *GCRManifestInfo) UnmarshalJSON(data []byte) error {
+	raw := gcrRawManifestInfo{}
+	if err := json.Unmarshal(data, &raw); err != nil {
+		return err
+	}
+
+	if raw.Size != "" {
+		size, err := strconv.ParseUint(raw.Size, 10, 64)
+		if err != nil {
+			return err
+		}
+		m.Size = size
+	}
+
+	if raw.Created != "" {
+		created, err := strconv.ParseInt(raw.Created, 10, 64)
+		if err != nil {
+			return err
+		}
+		m.Created = fromUnixMs(created)
+	}
+
+	if raw.Uploaded != "" {
+		uploaded, err := strconv.ParseInt(raw.Uploaded, 10, 64)
+		if err != nil {
+			return err
+		}
+		m.Uploaded = fromUnixMs(uploaded)
+	}
+
+	m.MediaType = raw.MediaType
+	m.Tags = raw.Tags
+
+	return nil
+}
diff --git a/vendor/github.com/regclient/regclient/types/tag/tag.go b/vendor/github.com/regclient/regclient/types/tag/tag.go
new file mode 100644
index 000000000..e76a98528
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/tag/tag.go
@@ -0,0 +1,2 @@
+// Package tag is used for wrapping tag lists
+package tag
diff --git a/vendor/github.com/regclient/regclient/types/tag/taglist.go b/vendor/github.com/regclient/regclient/types/tag/taglist.go
new file mode 100644
index 000000000..5aec60f5c
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/tag/taglist.go
@@ -0,0 +1,254 @@
+package tag
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"maps"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+
+	"github.com/regclient/regclient/types/errs"
+	"github.com/regclient/regclient/types/mediatype"
+	ociv1 "github.com/regclient/regclient/types/oci/v1"
+	"github.com/regclient/regclient/types/ref"
+)
+
+// List contains a tag list.
+// Currently this is a struct but the underlying type could be changed to an interface in the future.
+// Using methods is recommended over directly accessing fields.
+type List struct {
+	tagCommon
+	DockerList
+	GCRList
+	LayoutList
+}
+
+type tagCommon struct {
+	r         ref.Ref
+	mt        string
+	orig      any
+	rawHeader http.Header
+	rawBody   []byte
+	url       *url.URL
+}
+
+// DockerList is returned from registry/2.0 APIs.
+type DockerList struct {
+	Name string   `json:"name"`
+	Tags []string `json:"tags"`
+}
+
+// GCRList fields are from gcr.io.
+type GCRList struct {
+	Children  []string                   `json:"child,omitempty"`
+	Manifests map[string]GCRManifestInfo `json:"manifest,omitempty"`
+}
+
+// LayoutList includes the OCI Index from an OCI Layout.
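+// (Editor's note: for registry responses the tags land in DockerList above;
+// LayoutList is only populated when the list is built from an OCI Layout
+// via WithLayoutIndex.)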
+type LayoutList struct {
+	Index ociv1.Index
+}
+
+type tagConfig struct {
+	ref    ref.Ref
+	mt     string
+	raw    []byte
+	header http.Header
+	index  ociv1.Index
+	tags   []string
+	url    *url.URL
+}
+
+// Opts defines options for creating a new tag.
+type Opts func(*tagConfig)
+
+// New creates a tag list from options.
+// Tags may be provided directly, or they will be parsed from the raw input based on the media type.
+func New(opts ...Opts) (*List, error) {
+	conf := tagConfig{}
+	for _, opt := range opts {
+		opt(&conf)
+	}
+	if conf.mt == "" {
+		conf.mt = "application/json"
+	}
+	tl := List{}
+	tc := tagCommon{
+		r:         conf.ref,
+		mt:        conf.mt,
+		rawHeader: conf.header,
+		rawBody:   conf.raw,
+		url:       conf.url,
+	}
+	if len(conf.tags) > 0 {
+		tl.Tags = conf.tags
+	}
+	if conf.index.Manifests != nil {
+		tl.LayoutList.Index = conf.index
+	}
+	if len(conf.raw) > 0 {
+		mt := mediatype.Base(conf.mt)
+		switch mt {
+		case "application/json", "text/plain":
+			err := json.Unmarshal(conf.raw, &tl)
+			if err != nil {
+				return nil, err
+			}
+		case mediatype.OCI1ManifestList:
+			// noop
+		default:
+			return nil, fmt.Errorf("%w: media type: %s, reference: %s", errs.ErrUnsupportedMediaType, conf.mt, conf.ref.CommonName())
+		}
+	}
+	tl.tagCommon = tc
+
+	return &tl, nil
+}
+
+// WithHeaders includes data from http headers when creating the tag list.
+func WithHeaders(header http.Header) Opts {
+	return func(tConf *tagConfig) {
+		tConf.header = header
+	}
+}
+
+// WithLayoutIndex includes the index from an OCI Layout.
+func WithLayoutIndex(index ociv1.Index) Opts {
+	return func(tConf *tagConfig) {
+		tConf.index = index
+	}
+}
+
+// WithMT sets the returned media type on the tag list.
+func WithMT(mt string) Opts {
+	return func(tConf *tagConfig) {
+		tConf.mt = mt
+	}
+}
+
+// WithRaw defines the raw response from the tag list request.
+func WithRaw(raw []byte) Opts {
+	return func(tConf *tagConfig) {
+		tConf.raw = raw
+	}
+}
+
+// WithRef specifies the reference (repository) associated with the tag list.
+func WithRef(ref ref.Ref) Opts {
+	return func(tConf *tagConfig) {
+		tConf.ref = ref
+	}
+}
+
+// WithResp includes the response from an http request.
+func WithResp(resp *http.Response) Opts {
+	return func(tConf *tagConfig) {
+		if len(tConf.raw) == 0 {
+			body, err := io.ReadAll(resp.Body)
+			if err == nil {
+				tConf.raw = body
+			}
+		}
+		if tConf.header == nil {
+			tConf.header = resp.Header
+		}
+		if tConf.mt == "" && resp.Header != nil {
+			tConf.mt = resp.Header.Get("Content-Type")
+		}
+		if tConf.url == nil {
+			tConf.url = resp.Request.URL
+		}
+	}
+}
+
+// WithTags provides the parsed tags for the tag list.
+func WithTags(tags []string) Opts {
+	return func(tConf *tagConfig) {
+		tConf.tags = tags
+	}
+}
+
+// Append extends a tag list with another.
+func (l *List) Append(add *List) error {
+	// verify the two lists are compatible
+	if l.mt != add.mt || !ref.EqualRepository(l.r, add.r) || l.Name != add.Name {
+		return fmt.Errorf("unable to append, lists are incompatible")
+	}
+	if add.orig != nil {
+		l.orig = add.orig
+	}
+	if add.rawBody != nil {
+		l.rawBody = add.rawBody
+	}
+	if add.rawHeader != nil {
+		l.rawHeader = add.rawHeader
+	}
+	if add.url != nil {
+		l.url = add.url
+	}
+	l.Tags = append(l.Tags, add.Tags...)
+	if add.Children != nil {
+		l.Children = append(l.Children, add.Children...)
+	}
+	if add.Manifests != nil {
+		if l.Manifests == nil {
+			l.Manifests = add.Manifests
+		} else {
+			maps.Copy(l.Manifests, add.Manifests)
+		}
+	}
+	return nil
+}
+
+// GetOrig returns the underlying tag data structure if defined.
+func (t tagCommon) GetOrig() any { + return t.orig +} + +// MarshalJSON returns the tag list in json. +func (t tagCommon) MarshalJSON() ([]byte, error) { + if len(t.rawBody) > 0 { + return t.rawBody, nil + } + + if t.orig != nil { + return json.Marshal((t.orig)) + } + return []byte{}, fmt.Errorf("JSON marshalling failed: %w", errs.ErrNotFound) +} + +// RawBody returns the original tag list response. +func (t tagCommon) RawBody() ([]byte, error) { + return t.rawBody, nil +} + +// RawHeaders returns the received http headers. +func (t tagCommon) RawHeaders() (http.Header, error) { + return t.rawHeader, nil +} + +// GetURL returns the URL of the request. +func (t tagCommon) GetURL() *url.URL { + return t.url +} + +// GetTags returns the tags from a list. +func (tl DockerList) GetTags() ([]string, error) { + return tl.Tags, nil +} + +// MarshalPretty is used for printPretty template formatting. +func (tl DockerList) MarshalPretty() ([]byte, error) { + sort.Slice(tl.Tags, func(i, j int) bool { + return strings.Compare(tl.Tags[i], tl.Tags[j]) < 0 + }) + buf := &bytes.Buffer{} + for _, tag := range tl.Tags { + fmt.Fprintf(buf, "%s\n", tag) + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/regclient/regclient/types/warning/warning.go b/vendor/github.com/regclient/regclient/types/warning/warning.go new file mode 100644 index 000000000..eb0609f4f --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/warning/warning.go @@ -0,0 +1,77 @@ +// Package warning is used to handle HTTP warning headers +package warning + +import ( + "context" + "log/slog" + "slices" + "sync" +) + +type contextKey string + +var key contextKey = "key" + +type Warning struct { + List []string + Hook *func(context.Context, *slog.Logger, string) + mu sync.Mutex +} + +func (w *Warning) Handle(ctx context.Context, slog *slog.Logger, msg string) { + w.mu.Lock() + defer w.mu.Unlock() + if slices.Contains(w.List, msg) { + return + } + w.List = append(w.List, msg) + // handle new warning if hook defined + if w.Hook != nil { + (*w.Hook)(ctx, slog, msg) + } +} + +func NewContext(ctx context.Context, w *Warning) context.Context { + return context.WithValue(ctx, key, w) +} + +func FromContext(ctx context.Context) *Warning { + wAny := ctx.Value(key) + if wAny == nil { + return nil + } + w, ok := wAny.(*Warning) + if !ok { + return nil + } + return w +} + +func NewHook(log *slog.Logger) *func(context.Context, *slog.Logger, string) { + hook := func(_ context.Context, _ *slog.Logger, msg string) { + logMsg(log, msg) + } + return &hook +} + +func DefaultHook() *func(context.Context, *slog.Logger, string) { + hook := func(_ context.Context, slog *slog.Logger, msg string) { + logMsg(slog, msg) + } + return &hook +} + +func Handle(ctx context.Context, slog *slog.Logger, msg string) { + // check for context + if w := FromContext(ctx); w != nil { + w.Handle(ctx, slog, msg) + return + } + + // fallback to log + logMsg(slog, msg) +} + +func logMsg(log *slog.Logger, msg string) { + log.Warn("Registry warning message", slog.String("warning", msg)) +} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index eeed1e92b..2fd3c5759 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -143,8 +143,9 @@ type ParseErrorsAllowlist struct { UnknownFlags bool } -// DEPRECATED: please use ParseErrorsAllowlist instead -// This type will be removed in a future release +// ParseErrorsWhitelist defines the parsing errors that can be ignored. 
+// +// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release. type ParseErrorsWhitelist = ParseErrorsAllowlist // NormalizedName is a flag name that has been normalized according to rules @@ -165,8 +166,9 @@ type FlagSet struct { // ParseErrorsAllowlist is used to configure an allowlist of errors ParseErrorsAllowlist ParseErrorsAllowlist - // DEPRECATED: please use ParseErrorsAllowlist instead - // This field will be removed in a future release + // ParseErrorsAllowlist is used to configure an allowlist of errors. + // + // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release. ParseErrorsWhitelist ParseErrorsAllowlist name string @@ -1185,7 +1187,7 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - if errors.Is(err, ErrHelp) { + if err == ErrHelp { os.Exit(0) } fmt.Fprintln(f.Output(), err) @@ -1214,7 +1216,7 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: - if errors.Is(err, ErrHelp) { + if err == ErrHelp { os.Exit(0) } fmt.Fprintln(f.Output(), err) diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore new file mode 100644 index 000000000..eb3d5f517 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/.gitignore @@ -0,0 +1,28 @@ +# .gitignore + +TODO.html +README.html + +lzma/writer.txt +lzma/reader.txt + +cmd/gxz/gxz +cmd/xb/xb + +# test executables +*.test + +# profile files +*.out + +# vim swap file +.*.swp + +# executables on windows +*.exe + +# default compression test file +enwik8* + +# file generated by example +example.xz \ No newline at end of file diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 000000000..8a7f0877d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2022 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
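Editor's note on the pflag hunk above: the change only rewrites the deprecation comments; the non-deprecated `ParseErrorsAllowlist` field already exists and is the one to use. A minimal usage sketch (not part of this patch):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	verbose := fs.Bool("verbose", false, "enable verbose output")
	// Tolerate unknown flags instead of returning an error from Parse.
	fs.ParseErrorsAllowlist.UnknownFlags = true
	if err := fs.Parse([]string{"--verbose", "--no-such-flag"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("verbose:", *verbose)
}
```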
diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md
new file mode 100644
index 000000000..56d49275a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/README.md
@@ -0,0 +1,88 @@
+# Package xz
+
+This Go language package supports the reading and writing of xz
+compressed streams. It also includes a gxz command for compressing and
+decompressing data. The package is completely written in Go and doesn't
+have any dependency on C code.
+
+The package is currently under development. There might be bugs, and the
+APIs are not considered stable. At this time the package cannot compete
+with the xz tool regarding compression speed and size. The algorithms there
+have been developed over a long time and are highly optimized. However,
+there are a number of improvements planned and I'm very optimistic about
+parallel compression and decompression. Stay tuned!
+
+## Using the API
+
+The following example program shows how to use the API.
+
+```go
+package main
+
+import (
+	"bytes"
+	"io"
+	"log"
+	"os"
+
+	"github.com/ulikunitz/xz"
+)
+
+func main() {
+	const text = "The quick brown fox jumps over the lazy dog.\n"
+	var buf bytes.Buffer
+	// compress text
+	w, err := xz.NewWriter(&buf)
+	if err != nil {
+		log.Fatalf("xz.NewWriter error %s", err)
+	}
+	if _, err := io.WriteString(w, text); err != nil {
+		log.Fatalf("WriteString error %s", err)
+	}
+	if err := w.Close(); err != nil {
+		log.Fatalf("w.Close error %s", err)
+	}
+	// decompress buffer and write output to stdout
+	r, err := xz.NewReader(&buf)
+	if err != nil {
+		log.Fatalf("NewReader error %s", err)
+	}
+	if _, err = io.Copy(os.Stdout, r); err != nil {
+		log.Fatalf("io.Copy error %s", err)
+	}
+}
+```
+
+## Documentation
+
+You can find the full documentation at [pkg.go.dev](https://pkg.go.dev/github.com/ulikunitz/xz).
+
+## Using the gxz compression tool
+
+The package includes a gxz command line utility for compression and
+decompression.
+
+Use the following command for installation:
+
+    $ go get github.com/ulikunitz/xz/cmd/gxz
+
+To test it, call the following command.
+
+    $ gxz bigfile
+
+After some time a much smaller file bigfile.xz will replace bigfile.
+To decompress it, use the following command.
+
+    $ gxz -d bigfile.xz
+
+## Security & Vulnerabilities
+
+The security policy is documented in [SECURITY.md](SECURITY.md).
+
+The software is not affected by the supply chain attack on the original xz
+implementation, [CVE-2024-3094](https://nvd.nist.gov/vuln/detail/CVE-2024-3094).
+This implementation doesn't share any files with the original xz implementation
+and no patches or pull requests are accepted without a review.
+
+All security advisories for this project are published under
+[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published).
diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md
new file mode 100644
index 000000000..1bdc88878
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+Currently the last minor version v0.5.x is supported.
+
+## Reporting a Vulnerability
+
+You can privately report a vulnerability following this
+[procedure](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability).
+Alternatively, you can create a GitHub issue at
+.
+
+In both cases, allow at least 7 days for a response.
+
+## Security Advisories
+
+All security advisories for this project are published under
+[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published).
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
new file mode 100644
index 000000000..8f9650c13
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -0,0 +1,386 @@
+# TODO list
+
+## Release v0.6
+
+1. Review encoder and check for lzma improvements under xz.
+2. Fix binary tree matcher.
+3. Compare compression ratio with the xz tool using comparable parameters and optimize parameters.
+4. Rename operation action and make it a simple type of size 8.
+5. Make maxMatches, wordSize parameters.
+6. Stop searching after a certain length is found (parameter sweetLen).
+
+## Release v0.7
+
+1. Optimize code.
+2. Do statistical analysis to get linear presets.
+3. Test sync.Pool compatibility for xz and lzma Writer and Reader.
+4. Fuzz optimized code.
+
+## Release v0.8
+
+1. Support parallel go routines for writing and reading xz files.
+2. Support a ReaderAt interface for xz files with small block sizes.
+3. Improve compatibility between gxz and xz.
+4. Provide manual page for gxz.
+
+## Release v0.9
+
+1. Improve documentation.
+2. Fuzz again.
+
+## Release v1.0
+
+1. Fully functioning gxz.
+2. Add godoc URL to README.md (godoc.org).
+3. Resolve all issues.
+4. Define release candidates.
+5. Public announcement.
+
+## Package lzma
+
+### v0.6
+
+* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including
+  * simple scan at the dictionary head for the same byte
+  * use the killer byte (requiring matches to get longer, the first test
+    should be the byte that would make the match longer)
+
+## Optimizations
+
+* There may be a lot of false sharing in lzma.State; check whether this
+  can be improved by reorganizing its internal structure.
+
+* Check whether batching encoding and decoding improves speed.
+
+### DAG optimizations
+
+* Use full buffer to create minimal bit-length above range encoder.
+* Might be too slow (see v0.4)
+
+### Different match finders
+
+* hashes with 2, 3 characters additional to 4 characters
+* binary trees with 2-7 characters (uint64 as key, use uint32 as
+  pointers into an array)
+* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
+  into an array with bit-stealing for the colors)
+
+## Release Procedure
+
+* execute goch -l for all packages; probably with lower param like 0.5.
+* check orthography with gospell
+* Write release notes in doc/relnotes.
+* Update README.md
+* xb copyright . in xz directory to ensure all new files have Copyright header
+* `VERSION= go generate github.com/ulikunitz/xz/...` to update version files
+* Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
+* Update TODO.md - write short log entry
+* `git checkout master && git merge dev`
+* `git tag -a `
+* `git push`
+
+## Log
+
+### 2025-08-28
+
+Release v0.5.14 addresses the security vulnerability CVE-2025-58058. If you
+put bytes in front of an LZMA stream, the header might not be read correctly
+and memory for the dictionary buffer might be allocated. I have implemented
+mitigations for the problem.
+
+### 2025-08-20
+
+Release v0.5.13 addressed issue #61 regarding the handling of multiple
+WriteClosers. I added a new package xio with a WriteCloserStack to address
+the issue.
+
+### 2024-04-03
+
+Release v0.5.12 updates README.md and SECURITY.md to address the supply
+chain attack on the original xz implementation.
+
+### 2022-12-12
+
+Matt Dantay (@bodgit) reported an issue with the LZMA reader. The
+implementation returned an error if the dictionary size was less than
+4096 bytes, but the recommendation stated that the actual window size
+used should be set to 4096 bytes in that case. It was actually pull
+request [#52](https://github.com/ulikunitz/xz/pull/52). The new patch
+v0.5.11 fixes it.
+
+### 2021-02-02
+
+Mituo Heijo fuzzed xz and found a bug in the function readIndexBody. The
+function allocated a slice of records immediately after reading the value
+without further checks. Since the number was too large, the make function
+panicked. The fix is to check the number against the expected number of
+records before allocating them.
+
+### 2020-12-17
+
+Release v0.5.9 fixes warnings, a typo and adds SECURITY.md.
+
+One fix is interesting.
+
+```go
+const (
+	a byte = 0x1
+	b      = 0x2
+)
+```
+
+The constants a and b don't have the same type. Correct is
+
+```go
+const (
+	a byte = 0x1
+	b byte = 0x2
+)
+```
+
+### 2020-08-19
+
+Release v0.5.8 fixes
+[issue #35](https://github.com/ulikunitz/xz/issues/35).
+
+### 2020-02-24
+
+Release v0.5.7 supports the check-ID None and fixes
+[issue #27](https://github.com/ulikunitz/xz/issues/27).
+
+### 2019-02-20
+
+Release v0.5.6 supports the go.mod file.
+
+### 2018-10-28
+
+Release v0.5.5 fixes issue #19, observing ErrLimit outputs.
+
+### 2017-06-05
+
+Release v0.5.4 fixes issue #15, another problem with the padding size
+check for the xz block header. I removed the check completely.
+
+### 2017-02-15
+
+Release v0.5.3 fixes issue #12 regarding the decompression of an empty
+XZ stream. Many thanks to Tomasz Kłak, who reported the issue.
+
+### 2016-12-02
+
+Release v0.5.2 became necessary to allow the decoding of xz files with
+4-byte padding in the block header. Many thanks to Greg, who reported
+the issue.
+
+### 2016-07-23
+
+Release v0.5.1 became necessary to fix problems with 32-bit platforms.
+Many thanks to Bruno Brigas, who reported the issue.
+
+### 2016-07-04
+
+Release v0.5 provides improvements to the compressor and provides support for
+the decompression of xz files with multiple xz streams.
+
+### 2016-01-31
+
+Another compression-rate increase came from checking the byte at the length
+of the best match first, before checking the whole prefix. This makes the
+compressor even faster. We now have a large time budget to beat the
+compression ratio of the xz tool. For enwik8 we now have over 40 seconds
+to reduce the compressed file size by another 7 MiB.
+
+### 2016-01-30
+
+I simplified the encoder. Speed and compression rate increased
+dramatically. A high compression rate also affects the decompression
+speed. The approach with the buffer and optimizing for operation
+compression rate has not been successful. Going for the maximum length
+appears to be the best approach.
+
+### 2016-01-28
+
+The release v0.4 is ready. It provides a working xz implementation,
+which is rather slow, but works and is interoperable with the xz tool.
+It is an important milestone.
+
+### 2016-01-10
+
+I have the first working implementation of an xz reader and writer. I'm
+happy about reaching this milestone.
+
+### 2015-12-02
+
+I'm now ready to implement xz because I have a working LZMA2
+implementation.
I decided today that v0.4 will use the slow encoder
+using the operations buffer to be able to go back, if I intend to do so.
+
+### 2015-10-21
+
+I have restarted the work on the library. While trying to implement
+LZMA2, I discovered that I need to resimplify the encoder and decoder
+functions. The option approach is too complicated. Using a limited byte
+writer, not caring about written bytes at all, and not trying to handle
+uncompressed data simplifies the LZMA encoder and decoder much.
+Processing uncompressed data and handling limits is a feature of the
+LZMA2 format, not of LZMA.
+
+I learned an interesting method from the LZO format. If the last copy is
+too far away, they move the head by 2 bytes instead of 1 byte to
+reduce processing times.
+
+### 2015-08-26
+
+I have now reimplemented the lzma package. The code is reasonably fast,
+but can still be optimized. The next step is to implement LZMA2 and then
+xz.
+
+### 2015-07-05
+
+Created release v0.3. The version is the foundation for a full xz
+implementation that is the target of v0.4.
+
+### 2015-06-11
+
+The gflag package has been developed because I couldn't use flag and
+pflag for fully compatible support of gzip's and lzma's options. It
+now seems to work quite nicely.
+
+### 2015-06-05
+
+The overflow issue was interesting to research, however Henry S. Warren
+Jr.'s Hacker's Delight book was very helpful as usual and had the issue
+explained perfectly. Fefe's information on his website was based on the
+C FAQ and quite bad, because it didn't address the issue of -MININT ==
+MININT.
+
+### 2015-06-04
+
+It has been a productive day. I improved the interface of lzma.Reader
+and lzma.Writer and fixed the error handling.
+
+### 2015-06-01
+
+By computing the bit length of the LZMA operations I was able to
+improve the greedy algorithm implementation. By using an 8 MByte buffer
+the compression rate was not as good as for xz but already better than
+the gzip default.
+
+Compression is currently slow, but this is something we will be able to
+improve over time.
+
+### 2015-05-26
+
+Checked the license of ogier/pflag. The lzmago binary should include
+the license terms for the pflag library.
+
+I added the endorsement clause as used by Google for the Go sources to
+the LICENSE file.
+
+### 2015-05-22
+
+The package lzb now contains the basic implementation for creating or
+reading LZMA byte streams. It allows support for implementing the
+DAG-shortest-path algorithm for the compression function.
+
+### 2015-04-23
+
+Yesterday I completed the lzbase classes. I'm a little bit concerned that
+using the components may require too much code, but on the other hand
+there is a lot of flexibility.
+
+### 2015-04-22
+
+Implemented Reader and Writer during the Bayern game against Porto. The
+second half gave me enough time.
+
+### 2015-04-21
+
+While showering this morning I discovered that the design for OpEncoder
+and OpDecoder doesn't work, because encoding/decoding might depend on
+the current state of the dictionary. This is not exactly the right way
+to start the day.
+
+Therefore we need to keep the Reader and Writer design. This time around
+we simplify it by ignoring size limits. These can be added by wrappers
+around the Reader and Writer interfaces. The Parameters type isn't
+needed anymore.
+
+However I will implement a ReaderState and WriterState type to use
+static typing to ensure the right State object is combined with the
+right lzbase.Reader and lzbase.Writer.
+
+As a start I have implemented ReaderState and WriterState to ensure
+that the reader state is only used by readers and the writer state only
+by writers.
+
+### 2015-04-20
+
+Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
+
+### 2015-04-08
+
+Came up with a new simplified design for lzbase. I have already
+implemented the type State, which replaces OpCodec.
+
+### 2015-04-06
+
+The new lzma package is now fully usable and lzmago now uses it. The
+old lzma package has been completely removed.
+
+### 2015-04-05
+
+Implemented lzma.Reader and tested it.
+
+### 2015-04-04
+
+Implemented baseReader by adapting code from lzma.Reader.
+
+### 2015-04-03
+
+The opCodec was copied yesterday to lzma2. opCodec has a high
+number of dependencies on other files in lzma2. Therefore I had to copy
+almost all files from lzma.
+
+### 2015-03-31
+
+Removed only a TODO item.
+
+However, Francesco Campoy's presentation "Go for Javaneros
+(Javaïstes?)" contains the idea that by using an embedded field E, all
+the methods of E will be defined on T. If E is an interface, T
+satisfies E.
+
+
+
+I have never used this, but it seems to be a cool idea.
+
+### 2015-03-30
+
+Finished the type writerDict and wrote a simple test.
+
+### 2015-03-25
+
+I started to implement the writerDict.
+
+### 2015-03-24
+
+After thinking long about the LZMA2 code and several false starts, I
+now have a plan to create a self-sufficient lzma2 package that supports
+the classic LZMA format as well as LZMA2. The core idea is to support a
+baseReader and baseWriter type that support the basic LZMA stream
+without any headers. Both types must support the reuse of dictionaries
+and the opCodec.
+
+### 2015-01-10
+
+1. Implemented simple lzmago tool
+2. Tested tool against large 4.4G file
+   * compression worked correctly; tested decompression with lzma
+   * decompression hits a full buffer condition
+3. Fixed a bug in the compressor and wrote a test for it
+4. Executed full cycle for 4.4 GB file; performance can be improved ;-)
+
+### 2015-01-11
+
+* Release v0.2 because of the working LZMA encoder and decoder
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
new file mode 100644
index 000000000..b30f1ec97
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -0,0 +1,79 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"io"
+)
+
+// putUint32LE puts the little-endian representation of x into the first
+// four bytes of p.
+func putUint32LE(p []byte, x uint32) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the little-endian representation of x into the first
+// eight bytes of p.
+func putUint64LE(p []byte, x uint64) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+	p[4] = byte(x >> 32)
+	p[5] = byte(x >> 40)
+	p[6] = byte(x >> 48)
+	p[7] = byte(x >> 56)
+}
+
+// uint32LE converts a little endian representation to an uint32 value.
+func uint32LE(p []byte) uint32 {
+	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
+		uint32(p[3])<<24
+}
+
+// putUvarint puts a uvarint representation of x into the byte slice.
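+// For example (editor's note), putUvarint(p, 300) writes the two bytes
+// 0xac, 0x02 into p and returns 2, matching the unsigned varint format
+// used by encoding/binary.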
+func putUvarint(p []byte, x uint64) int {
+	i := 0
+	for x >= 0x80 {
+		p[i] = byte(x) | 0x80
+		x >>= 7
+		i++
+	}
+	p[i] = byte(x)
+	return i + 1
+}
+
+// errOverflowU64 indicates an overflow of the 64-bit unsigned integer.
+var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
+
+// readUvarint reads a uvarint from the given byte reader.
+func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
+	const maxUvarintLen = 10
+
+	var s uint
+	i := 0
+	for {
+		b, err := r.ReadByte()
+		if err != nil {
+			return x, i, err
+		}
+		i++
+		if i > maxUvarintLen {
+			return x, i, errOverflowU64
+		}
+		if b < 0x80 {
+			if i == maxUvarintLen && b > 1 {
+				return x, i, errOverflowU64
+			}
+			return x | uint64(b)<<s, i, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+}
+
+// allZeros checks whether the given byte slice contains only zeros.
+// (Editor's note: this span was garbled in the source; allZeros and the
+// head of padLen are reconstructed from their uses later in this patch.)
+func allZeros(p []byte) bool {
+	for _, c := range p {
+		if c != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// padLen returns the length of the padding required for the given
+// argument.
+func padLen(n int64) int {
+	k := int(n % 4)
+	if k > 0 {
+		k = 4 - k
+	}
+	return k
+}
+
+/*** Header ***/
+
+// headerMagic stores the magic bytes for the header
+var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
+
+// HeaderLen provides the length of the xz file header.
+const HeaderLen = 12
+
+// Constants for the checksum methods supported by xz.
+const (
+	None   byte = 0x0
+	CRC32  byte = 0x1
+	CRC64  byte = 0x4
+	SHA256 byte = 0xa
+)
+
+// errInvalidFlags indicates that flags are invalid.
+var errInvalidFlags = errors.New("xz: invalid flags")
+
+// verifyFlags returns the error errInvalidFlags if the value is
+// invalid.
+func verifyFlags(flags byte) error {
+	switch flags {
+	case None, CRC32, CRC64, SHA256:
+		return nil
+	default:
+		return errInvalidFlags
+	}
+}
+
+// flagstrings maps flag values to strings.
+var flagstrings = map[byte]string{
+	None:   "None",
+	CRC32:  "CRC-32",
+	CRC64:  "CRC-64",
+	SHA256: "SHA-256",
+}
+
+// flagString returns the string representation for the given flags.
+func flagString(flags byte) string {
+	s, ok := flagstrings[flags]
+	if !ok {
+		return "invalid"
+	}
+	return s
+}
+
+// newHashFunc returns a function that creates hash instances for the
+// hash method encoded in flags.
+func newHashFunc(flags byte) (newHash func() hash.Hash, err error) {
+	switch flags {
+	case None:
+		newHash = newNoneHash
+	case CRC32:
+		newHash = newCRC32
+	case CRC64:
+		newHash = newCRC64
+	case SHA256:
+		newHash = sha256.New
+	default:
+		err = errInvalidFlags
+	}
+	return
+}
+
+// header provides the actual content of the xz file header: the flags.
+type header struct {
+	flags byte
+}
+
+// Errors returned by readHeader.
+var errHeaderMagic = errors.New("xz: invalid header magic bytes")
+
+// ValidHeader checks whether data is a correct xz file header. The
+// length of data must be HeaderLen.
+func ValidHeader(data []byte) bool {
+	var h header
+	err := h.UnmarshalBinary(data)
+	return err == nil
+}
+
+// String returns a string representation of the flags.
+func (h header) String() string {
+	return flagString(h.flags)
+}
+
+// UnmarshalBinary reads header from the provided data slice.
+func (h *header) UnmarshalBinary(data []byte) error {
+	// header length
+	if len(data) != HeaderLen {
+		return errors.New("xz: wrong file header length")
+	}
+
+	// magic header
+	if !bytes.Equal(headerMagic, data[:6]) {
+		return errHeaderMagic
+	}
+
+	// checksum
+	crc := crc32.NewIEEE()
+	crc.Write(data[6:8])
+	if uint32LE(data[8:]) != crc.Sum32() {
+		return errors.New("xz: invalid checksum for file header")
+	}
+
+	// stream flags
+	if data[6] != 0 {
+		return errInvalidFlags
+	}
+	flags := data[7]
+	if err := verifyFlags(flags); err != nil {
+		return err
+	}
+
+	h.flags = flags
+	return nil
+}
+
+// MarshalBinary generates the xz file header.
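+// The result is always HeaderLen (12) bytes: the six magic bytes, a zero
+// byte, the flags byte, and a little-endian CRC-32 over the two flag bytes.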
+func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. +func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. +func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. 
+var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. +func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. +func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. 
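+// The encoded header is (size byte + 1) * 4 bytes long: it is zero-padded
+// to a multiple of four and terminated by a little-endian CRC-32 over the
+// preceding bytes.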
+func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. +type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. 
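+// Each record is two unsigned varints: the unpadded size followed by the
+// uncompressed size.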
+func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
+func readIndexBody(r io.Reader, expectedRecordLen int) (records []record, n int64, err error) {
+	crc := crc32.NewIEEE()
+	// index indicator
+	crc.Write([]byte{0})
+
+	br := lzma.ByteReader(io.TeeReader(r, crc))
+
+	// number of records
+	u, k, err := readUvarint(br)
+	n += int64(k)
+	if err != nil {
+		return nil, n, err
+	}
+	recLen := int(u)
+	if recLen < 0 || uint64(recLen) != u {
+		return nil, n, errors.New("xz: record number overflow")
+	}
+	if recLen != expectedRecordLen {
+		return nil, n, fmt.Errorf(
+			"xz: index length is %d; want %d",
+			recLen, expectedRecordLen)
+	}
+
+	// list of records
+	records = make([]record, recLen)
+	for i := range records {
+		records[i], k, err = readRecord(br)
+		n += int64(k)
+		if err != nil {
+			return nil, n, err
+		}
+	}
+
+	p := make([]byte, padLen(int64(n+1)), 4)
+	k, err = io.ReadFull(br.(io.Reader), p)
+	n += int64(k)
+	if err != nil {
+		return nil, n, err
+	}
+	if !allZeros(p) {
+		return nil, n, errors.New("xz: non-zero byte in index padding")
+	}
+
+	// crc32
+	s := crc.Sum32()
+	p = p[:4]
+	k, err = io.ReadFull(br.(io.Reader), p)
+	n += int64(k)
+	if err != nil {
+		return records, n, err
+	}
+	if uint32LE(p) != s {
+		return nil, n, errors.New("xz: wrong checksum for index")
+	}
+
+	return records, n, nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/fox-check-none.xz b/vendor/github.com/ulikunitz/xz/fox-check-none.xz
new file mode 100644
index 0000000000000000000000000000000000000000..46043f7dc89b610dc3badb9db3426620c4c97462
GIT binary patch
literal 96
zcmexsUKJ6=z`*cd=%ynRgCe6CkX@qxbTK1?PDnLRM*R
tL9s%9S!$6&2~avGv8qxbB|lw{3#g5Ofzej?!NQIFY(?{`7{LOOQ2>-O93KDx

literal 0
HcmV?d00001

diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz
new file mode 100644
index 0000000000000000000000000000000000000000..4b820bd5a16e83fe5db4fb315639a4337f862483
GIT binary patch
literal 104
zcmexsUKJ6=z`*kC+7>q^21Q0O1_p)_{ill=8FWH2QWXkIGn2Cwl8W-n^AytZD-^Oy
za|?dFO$zmVVdxt0+m!4eq-
E0K@hlng9R*

literal 0
HcmV?d00001

diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
new file mode 100644
index 000000000..dae159db5
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
@@ -0,0 +1,181 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hash
+
+// CyclicPoly provides a cyclic polynomial rolling hash.
+type CyclicPoly struct {
+	h uint64
+	p []uint64
+	i int
+}
+
+// ror rotates the unsigned 64-bit integer to the right. The argument s
+// must be less than 64.
+func ror(x uint64, s uint) uint64 {
+	return (x >> s) | (x << (64 - s))
+}
+
+// NewCyclicPoly creates a new instance of the CyclicPoly structure. The
+// argument n gives the number of bytes for which a hash will be computed.
+// This number must be positive; the method panics if this isn't the case.
+func NewCyclicPoly(n int) *CyclicPoly {
+	if n < 1 {
+		panic("argument n must be positive")
+	}
+	return &CyclicPoly{p: make([]uint64, 0, n)}
+}
+
+// Len returns the length of the byte sequence for which a hash is generated.
+func (r *CyclicPoly) Len() int {
+	return cap(r.p)
+}
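+// NOTE (editor): illustrative sketch, not part of the upstream package. A
+// rolling hash is primed with the first Len() bytes and then yields one
+// value per additional byte, covering the last Len() bytes seen:
+//
+//	h := NewCyclicPoly(4)
+//	var v uint64
+//	for _, b := range []byte("abcdef") {
+//		v = h.RollByte(b)
+//	}
+//	// v now hashes the final 4-byte window "cdef".
+
+// RollByte hashes the next byte and returns a hash value. The complete hash
+// becomes available after at least Len() bytes have been hashed.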
+func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. +var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 
0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 000000000..b4cf8b75e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. + +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 000000000..5322342ee --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. 
+const A = 0x97b548add41d5da1
+
+// RabinKarp supports the computation of a rolling hash.
+type RabinKarp struct {
+	A uint64
+	// a^n
+	aOldest uint64
+	h       uint64
+	p       []byte
+	i       int
+}
+
+// NewRabinKarp creates a new RabinKarp value. The argument n defines the
+// length of the byte sequence to be hashed. The default constant will be
+// used.
+func NewRabinKarp(n int) *RabinKarp {
+	return NewRabinKarpConst(n, A)
+}
+
+// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the
+// length of the byte sequence to be hashed. The argument a provides the
+// constant used to compute the hash.
+func NewRabinKarpConst(n int, a uint64) *RabinKarp {
+	if n <= 0 {
+		panic("number of bytes n must be positive")
+	}
+	aOldest := uint64(1)
+	// There are faster methods. For the small n required by the LZMA
+	// compressor O(n) is sufficient.
+	for i := 0; i < n; i++ {
+		aOldest *= a
+	}
+	return &RabinKarp{
+		A: a, aOldest: aOldest,
+		p: make([]byte, 0, n),
+	}
+}
+
+// Len returns the length of the byte sequence.
+func (r *RabinKarp) Len() int {
+	return cap(r.p)
+}
+
+// RollByte computes the hash after x has been added.
+func (r *RabinKarp) RollByte(x byte) uint64 {
+	if len(r.p) < cap(r.p) {
+		r.h += uint64(x)
+		r.h *= r.A
+		r.p = append(r.p, x)
+	} else {
+		r.h -= uint64(r.p[r.i]) * r.aOldest
+		r.h += uint64(x)
+		r.h *= r.A
+		r.p[r.i] = x
+		r.i = (r.i + 1) % cap(r.p)
+	}
+	return r.h
+}
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
new file mode 100644
index 000000000..a98983356
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
@@ -0,0 +1,29 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hash
+
+// Roller provides an interface for rolling hashes. The hash value becomes
+// valid after RollByte has been called Len times.
+type Roller interface {
+	Len() int
+	RollByte(x byte) uint64
+}
+
+// Hashes computes all hash values for the array p. Note that the state of the
+// roller is changed.
+func Hashes(r Roller, p []byte) []uint64 {
+	n := r.Len()
+	if len(p) < n {
+		return nil
+	}
+	h := make([]uint64, len(p)-n+1)
+	for i := 0; i < n-1; i++ {
+		r.RollByte(p[i])
+	}
+	for i := range h {
+		h[i] = r.RollByte(p[i+n-1])
+	}
+	return h
+}
diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
new file mode 100644
index 000000000..f4627ea11
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
@@ -0,0 +1,456 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xlog provides a simple logging package that allows disabling
+// certain message categories. It defines a type, Logger, with multiple
+// methods for formatting output. The package also has a predefined
+// 'standard' Logger accessible through the helper functions Print[f|ln],
+// Fatal[f|ln], Panic[f|ln], Warn[f|ln] and Debug[f|ln] that are easier
+// to use than creating a Logger manually. That logger writes to standard
+// error and prints the date and time of each logged message, which can
+// be configured using the function SetFlags.
+//
+// The Fatal functions call os.Exit(1) after the message is output; the
+// output can be suppressed by the flags, the exit cannot.
+// The Panic functions call panic after writing the log message; the
+// write can be suppressed by the flags, the panic cannot.
+package xlog
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sync"
+	"time"
+)
+
+// The flags define what information is prefixed to each log entry
+// generated by the Logger. The Lno* versions allow the suppression of
+// specific output. The bits are or'ed together to control what will be
+// printed. There is no control over the order of the items printed and
+// the format. The full format is:
+//
+//	2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message
+const (
+	Ldate         = 1 << iota // the date: 2009-01-23
+	Ltime                     // the time: 01:23:23
+	Lmicroseconds             // microsecond resolution: 01:23:23.123123
+	Llongfile                 // full file name and line number: /a/b/c/d.go:23
+	Lshortfile                // final file name element and line number: d.go:23
+	Lnopanic                  // suppresses output from Panic[f|ln] but not the panic call
+	Lnofatal                  // suppresses output from Fatal[f|ln] but not the exit
+	Lnowarn                   // suppresses output from Warn[f|ln]
+	Lnoprint                  // suppresses output from Print[f|ln]
+	Lnodebug                  // suppresses output from Debug[f|ln]
+	// initial values for the standard logger
+	Lstdflags = Ldate | Ltime | Lnodebug
+)
+
+// A Logger represents an active logging object that generates lines of
+// output to an io.Writer. Each logging operation, if not suppressed,
+// makes a single call to the Writer's Write method. A Logger can be
+// used simultaneously from multiple goroutines; it serializes access
+// to the Writer.
+type Logger struct {
+	mu sync.Mutex // ensures atomic writes and protects the
+	// following fields
+	prefix string    // prefix to write at beginning of each line
+	flag   int       // properties
+	out    io.Writer // destination for output
+	buf    []byte    // for accumulating text to write
+}
+
+// New creates a new Logger. The out argument sets the destination to
+// which the log output will be written. The prefix appears at the
+// beginning of each log line. The flag argument defines the logging
+// properties.
+func New(out io.Writer, prefix string, flag int) *Logger {
+	return &Logger{out: out, prefix: prefix, flag: flag}
+}
+
+// std is the standard logger used by the package scope functions.
+var std = New(os.Stderr, "", Lstdflags)
+
+// itoa converts the integer to ASCII. A negative width avoids
+// zero-padding. The function supports only non-negative integers.
+func itoa(buf *[]byte, i int, wid int) {
+	var u = uint(i)
+	if u == 0 && wid <= 1 {
+		*buf = append(*buf, '0')
+		return
+	}
+	var b [32]byte
+	bp := len(b)
+	for ; u > 0 || wid > 0; u /= 10 {
+		bp--
+		wid--
+		b[bp] = byte(u%10) + '0'
+	}
+	*buf = append(*buf, b[bp:]...)
+}
+
+// formatHeader puts the header into the buf field of the buffer.
+func (l *Logger) formatHeader(t time.Time, file string, line int) {
+	l.buf = append(l.buf, l.prefix...)
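+	// The pieces below follow the format documented on the flag
+	// constants: date ("2009-01-23 "), time ("01:23:23[.123123] "),
+	// then "file:line: ", each emitted only if its flag is set.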
+ if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) + } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) 
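+	// The message is formatted a second time so that the panic value
+	// carries it even when Lnopanic suppressed the log output above.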
+ s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) 
+} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 000000000..2b39da6f7 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,522 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "unicode" +) + +// node represents a node in the binary tree. 
+type node struct {
+	// x is the search value
+	x uint32
+	// p parent node
+	p uint32
+	// l left child
+	l uint32
+	// r right child
+	r uint32
+}
+
+// wordLen is the number of bytes represented by the x field of a node.
+const wordLen = 4
+
+// binTree supports the identification of the next operation based on a
+// binary tree.
+//
+// Nodes will be identified by their index into the ring buffer.
+type binTree struct {
+	dict *encoderDict
+	// ring buffer of nodes
+	node []node
+	// absolute offset of the entry for the next node; the position
+	// is 4 bytes larger
+	hoff int64
+	// front position in the node ring buffer
+	front uint32
+	// index of the root node
+	root uint32
+	// current x value
+	x uint32
+	// preallocated array
+	data []byte
+}
+
+// null represents the nonexistent index. Zero can't be used because it
+// is a valid node index; using it would require shifting every index
+// by one.
+const null uint32 = 1<<32 - 1
+
+// newBinTree initializes the binTree structure. The capacity defines
+// the size of the buffer and the maximum distance for which matches
+// will be found.
+func newBinTree(capacity int) (t *binTree, err error) {
+	if capacity < 1 {
+		return nil, errors.New(
+			"newBinTree: capacity must be larger than zero")
+	}
+	if int64(capacity) >= int64(null) {
+		return nil, errors.New(
+			"newBinTree: capacity must be less than 2^{32}-1")
+	}
+	t = &binTree{
+		node: make([]node, capacity),
+		hoff: -int64(wordLen),
+		root: null,
+		data: make([]byte, maxMatchLen),
+	}
+	return t, nil
+}
+
+func (t *binTree) SetDict(d *encoderDict) { t.dict = d }
+
+// WriteByte writes a single byte into the binary tree.
+func (t *binTree) WriteByte(c byte) error {
+	t.x = (t.x << 8) | uint32(c)
+	t.hoff++
+	if t.hoff < 0 {
+		return nil
+	}
+	v := t.front
+	if int64(v) < t.hoff {
+		// We are overwriting old nodes stored in the tree.
+		t.remove(v)
+	}
+	t.node[v].x = t.x
+	t.add(v)
+	t.front++
+	if int64(t.front) >= int64(len(t.node)) {
+		t.front = 0
+	}
+	return nil
+}
+
+// Write writes a sequence of bytes into the binTree structure.
+func (t *binTree) Write(p []byte) (n int, err error) {
+	for _, c := range p {
+		t.WriteByte(c)
+	}
+	return len(p), nil
+}
+
+// add puts the node v into the tree. The node must not already be part
+// of the tree.
+func (t *binTree) add(v uint32) {
+	vn := &t.node[v]
+	// Set left and right to null indices.
+	vn.l, vn.r = null, null
+	// If the binary tree is empty make v the root.
+	if t.root == null {
+		t.root = v
+		vn.p = null
+		return
+	}
+	x := vn.x
+	p := t.root
+	// Search for the right leaf link and add the new node.
+	for {
+		pn := &t.node[p]
+		if x <= pn.x {
+			if pn.l == null {
+				pn.l = v
+				vn.p = p
+				return
+			}
+			p = pn.l
+		} else {
+			if pn.r == null {
+				pn.r = v
+				vn.p = p
+				return
+			}
+			p = pn.r
+		}
+	}
+}
+
+// parent returns the parent node index of v and a pointer to the link
+// to v in the parent.
+func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) {
+	if t.root == v {
+		return null, &t.root
+	}
+	p = t.node[v].p
+	if t.node[p].l == v {
+		ptr = &t.node[p].l
+	} else {
+		ptr = &t.node[p].r
+	}
+	return
+}
+
+// Remove node v.
+func (t *binTree) remove(v uint32) {
+	vn := &t.node[v]
+	p, ptr := t.parent(v)
+	l, r := vn.l, vn.r
+	if l == null {
+		// Move the right child up.
+		*ptr = r
+		if r != null {
+			t.node[r].p = p
+		}
+		return
+	}
+	if r == null {
+		// Move the left child up.
+		*ptr = l
+		t.node[l].p = p
+		return
+	}
+
+	// Search the in-order predecessor u.
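+	// (editor note: the in-order predecessor is the maximum of the left
+	// subtree; substituting it for v preserves the search-tree ordering.)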
+ un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. +func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. +func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +/* +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} +*/ + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 000000000..201091709 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,47 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
+var ntz32Table = [32]int8{
+	0, 1, 2, 24, 3, 19, 6, 25,
+	22, 4, 20, 10, 16, 7, 12, 26,
+	31, 23, 18, 5, 21, 9, 15, 11,
+	30, 17, 8, 14, 29, 13, 28, 27,
+}
+
+/*
+// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer.
+func ntz32(x uint32) int {
+	if x == 0 {
+		return 32
+	}
+	x = (x & -x) * ntz32Const
+	return int(ntz32Table[x>>27])
+}
+*/
+
+// nlz32 computes the number of leading zeros for an unsigned 32-bit integer.
+func nlz32(x uint32) int {
+	// Smear the leftmost bit to the right.
+	x |= x >> 1
+	x |= x >> 2
+	x |= x >> 4
+	x |= x >> 8
+	x |= x >> 16
+	// Use ntz mechanism to calculate nlz.
+	x++
+	if x == 0 {
+		return 0
+	}
+	x *= ntz32Const
+	return 32 - int(ntz32Table[x>>27])
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go
new file mode 100644
index 000000000..9dfdf28b2
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go
@@ -0,0 +1,39 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"io"
+)
+
+// breader provides the ReadByte function for a Reader. It doesn't read
+// more data from the reader than absolutely necessary.
+type breader struct {
+	io.Reader
+	// helper slice to save allocations
+	p []byte
+}
+
+// ByteReader converts an io.Reader into an io.ByteReader.
+func ByteReader(r io.Reader) io.ByteReader {
+	br, ok := r.(io.ByteReader)
+	if !ok {
+		return &breader{r, make([]byte, 1)}
+	}
+	return br
+}
+
+// ReadByte reads a single byte from the underlying reader.
+func (r *breader) ReadByte() (c byte, err error) {
+	n, err := r.Reader.Read(r.p)
+	if n < 1 {
+		if err == nil {
+			err = errors.New("breader.ReadByte: no data")
+		}
+		return 0, err
+	}
+	return r.p[0], nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go
new file mode 100644
index 000000000..af41d5b2d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go
@@ -0,0 +1,171 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+)
+
+// buffer provides a circular buffer of bytes. If the front index equals
+// the rear index the buffer is empty. As a consequence front cannot
+// equal rear for a full buffer. So a full buffer has a length that is
+// one byte less than the length of the data slice.
+type buffer struct {
+	data  []byte
+	front int
+	rear  int
+}
+
+// newBuffer creates a buffer with the given size.
+func newBuffer(size int) *buffer {
+	return &buffer{data: make([]byte, size+1)}
+}
+
+// Cap returns the capacity of the buffer.
+func (b *buffer) Cap() int {
+	return len(b.data) - 1
+}
+
+// Reset resets the buffer. The front and rear index are set to zero.
+func (b *buffer) Reset() {
+	b.front = 0
+	b.rear = 0
+}
+
+// Buffered returns the number of bytes buffered.
+func (b *buffer) Buffered() int {
+	delta := b.front - b.rear
+	if delta < 0 {
+		delta += len(b.data)
+	}
+	return delta
+}
+
+// Available returns the number of bytes available for writing.
+func (b *buffer) Available() int {
+	delta := b.rear - 1 - b.front
+	if delta < 0 {
+		delta += len(b.data)
+	}
+	return delta
+}
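+// NOTE (editor): illustrative sketch of the invariants above, not part of
+// the upstream package. For newBuffer(7), len(data) == 8 and Cap() == 7:
+//
+//	b := newBuffer(7)
+//	b.Write([]byte("abc")) // Buffered() == 3, Available() == 4
+//	p := make([]byte, 2)
+//	b.Read(p)              // p == "ab", Buffered() == 1
+//
+// Keeping one slot unused is what lets front == rear mean "empty" without
+// a separate length field.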
+// addIndex adds a non-negative integer to the index i and returns the
+// resulting index. The function takes care of wrapping the index as
+// well as potential overflow situations.
+func (b *buffer) addIndex(i int, n int) int {
+	// subtraction of len(b.data) prevents overflow
+	i += n - len(b.data)
+	if i < 0 {
+		i += len(b.data)
+	}
+	return i
+}
+
+// Read reads bytes from the buffer into p and returns the number of
+// bytes read. The function never returns an error but might return less
+// data than requested.
+func (b *buffer) Read(p []byte) (n int, err error) {
+	n, err = b.Peek(p)
+	b.rear = b.addIndex(b.rear, n)
+	return n, err
+}
+
+// Peek reads bytes from the buffer into p without changing the buffer.
+// Peek will never return an error but might return less data than
+// requested.
+func (b *buffer) Peek(p []byte) (n int, err error) {
+	m := b.Buffered()
+	n = len(p)
+	if m < n {
+		n = m
+		p = p[:n]
+	}
+	k := copy(p, b.data[b.rear:])
+	if k < n {
+		copy(p[k:], b.data)
+	}
+	return n, nil
+}
+
+// Discard skips the next n bytes to read from the buffer, returning the
+// number of bytes discarded.
+//
+// If Discard skips fewer than n bytes, it returns an error.
+func (b *buffer) Discard(n int) (discarded int, err error) {
+	if n < 0 {
+		return 0, errors.New("buffer.Discard: negative argument")
+	}
+	m := b.Buffered()
+	if m < n {
+		n = m
+		err = errors.New(
+			"buffer.Discard: discarded fewer bytes than requested")
+	}
+	b.rear = b.addIndex(b.rear, n)
+	return n, err
+}
+
+// ErrNoSpace indicates that there is insufficient space for the Write
+// operation.
+var ErrNoSpace = errors.New("insufficient space")
+
+// Write puts data into the buffer. If fewer bytes are written than
+// requested ErrNoSpace is returned.
+func (b *buffer) Write(p []byte) (n int, err error) {
+	m := b.Available()
+	n = len(p)
+	if m < n {
+		n = m
+		p = p[:m]
+		err = ErrNoSpace
+	}
+	k := copy(b.data[b.front:], p)
+	if k < n {
+		copy(b.data, p[k:])
+	}
+	b.front = b.addIndex(b.front, n)
+	return n, err
+}
+
+// WriteByte writes a single byte into the buffer. The error ErrNoSpace
+// is returned if no space for a single byte is available in the buffer.
+func (b *buffer) WriteByte(c byte) error {
+	if b.Available() < 1 {
+		return ErrNoSpace
+	}
+	b.data[b.front] = c
+	b.front = b.addIndex(b.front, 1)
+	return nil
+}
+
+// prefixLen returns the length of the common prefix of a and b.
+func prefixLen(a, b []byte) int {
+	if len(a) > len(b) {
+		a, b = b, a
+	}
+	for i, c := range a {
+		if b[i] != c {
+			return i
+		}
+	}
+	return len(a)
+}
+
+// matchLen returns the length of the common prefix for the given
+// distance from the rear and the byte slice p.
+func (b *buffer) matchLen(distance int, p []byte) int {
+	var n int
+	i := b.rear - distance
+	if i < 0 {
+		if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
+			return n
+		}
+		p = p[n:]
+		i = 0
+	}
+	n += prefixLen(p, b.data[i:])
+	return n
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
new file mode 100644
index 000000000..f27e31a4a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
@@ -0,0 +1,37 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"io"
+)
+
+// ErrLimit indicates that the limit of the LimitedByteWriter has been
+// reached.
+var ErrLimit = errors.New("limit reached")
+
+// LimitedByteWriter provides a byte writer that can be written until a
+// limit is reached.
The field N provides the number of remaining +// bytes. +type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 000000000..3765484e6 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + // break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 000000000..d5b814f0a --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,128 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. 
+// If the distance is non-positive or exceeds the current length of the
+// dictionary, the zero byte is returned.
+func (d *decoderDict) byteAt(dist int) byte {
+	if !(0 < dist && dist <= d.dictLen()) {
+		return 0
+	}
+	i := d.buf.front - dist
+	if i < 0 {
+		i += len(d.buf.data)
+	}
+	return d.buf.data[i]
+}
+
+// writeMatch writes the match at the top of the dictionary. The given
+// distance must point in the current dictionary and the length must not
+// exceed the maximum length 273 supported in LZMA.
+//
+// The error value ErrNoSpace indicates that no space is available in
+// the dictionary for writing. You need to read from the dictionary
+// first.
+func (d *decoderDict) writeMatch(dist int64, length int) error {
+	if !(0 < dist && dist <= int64(d.dictLen())) {
+		return errors.New("writeMatch: distance out of range")
+	}
+	if !(0 < length && length <= maxMatchLen) {
+		return errors.New("writeMatch: length out of range")
+	}
+	if length > d.buf.Available() {
+		return ErrNoSpace
+	}
+	d.head += int64(length)
+
+	i := d.buf.front - int(dist)
+	if i < 0 {
+		i += len(d.buf.data)
+	}
+	for length > 0 {
+		var p []byte
+		if i >= d.buf.front {
+			p = d.buf.data[i:]
+			i = 0
+		} else {
+			p = d.buf.data[i:d.buf.front]
+			i = d.buf.front
+		}
+		if len(p) > length {
+			p = p[:length]
+		}
+		if _, err := d.buf.Write(p); err != nil {
+			panic(fmt.Errorf("d.buf.Write returned error %s", err))
+		}
+		length -= len(p)
+	}
+	return nil
+}
+
+// Write writes the given bytes into the dictionary and advances the
+// head.
+func (d *decoderDict) Write(p []byte) (n int, err error) {
+	n, err = d.buf.Write(p)
+	d.head += int64(n)
+	return n, err
+}
+
+// Available returns the number of available bytes for writing into the
+// decoder dictionary.
+func (d *decoderDict) Available() int { return d.buf.Available() }
+
+// Read reads data from the buffer contained in the decoder dictionary.
+func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) }
diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
new file mode 100644
index 000000000..76b713106
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
@@ -0,0 +1,38 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// directCodec allows the encoding and decoding of values with a fixed number
+// of bits. The number of bits must be in the range [1,32].
+type directCodec byte
+
+// Bits returns the number of bits supported by this codec.
+func (dc directCodec) Bits() int {
+	return int(dc)
+}
+
+// Encode uses the range encoder to encode a value with the fixed number of
+// bits. The most-significant bit is encoded first.
+func (dc directCodec) Encode(e *rangeEncoder, v uint32) error {
+	for i := int(dc) - 1; i >= 0; i-- {
+		if err := e.DirectEncodeBit(v >> uint(i)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
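+// NOTE (editor): the direct codec shifts one bit per iteration, so the
+// decoder below reassembles the value MSB-first; for a 3-bit codec the
+// bit sequence 1,0,1 yields v == 5. Illustrative only, not upstream text.
+
+// Decode uses the range decoder to decode a value with the given number
+// of bits. The most-significant bit is decoded first.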
+func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 000000000..b447d8ec4 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,140 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. +func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. 
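+//
+// Worked example (editor's note, derived from the code below): for
+// posSlot = 13 the number of extra bits is (13 >> 1) - 1 = 5 and the
+// base is (2 | (13 & 1)) << 5 = 96, so the decoded distance offset
+// lies in the range [96,127].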
+func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 000000000..e40938318 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 16 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. +func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. 
+func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + if err != nil { + return err + } + if b == 1 { + e.state.rep[3] = e.state.rep[2] + } + e.state.rep[2] = e.state.rep[1] + } + e.state.rep[1] = e.state.rep[0] + e.state.rep[0] = dist + } + e.state.updateStateRep() + return e.state.repLenCodec.Encode(e.re, n, posState) +} + +// writeOp writes a single operation to the range encoder. The function +// checks whether there is enough space available to close the LZMA +// stream. +func (e *encoder) writeOp(op operation) error { + if e.re.Available() < int64(e.margin) { + return ErrLimit + } + switch x := op.(type) { + case lit: + return e.writeLiteral(x) + case match: + return e.writeMatch(x) + default: + panic("unexpected operation") + } +} + +// compress compressed data from the dictionary buffer. If the flag all +// is set, all data in the dictionary buffer will be compressed. The +// function returns ErrLimit if the underlying writer has reached its +// limit. 
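+//
+// Editor's note (an inference about intent, not upstream text): when
+// the all flag is clear, compress leaves up to maxMatchLen-1 bytes in
+// the buffer, so a later Write can still extend a match across the
+// call boundary.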
+func (e *encoder) compress(flags compressFlags) error { + n := 0 + if flags&all == 0 { + n = maxMatchLen - 1 + } + d := e.dict + m := d.m + for d.Buffered() > n { + op := m.NextOp(e.state.rep) + if err := e.writeOp(op); err != nil { + return err + } + d.Discard(op.Len()) + } + return nil +} + +// eosMatch is a pseudo operation that indicates the end of the stream. +var eosMatch = match{distance: maxDistance, n: minMatchLen} + +// Close terminates the LZMA stream. If requested the end-of-stream +// marker will be written. If the byte writer limit has been or will be +// reached during compression of the remaining data in the buffer the +// LZMA stream will be closed and data will remain in the buffer. +func (e *encoder) Close() error { + err := e.compress(all) + if err != nil && err != ErrLimit { + return err + } + if e.marker { + if err := e.writeMatch(eosMatch); err != nil { + return err + } + } + err = e.re.Close() + return err +} + +// Compressed returns the number bytes of the input data that been +// compressed. +func (e *encoder) Compressed() int64 { + return e.dict.Pos() - e.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go new file mode 100644 index 000000000..4b3916eab --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -0,0 +1,149 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// matcher is an interface that supports the identification of the next +// operation. +type matcher interface { + io.Writer + SetDict(d *encoderDict) + NextOp(rep [4]uint32) operation +} + +// encoderDict provides the dictionary of the encoder. It includes an +// additional buffer atop of the actual dictionary. +type encoderDict struct { + buf buffer + m matcher + head int64 + capacity int + // preallocated array + data [maxMatchLen]byte +} + +// newEncoderDict creates the encoder dictionary. The argument bufSize +// defines the size of the additional buffer. +func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New( + "lzma: dictionary capacity out of range") + } + if bufSize < 1 { + return nil, errors.New( + "lzma: buffer size must be larger than zero") + } + d = &encoderDict{ + buf: *newBuffer(dictCap + bufSize), + capacity: dictCap, + m: m, + } + m.SetDict(d) + return d, nil +} + +// Discard discards n bytes. Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. +func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. 
If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 0000000000000000000000000000000000000000..5edad633266eb5173a7c39761dc8b9e71efbfe80 GIT binary patch literal 67 zcma!LU}#|Y4+RWbQXGqzRntCtR~%i$`d{za%}WYWYfXMUl6~Q5_UjH?=5CuO0w(I5 UuQ#VXelz{mI_3ZW`W7$%0HEw6g#Z8m literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 000000000..f66e9cdd9 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. +const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. +var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. 
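+//
+// Illustration (editor's addition): if the words at positions 5, 17
+// and 42 share a hash value, the table slot stores the most recent
+// position 42 and the circular buffer stores the deltas 42-17 = 25
+// and 17-5 = 12, which getMatches follows to enumerate all three
+// positions.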
+type hashTable struct { + dict *encoderDict + // actual hash table + t []int64 + // circular list data with the offset to the next word + data []uint32 + front int + // mask for computing the index for the hash table + mask uint64 + // hash offset; initial value is -int64(wordLen) + hoff int64 + // length of the hashed word + wordLen int + // hash roller for computing the hash values for the Write + // method + wr hash.Roller + // hash roller for computing arbitrary hashes + hr hash.Roller + // preallocated slices + p [maxMatches]int64 + distances [maxMatches + shortDists]int +} + +// hashTableExponent derives the hash table exponent from the dictionary +// capacity. +func hashTableExponent(n uint32) int { + e := 30 - nlz32(n) + switch { + case e < minTableExponent: + e = minTableExponent + case e > maxTableExponent: + e = maxTableExponent + } + return e +} + +// newHashTable creates a new hash table for words of length wordLen +func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { + if !(0 < capacity) { + return nil, errors.New( + "newHashTable: capacity must not be negative") + } + exp := hashTableExponent(uint32(capacity)) + if !(1 <= wordLen && wordLen <= 4) { + return nil, errors.New("newHashTable: " + + "argument wordLen out of range") + } + n := 1 << uint(exp) + if n <= 0 { + panic("newHashTable: exponent is too large") + } + t = &hashTable{ + t: make([]int64, n), + data: make([]uint32, capacity), + mask: (uint64(1) << uint(exp)) - 1, + hoff: -int64(wordLen), + wordLen: wordLen, + wr: newRoller(wordLen), + hr: newRoller(wordLen), + } + return t, nil +} + +func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } + +// buffered returns the number of bytes that are currently hashed. +func (t *hashTable) buffered() int { + n := t.hoff + 1 + switch { + case n <= 0: + return 0 + case n >= int64(len(t.data)): + return len(t.data) + } + return int(n) +} + +// addIndex adds n to an index ensuring that is stays inside the +// circular buffer for the hash chain. +func (t *hashTable) addIndex(i, n int) int { + i += n - len(t.data) + if i < 0 { + i += len(t.data) + } + return i +} + +// putDelta puts the delta instance at the current front of the circular +// chain buffer. +func (t *hashTable) putDelta(delta uint32) { + t.data[t.front] = delta + t.front = t.addIndex(t.front, 1) +} + +// putEntry puts a new entry into the hash table. If there is already a +// value stored it is moved into the circular chain buffer. +func (t *hashTable) putEntry(h uint64, pos int64) { + if pos < 0 { + return + } + i := h & t.mask + old := t.t[i] - 1 + t.t[i] = pos + 1 + var delta int64 + if old >= 0 { + delta = pos - old + if delta > 1<<32-1 || delta > int64(t.buffered()) { + delta = 0 + } + } + t.putDelta(uint32(delta)) +} + +// WriteByte converts a single byte into a hash and puts them into the hash +// table. +func (t *hashTable) WriteByte(b byte) error { + h := t.wr.RollByte(b) + t.hoff++ + t.putEntry(h, t.hoff) + return nil +} + +// Write converts the bytes provided into hash tables and stores the +// abbreviated offsets into the hash table. The method will never return an +// error. +func (t *hashTable) Write(p []byte) (n int, err error) { + for _, b := range p { + // WriteByte doesn't generate an error. + t.WriteByte(b) + } + return len(p), nil +} + +// getMatches the matches for a specific hash. The functions returns the +// number of positions found. +// +// TODO: Make a getDistances because that we are actually interested in. 
+func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { + if t.hoff < 0 || len(positions) == 0 { + return 0 + } + buffered := t.buffered() + tailPos := t.hoff + 1 - int64(buffered) + rear := t.front - buffered + if rear >= 0 { + rear -= len(t.data) + } + // get the slot for the hash + pos := t.t[h&t.mask] - 1 + delta := pos - tailPos + for { + if delta < 0 { + return n + } + positions[n] = tailPos + delta + n++ + if n >= len(positions) { + return n + } + i := rear + int(delta) + if i < 0 { + i += len(t.data) + } + u := t.data[i] + if u == 0 { + return n + } + delta -= int64(u) + } +} + +// hash computes the rolling hash for the word stored in p. For correct +// results its length must be equal to t.wordLen. +func (t *hashTable) hash(p []byte) uint64 { + var h uint64 + for _, b := range p { + h = t.hr.RollByte(b) + } + return h +} + +// Matches fills the positions slice with potential matches. The +// functions returns the number of positions filled into positions. The +// byte slice p must have word length of the hash table. +func (t *hashTable) Matches(p []byte, positions []int64) int { + if len(p) != t.wordLen { + panic(fmt.Errorf( + "byte slice must have length %d", t.wordLen)) + } + h := t.hash(p) + return t.getMatches(h, positions) +} + +// NextOp identifies the next operation using the hash table. +// +// TODO: Use all repetitions to find matches. +func (t *hashTable) NextOp(rep [4]uint32) operation { + // get positions + data := t.dict.data[:maxMatchLen] + n, _ := t.dict.buf.Peek(data) + data = data[:n] + var p []int64 + if n < t.wordLen { + p = t.p[:0] + } else { + p = t.p[:maxMatches] + n = t.Matches(data[:t.wordLen], p) + p = p[:n] + } + + // convert positions in potential distances + head := t.dict.head + dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) + for _, pos := range p { + dis := int(head - pos) + if dis > shortDists { + dists = append(dists, dis) + } + } + + // check distances + var m match + dictLen := t.dict.DictLen() + for _, dist := range dists { + if dist > dictLen { + continue + } + + // Here comes a trick. We are only interested in matches + // that are longer than the matches we have been found + // before. So before we test the whole byte sequence at + // the given distance, we test the first byte that would + // make the match longer. If it doesn't match the byte + // to match, we don't to care any longer. + i := t.dict.buf.rear - dist + m.n + if i < 0 { + i += len(t.dict.buf.data) + } + if t.dict.buf.data[i] != data[m.n] { + // We can't get a longer match. Jump to the next + // distance. + continue + } + + n := t.dict.buf.matchLen(dist, data) + switch n { + case 0: + continue + case 1: + if uint32(dist-minDistance) != rep[0] { + continue + } + } + if n > m.n { + m = match{int64(dist), n} + if n == len(data) { + // No better match will be found. + break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 000000000..34aa097e1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,170 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+)
+
+// uint32LE reads an uint32 integer from a byte slice
+func uint32LE(b []byte) uint32 {
+	x := uint32(b[3]) << 24
+	x |= uint32(b[2]) << 16
+	x |= uint32(b[1]) << 8
+	x |= uint32(b[0])
+	return x
+}
+
+// uint64LE converts the uint64 value stored as little endian to an uint64
+// value.
+func uint64LE(b []byte) uint64 {
+	x := uint64(b[7]) << 56
+	x |= uint64(b[6]) << 48
+	x |= uint64(b[5]) << 40
+	x |= uint64(b[4]) << 32
+	x |= uint64(b[3]) << 24
+	x |= uint64(b[2]) << 16
+	x |= uint64(b[1]) << 8
+	x |= uint64(b[0])
+	return x
+}
+
+// putUint32LE puts an uint32 integer into a byte slice that must have at least
+// a length of 4 bytes.
+func putUint32LE(b []byte, x uint32) {
+	b[0] = byte(x)
+	b[1] = byte(x >> 8)
+	b[2] = byte(x >> 16)
+	b[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the uint64 value into the byte slice as little endian
+// value. The byte slice b must have at least place for 8 bytes.
+func putUint64LE(b []byte, x uint64) {
+	b[0] = byte(x)
+	b[1] = byte(x >> 8)
+	b[2] = byte(x >> 16)
+	b[3] = byte(x >> 24)
+	b[4] = byte(x >> 32)
+	b[5] = byte(x >> 40)
+	b[6] = byte(x >> 48)
+	b[7] = byte(x >> 56)
+}
+
+// noHeaderSize defines the value of the length field in the LZMA header.
+const noHeaderSize uint64 = 1<<64 - 1
+
+// HeaderLen provides the length of the LZMA file header.
+const HeaderLen = 13
+
+// Header represents the Header of an LZMA file.
+type Header struct {
+	Properties Properties
+	DictSize   uint32
+	// uncompressed Size; negative value if no Size is given
+	Size int64
+}
+
+// marshalBinary marshals the header.
+func (h *Header) marshalBinary() (data []byte, err error) {
+	if err = h.Properties.verify(); err != nil {
+		return nil, err
+	}
+	if !(h.DictSize <= MaxDictCap) {
+		return nil, fmt.Errorf("lzma: DictCap %d out of range",
+			h.DictSize)
+	}
+
+	data = make([]byte, 13)
+
+	// property byte
+	data[0] = h.Properties.Code()
+
+	// dictionary capacity
+	putUint32LE(data[1:5], uint32(h.DictSize))
+
+	// uncompressed size
+	var s uint64
+	if h.Size > 0 {
+		s = uint64(h.Size)
+	} else {
+		s = noHeaderSize
+	}
+	putUint64LE(data[5:], s)
+
+	return data, nil
+}
+
+// unmarshalBinary unmarshals the header.
+func (h *Header) unmarshalBinary(data []byte) error {
+	if len(data) != HeaderLen {
+		return errors.New("lzma.unmarshalBinary: data has wrong length")
+	}
+
+	// properties
+	var err error
+	if h.Properties, err = PropertiesForCode(data[0]); err != nil {
+		return err
+	}
+
+	// dictionary capacity
+	h.DictSize = uint32LE(data[1:])
+	if int(h.DictSize) < 0 {
+		return errors.New(
+			"LZMA header: dictionary capacity exceeds maximum " +
+				"integer")
+	}
+
+	// uncompressed size
+	s := uint64LE(data[5:])
+	if s == noHeaderSize {
+		h.Size = -1
+	} else {
+		h.Size = int64(s)
+		if h.Size < 0 {
+			return errors.New(
+				"LZMA header: uncompressed size " +
+					"out of int64 range")
+		}
+	}
+
+	return nil
+}
+
+// validDictSize checks whether the dictionary capacity is correct. This
+// is used to weed out wrong file headers.
+func validDictSize(dictcap int) bool {
+	if int64(dictcap) == MaxDictCap {
+		return true
+	}
+	for n := uint(10); n < 32; n++ {
+		if dictcap == 1<<n {
+			return true
+		}
+		if dictcap == 1<<n+1<<(n-1) {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidHeader checks for a valid LZMA file header. It allows only
+// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
+// there is an explicit size it must not exceed 256 GiB. The length of
+// the data argument must be HeaderLen.
+//
+// This function should be disregarded because there is no guarantee that LZMA
+// files follow the constraints.
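+//
+// Hypothetical usage sketch (editor's addition; r is an io.Reader):
+//
+//	buf := make([]byte, lzma.HeaderLen)
+//	if _, err := io.ReadFull(r, buf); err != nil {
+//		return err
+//	}
+//	if !lzma.ValidHeader(buf) {
+//		return errors.New("probably not an LZMA file")
+//	}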
+func ValidHeader(data []byte) bool { + var h Header + if err := h.unmarshalBinary(data); err != nil { + return false + } + if !validDictSize(int(h.DictSize)) { + return false + } + return h.Size < 0 || h.Size <= 1<<38 +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go new file mode 100644 index 000000000..081fc840b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -0,0 +1,398 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +const ( + // maximum size of compressed data in a chunk + maxCompressed = 1 << 16 + // maximum size of uncompressed data in a chunk + maxUncompressed = 1 << 21 +) + +// chunkType represents the type of an LZMA2 chunk. Note that this +// value is an internal representation and no actual encoding of a LZMA2 +// chunk header. +type chunkType byte + +// Possible values for the chunk type. +const ( + // end of stream + cEOS chunkType = iota + // uncompressed; reset dictionary + cUD + // uncompressed; no reset of dictionary + cU + // LZMA compressed; no reset + cL + // LZMA compressed; reset state + cLR + // LZMA compressed; reset state; new property value + cLRN + // LZMA compressed; reset state; new property value; reset dictionary + cLRND +) + +// chunkTypeStrings provide a string representation for the chunk types. +var chunkTypeStrings = [...]string{ + cEOS: "EOS", + cU: "U", + cUD: "UD", + cL: "L", + cLR: "LR", + cLRN: "LRN", + cLRND: "LRND", +} + +// String returns a string representation of the chunk type. +func (c chunkType) String() string { + if !(cEOS <= c && c <= cLRND) { + return "unknown" + } + return chunkTypeStrings[c] +} + +// Actual encodings for the chunk types in the value. Note that the high +// uncompressed size bits are stored in the header byte additionally. +const ( + hEOS = 0 + hUD = 1 + hU = 2 + hL = 1 << 7 + hLR = 1<<7 | 1<<5 + hLRN = 1<<7 | 1<<6 + hLRND = 1<<7 | 1<<6 | 1<<5 +) + +// errHeaderByte indicates an unsupported value for the chunk header +// byte. These bytes starts the variable-length chunk header. +var errHeaderByte = errors.New("lzma: unsupported chunk header byte") + +// headerChunkType converts the header byte into a chunk type. It +// ignores the uncompressed size bits in the chunk header byte. +func headerChunkType(h byte) (c chunkType, err error) { + if h&hL == 0 { + // no compression + switch h { + case hEOS: + c = cEOS + case hUD: + c = cUD + case hU: + c = cU + default: + return 0, errHeaderByte + } + return + } + switch h & hLRND { + case hL: + c = cL + case hLR: + c = cLR + case hLRN: + c = cLRN + case hLRND: + c = cLRND + default: + return 0, errHeaderByte + } + return +} + +// uncompressedHeaderLen provides the length of an uncompressed header +const uncompressedHeaderLen = 3 + +// headerLen returns the length of the LZMA2 header for a given chunk +// type. +func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. 
+func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. +func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. +func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. +func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. 
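+//
+// Example (editor's note): putUint16BE(p, 0x1234) stores p[0] = 0x12
+// and p[1] = 0x34.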
+func putUint16BE(p []byte, x uint16) { + p[0] = byte(x >> 8) + p[1] = byte(x) +} + +// chunkState is used to manage the state of the chunks +type chunkState byte + +// start and stop define the initial and terminating state of the chunk +// state +const ( + start chunkState = 'S' + stop chunkState = 'T' +) + +// errors for the chunk state handling +var ( + errChunkType = errors.New("lzma: unexpected chunk type") + errState = errors.New("lzma: wrong chunk state") +) + +// next transitions state based on chunk type input +func (c *chunkState) next(ctype chunkType) error { + switch *c { + // start state + case 'S': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cLRND: + *c = 'L' + default: + return errChunkType + } + // normal LZMA mode + case 'L': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + *c = 'U' + case cL, cLR, cLRN, cLRND: + break + default: + return errChunkType + } + // reset required + case 'R': + switch ctype { + case cEOS: + *c = 'T' + case cUD, cU: + break + case cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // uncompressed + case 'U': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + break + case cL, cLR, cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // terminal state + case 'T': + return errChunkType + default: + return errState + } + return nil +} + +// defaultChunkType returns the default chunk type for each chunk state. +func (c chunkState) defaultChunkType() chunkType { + switch c { + case 'S': + return cLRND + case 'L', 'U': + return cL + case 'R': + return cLRN + default: + // no error + return cEOS + } +} + +// maxDictCap defines the maximum dictionary capacity supported by the +// LZMA2 dictionary capacity encoding. +const maxDictCap = 1<<32 - 1 + +// maxDictCapCode defines the maximum dictionary capacity code. +const maxDictCapCode = 40 + +// The function decodes the dictionary capacity byte, but doesn't change +// for the correct range of the given byte. +func decodeDictCap(c byte) int64 { + return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) +} + +// DecodeDictCap decodes the encoded dictionary capacity. The function +// returns an error if the code is out of range. +func DecodeDictCap(c byte) (n int64, err error) { + if c >= maxDictCapCode { + if c == maxDictCapCode { + return maxDictCap, nil + } + return 0, errors.New("lzma: invalid dictionary size code") + } + return decodeDictCap(c), nil +} + +// EncodeDictCap encodes a dictionary capacity. The function returns the +// code for the capacity that is greater or equal n. If n exceeds the +// maximum support dictionary capacity, the maximum value is returned. +func EncodeDictCap(n int64) byte { + a, b := byte(0), byte(40) + for a < b { + c := a + (b-a)>>1 + m := decodeDictCap(c) + if n <= m { + if n == m { + return c + } + b = c + } else { + a = c + 1 + } + } + return a +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go new file mode 100644 index 000000000..1ea5320a0 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -0,0 +1,115 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "errors" + +// maxPosBits defines the number of bits of the position value that are used to +// to compute the posState value. 
The value is used to select the tree codec
+// for length encoding and decoding.
+const maxPosBits = 4
+
+// minMatchLen and maxMatchLen give the minimum and maximum values for
+// encoding and decoding length values. minMatchLen is also used as base
+// for the encoded length values.
+const (
+	minMatchLen = 2
+	maxMatchLen = minMatchLen + 16 + 256 - 1
+)
+
+// lengthCodec supports the encoding of the length value.
+type lengthCodec struct {
+	choice [2]prob
+	low    [1 << maxPosBits]treeCodec
+	mid    [1 << maxPosBits]treeCodec
+	high   treeCodec
+}
+
+// deepcopy initializes the lc value as deep copy of the source value.
+func (lc *lengthCodec) deepcopy(src *lengthCodec) {
+	if lc == src {
+		return
+	}
+	lc.choice = src.choice
+	for i := range lc.low {
+		lc.low[i].deepcopy(&src.low[i])
+	}
+	for i := range lc.mid {
+		lc.mid[i].deepcopy(&src.mid[i])
+	}
+	lc.high.deepcopy(&src.high)
+}
+
+// init initializes a new length codec.
+func (lc *lengthCodec) init() {
+	for i := range lc.choice {
+		lc.choice[i] = probInit
+	}
+	for i := range lc.low {
+		lc.low[i] = makeTreeCodec(3)
+	}
+	for i := range lc.mid {
+		lc.mid[i] = makeTreeCodec(3)
+	}
+	lc.high = makeTreeCodec(8)
+}
+
+// Encode encodes the length offset. The length offset l can be computed by
+// subtracting minMatchLen (2) from the actual length.
+//
+// l = length - minMatchLen
+func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32,
+) (err error) {
+	if l > maxMatchLen-minMatchLen {
+		return errors.New("lengthCodec.Encode: l out of range")
+	}
+	if l < 8 {
+		if err = lc.choice[0].Encode(e, 0); err != nil {
+			return
+		}
+		return lc.low[posState].Encode(e, l)
+	}
+	if err = lc.choice[0].Encode(e, 1); err != nil {
+		return
+	}
+	if l < 16 {
+		if err = lc.choice[1].Encode(e, 0); err != nil {
+			return
+		}
+		return lc.mid[posState].Encode(e, l-8)
+	}
+	if err = lc.choice[1].Encode(e, 1); err != nil {
+		return
+	}
+	if err = lc.high.Encode(e, l-16); err != nil {
+		return
+	}
+	return nil
+}
+
+// Decode reads the length offset. Add minMatchLen to the length offset l
+// to compute the actual length.
+func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32,
+) (l uint32, err error) {
+	var b uint32
+	if b, err = lc.choice[0].Decode(d); err != nil {
+		return
+	}
+	if b == 0 {
+		l, err = lc.low[posState].Decode(d)
+		return
+	}
+	if b, err = lc.choice[1].Decode(d); err != nil {
+		return
+	}
+	if b == 0 {
+		l, err = lc.mid[posState].Decode(d)
+		l += 8
+		return
+	}
+	l, err = lc.high.Decode(d)
+	l += 16
+	return
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
new file mode 100644
index 000000000..e4ef5fc59
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
@@ -0,0 +1,125 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// literalCodec supports the encoding of literals. It provides 768 probability
+// values per literal state. The upper 512 probabilities are used with the
+// context of a match bit.
+type literalCodec struct {
+	probs []prob
+}
+
+// deepcopy initializes literal codec c as a deep copy of the source.
+func (c *literalCodec) deepcopy(src *literalCodec) {
+	if c == src {
+		return
+	}
+	c.probs = make([]prob, len(src.probs))
+	copy(c.probs, src.probs)
+}
+
+// init initializes the literal codec.
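+//
+// For example (editor's note): with lc = 3 and lp = 0 the codec
+// allocates 0x300 << 3 = 6144 probability values.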
+func (c *literalCodec) init(lc, lp int) {
+	switch {
+	case !(minLC <= lc && lc <= maxLC):
+		panic("lc out of range")
+	case !(minLP <= lp && lp <= maxLP):
+		panic("lp out of range")
+	}
+	c.probs = make([]prob, 0x300<<uint(lc+lp))
+	for i := range c.probs {
+		c.probs[i] = probInit
+	}
+}
+
+// Encode encodes the byte s using a range encoder as well as the current LZMA
+// encoder state, a match byte and the literal state.
+func (c *literalCodec) Encode(e *rangeEncoder, s byte,
+	state uint32, match byte, litState uint32,
+) (err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	r := uint32(s)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			bit := (r >> 7) & 1
+			r <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			if err = probs[i].Encode(e, bit); err != nil {
+				return
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit := (r >> 7) & 1
+		r <<= 1
+		if err = probs[symbol].Encode(e, bit); err != nil {
+			return
+		}
+		symbol = (symbol << 1) | bit
+	}
+	return nil
+}
+
+// Decode decodes a literal byte using the range decoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Decode(d *rangeDecoder,
+	state uint32, match byte, litState uint32,
+) (s byte, err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			bit, err := d.DecodeBit(&probs[i])
+			if err != nil {
+				return 0, err
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit, err := d.DecodeBit(&probs[symbol])
+		if err != nil {
+			return 0, err
+		}
+		symbol = (symbol << 1) | bit
+	}
+	s = byte(symbol - 0x100)
+	return s, nil
+}
+
+// minLC and maxLC define the range for LC values.
+const (
+	minLC = 0
+	maxLC = 8
+)
+
+// minLP and maxLP define the range for LP values.
+const (
+	minLP = 0
+	maxLP = 4
+)
diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 000000000..02dfb8bf5
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// MatchAlgorithm identifies an algorithm to find matches in the
+// dictionary.
+type MatchAlgorithm byte
+
+// Supported matcher algorithms.
+const (
+	HashTable4 MatchAlgorithm = iota
+	BinaryTree
+)
+
+// maStrings are used by the String method.
+var maStrings = map[MatchAlgorithm]string{
+	HashTable4: "HashTable4",
+	BinaryTree: "BinaryTree",
+}
+
+// String returns a string representation of the Matcher.
+func (a MatchAlgorithm) String() string {
+	if s, ok := maStrings[a]; ok {
+		return s
+	}
+	return "unknown"
+}
+
+var errUnsupportedMatchAlgorithm = errors.New(
+	"lzma: unsupported match algorithm value")
+
+// verify checks whether the matcher value is supported.
+func (a MatchAlgorithm) verify() error {
+	if _, ok := maStrings[a]; !ok {
+		return errUnsupportedMatchAlgorithm
+	}
+	return nil
+}
+
+func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
+	switch a {
+	case HashTable4:
+		return newHashTable(dictCap, 4)
+	case BinaryTree:
+		return newBinTree(dictCap)
+	}
+	return nil, errUnsupportedMatchAlgorithm
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go
new file mode 100644
index 000000000..7b7eddc3d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go
@@ -0,0 +1,55 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "unicode" +) + +// operation represents an operation on the dictionary during encoding or +// decoding. +type operation interface { + Len() int +} + +// rep represents a repetition at the given distance and the given length +type match struct { + // supports all possible distance values, including the eos marker + distance int64 + // length + n int +} + +// Len returns the number of bytes matched. +func (m match) Len() int { + return m.n +} + +// String returns a string representation for the repetition. +func (m match) String() string { + return fmt.Sprintf("M{%d,%d}", m.distance, m.n) +} + +// lit represents a single byte literal. +type lit struct { + b byte +} + +// Len returns 1 for the single byte literal. +func (l lit) Len() int { + return 1 +} + +// String returns a string representation for the literal. +func (l lit) String() string { + var c byte + if unicode.IsPrint(rune(l.b)) { + c = l.b + } else { + c = '.' + } + return fmt.Sprintf("L{%c/%02x}", c, l.b) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go new file mode 100644 index 000000000..2feccba11 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -0,0 +1,53 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// movebits defines the number of bits used for the updates of probability +// values. +const movebits = 5 + +// probbits defines the number of bits of a probability value. +const probbits = 11 + +// probInit defines 0.5 as initial value for prob values. +const probInit prob = 1 << (probbits - 1) + +// Type prob represents probabilities. The type can also be used to encode and +// decode single bits. +type prob uint16 + +// Dec decreases the probability. The decrease is proportional to the +// probability value. +func (p *prob) dec() { + *p -= *p >> movebits +} + +// Inc increases the probability. The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 000000000..15b754ccb --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. 
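+//
+// Example (editor's note): the common default properties LC=3, LP=0,
+// PB=2 correspond to the code byte (2*5+0)*9 + 3 = 0x5d; see Code and
+// PropertiesForCode below.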
+const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. +func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 000000000..4b0fee3ff --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,222 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. The cache value is used to handle +// overflows. +type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. +func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. 
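+//
+// Sketch (editor's note): the range is halved first; encoding bit 1
+// with nrange = 0x100 leaves nrange = 0x80 and advances low by 0x80,
+// while bit 0 leaves low unchanged.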
+func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. +func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. +func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. +func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. 
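+//
+// Worked example (editor's note): with nrange = 0x01000000 and
+// p = probInit = 1<<10 (probability 1/2, since probbits = 11), bound =
+// (nrange >> 11) * 1024 = 0x00800000, exactly half the range.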
+func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 000000000..eef6bea76 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,193 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and does not rely on any external +// library. +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + // Since v0.5.14 this parameter sets an upper limit for a .lzma file's + // dictionary size. This helps to mitigate problems with mangled + // headers. + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + // set an upper limit of 2 GiB-1 for dictionary capacity + // to address the zero prefix security issue. + c.DictCap = (1 << 31) - 1 + // original: c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. +// +// # Security concerns +// +// Note that LZMA format doesn't support a magic marker in the header. So +// [NewReader] cannot determine whether it reads the actual header. For instance +// the LZMA stream might have a zero byte in front of the reader, leading to +// larger dictionary sizes and file sizes. The code will detect later that there +// are problems with the stream, but the dictionary has already been allocated +// and this might consume a lot of memory. +// +// Version 0.5.14 introduces built-in mitigations: +// +// - The [ReaderConfig] DictCap field is now interpreted as a limit for the +// dictionary size. +// - The default is 2 Gigabytes minus 1 byte (2^31-1 bytes). +// - Users can check with the [Reader.Header] method what the actual values are in +// their LZMA files and set a smaller limit using [ReaderConfig]. +// - The dictionary size doesn't exceed the larger of the file size and +// the minimum dictionary size. This is another measure to prevent huge +// memory allocations for the dictionary. +// - The code supports stream sizes only up to a pebibyte (1024^5). 
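+//
+// Hypothetical usage (editor's addition; f is an io.Reader), capping
+// the dictionary at 64 MiB:
+//
+//	r, err := lzma.ReaderConfig{DictCap: 64 << 20}.NewReader(f)
+//	if err != nil {
+//		return err
+//	}
+//	_, err = io.Copy(os.Stdout, r)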
+type Reader struct { + lzma io.Reader + header Header + // headerOrig stores the original header read from the stream. + headerOrig Header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// ErrDictSize reports about an error of the dictionary size. +type ErrDictSize struct { + ConfigDictCap int + HeaderDictSize uint32 + Message string +} + +// Error returns the error message. +func (e *ErrDictSize) Error() string { + return e.Message +} + +func newErrDictSize(messageformat string, + configDictCap int, headerDictSize uint32, + args ...interface{}) *ErrDictSize { + newArgs := make([]interface{}, len(args)+2) + newArgs[0] = configDictCap + newArgs[1] = headerDictSize + copy(newArgs[2:], args) + return &ErrDictSize{ + ConfigDictCap: configDictCap, + HeaderDictSize: headerDictSize, + Message: fmt.Sprintf(messageformat, newArgs...), + } +} + +// We support only files not larger than 1 << 50 bytes (a pebibyte, 1024^5). +const maxStreamSize = 1 << 50 + +// NewReader creates a new reader for an LZMA stream in the classic +// format. The function reads and verifies the header of the LZMA +// stream. +func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(lzma, data); err != nil { + if err == io.EOF { + return nil, errors.New("lzma: unexpected EOF") + } + return nil, err + } + r = &Reader{lzma: lzma} + if err = r.header.unmarshalBinary(data); err != nil { + return nil, err + } + r.headerOrig = r.header + dictSize := int64(r.header.DictSize) + if int64(c.DictCap) < dictSize { + return nil, newErrDictSize( + "lzma: header dictionary size %[2]d exceeds configured dictionary capacity %[1]d", + c.DictCap, uint32(dictSize), + ) + } + if dictSize < MinDictCap { + dictSize = MinDictCap + } + // original code: disabled this because there is no point in increasing + // the dictionary above what is stated in the file. + /* + if int64(c.DictCap) > int64(dictSize) { + dictSize = int64(c.DictCap) + } + */ + size := r.header.Size + if size >= 0 && size < dictSize { + dictSize = size + } + // Protect against modified or malicious headers. + if size > maxStreamSize { + return nil, fmt.Errorf( + "lzma: stream size %d exceeds a pebibyte (1024^5)", + size) + } + if dictSize < MinDictCap { + dictSize = MinDictCap + } + + r.header.DictSize = uint32(dictSize) + + state := newState(r.header.Properties) + dict, err := newDecoderDict(int(dictSize)) + if err != nil { + return nil, err + } + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.header.Size) + if err != nil { + return nil, err + } + return r, nil +} + +// Header returns the header as read from the LZMA stream. It is intended to +// allow the user to understand what parameters are typically provided in the +// headers of the LZMA files and set the DictCap field in [ReaderConfig] +// accordingly. +func (r *Reader) Header() (h Header, ok bool) { + return r.headerOrig, r.d != nil +} + +// EOSMarker indicates that an EOS marker has been encountered. +func (r *Reader) EOSMarker() bool { + return r.d.eosMarker +} + +// Read returns uncompressed data. 
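Putting ReaderConfig and Header together, a typical consumer opens a classic .lzma stream with an explicit dictionary limit rather than relying on the 2^31-1 byte default described above. A minimal sketch; the file name is hypothetical:

    package main

    import (
        "io"
        "os"

        "github.com/ulikunitz/xz/lzma"
    )

    func main() {
        f, err := os.Open("archive.lzma") // hypothetical input file
        if err != nil {
            panic(err)
        }
        defer f.Close()
        // Cap the dictionary at 64 MiB instead of the 2^31-1 byte default.
        r, err := lzma.ReaderConfig{DictCap: 64 << 20}.NewReader(f)
        if err != nil {
            panic(err)
        }
        if _, err := io.Copy(os.Stdout, r); err != nil {
            panic(err)
        }
    }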
+func (r *Reader) Read(p []byte) (n int, err error) {
+	return r.d.Read(p)
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go
new file mode 100644
index 000000000..f36e26505
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go
@@ -0,0 +1,231 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"io"
+
+	"github.com/ulikunitz/xz/internal/xlog"
+)
+
+// Reader2Config stores the parameters for the LZMA2 reader.
+type Reader2Config struct {
+	DictCap int
+}
+
+// fill converts the zero values of the configuration to the default values.
+func (c *Reader2Config) fill() {
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+}
+
+// Verify checks the reader configuration for errors. Zero configuration values
+// will be replaced by default values.
+func (c *Reader2Config) Verify() error {
+	c.fill()
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	return nil
+}
+
+// Reader2 supports the reading of LZMA2 chunk sequences. Note that the
+// first chunk should have a dictionary reset and the first compressed
+// chunk a properties reset. The chunk sequence may not be terminated by
+// an end-of-stream chunk.
+type Reader2 struct {
+	r   io.Reader
+	err error
+
+	dict        *decoderDict
+	ur          *uncompressedReader
+	decoder     *decoder
+	chunkReader io.Reader
+
+	cstate chunkState
+}
+
+// NewReader2 creates a reader for an LZMA2 chunk sequence.
+func NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
+	return Reader2Config{}.NewReader2(lzma2)
+}
+
+// NewReader2 creates an LZMA2 reader using the given configuration.
+func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	r = &Reader2{r: lzma2, cstate: start}
+	r.dict, err = newDecoderDict(c.DictCap)
+	if err != nil {
+		return nil, err
+	}
+	if err = r.startChunk(); err != nil {
+		r.err = err
+	}
+	return r, nil
+}
+
+// uncompressed tests whether the chunk type specifies an uncompressed
+// chunk.
+func uncompressed(ctype chunkType) bool {
+	return ctype == cU || ctype == cUD
+}
+
+// startChunk parses a new chunk.
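A short round trip through the public API shows the chunk sequence in action. This sketch assumes the default configurations of Writer2 (which appears later in this patch) and Reader2:

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/ulikunitz/xz/lzma"
    )

    func main() {
        var buf bytes.Buffer
        w, err := lzma.NewWriter2(&buf)
        if err != nil {
            panic(err)
        }
        if _, err := io.WriteString(w, "hello, lzma2"); err != nil {
            panic(err)
        }
        if err := w.Close(); err != nil { // terminates the sequence with an EOS chunk
            panic(err)
        }
        r, err := lzma.NewReader2(&buf)
        if err != nil {
            panic(err)
        }
        out, err := io.ReadAll(r)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", out)
    }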
+func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. +func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 000000000..34779c513 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,145 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
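For orientation, the chunk types consumed by startChunk map onto the first byte of each LZMA2 chunk. The following is a rough decoder for that control byte based on the public LZMA2 format description, not on this package's unexported chunkHeader type:

    package main

    import "fmt"

    // describeControl interprets the leading control byte of an LZMA2 chunk,
    // following the public LZMA2 format description.
    func describeControl(b byte) string {
        switch {
        case b == 0x00:
            return "end of stream"
        case b == 0x01:
            return "uncompressed chunk, dictionary reset"
        case b == 0x02:
            return "uncompressed chunk, no reset"
        case b >= 0x80:
            // Bits 5 and 6 encode the reset level: 0 none, 1 state reset,
            // 2 state reset with new properties, 3 additionally dictionary reset.
            return fmt.Sprintf("compressed chunk, reset level %d", (b>>5)&0x3)
        default:
            return "invalid control byte"
        }
    }

    func main() {
        for _, b := range []byte{0x00, 0x01, 0x02, 0x80, 0xe0} {
            fmt.Printf("%#02x: %s\n", b, describeControl(b))
        }
    }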
+ +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. +type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// newState creates a new state from the give Properties. +func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. +func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. +func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. +func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 000000000..36b29b598 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// treeCodec encodes or decodes values with a fixed bit size. It uses a
+// tree of probability values. The root of the tree is the most-significant bit.
+type treeCodec struct {
+	probTree
+}
+
+// makeTreeCodec makes a tree codec. The bits value must be inside the range
+// [1,32].
+func makeTreeCodec(bits int) treeCodec {
+	return treeCodec{makeProbTree(bits)}
+}
+
+// deepcopy initializes tc as a deep copy of the source.
+func (tc *treeCodec) deepcopy(src *treeCodec) {
+	tc.probTree.deepcopy(&src.probTree)
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value.
+func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
+	m := uint32(1)
+	for i := int(tc.bits) - 1; i >= 0; i-- {
+		b := (v >> uint(i)) & 1
+		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+			return err
+		}
+		m = (m << 1) | b
+	}
+	return nil
+}
+
+// Decode uses the range decoder to decode a fixed-bit-size value. Errors may
+// be caused by the range decoder.
+func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := 0; j < int(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+	}
+	return m - (1 << uint(tc.bits)), nil
+}
+
+// treeReverseCodec is another tree codec, where the least-significant bit is
+// the start of the probability tree.
+type treeReverseCodec struct {
+	probTree
+}
+
+// deepcopy initializes the treeReverseCodec as a deep copy of the
+// source.
+func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
+	tc.probTree.deepcopy(&src.probTree)
+}
+
+// makeTreeReverseCodec creates a treeReverseCodec value. The bits argument must
+// be in the range [1,32].
+func makeTreeReverseCodec(bits int) treeReverseCodec {
+	return treeReverseCodec{makeProbTree(bits)}
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value. The range
+// encoder may cause errors.
+func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
+	m := uint32(1)
+	for i := uint(0); i < uint(tc.bits); i++ {
+		b := (v >> i) & 1
+		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+			return err
+		}
+		m = (m << 1) | b
+	}
+	return nil
+}
+
+// Decode uses the range decoder to decode a fixed-bit-size value. Errors
+// returned by the range decoder will be returned.
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := uint(0); j < uint(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+		v |= b << j
+	}
+	return v, nil
+}
+
+// probTree stores enough probability values to be used by the treeEncode and
+// treeDecode methods of the range coder types.
+type probTree struct {
+	probs []prob
+	bits  byte
+}
+
+// deepcopy initializes the probTree value as a deep copy of the source.
+func (t *probTree) deepcopy(src *probTree) {
+	if t == src {
+		return
+	}
+	t.probs = make([]prob, len(src.probs))
+	copy(t.probs, src.probs)
+	t.bits = src.bits
+}
+
+// makeProbTree initializes a probTree structure.
+func makeProbTree(bits int) probTree {
+	if !(1 <= bits && bits <= 32) {
+		panic("bits outside of range [1,32]")
+	}
+	t := probTree{
+		bits:  byte(bits),
+		probs: make([]prob, 1<<uint(bits)),
+	}
+	initProbSlice(t.probs)
+	return t
+}
+
+// Bits returns the number of bits supported by this codec.
+func (t *probTree) Bits() int {
+	return int(t.bits)
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"bufio"
+	"errors"
+	"io"
+)
+
+// MinDictCap and MaxDictCap provide the range of supported dictionary
+// capacities.
+const (
+	MinDictCap = 1 << 12
+	MaxDictCap = 1<<32 - 1
+)
+
+// WriterConfig defines the configuration parameter for a writer.
+type WriterConfig struct {
+	// The properties for the encoding. If it is nil the value
+	// {LC: 3, LP: 0, PB: 2} will be chosen.
+	Properties *Properties
+	// The capacity of the dictionary. If DictCap is zero, the value
+	// 8 MiB will be chosen.
+	DictCap int
+	// Size of the lookahead buffer; value 0 indicates default size
+	// 4096
+	BufSize int
+	// Match algorithm
+	Matcher MatchAlgorithm
+	// SizeInHeader indicates that the header will contain the size.
+	SizeInHeader bool
+	// Size of the data to be encoded. A positive value will imply
+	// SizeInHeader.
+	Size int64
+	// EOSMarker requests whether the EOSMarker needs to be written.
+	EOSMarker bool
+}
+
+// fill converts zero-value fields of the configuration to their default
+// values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.Size > 0 {
+		c.SizeInHeader = true
+	}
+	if !c.SizeInHeader {
+		c.EOSMarker = true
+	}
+}
+
+// Verify checks WriterConfig for errors. Verify will replace zero
+// values with default values.
+func (c *WriterConfig) Verify() error {
+	if c == nil {
+		return errors.New("lzma: WriterConfig is nil")
+	}
+	c.fill()
+	var err error
+	if c.Properties == nil {
+		return errors.New("lzma: WriterConfig has no Properties set")
+	}
+	if err = c.Properties.verify(); err != nil {
+		return err
+	}
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	if !(maxMatchLen <= c.BufSize) {
+		return errors.New("lzma: lookahead buffer size too small")
+	}
+	if c.SizeInHeader {
+		if c.Size < 0 {
+			return errors.New("lzma: negative size not supported")
+		}
+	} else if !c.EOSMarker {
+		return errors.New("lzma: EOS marker is required")
+	}
+	if err = c.Matcher.verify(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// header returns the header structure for this configuration.
+func (c *WriterConfig) header() Header {
+	h := Header{
+		Properties: *c.Properties,
+		DictSize:   uint32(c.DictCap),
+		Size:       -1,
+	}
+	if c.SizeInHeader {
+		h.Size = c.Size
+	}
+	return h
+}
+
+// Writer writes an LZMA stream in the classic format.
+type Writer struct {
+	h   Header
+	bw  io.ByteWriter
+	buf *bufio.Writer
+	e   *encoder
+}
+
+// NewWriter creates a new LZMA writer for the classic format. The
+// method will write the header to the underlying stream.
+func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer{h: c.header()}
+
+	var ok bool
+	w.bw, ok = lzma.(io.ByteWriter)
+	if !ok {
+		w.buf = bufio.NewWriter(lzma)
+		w.bw = w.buf
+	}
+	state := newState(w.h.Properties)
+	m, err := c.Matcher.new(int(w.h.DictSize))
+	if err != nil {
+		return nil, err
+	}
+	dict, err := newEncoderDict(int(w.h.DictSize), c.BufSize, m)
+	if err != nil {
+		return nil, err
+	}
+	var flags encoderFlags
+	if c.EOSMarker {
+		flags = eosMarker
+	}
+	if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {
+		return nil, err
+	}
+
+	if err = w.writeHeader(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// NewWriter creates a new LZMA writer using the classic format. The
+// function writes the header to the underlying stream.
+func NewWriter(lzma io.Writer) (w *Writer, err error) {
+	return WriterConfig{}.NewWriter(lzma)
+}
+
+// writeHeader writes the LZMA header into the stream.
+func (w *Writer) writeHeader() error {
+	data, err := w.h.marshalBinary()
+	if err != nil {
+		return err
+	}
+	_, err = w.bw.(io.Writer).Write(data)
+	return err
+}
+
+// Write puts data into the Writer.
+func (w *Writer) Write(p []byte) (n int, err error) {
+	if w.h.Size >= 0 {
+		m := w.h.Size
+		m -= w.e.Compressed() + int64(w.e.dict.Buffered())
+		if m < 0 {
+			m = 0
+		}
+		if m < int64(len(p)) {
+			p = p[:m]
+			err = ErrNoSpace
+		}
+	}
+	var werr error
+	if n, werr = w.e.Write(p); werr != nil {
+		err = werr
+	}
+	return n, err
+}
+
+// Close closes the writer stream. It ensures that all data from the
+// buffer will be compressed and the LZMA stream will be finished.
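With the default WriterConfig, SizeInHeader stays false, so fill enables the EOS marker and the stream is terminated explicitly when the writer is closed. A minimal compression sketch; the output path is hypothetical:

    package main

    import (
        "io"
        "os"
        "strings"

        "github.com/ulikunitz/xz/lzma"
    )

    func main() {
        out, err := os.Create("hello.lzma") // hypothetical output file
        if err != nil {
            panic(err)
        }
        defer out.Close()
        w, err := lzma.NewWriter(out)
        if err != nil {
            panic(err)
        }
        if _, err := io.Copy(w, strings.NewReader("hello, lzma")); err != nil {
            panic(err)
        }
        if err := w.Close(); err != nil { // flushes buffered data and writes the EOS marker
            panic(err)
        }
    }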
+func (w *Writer) Close() error {
+	if w.h.Size >= 0 {
+		n := w.e.Compressed() + int64(w.e.dict.Buffered())
+		if n != w.h.Size {
+			return errSize
+		}
+	}
+	err := w.e.Close()
+	if w.buf != nil {
+		ferr := w.buf.Flush()
+		if err == nil {
+			err = ferr
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go
new file mode 100644
index 000000000..97bbafa11
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go
@@ -0,0 +1,305 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"bytes"
+	"errors"
+	"io"
+)
+
+// Writer2Config is used to create a Writer2 using parameters.
+type Writer2Config struct {
+	// The properties for the encoding. If it is nil the value
+	// {LC: 3, LP: 0, PB: 2} will be chosen.
+	Properties *Properties
+	// The capacity of the dictionary. If DictCap is zero, the value
+	// 8 MiB will be chosen.
+	DictCap int
+	// Size of the lookahead buffer; value 0 indicates default size
+	// 4096
+	BufSize int
+	// Match algorithm
+	Matcher MatchAlgorithm
+}
+
+// fill replaces zero values with default values.
+func (c *Writer2Config) fill() {
+	if c.Properties == nil {
+		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+}
+
+// Verify checks the Writer2Config for correctness. Zero values will be
+// replaced by default values.
+func (c *Writer2Config) Verify() error {
+	if c == nil {
+		return errors.New("lzma: Writer2Config is nil")
+	}
+	c.fill()
+	var err error
+	if c.Properties == nil {
+		return errors.New("lzma: Writer2Config has no Properties set")
+	}
+	if err = c.Properties.verify(); err != nil {
+		return err
+	}
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	if !(maxMatchLen <= c.BufSize) {
+		return errors.New("lzma: lookahead buffer size too small")
+	}
+	if c.Properties.LC+c.Properties.LP > 4 {
+		return errors.New("lzma: sum of lc and lp exceeds 4")
+	}
+	if err = c.Matcher.verify(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Writer2 supports the creation of an LZMA2 stream. But note that
+// written data is buffered, so call Flush or Close to write data to the
+// underlying writer. The Close method writes the end-of-stream marker
+// to the stream. So you may be able to concatenate the output of two
+// writers as long as the output of the first writer has only been flushed
+// but not closed.
+//
+// Any change to the fields Properties, DictCap must be done before the
+// first call to Write, Flush or Close.
+type Writer2 struct {
+	w io.Writer
+
+	start   *state
+	encoder *encoder
+
+	cstate chunkState
+	ctype  chunkType
+
+	buf bytes.Buffer
+	lbw LimitedByteWriter
+}
+
+// NewWriter2 creates an LZMA2 chunk sequence writer with the default
+// parameters and options.
+func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
+	return Writer2Config{}.NewWriter2(lzma2)
+}
+
+// NewWriter2 creates a new LZMA2 writer using the given configuration.
+func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. +func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. +func (w *Writer2) writeCompressedChunk() error { + if w.ctype == cU || w.ctype == cUD { + panic("chunk type uncompressed") + } + + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("writeCompressedChunk: empty chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + c := w.buf.Len() + if c <= 0 { + panic("no compressed data") + } + if c > maxCompressed { + panic("overrun of compressed data limit") + } + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + compressed: uint16(c - 1), + props: w.encoder.state.Properties, + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = io.Copy(w.w, &w.buf) + return err +} + +// writes a single chunk to the underlying writer. +func (w *Writer2) writeChunk() error { + u := int(uncompressedHeaderLen + w.encoder.Compressed()) + c := headerLen(w.ctype) + w.buf.Len() + if u < c { + return w.writeUncompressedChunk() + } + return w.writeCompressedChunk() +} + +// flushChunk terminates the current chunk. 
The encoder will be reset
+// to support the next chunk.
+func (w *Writer2) flushChunk() error {
+	if w.written() == 0 {
+		return nil
+	}
+	var err error
+	if err = w.encoder.Close(); err != nil {
+		return err
+	}
+	if err = w.writeChunk(); err != nil {
+		return err
+	}
+	w.buf.Reset()
+	w.lbw.N = maxCompressed
+	if err = w.encoder.Reopen(&w.lbw); err != nil {
+		return err
+	}
+	if err = w.cstate.next(w.ctype); err != nil {
+		return err
+	}
+	w.ctype = w.cstate.defaultChunkType()
+	w.start = cloneState(w.encoder.state)
+	return nil
+}
+
+// Flush writes all buffered data out to the underlying stream. This
+// could result in multiple chunks being created.
+func (w *Writer2) Flush() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	for w.written() > 0 {
+		if err := w.flushChunk(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Close terminates the LZMA2 stream with an EOS chunk.
+func (w *Writer2) Close() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	// write zero byte EOS chunk
+	_, err := w.w.Write([]byte{0})
+	if err != nil {
+		return err
+	}
+	w.cstate = stop
+	return nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go
new file mode 100644
index 000000000..bd5f42ee8
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go
@@ -0,0 +1,117 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// LZMA filter constants.
+const (
+	lzmaFilterID  = 0x21
+	lzmaFilterLen = 3
+)
+
+// lzmaFilter declares the LZMA2 filter information stored in an xz
+// block header.
+type lzmaFilter struct {
+	dictCap int64
+}
+
+// String returns a representation of the LZMA filter.
+func (f lzmaFilter) String() string {
+	return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
+}
+
+// id returns the ID for the LZMA2 filter.
+func (f lzmaFilter) id() uint64 { return lzmaFilterID }
+
+// MarshalBinary converts the lzmaFilter into its encoded representation.
+func (f lzmaFilter) MarshalBinary() (data []byte, err error) {
+	c := lzma.EncodeDictCap(f.dictCap)
+	return []byte{lzmaFilterID, 1, c}, nil
+}
+
+// UnmarshalBinary unmarshals the given data representation of the LZMA2
+// filter.
+func (f *lzmaFilter) UnmarshalBinary(data []byte) error {
+	if len(data) != lzmaFilterLen {
+		return errors.New("xz: data for LZMA2 filter has wrong length")
+	}
+	if data[0] != lzmaFilterID {
+		return errors.New("xz: wrong LZMA2 filter id")
+	}
+	if data[1] != 1 {
+		return errors.New("xz: wrong LZMA2 filter size")
+	}
+	dc, err := lzma.DecodeDictCap(data[2])
+	if err != nil {
+		return errors.New("xz: wrong LZMA2 dictionary size property")
+	}
+
+	f.dictCap = dc
+	return nil
+}
+
+// reader creates a new reader for the LZMA2 filter.
+func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader,
+	err error) {
+
+	config := new(lzma.Reader2Config)
+	if c != nil {
+		config.DictCap = c.DictCap
+	}
+	dc := int(f.dictCap)
+	if dc < 1 {
+		return nil, errors.New("xz: LZMA2 filter parameter " +
+			"dictionary capacity overflow")
+	}
+	if dc > config.DictCap {
+		config.DictCap = dc
+	}
+
+	fr, err = config.NewReader2(r)
+	if err != nil {
+		return nil, err
+	}
+	return fr, nil
+}
+
+// writeCloser creates an io.WriteCloser for the LZMA2 filter.
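The Flush/Close split above is what makes LZMA2 output concatenable: Flush drains complete chunks without the end-of-stream marker, while Close appends the single zero EOS byte. A sketch under default configuration:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ulikunitz/xz/lzma"
    )

    func main() {
        var buf bytes.Buffer
        w, err := lzma.NewWriter2(&buf)
        if err != nil {
            panic(err)
        }
        if _, err := w.Write([]byte("first part")); err != nil {
            panic(err)
        }
        if err := w.Flush(); err != nil { // chunk written, no EOS marker yet
            panic(err)
        }
        n := buf.Len()
        if err := w.Close(); err != nil { // appends the single zero EOS byte
            panic(err)
        }
        fmt.Printf("flushed %d bytes, close added %d more\n", n, buf.Len()-n)
    }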
+func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, +) (fw io.WriteCloser, err error) { + config := new(lzma.Writer2Config) + if c != nil { + *config = lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + } + + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fw, err = config.NewWriter2(w) + if err != nil { + return nil, err + } + return fw, nil +} + +// last returns true, because an LZMA2 filter must be the last filter in +// the filter list. +func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs new file mode 100644 index 000000000..a8c612ce1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/make-docs @@ -0,0 +1,5 @@ +#!/bin/sh + +set -x +pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md +pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/vendor/github.com/ulikunitz/xz/none-check.go b/vendor/github.com/ulikunitz/xz/none-check.go new file mode 100644 index 000000000..6a56a2612 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/none-check.go @@ -0,0 +1,23 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import "hash" + +type noneHash struct{} + +func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil } + +func (h noneHash) Sum(b []byte) []byte { return b } + +func (h noneHash) Reset() {} + +func (h noneHash) Size() int { return 0 } + +func (h noneHash) BlockSize() int { return 0 } + +func newNoneHash() hash.Hash { + return &noneHash{} +} diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go new file mode 100644 index 000000000..bde1412cf --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -0,0 +1,359 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xz supports the compression and decompression of xz files. It +// supports version 1.0.4 of the specification without the non-LZMA2 +// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt +package xz + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/internal/xlog" + "github.com/ulikunitz/xz/lzma" +) + +// ReaderConfig defines the parameters for the xz reader. The +// SingleStream parameter requests the reader to assume that the +// underlying stream contains only a single stream. +type ReaderConfig struct { + DictCap int + SingleStream bool +} + +// Verify checks the reader parameters for Validity. Zero values will be +// replaced by default values. +func (c *ReaderConfig) Verify() error { + if c == nil { + return errors.New("xz: reader parameters are nil") + } + lc := lzma.Reader2Config{DictCap: c.DictCap} + if err := lc.Verify(); err != nil { + return err + } + return nil +} + +// Reader supports the reading of one or multiple xz streams. 
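At the package level the usual entry point is NewReader, which transparently handles multiple concatenated streams and stream padding. A short sketch; the file name is hypothetical:

    package main

    import (
        "io"
        "os"

        "github.com/ulikunitz/xz"
    )

    func main() {
        f, err := os.Open("archive.xz") // hypothetical input file
        if err != nil {
            panic(err)
        }
        defer f.Close()
        r, err := xz.NewReader(f)
        if err != nil {
            panic(err)
        }
        if _, err := io.Copy(os.Stdout, r); err != nil {
            panic(err)
        }
    }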
+type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. +func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. +func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// readTail reads the index body and the xz footer. 
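newStreamReader below distinguishes stream padding (four zero bytes) from a real stream header, which begins with the six-byte xz magic. A sketch of that distinction based on the xz file-format specification; the actual header parsing lives in the package's unexported header type:

    package main

    import (
        "bytes"
        "fmt"
    )

    // xzMagic is the six-byte magic that opens every xz stream header,
    // per the xz file-format specification.
    var xzMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}

    func classify(prefix []byte) string {
        switch {
        case len(prefix) >= 4 && bytes.Equal(prefix[:4], []byte{0, 0, 0, 0}):
            return "stream padding"
        case len(prefix) >= len(xzMagic) && bytes.Equal(prefix[:len(xzMagic)], xzMagic):
            return "xz stream header"
        default:
            return "unknown"
        }
    }

    func main() {
        fmt.Println(classify([]byte{0xfd, '7', 'z', 'X', 'Z', 0x00}))
        fmt.Println(classify([]byte{0, 0, 0, 0}))
    }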
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz, len(r.index)) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + if br.hash.Size() != 0 { + br.r = io.TeeReader(fr, br.hash) + } else { + br.r = fr + } + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// Read reads data from the block. 
+func (br *blockReader) Read(p []byte) (n int, err error) { + n, err = br.r.Read(p) + br.n += int64(n) + + u := br.header.uncompressedSize + if u >= 0 && br.uncompressedSize() > u { + return n, errors.New("xz: wrong uncompressed size for block") + } + c := br.header.compressedSize + if c >= 0 && br.compressedSize() > c { + return n, errors.New("xz: wrong compressed size for block") + } + if err != io.EOF { + return n, err + } + if br.uncompressedSize() < u || br.compressedSize() < c { + return n, io.ErrUnexpectedEOF + } + + s := br.hash.Size() + k := padLen(br.lxz.n) + q := make([]byte, k+s, k+2*s) + if _, err = io.ReadFull(br.lxz.r, q); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err + } + if !allZeros(q[:k]) { + return n, errors.New("xz: non-zero block padding") + } + checkSum := q[k:] + computedSum := br.hash.Sum(checkSum[s:]) + if !bytes.Equal(checkSum, computedSum) { + return n, errors.New("xz: checksum error for block") + } + return n, io.EOF +} + +func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, + err error) { + + if err = verifyFilters(f); err != nil { + return nil, err + } + + fr = r + for i := len(f) - 1; i >= 0; i-- { + fr, err = f[i].reader(fr, c) + if err != nil { + return nil, err + } + } + return fr, nil +} diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go new file mode 100644 index 000000000..f693e0aef --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -0,0 +1,399 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// WriterConfig describe the parameters for an xz writer. +type WriterConfig struct { + Properties *lzma.Properties + DictCap int + BufSize int + BlockSize int64 + // checksum method: CRC32, CRC64 or SHA256 (default: CRC64) + CheckSum byte + // Forces NoChecksum (default: false) + NoCheckSum bool + // match algorithm + Matcher lzma.MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *WriterConfig) fill() { + if c.Properties == nil { + c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } + if c.BlockSize == 0 { + c.BlockSize = maxInt64 + } + if c.CheckSum == 0 { + c.CheckSum = CRC64 + } + if c.NoCheckSum { + c.CheckSum = None + } +} + +// Verify checks the configuration for errors. Zero values will be +// replaced by default values. +func (c *WriterConfig) Verify() error { + if c == nil { + return errors.New("xz: writer configuration is nil") + } + c.fill() + lc := lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + if err := lc.Verify(); err != nil { + return err + } + if c.BlockSize <= 0 { + return errors.New("xz: block size out of range") + } + if err := verifyFlags(c.CheckSum); err != nil { + return err + } + return nil +} + +// filters creates the filter list for the given parameters. +func (c *WriterConfig) filters() []filter { + return []filter{&lzmaFilter{int64(c.DictCap)}} +} + +// maxInt64 defines the maximum 64-bit signed integer. +const maxInt64 = 1<<63 - 1 + +// verifyFilters checks the filter list for the length and the right +// sequence of filters. 
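blockReader.Read above relies on padLen for the zero padding that aligns each block to four bytes. The helper itself is not part of this hunk; under the xz format's alignment rule it presumably reduces to the following sketch:

    package main

    import "fmt"

    // padLen returns the number of zero bytes needed to pad n up to the next
    // multiple of four, matching the xz block padding rule. A sketch of the
    // helper used above, which is defined elsewhere in the package.
    func padLen(n int64) int {
        k := int(n % 4)
        if k > 0 {
            k = 4 - k
        }
        return k
    }

    func main() {
        for _, n := range []int64{0, 1, 2, 3, 4, 5} {
            fmt.Printf("padLen(%d) = %d\n", n, padLen(n))
        }
    }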
+func verifyFilters(f []filter) error { + if len(f) == 0 { + return errors.New("xz: no filters") + } + if len(f) > 4 { + return errors.New("xz: more than four filters") + } + for _, g := range f[:len(f)-1] { + if g.last() { + return errors.New("xz: last filter is not last") + } + } + if !f[len(f)-1].last() { + return errors.New("xz: wrong last filter") + } + return nil +} + +// newFilterWriteCloser converts a filter list into a WriteCloser that +// can be used by a blockWriter. +func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { + if err = verifyFilters(f); err != nil { + return nil, err + } + fw = nopWriteCloser(w) + for i := len(f) - 1; i >= 0; i-- { + fw, err = f[i].writeCloser(fw, c) + if err != nil { + return nil, err + } + } + return fw, nil +} + +// nopWCloser implements a WriteCloser with a Close method not doing +// anything. +type nopWCloser struct { + io.Writer +} + +// Close returns nil and doesn't do anything else. +func (c nopWCloser) Close() error { + return nil +} + +// nopWriteCloser converts the Writer into a WriteCloser with a Close +// function that does nothing beside returning nil. +func nopWriteCloser(w io.Writer) io.WriteCloser { + return nopWCloser{w} +} + +// Writer compresses data written to it. It is an io.WriteCloser. +type Writer struct { + WriterConfig + + xz io.Writer + bw *blockWriter + newHash func() hash.Hash + h header + index []record + closed bool +} + +// newBlockWriter creates a new block writer writes the header out. +func (w *Writer) newBlockWriter() error { + var err error + w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) + if err != nil { + return err + } + if err = w.bw.writeHeader(w.xz); err != nil { + return err + } + return nil +} + +// closeBlockWriter closes a block writer and records the sizes in the +// index. +func (w *Writer) closeBlockWriter() error { + var err error + if err = w.bw.Close(); err != nil { + return err + } + w.index = append(w.index, w.bw.record()) + return nil +} + +// NewWriter creates a new xz writer using default parameters. +func NewWriter(xz io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(xz) +} + +// NewWriter creates a new Writer using the given configuration parameters. +func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{ + WriterConfig: c, + xz: xz, + h: header{c.CheckSum}, + index: make([]record, 0, 4), + } + if w.newHash, err = newHashFunc(c.CheckSum); err != nil { + return nil, err + } + data, err := w.h.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err) + } + if _, err = xz.Write(data); err != nil { + return nil, err + } + if err = w.newBlockWriter(); err != nil { + return nil, err + } + return w, nil + +} + +// Write compresses the uncompressed data provided. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, errClosed + } + for { + k, err := w.bw.Write(p[n:]) + n += k + if err != errNoSpace { + return n, err + } + if err = w.closeBlockWriter(); err != nil { + return n, err + } + if err = w.newBlockWriter(); err != nil { + return n, err + } + } +} + +// Close closes the writer and adds the footer to the Writer. Close +// doesn't close the underlying writer. 
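Block splitting is what makes the BlockSize knob meaningful: once a block fills up, Write closes it and opens the next one transparently. A compression sketch with an explicit block size and check; the output path is hypothetical, and CRC32 is assumed to be one of the package's exported check constants, as the WriterConfig comments above suggest:

    package main

    import (
        "io"
        "os"
        "strings"

        "github.com/ulikunitz/xz"
    )

    func main() {
        out, err := os.Create("archive.xz") // hypothetical output file
        if err != nil {
            panic(err)
        }
        defer out.Close()
        w, err := xz.WriterConfig{
            BlockSize: 1 << 20, // start a new block every MiB of input
            CheckSum:  xz.CRC32,
        }.NewWriter(out)
        if err != nil {
            panic(err)
        }
        if _, err := io.Copy(w, strings.NewReader("hello, xz")); err != nil {
            panic(err)
        }
        if err := w.Close(); err != nil { // writes the index and footer
            panic(err)
        }
    }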
+func (w *Writer) Close() error {
+	if w.closed {
+		return errClosed
+	}
+	w.closed = true
+	var err error
+	if err = w.closeBlockWriter(); err != nil {
+		return err
+	}
+
+	f := footer{flags: w.h.flags}
+	if f.indexSize, err = writeIndex(w.xz, w.index); err != nil {
+		return err
+	}
+	data, err := f.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.xz.Write(data); err != nil {
+		return err
+	}
+	return nil
+}
+
+// countingWriter is a writer that counts all data written to it.
+type countingWriter struct {
+	w io.Writer
+	n int64
+}
+
+// Write writes data to the countingWriter.
+func (cw *countingWriter) Write(p []byte) (n int, err error) {
+	n, err = cw.w.Write(p)
+	cw.n += int64(n)
+	if err == nil && cw.n < 0 {
+		return n, errors.New("xz: counter overflow")
+	}
+	return
+}
+
+// blockWriter writes a single block.
+type blockWriter struct {
+	cxz countingWriter
+	// mw combines io.WriteCloser w and the hash.
+	mw        io.Writer
+	w         io.WriteCloser
+	n         int64
+	blockSize int64
+	closed    bool
+	headerLen int
+
+	filters []filter
+	hash    hash.Hash
+}
+
+// newBlockWriter creates a new block writer.
+func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) {
+	bw = &blockWriter{
+		cxz:       countingWriter{w: xz},
+		blockSize: c.BlockSize,
+		filters:   c.filters(),
+		hash:      hash,
+	}
+	bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters)
+	if err != nil {
+		return nil, err
+	}
+	if bw.hash.Size() != 0 {
+		bw.mw = io.MultiWriter(bw.w, bw.hash)
+	} else {
+		bw.mw = bw.w
+	}
+	return bw, nil
+}
+
+// writeHeader writes the header. If the function is called after Close
+// the compressedSize and uncompressedSize fields will be filled.
+func (bw *blockWriter) writeHeader(w io.Writer) error {
+	h := blockHeader{
+		compressedSize:   -1,
+		uncompressedSize: -1,
+		filters:          bw.filters,
+	}
+	if bw.closed {
+		h.compressedSize = bw.compressedSize()
+		h.uncompressedSize = bw.uncompressedSize()
+	}
+	data, err := h.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(data); err != nil {
+		return err
+	}
+	bw.headerLen = len(data)
+	return nil
+}
+
+// compressedSize returns the amount of data written to the underlying
+// stream.
+func (bw *blockWriter) compressedSize() int64 {
+	return bw.cxz.n
+}
+
+// uncompressedSize returns the amount of data written to the
+// blockWriter.
+func (bw *blockWriter) uncompressedSize() int64 {
+	return bw.n
+}
+
+// unpaddedSize returns the sum of the header length, the compressed
+// size of the block and the hash size.
+func (bw *blockWriter) unpaddedSize() int64 {
+	if bw.headerLen <= 0 {
+		panic("xz: block header not written")
+	}
+	n := int64(bw.headerLen)
+	n += bw.compressedSize()
+	n += int64(bw.hash.Size())
+	return n
+}
+
+// record returns the record for the current stream. Call Close before
+// calling this method.
+func (bw *blockWriter) record() record {
+	return record{bw.unpaddedSize(), bw.uncompressedSize()}
+}
+
+var errClosed = errors.New("xz: writer already closed")
+
+var errNoSpace = errors.New("xz: no space")
+
+// Write writes uncompressed data to the block writer.
+func (bw *blockWriter) Write(p []byte) (n int, err error) {
+	if bw.closed {
+		return 0, errClosed
+	}
+
+	t := bw.blockSize - bw.n
+	if int64(len(p)) > t {
+		err = errNoSpace
+		p = p[:t]
+	}
+
+	var werr error
+	n, werr = bw.mw.Write(p)
+	bw.n += int64(n)
+	if werr != nil {
+		return n, werr
+	}
+	return n, err
+}
+
+// Close closes the writer.
+func (bw *blockWriter) Close() error { + if bw.closed { + return errClosed + } + bw.closed = true + if err := bw.w.Close(); err != nil { + return err + } + s := bw.hash.Size() + k := padLen(bw.cxz.n) + p := make([]byte, k+s) + bw.hash.Sum(p[k:k]) + if _, err := bw.cxz.w.Write(p); err != nil { + return err + } + return nil +} diff --git a/vendor/go.podman.io/image/v5/LICENSE b/vendor/go.podman.io/image/v5/LICENSE deleted file mode 100644 index 953563530..000000000 --- a/vendor/go.podman.io/image/v5/LICENSE +++ /dev/null @@ -1,189 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
-
-   END OF TERMS AND CONDITIONS
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       https://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/go.podman.io/image/v5/docker/body_reader.go b/vendor/go.podman.io/image/v5/docker/body_reader.go
deleted file mode 100644
index 3c612f268..000000000
--- a/vendor/go.podman.io/image/v5/docker/body_reader.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package docker
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"math/rand/v2"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/sirupsen/logrus"
-)
-
-const (
-	// bodyReaderMinimumProgress is the minimum progress we consider a good reason to retry
-	bodyReaderMinimumProgress = 1 * 1024 * 1024
-	// bodyReaderMSSinceLastRetry is the minimum time since a last retry we consider a good reason to retry
-	bodyReaderMSSinceLastRetry = 60 * 1_000
-)
-
-// bodyReader is an io.ReadCloser returned by dockerImageSource.GetBlob,
-// which can transparently resume some (very limited) kinds of aborted connections.
-type bodyReader struct {
-	ctx                 context.Context
-	c                   *dockerClient
-	path                string   // path to pass to makeRequest to retry
-	logURL              *url.URL // a string to use in error messages
-	firstConnectionTime time.Time
-
-	body            io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close.
-	lastRetryOffset int64         // -1 if N/A
-	lastRetryTime   time.Time     // IsZero() if N/A
-	offset          int64         // Current offset within the blob
-	lastSuccessTime time.Time     // IsZero() if N/A
-}
-
-// newBodyReader creates a bodyReader for request path in c.
-// firstBody is an already correctly opened body for the blob, returning the full blob from the start.
-// If reading from firstBody fails, bodyReader may heuristically decide to resume.
-func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) {
-	logURL, err := c.resolveRequestURL(path)
-	if err != nil {
-		return nil, err
-	}
-	res := &bodyReader{
-		ctx:                 ctx,
-		c:                   c,
-		path:                path,
-		logURL:              logURL,
-		firstConnectionTime: time.Now(),
-
-		body:            firstBody,
-		lastRetryOffset: -1,
-		lastRetryTime:   time.Time{},
-		offset:          0,
-		lastSuccessTime: time.Time{},
-	}
-	return res, nil
-}
-
-// parseDecimalInString ensures that s[start:] starts with a non-negative decimal number, and returns that number and the offset after the number.
-func parseDecimalInString(s string, start int) (int64, int, error) {
-	i := start
-	for i < len(s) && s[i] >= '0' && s[i] <= '9' {
-		i++
-	}
-	if i == start {
-		return -1, -1, errors.New("missing decimal number")
-	}
-	v, err := strconv.ParseInt(s[start:i], 10, 64)
-	if err != nil {
-		return -1, -1, fmt.Errorf("parsing number: %w", err)
-	}
-	return v, i, nil
-}
-
-// parseExpectedChar ensures that s[pos] is the expected byte, and returns the offset after it.
-func parseExpectedChar(s string, pos int, expected byte) (int, error) {
-	if pos == len(s) || s[pos] != expected {
-		return -1, fmt.Errorf("missing expected %q", expected)
-	}
-	return pos + 1, nil
-}
-
-// parseContentRange ensures that res contains a Content-Range header with a byte range, and returns (first, last, completeLength) on success. completeLength can be -1.
-func parseContentRange(res *http.Response) (int64, int64, int64, error) {
-	hdrs := res.Header.Values("Content-Range")
-	switch len(hdrs) {
-	case 0:
-		return -1, -1, -1, errors.New("missing Content-Range: header")
-	case 1:
-		break
-	default:
-		return -1, -1, -1, fmt.Errorf("ambiguous Content-Range:, %d header values", len(hdrs))
-	}
-	hdr := hdrs[0]
-	expectedPrefix := "bytes "
-	if !strings.HasPrefix(hdr, expectedPrefix) {
-		return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, missing prefix %q", hdr, expectedPrefix)
-	}
-	first, pos, err := parseDecimalInString(hdr, len(expectedPrefix))
-	if err != nil {
-		return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing first-pos: %w", hdr, err)
-	}
-	pos, err = parseExpectedChar(hdr, pos, '-')
-	if err != nil {
-		return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err)
-	}
-	last, pos, err := parseDecimalInString(hdr, pos)
-	if err != nil {
-		return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing last-pos: %w", hdr, err)
-	}
-	pos, err = parseExpectedChar(hdr, pos, '/')
-	if err != nil {
-		return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err)
-	}
-	completeLength := int64(-1)
-	if pos < len(hdr) && hdr[pos] == '*' {
-		pos++
-	} else {
-		completeLength, pos, err = parseDecimalInString(hdr, pos)
-		if err != nil {
-			return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing complete-length: %w", hdr, err)
-		}
-	}
-	if pos < len(hdr) {
-		return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, unexpected trailing content", hdr)
-	}
-	return first, last, completeLength, nil
-}
-
-// Read implements io.ReadCloser
-func (br *bodyReader) Read(p []byte) (int, error) {
-	if br.body == nil {
-		return 0, fmt.Errorf("internal error: bodyReader.Read called on a closed object for %s", br.logURL.Redacted())
-	}
-	n, err := br.body.Read(p)
-	br.offset += int64(n)
-	switch {
-	case err == nil || err == io.EOF:
-		br.lastSuccessTime = time.Now()
-		return n, err // Unlike the default: case, don’t log anything.
-
-	case errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET):
-		originalErr := err
-		redactedURL := br.logURL.Redacted()
-		if err := br.errorIfNotReconnecting(originalErr, redactedURL); err != nil {
-			return n, err
-		}
-
-		if err := br.body.Close(); err != nil {
-			logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise
-		}
-		br.body = nil
-		time.Sleep(1*time.Second + rand.N(100_000*time.Microsecond)) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
-
-		headers := map[string][]string{
-			"Range": {fmt.Sprintf("bytes=%d-", br.offset)},
-		}
-		res, err := br.c.makeRequest(br.ctx, http.MethodGet, br.path, headers, nil, v2Auth, nil)
-		if err != nil {
-			return n, fmt.Errorf("%w (while reconnecting: %v)", originalErr, err)
-		}
-		consumedBody := false
-		defer func() {
-			if !consumedBody {
-				res.Body.Close()
-			}
-		}()
-		switch res.StatusCode {
-		case http.StatusPartialContent: // OK
-			// A client MUST inspect a 206 response's Content-Type and Content-Range field(s) to determine what parts are enclosed and whether additional requests are needed.
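-			// (Illustration: resuming at offset 1000 of a 5000-byte blob, a conforming server replies
-			// with “Content-Range: bytes 1000-4999/5000”, which the parseContentRange call below
-			// decomposes into (1000, 4999, 5000).)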
-			// The recipient of an invalid Content-Range MUST NOT attempt to recombine the received content with a stored representation.
-			first, last, completeLength, err := parseContentRange(res)
-			if err != nil {
-				return n, fmt.Errorf("%w (after reconnecting, invalid Content-Range header: %v)", originalErr, err)
-			}
-			// We don’t handle responses that start at an unrequested offset, nor responses that terminate before the end of the full blob.
-			if first != br.offset || (completeLength != -1 && last+1 != completeLength) {
-				return n, fmt.Errorf("%w (after reconnecting at offset %d, got unexpected Content-Range %d-%d/%d)", originalErr, br.offset, first, last, completeLength)
-			}
-			// Continue below
-		case http.StatusOK:
-			return n, fmt.Errorf("%w (after reconnecting, server did not process a Range: header, status %d)", originalErr, http.StatusOK)
-		default:
-			err := registryHTTPResponseToError(res)
-			return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err)
-		}
-
-		logrus.Debugf("Successfully reconnected to %s", redactedURL)
-		consumedBody = true
-		br.body = res.Body
-		br.lastRetryOffset = br.offset
-		br.lastRetryTime = time.Now()
-		return n, nil
-
-	default:
-		logrus.Debugf("Error reading blob body from %s: %#v", br.logURL.Redacted(), err)
-		return n, err
-	}
-}
-
-// millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value.
-// If tm.IsZero(), it returns math.NaN()
-func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 {
-	if tm.IsZero() {
-		return math.NaN()
-	}
-	return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0
-}
-
-// errorIfNotReconnecting makes a heuristic decision whether we should reconnect after err at redactedURL; if so, it returns nil,
-// otherwise it returns an appropriate error to return to the caller (possibly augmented with data about the heuristic)
-func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL string) error {
-	currentTime := time.Now()
-	msSinceFirstConnection := millisecondsSinceOptional(currentTime, br.firstConnectionTime)
-	msSinceLastRetry := millisecondsSinceOptional(currentTime, br.lastRetryTime)
-	msSinceLastSuccess := millisecondsSinceOptional(currentTime, br.lastSuccessTime)
-	logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: total %d @%.3f ms, last retry %d @%.3f ms, last progress @%.3f ms",
-		redactedURL, originalErr, br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess)
-	progress := br.offset - br.lastRetryOffset
-	if progress >= bodyReaderMinimumProgress {
-		logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress)
-		return nil
-	}
-	if br.lastRetryTime.IsZero() {
-		logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr)
-		return nil
-	}
-	if msSinceLastRetry >= bodyReaderMSSinceLastRetry {
-		logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %.3f ms…", redactedURL, originalErr, msSinceLastRetry)
-		return nil
-	}
-	logrus.Debugf("Not reconnecting to %s: insufficient progress %d / time since last retry %.3f ms", redactedURL, progress, msSinceLastRetry)
-	return fmt.Errorf("(heuristic tuning data: total %d @%.3f ms, last retry %d @%.3f ms, last progress @ %.3f ms): %w",
-		br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess, originalErr)
-}
-
-// Close implements io.ReadCloser
-func (br *bodyReader) Close() error {
-	if br.body == nil {
-		return nil
-	}
-	err := br.body.Close()
-	br.body = nil
-	return err
-}
diff --git a/vendor/go.podman.io/image/v5/docker/cache.go b/vendor/go.podman.io/image/v5/docker/cache.go
deleted file mode 100644
index 35fe37b38..000000000
--- a/vendor/go.podman.io/image/v5/docker/cache.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package docker
-
-import (
-	"go.podman.io/image/v5/docker/reference"
-	"go.podman.io/image/v5/types"
-)
-
-// bicTransportScope returns a BICTransportScope appropriate for ref.
-func bicTransportScope(ref dockerReference) types.BICTransportScope {
-	// Blobs can be reused across the whole registry.
-	return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
-}
-
-// newBICLocationReference returns a BICLocationReference appropriate for ref.
-func newBICLocationReference(ref dockerReference) types.BICLocationReference {
-	// Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
-	return types.BICLocationReference{Opaque: ref.ref.Name()}
-}
-
-// parseBICLocationReference returns a repository for encoded lr.
-func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
-	return reference.ParseNormalizedNamed(lr.Opaque)
-}
diff --git a/vendor/go.podman.io/image/v5/docker/distribution_error.go b/vendor/go.podman.io/image/v5/docker/distribution_error.go
deleted file mode 100644
index 06a9593dc..000000000
--- a/vendor/go.podman.io/image/v5/docker/distribution_error.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go
-// Copyright 2022 github.com/distribution/distribution authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package docker
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"slices"
-
-	"github.com/docker/distribution/registry/api/errcode"
-)
-
-// errNoErrorsInBody is returned when an HTTP response body parses to an empty
-// errcode.Errors slice.
-var errNoErrorsInBody = errors.New("no error details found in HTTP response body")
-
-// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
-// returned when making a registry api call.
-type UnexpectedHTTPStatusError struct {
-	// StatusCode code as returned from the server, so callers can
-	// match the exact code to make certain decisions if needed.
-	StatusCode int
-	// status text as displayed in the error message; not exposed, since callers should match on the number.
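-	// For example "404 Not Found", which Error() below renders as
-	// "received unexpected HTTP status: 404 Not Found".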
-	status string
-}
-
-func (e UnexpectedHTTPStatusError) Error() string {
-	return fmt.Sprintf("received unexpected HTTP status: %s", e.status)
-}
-
-func newUnexpectedHTTPStatusError(resp *http.Response) UnexpectedHTTPStatusError {
-	return UnexpectedHTTPStatusError{
-		StatusCode: resp.StatusCode,
-		status:     resp.Status,
-	}
-}
-
-// unexpectedHTTPResponseError is returned when an expected HTTP status code
-// is returned, but the content was unexpected and failed to be parsed.
-type unexpectedHTTPResponseError struct {
-	ParseErr   error
-	StatusCode int
-	Response   []byte
-}
-
-func (e *unexpectedHTTPResponseError) Error() string {
-	return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
-}
-
-func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
-	var errors errcode.Errors
-	body, err := io.ReadAll(r)
-	if err != nil {
-		return err
-	}
-
-	// For backward compatibility, handle irregularly formatted
-	// messages that contain a "details" field.
-	var detailsErr struct {
-		Details string `json:"details"`
-	}
-	err = json.Unmarshal(body, &detailsErr)
-	if err == nil && detailsErr.Details != "" {
-		switch statusCode {
-		case http.StatusUnauthorized:
-			return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
-		case http.StatusTooManyRequests:
-			return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
-		default:
-			return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
-		}
-	}
-
-	if err := json.Unmarshal(body, &errors); err != nil {
-		return &unexpectedHTTPResponseError{
-			ParseErr:   err,
-			StatusCode: statusCode,
-			Response:   body,
-		}
-	}
-
-	if len(errors) == 0 {
-		// If there was no error specified in the body, return
-		// UnexpectedHTTPResponseError.
-		return &unexpectedHTTPResponseError{
-			ParseErr:   errNoErrorsInBody,
-			StatusCode: statusCode,
-			Response:   body,
-		}
-	}
-
-	return errors
-}
-
-func makeErrorList(err error) []error {
-	if errL, ok := err.(errcode.Errors); ok {
-		return []error(errL)
-	}
-	return []error{err}
-}
-
-func mergeErrors(err1, err2 error) error {
-	return errcode.Errors(append(slices.Clone(makeErrorList(err1)), makeErrorList(err2)...))
-}
-
-// handleErrorResponse returns the error parsed from an HTTP response for an
-// unsuccessful HTTP response code (in the range 400-499 inclusive). An
-// UnexpectedHTTPStatusError is returned for response codes outside of the
-// expected range.
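-// For example, a 401 whose body carries the registry-standard
-// {"errors": [{"code": "UNAUTHORIZED", ...}]} payload is parsed into the corresponding
-// errcode.Error values, while a bare 502 yields an UnexpectedHTTPStatusError.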
-func handleErrorResponse(resp *http.Response) error {
-	switch {
-	case resp.StatusCode == http.StatusUnauthorized:
-		// Check for OAuth errors within the `WWW-Authenticate` header first
-		// See https://tools.ietf.org/html/rfc6750#section-3
-		for c := range iterateAuthHeader(resp.Header) {
-			if c.Scheme == "bearer" {
-				var err errcode.Error
-				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
-				switch c.Parameters["error"] {
-				case "invalid_token":
-					err.Code = errcode.ErrorCodeUnauthorized
-				case "insufficient_scope":
-					err.Code = errcode.ErrorCodeDenied
-				default:
-					continue
-				}
-				if description := c.Parameters["error_description"]; description != "" {
-					err.Message = description
-				} else {
-					err.Message = err.Code.Message()
-				}
-
-				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
-			}
-		}
-		fallthrough
-	case resp.StatusCode >= 400 && resp.StatusCode < 500:
-		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
-		if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
-			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
-		}
-		return err
-	}
-	return newUnexpectedHTTPStatusError(resp)
-}
diff --git a/vendor/go.podman.io/image/v5/docker/docker_client.go b/vendor/go.podman.io/image/v5/docker/docker_client.go
deleted file mode 100644
index 30f338da7..000000000
--- a/vendor/go.podman.io/image/v5/docker/docker_client.go
+++ /dev/null
@@ -1,1221 +0,0 @@
-package docker
-
-import (
-	"context"
-	"crypto/tls"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"os"
-	"path/filepath"
-	"slices"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/docker/distribution/registry/api/errcode"
-	v2 "github.com/docker/distribution/registry/api/v2"
-	"github.com/docker/go-connections/tlsconfig"
-	digest "github.com/opencontainers/go-digest"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/sirupsen/logrus"
-	"go.podman.io/image/v5/docker/reference"
-	"go.podman.io/image/v5/internal/iolimits"
-	"go.podman.io/image/v5/internal/multierr"
-	"go.podman.io/image/v5/internal/set"
-	"go.podman.io/image/v5/internal/useragent"
-	"go.podman.io/image/v5/manifest"
-	"go.podman.io/image/v5/pkg/docker/config"
-	"go.podman.io/image/v5/pkg/sysregistriesv2"
-	"go.podman.io/image/v5/pkg/tlsclientconfig"
-	"go.podman.io/image/v5/types"
-	"go.podman.io/storage/pkg/fileutils"
-	"go.podman.io/storage/pkg/homedir"
-)
-
-const (
-	dockerHostname   = "docker.io"
-	dockerV1Hostname = "index.docker.io"
-	dockerRegistry   = "registry-1.docker.io"
-
-	resolvedPingV2URL       = "%s://%s/v2/"
-	tagsPath                = "/v2/%s/tags/list"
-	manifestPath            = "/v2/%s/manifests/%s"
-	blobsPath               = "/v2/%s/blobs/%s"
-	blobUploadPath          = "/v2/%s/blobs/uploads/"
-	extensionsSignaturePath = "/extensions/v2/%s/signatures/%s"
-
-	minimumTokenLifetimeSeconds = 60
-
-	extensionSignatureSchemaVersion = 2        // extensionSignature.Version
-	extensionSignatureTypeAtomic    = "atomic" // extensionSignature.Type
-
-	backoffNumIterations = 5
-	backoffInitialDelay  = 2 * time.Second
-	backoffMaxDelay      = 60 * time.Second
-)
-
-type certPath struct {
-	path     string
-	absolute bool
-}
-
-var (
-	homeCertDir     = filepath.FromSlash(".config/containers/certs.d")
-	perHostCertDirs = []certPath{
-		{path: etcDir + "/containers/certs.d", absolute: true},
-		{path: etcDir + "/docker/certs.d", absolute: true},
-	}
-)
-
-// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
-// signature represents a Docker image signature.
-type extensionSignature struct {
-	Version int    `json:"schemaVersion"` // Version specifies the schema version
-	Name    string `json:"name"`          // Name must be in "sha256:@signatureName" format
-	Type    string `json:"type"`          // Type is optional, if not set it will be defaulted to "AtomicImageV1"
-	Content []byte `json:"content"`       // Content contains the signature
-}
-
-// signatureList represents list of Docker image signatures.
-type extensionSignatureList struct {
-	Signatures []extensionSignature `json:"signatures"`
-}
-
-// bearerToken records a cached token we can use to authenticate.
-type bearerToken struct {
-	token          string
-	expirationTime time.Time
-}
-
-// dockerClient is configuration for dealing with a single container registry.
-type dockerClient struct {
-	// The following members are set by newDockerClient and do not change afterwards.
-	sys       *types.SystemContext
-	registry  string
-	userAgent string
-
-	// tlsClientConfig is setup by newDockerClient and will be used and updated
-	// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
-	tlsClientConfig *tls.Config
-	// The following members are not set by newDockerClient and must be set by callers if needed.
-	auth                   types.DockerAuthConfig
-	registryToken          string
-	signatureBase          lookasideStorageBase
-	useSigstoreAttachments bool
-	scope                  authScope
-
-	// The following members are detected registry properties:
-	// They are set after a successful detectProperties(), and never change afterwards.
-	client             *http.Client
-	scheme             string
-	challenges         []challenge
-	supportsSignatures bool
-
-	// Private state for setupRequestAuth (key: string, value: bearerToken)
-	tokenCache sync.Map
-	// Private state for detectProperties:
-	detectPropertiesOnce  sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
-	detectPropertiesError error     // detectPropertiesError caches the initial error.
-	// Private state for logResponseWarnings
-	reportedWarningsLock sync.Mutex
-	reportedWarnings     *set.Set[string]
-}
-
-type authScope struct {
-	resourceType string
-	remoteName   string
-	actions      string
-}
-
-// sendAuth determines whether we need authentication for v2 or v1 endpoint.
-type sendAuth int
-
-const (
-	// v2 endpoint with authentication.
-	v2Auth sendAuth = iota
-	// v1 endpoint with authentication.
-	// TODO: Get v1Auth working
-	// v1Auth
-	// no authentication, works for both v1 and v2.
-	noAuth
-)
-
-// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
-func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
-	if sys != nil && sys.DockerCertPath != "" {
-		return sys.DockerCertPath, nil
-	}
-	if sys != nil && sys.DockerPerHostCertDirPath != "" {
-		return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
-	}
-
-	var (
-		hostCertDir     string
-		fullCertDirPath string
-	)
-
-	for _, perHostCertDir := range append([]certPath{{path: filepath.Join(homedir.Get(), homeCertDir), absolute: false}}, perHostCertDirs...) {
-		if sys != nil && sys.RootForImplicitAbsolutePaths != "" && perHostCertDir.absolute {
-			hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, perHostCertDir.path)
-		} else {
-			hostCertDir = perHostCertDir.path
-		}
-
-		fullCertDirPath = filepath.Join(hostCertDir, hostPort)
-		err := fileutils.Exists(fullCertDirPath)
-		if err == nil {
-			break
-		}
-		if os.IsNotExist(err) {
-			continue
-		}
-		if os.IsPermission(err) {
-			logrus.Debugf("error accessing certs directory due to permissions: %v", err)
-			continue
-		}
-		return "", err
-	}
-	return fullCertDirPath, nil
-}
-
-// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
-// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
-// signatureBase is always set in the return value
-// The caller must call .Close() on the returned client when done.
-func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) {
-	auth, err := config.GetCredentialsForRef(sys, ref.ref)
-	if err != nil {
-		return nil, fmt.Errorf("getting username and password: %w", err)
-	}
-
-	sigBase, err := registryConfig.lookasideStorageBaseURL(ref, write)
-	if err != nil {
-		return nil, err
-	}
-
-	registry := reference.Domain(ref.ref)
-	client, err := newDockerClient(sys, registry, ref.ref.Name())
-	if err != nil {
-		return nil, err
-	}
-	client.auth = auth
-	if sys != nil {
-		client.registryToken = sys.DockerBearerRegistryToken
-	}
-	client.signatureBase = sigBase
-	client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref)
-	client.scope.resourceType = "repository"
-	client.scope.actions = actions
-	client.scope.remoteName = reference.Path(ref.ref)
-	return client, nil
-}
-
-// newDockerClient returns a new dockerClient instance for the given registry
-// and reference. The reference is used to query the registry configuration
-// and can be either a registry (e.g., "registry.com[:5000]") or a repository
-// (e.g., "registry.com[:5000][/some/namespace]/repo").
-// Please note that newDockerClient does not set all members of dockerClient
-// (e.g., username and password); those must be set by callers if necessary.
-// The caller must call .Close() on the returned client when done.
-func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
-	hostName := registry
-	if registry == dockerHostname {
-		registry = dockerRegistry
-	}
-	tlsClientConfig := &tls.Config{
-		// As of 2025-08, tlsconfig.ClientDefault() differs from Go 1.23 defaults only in CipherSuites;
-		// so, limit us to only using that value. If go-connections/tlsconfig changes its policy, we
-		// will want to consider that and make a decision whether to follow suit.
-		// There is some chance that eventually the Go default will be to require TLS 1.3, and at that point
-		// we might want to drop the dependency on go-connections entirely.
-		CipherSuites: tlsconfig.ClientDefault().CipherSuites,
-	}
-
-	// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
-	// because docker/docker does not read the certs.d subdirectory at all in that case.
-	// We use the user-visible dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
-	// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
-	// undocumented and may change if docker/docker changes.
-	certDir, err := dockerCertDir(sys, hostName)
-	if err != nil {
-		return nil, err
-	}
-	if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
-		return nil, err
-	}
-
-	// Check if TLS verification shall be skipped (default=false) which can
-	// be specified in the sysregistriesv2 configuration.
-	skipVerify := false
-	reg, err := sysregistriesv2.FindRegistry(sys, reference)
-	if err != nil {
-		return nil, fmt.Errorf("loading registries: %w", err)
-	}
-	if reg != nil {
-		if reg.Blocked {
-			return nil, fmt.Errorf("registry %s is blocked in %s or %s", reg.Prefix, sysregistriesv2.ConfigPath(sys), sysregistriesv2.ConfigDirPath(sys))
-		}
-		skipVerify = reg.Insecure
-	}
-	tlsClientConfig.InsecureSkipVerify = skipVerify
-
-	userAgent := useragent.DefaultUserAgent
-	if sys != nil && sys.DockerRegistryUserAgent != "" {
-		userAgent = sys.DockerRegistryUserAgent
-	}
-
-	return &dockerClient{
-		sys:              sys,
-		registry:         registry,
-		userAgent:        userAgent,
-		tlsClientConfig:  tlsClientConfig,
-		reportedWarnings: set.New[string](),
-	}, nil
-}
-
-// CheckAuth validates the credentials by attempting to log into the registry;
-// it returns an error if the HTTP request failed, or if the status code received was 401.
-func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
-	client, err := newDockerClient(sys, registry, registry)
-	if err != nil {
-		return fmt.Errorf("creating new docker client: %w", err)
-	}
-	defer client.Close()
-	client.auth = types.DockerAuthConfig{
-		Username: username,
-		Password: password,
-	}
-
-	resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != http.StatusOK {
-		err := registryHTTPResponseToError(resp)
-		if resp.StatusCode == http.StatusUnauthorized {
-			err = ErrUnauthorizedForCredentials{Err: err}
-		}
-		return err
-	}
-	return nil
-}
-
-// SearchResult holds the information of each matching image
-// It matches the output returned by the v1 endpoint
-type SearchResult struct {
-	Name        string `json:"name"`
-	Description string `json:"description"`
-	// StarCount states the number of stars the image has
-	StarCount int  `json:"star_count"`
-	IsTrusted bool `json:"is_trusted"`
-	// IsAutomated states whether the image is an automated build
-	IsAutomated bool `json:"is_automated"`
-	// IsOfficial states whether the image is an official build
-	IsOfficial bool `json:"is_official"`
-}
-
-// SearchRegistry queries a registry for images that contain "image" in their name
-// The limit is the max number of results desired
-// Note: The limit value doesn't work with all registries;
-// for example registry.access.redhat.com returns all the results without limiting it to the limit value
-func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
-	type V2Results struct {
-		// Repositories holds the results returned by the /v2/_catalog endpoint
-		Repositories []string `json:"repositories"`
-	}
-	type V1Results struct {
-		// Results holds the results returned by the /v1/search endpoint
-		Results []SearchResult `json:"results"`
-	}
-	v1Res := &V1Results{}
-
-	// Get credentials from authfile for the underlying hostname
-	// We can't use GetCredentialsForRef here because we want to search the whole registry.
-	auth, err := config.GetCredentials(sys, registry)
-	if err != nil {
-		return nil, fmt.Errorf("getting username and password: %w", err)
-	}
-
-	// The /v2/_catalog endpoint has been disabled for docker.io therefore
-	// the call made to that endpoint will fail. So using the v1 hostname
-	// for docker.io for simplicity of implementation and the fact that it
-	// returns search results.
-	hostname := registry
-	if registry == dockerHostname {
-		hostname = dockerV1Hostname
-		// A search term of library/foo does not find the library/foo image on the docker.io servers,
-		// which is surprising - but it turns out that Docker modifies the search term client-side
-		// in this same way, and it seems convenient to do the same thing.
-		// Read more here: https://github.com/containers/image/pull/2133#issue-1928524334
-		image = strings.TrimPrefix(image, "library/")
-	}
-
-	client, err := newDockerClient(sys, hostname, registry)
-	if err != nil {
-		return nil, fmt.Errorf("creating new docker client: %w", err)
-	}
-	defer client.Close()
-	client.auth = auth
-	if sys != nil {
-		client.registryToken = sys.DockerBearerRegistryToken
-	}
-
-	// Only try the v1 search endpoint if the search query is not empty. If it is
-	// empty skip to the v2 endpoint.
-	if image != "" {
-		// set up the query values for the v1 endpoint
-		u := url.URL{
-			Path: "/v1/search",
-		}
-		q := u.Query()
-		q.Set("q", image)
-		q.Set("n", strconv.Itoa(limit))
-		u.RawQuery = q.Encode()
-
-		logrus.Debugf("trying to talk to v1 search endpoint")
-		resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil)
-		if err != nil {
-			logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
-		} else {
-			defer resp.Body.Close()
-			if resp.StatusCode != http.StatusOK {
-				logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp, ""))
-			} else {
-				if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
-					return nil, err
-				}
-				return v1Res.Results, nil
-			}
-		}
-	}
-
-	logrus.Debugf("trying to talk to v2 search endpoint")
-	searchRes := []SearchResult{}
-	path := "/v2/_catalog"
-	for len(searchRes) < limit {
-		resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
-		if err != nil {
-			logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
-			return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
-		}
-		defer resp.Body.Close()
-		if resp.StatusCode != http.StatusOK {
-			err := registryHTTPResponseToError(resp)
-			logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
-			return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
-		}
-		v2Res := &V2Results{}
-		if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
-			return nil, err
-		}
-
-		for _, repo := range v2Res.Repositories {
-			if len(searchRes) == limit {
-				break
-			}
-			if strings.Contains(repo, image) {
-				res := SearchResult{
-					Name: repo,
-				}
-				// bugzilla.redhat.com/show_bug.cgi?id=1976283
-				// If we have a full match, make sure it's listed as the first result.
-				// (Note there might be a full match we never see if we reach the result limit first.)
-				if repo == image {
-					searchRes = append([]SearchResult{res}, searchRes...)
-				} else {
-					searchRes = append(searchRes, res)
-				}
-			}
-		}
-
-		link := resp.Header.Get("Link")
-		if link == "" {
-			break
-		}
-		linkURLPart, _, _ := strings.Cut(link, ";")
-		linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
-		if err != nil {
-			return searchRes, err
-		}
-
-		// can be relative or absolute, but we only want the path (and I
-		// guess we're in trouble if it forwards to a new place...)
-		path = linkURL.Path
-		if linkURL.RawQuery != "" {
-			path += "?"
-			path += linkURL.RawQuery
-		}
-	}
-	return searchRes, nil
-}
-
-// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
-// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
-func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
-	if err := c.detectProperties(ctx); err != nil {
-		return nil, err
-	}
-
-	requestURL, err := c.resolveRequestURL(path)
-	if err != nil {
-		return nil, err
-	}
-	return c.makeRequestToResolvedURL(ctx, method, requestURL, headers, stream, -1, auth, extraScope)
-}
-
-// resolveRequestURL turns a path for c.makeRequest into a full URL.
-// Most users should call makeRequest directly, this exists basically to make the URL available for debug logs.
-func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) {
-	urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
-	res, err := url.Parse(urlString)
-	if err != nil {
-		return nil, err
-	}
-	return res, nil
-}
-
-// Checks if the auth headers in the response contain an indication of a failed
-// authorization because of an "insufficient_scope" error. If that's the case,
-// returns the required scope to be used for fetching a new token.
-func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) {
-	if res.StatusCode == http.StatusUnauthorized {
-		for challenge := range iterateAuthHeader(res.Header) {
-			if challenge.Scheme == "bearer" {
-				if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
-					if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
-						if newScope, err := parseAuthScope(scope); err == nil {
-							return true, newScope
-						} else {
-							logrus.WithFields(logrus.Fields{
-								"error":     err,
-								"scope":     scope,
-								"challenge": challenge,
-							}).Error("Failed to parse the authentication scope from the given challenge")
-						}
-					}
-				}
-			}
-		}
-	}
-	return false, nil
-}
-
-// parseRetryAfter determines the delay specified by the "Retry-After" header in res and returns it,
-// silently falling back to fallbackDelay if the header is missing or invalid.
-func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration {
-	after := res.Header.Get("Retry-After")
-	if after == "" {
-		return fallbackDelay
-	}
-	logrus.Debugf("Detected 'Retry-After' header %q", after)
-	// First, check if we have a numerical value.
-	if num, err := strconv.ParseInt(after, 10, 64); err == nil {
-		return time.Duration(num) * time.Second
-	}
-	// Second, check if we have an HTTP date.
-	if t, err := http.ParseTime(after); err == nil {
-		// If the delta between the date and now is positive, use it.
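-		// (For instance, “Retry-After: Wed, 21 Oct 2026 07:28:00 GMT” received at 07:27:00 GMT
-		// yields a one-minute delta below; a bare “Retry-After: 120” is handled by the numeric
-		// branch above as a 120-second delay.)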
-		delta := time.Until(t)
-		if delta > 0 {
-			return delta
-		}
-		logrus.Debugf("Retry-After date in the past, ignoring it")
-		return fallbackDelay
-	}
-	logrus.Debugf("Invalid Retry-After format, ignoring it")
-	return fallbackDelay
-}
-
-// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
-// streamLen, if not -1, specifies the length of the data expected on stream.
-// makeRequest should generally be preferred.
-// In case of an HTTP 429 status code in the response, it may automatically retry a few times.
-// TODO(runcom): too many arguments here, use a struct
-func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, requestURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
-	delay := backoffInitialDelay
-	attempts := 0
-	for {
-		res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope)
-		if err != nil {
-			return nil, err
-		}
-		attempts++
-
-		// By default we use pre-defined scopes per operation. In
-		// certain cases, this can fail when our authentication is
-		// insufficient, then we might be getting an error back with a
-		// Www-Authenticate Header indicating an insufficient scope.
-		//
-		// Check for that and update the client challenges to retry after
-		// requesting a new token
-		//
-		// We only try this on the first attempt, to not overload an
-		// already struggling server.
-		// We also cannot retry with a body (stream != nil) as stream
-		// was already read
-		if attempts == 1 && stream == nil && auth != noAuth {
-			if retry, newScope := needsRetryWithUpdatedScope(res); retry {
-				logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
-				res.Body.Close()
-				// Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently
-				// expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
-				// for more than one extra scope.
-				res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope)
-				if err != nil {
-					return nil, err
-				}
-				extraScope = newScope
-			}
-		}
-
-		if res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
-			stream != nil || // We can't retry with a body (which is not restartable in the general case)
-			attempts == backoffNumIterations {
-			return res, nil
-		}
-		// close response body before retry or context done
-		res.Body.Close()
-
-		delay = min(parseRetryAfter(res, delay), backoffMaxDelay)
-		logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds())
-		select {
-		case <-ctx.Done():
-			return nil, ctx.Err()
-		case <-time.After(delay):
-			// Nothing
-		}
-		delay *= 2 // If the registry does not specify a delay, back off exponentially.
-	}
-}
-
-// makeRequestToResolvedURLOnce creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
-// streamLen, if not -1, specifies the length of the data expected on stream.
-// makeRequest should generally be preferred.
-// Note that no exponential back off is performed when receiving an http 429 status code.
-func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, resolvedURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
-	req, err := http.NewRequestWithContext(ctx, method, resolvedURL.String(), stream)
-	if err != nil {
-		return nil, err
-	}
-	if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it.
-		req.ContentLength = streamLen
-	}
-	req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
-	for n, h := range headers {
-		for _, hh := range h {
-			req.Header.Add(n, hh)
-		}
-	}
-	req.Header.Add("User-Agent", c.userAgent)
-	if auth == v2Auth {
-		if err := c.setupRequestAuth(req, extraScope); err != nil {
-			return nil, err
-		}
-	}
-	logrus.Debugf("%s %s", method, resolvedURL.Redacted())
-	res, err := c.client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	if warnings := res.Header.Values("Warning"); len(warnings) != 0 {
-		c.logResponseWarnings(res, warnings)
-	}
-	return res, nil
-}
-
-// logResponseWarnings logs warningHeaders from res, if any.
-func (c *dockerClient) logResponseWarnings(res *http.Response, warningHeaders []string) {
-	c.reportedWarningsLock.Lock()
-	defer c.reportedWarningsLock.Unlock()
-
-	for _, header := range warningHeaders {
-		warningString := parseRegistryWarningHeader(header)
-		if warningString == "" {
-			logrus.Debugf("Ignored Warning: header from registry: %q", header)
-		} else {
-			if !c.reportedWarnings.Contains(warningString) {
-				c.reportedWarnings.Add(warningString)
-				// Note that reportedWarnings is based only on warningString, so that we don’t
-				// repeat the same warning for every request - but the warning includes the URL;
-				// so it may not be specific to that URL.
-				logrus.Warnf("Warning from registry (first encountered at %q): %q", res.Request.URL.Redacted(), warningString)
-			} else {
-				logrus.Debugf("Repeated warning from registry at %q: %q", res.Request.URL.Redacted(), warningString)
-			}
-		}
-	}
-}
-
-// parseRegistryWarningHeader parses a Warning: header per RFC 7234, limited to the warning
-// values allowed by opencontainers/distribution-spec.
-// It returns the warning string if the header has the expected format, or "" otherwise.
-func parseRegistryWarningHeader(header string) string {
-	const expectedPrefix = `299 - "`
-	const expectedSuffix = `"`
-
-	// warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ]
-	// distribution-spec requires warn-code=299, warn-agent="-", warn-date missing
-	header, ok := strings.CutPrefix(header, expectedPrefix)
-	if !ok {
-		return ""
-	}
-	header, ok = strings.CutSuffix(header, expectedSuffix)
-	if !ok {
-		return ""
-	}
-
-	// ”Recipients that process the value of a quoted-string MUST handle a quoted-pair
-	// as if it were replaced by the octet following the backslash.”, so let’s do that…
-	res := strings.Builder{}
-	afterBackslash := false
-	for _, c := range []byte(header) { // []byte because escaping is defined in terms of bytes, not Unicode code points
-		switch {
-		case c == 0x7F || (c < ' ' && c != '\t'):
-			return "" // Control characters are forbidden
-		case afterBackslash:
-			res.WriteByte(c)
-			afterBackslash = false
-		case c == '"':
-			// This terminates the warn-text, and either a warn-date (which distribution-spec forbids)
-			// follows, or the input is completely invalid.
-			return ""
-		case c == '\\':
-			afterBackslash = true
-		default:
-			res.WriteByte(c)
-		}
-	}
-	if afterBackslash {
-		return ""
-	}
-	return res.String()
-}
-
-// we're using the challenges from the /v2/ ping response and not the one from the destination
-// URL in this request because:
-//
-// 1) docker does that as well
-// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
-//
-// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
-func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error {
-	if len(c.challenges) == 0 {
-		return nil
-	}
-	schemeNames := make([]string, 0, len(c.challenges))
-	for _, challenge := range c.challenges {
-		schemeNames = append(schemeNames, challenge.Scheme)
-		switch challenge.Scheme {
-		case "basic":
-			req.SetBasicAuth(c.auth.Username, c.auth.Password)
-			return nil
-		case "bearer":
-			registryToken := c.registryToken
-			if registryToken == "" {
-				cacheKey := ""
-				scopes := []authScope{c.scope}
-				if extraScope != nil {
-					// Using ':' as a separator here is unambiguous because getBearerToken below
-					// uses the same separator when formatting a remote request (and because
-					// repository names that we create can't contain colons, and extraScope values
-					// coming from a server come from `parseAuthScope`, which also splits on colons).
-					cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions)
-					if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 {
-						return fmt.Errorf(
-							"Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d",
-							cacheKey,
-							colonCount,
-						)
-					}
-					scopes = append(scopes, *extraScope)
-				}
-				var token bearerToken
-				t, inCache := c.tokenCache.Load(cacheKey)
-				if inCache {
-					token = t.(bearerToken)
-				}
-				if !inCache || time.Now().After(token.expirationTime) {
-					var (
-						t   *bearerToken
-						err error
-					)
-					if c.auth.IdentityToken != "" {
-						t, err = c.getBearerTokenOAuth2(req.Context(), challenge, scopes)
-					} else {
-						t, err = c.getBearerToken(req.Context(), challenge, scopes)
-					}
-					if err != nil {
-						return err
-					}
-
-					token = *t
-					c.tokenCache.Store(cacheKey, token)
-				}
-				registryToken = token.token
-			}
-			req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken))
-			return nil
-		default:
-			logrus.Debugf("no handler for %s authentication", challenge.Scheme)
-		}
-	}
-	logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", "))
-	return nil
-}
-
-func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge challenge,
-	scopes []authScope) (*bearerToken, error) {
-	realm, ok := challenge.Parameters["realm"]
-	if !ok {
-		return nil, errors.New("missing realm in bearer auth challenge")
-	}
-
-	authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// Set up the form data required for OAuth2 authentication
-	// More details here: https://docs.docker.com/registry/spec/auth/oauth/
-	params := authReq.URL.Query()
-	if service, ok := challenge.Parameters["service"]; ok && service != "" {
-		params.Add("service", service)
-	}
-
-	for _, scope := range scopes {
-		if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
-			params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
-		}
-	}
-	params.Add("grant_type", "refresh_token")
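-	// The encoded form body then has the shape
-	//   grant_type=refresh_token&refresh_token=…&client_id=containers/image
-	// plus any service and repository-scope values added above.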
-	params.Add("refresh_token", c.auth.IdentityToken)
-	params.Add("client_id", "containers/image")
-
-	authReq.Body = io.NopCloser(strings.NewReader(params.Encode()))
-	authReq.Header.Add("User-Agent", c.userAgent)
-	authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
-	logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
-	res, err := c.client.Do(authReq)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-	if err := httpResponseToError(res, "Trying to obtain access token"); err != nil {
-		return nil, err
-	}
-
-	return newBearerTokenFromHTTPResponseBody(res)
-}
-
-func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
-	scopes []authScope) (*bearerToken, error) {
-	realm, ok := challenge.Parameters["realm"]
-	if !ok {
-		return nil, errors.New("missing realm in bearer auth challenge")
-	}
-
-	authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	params := authReq.URL.Query()
-	if c.auth.Username != "" {
-		params.Add("account", c.auth.Username)
-	}
-
-	if service, ok := challenge.Parameters["service"]; ok && service != "" {
-		params.Add("service", service)
-	}
-
-	for _, scope := range scopes {
-		if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
-			params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
-		}
-	}
-
-	authReq.URL.RawQuery = params.Encode()
-
-	if c.auth.Username != "" && c.auth.Password != "" {
-		authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
-	}
-	authReq.Header.Add("User-Agent", c.userAgent)
-
-	logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
-	res, err := c.client.Do(authReq)
-	if err != nil {
-		return nil, err
-	}
-	defer res.Body.Close()
-	if err := httpResponseToError(res, "Requesting bearer token"); err != nil {
-		return nil, err
-	}
-
-	return newBearerTokenFromHTTPResponseBody(res)
-}
-
-// newBearerTokenFromHTTPResponseBody parses a http.Response to obtain a bearerToken.
-// The caller is still responsible for ensuring res.Body is closed.
-func newBearerTokenFromHTTPResponseBody(res *http.Response) (*bearerToken, error) {
-	blob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
-	if err != nil {
-		return nil, err
-	}
-
-	var token struct {
-		Token          string    `json:"token"`
-		AccessToken    string    `json:"access_token"`
-		ExpiresIn      int       `json:"expires_in"`
-		IssuedAt       time.Time `json:"issued_at"`
-		expirationTime time.Time
-	}
-	if err := json.Unmarshal(blob, &token); err != nil {
-		const bodySampleLength = 50
-		bodySample := blob
-		if len(bodySample) > bodySampleLength {
-			bodySample = bodySample[:bodySampleLength]
-		}
-		return nil, fmt.Errorf("decoding bearer token (last URL %q, body start %q): %w", res.Request.URL.Redacted(), string(bodySample), err)
-	}
-
-	bt := &bearerToken{
-		token: token.Token,
-	}
-	if bt.token == "" {
-		bt.token = token.AccessToken
-	}
-
-	if token.ExpiresIn < minimumTokenLifetimeSeconds {
-		token.ExpiresIn = minimumTokenLifetimeSeconds
-		logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
-	}
-	if token.IssuedAt.IsZero() {
-		token.IssuedAt = time.Now().UTC()
-	}
-	bt.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
-	return bt, nil
-}
-
-// detectPropertiesHelper performs the work of detectProperties which executes
-// it at most once.
-func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
-	// We overwrite the TLS clients `InsecureSkipVerify` only if explicitly
-	// specified by the system context
-	if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
-		c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
-	}
-	tr := tlsclientconfig.NewTransport()
-	tr.TLSClientConfig = c.tlsClientConfig
-	// If DockerProxyURL is set explicitly, use it instead of the system proxy
-	if c.sys != nil && c.sys.DockerProxyURL != nil {
-		tr.Proxy = http.ProxyURL(c.sys.DockerProxyURL)
-	}
-	if c.sys != nil && c.sys.DockerProxy != nil {
-		tr.Proxy = func(request *http.Request) (*url.URL, error) {
-			return c.sys.DockerProxy(request.URL)
-		}
-	}
-	c.client = &http.Client{Transport: tr}
-
-	ping := func(scheme string) error {
-		pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry))
-		if err != nil {
-			return err
-		}
-		resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil)
-		if err != nil {
-			logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err)
-			return err
-		}
-		defer resp.Body.Close()
-		logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode)
-		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
-			return registryHTTPResponseToError(resp)
-		}
-		c.challenges = slices.Collect(iterateAuthHeader(resp.Header))
-		c.scheme = scheme
-		c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
-		return nil
-	}
-	err := ping("https")
-	if err != nil && c.tlsClientConfig.InsecureSkipVerify {
-		err = ping("http")
-	}
-	if err != nil {
-		err = fmt.Errorf("pinging container registry %s: %w", c.registry, err)
-	}
-	return err
-}
-
-// detectProperties detects various properties of the registry.
-// See the dockerClient documentation for members which are affected by this.
-func (c *dockerClient) detectProperties(ctx context.Context) error {
-	c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
-	return c.detectPropertiesError
-}
-
-// fetchManifest fetches a manifest for (the repo of ref) + tagOrDigest.
-// The caller is responsible for ensuring tagOrDigest uses the expected format.
-func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) {
-	path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest)
-	headers := map[string][]string{
-		"Accept": manifest.DefaultRequestedManifestMIMETypes,
-	}
-	res, err := c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
-	if err != nil {
-		return nil, "", err
-	}
-	logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
-	defer res.Body.Close()
-	if res.StatusCode != http.StatusOK {
-		return nil, "", fmt.Errorf("reading manifest %s in %s: %w", tagOrDigest, ref.ref.Name(), registryHTTPResponseToError(res))
-	}
-
-	manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
-	if err != nil {
-		return nil, "", err
-	}
-	return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
-}
-
-// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
-// This function can return a nil reader when none of the URLs are supported by it. In this case, the caller
-// should fall back to fetching the non-external blob (i.e. pull from the registry).
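-// For example, a foreign layer whose descriptor lists urls: ["https://example.com/layer.tar.gz"]
-// is fetched from that URL directly; as noted below (CVE-2020-15157), no credentials are ever
-// sent on such requests.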
-func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
-	if len(urls) == 0 {
-		return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
-	}
-	var remoteErrors []error
-	for _, u := range urls {
-		blobURL, err := url.Parse(u)
-		if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") {
-			continue // unsupported url. skip this url.
-		}
-		// NOTE: we must not authenticate on additional URLs as those
-		// can be abused to leak credentials or tokens. Please
-		// refer to CVE-2020-15157 for more information.
-		resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
-		if err != nil {
-			remoteErrors = append(remoteErrors, err)
-			continue
-		}
-		if resp.StatusCode != http.StatusOK {
-			err := fmt.Errorf("error fetching external blob from %q: %w", u, newUnexpectedHTTPStatusError(resp))
-			remoteErrors = append(remoteErrors, err)
-			logrus.Debug(err)
-			resp.Body.Close()
-			continue
-		}
-
-		size, err := getBlobSize(resp)
-		if err != nil {
-			size = -1
-		}
-		return resp.Body, size, nil
-	}
-	if remoteErrors == nil {
-		return nil, 0, nil // fallback to non-external blob
-	}
-	return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors))
-}
-
-func getBlobSize(resp *http.Response) (int64, error) {
-	hdrs := resp.Header.Values("Content-Length")
-	if len(hdrs) == 0 {
-		return -1, errors.New(`Missing "Content-Length" header in response`)
-	}
-	hdr := hdrs[0] // Equivalent to resp.Header.Get(…)
-	size, err := strconv.ParseInt(hdr, 10, 64)
-	if err != nil { // Go’s response reader should already reject such values.
-		return -1, err
-	}
-	if size < 0 { // '-' is not a valid character in Content-Length, so negative values are invalid. Go’s response reader should already reject such values.
-		return -1, fmt.Errorf(`Invalid negative "Content-Length" %q`, hdr)
-	}
-	return size, nil
-}
-
-// getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown).
-// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
-func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
-	if len(info.URLs) != 0 {
-		r, s, err := c.getExternalBlob(ctx, info.URLs)
-		if err != nil {
-			return nil, 0, err
-		} else if r != nil {
-			return r, s, nil
-		}
-	}
-
-	if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
-		return nil, 0, err
-	}
-	path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String())
-	logrus.Debugf("Downloading %s", path)
-	res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
-	if err != nil {
-		return nil, 0, err
-	}
-	if res.StatusCode != http.StatusOK {
-		err := registryHTTPResponseToError(res)
-		res.Body.Close()
-		return nil, 0, fmt.Errorf("fetching blob: %w", err)
-	}
-	cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref))
-	blobSize, err := getBlobSize(res)
-	if err != nil {
-		// See above, we don't guarantee returning a size
-		logrus.Debugf("failed to get blob size: %v", err)
-		blobSize = -1
-	}
-
-	reconnectingReader, err := newBodyReader(ctx, c, path, res.Body)
-	if err != nil {
-		res.Body.Close()
-		return nil, 0, err
-	}
-	return reconnectingReader, blobSize, nil
-}
-
-// getOCIDescriptorContents returns the contents of a blob specified by descriptor in ref, which must fit within limit.
-func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
-	// Note that this copies all kinds of attachments: attestations, and whatever else is there,
-	// not just signatures. We leave the signature consumers to decide based on the MIME type.
-
-	if err := desc.Digest.Validate(); err != nil { // .Algorithm() might panic without this check
-		return nil, fmt.Errorf("invalid digest %q: %w", desc.Digest.String(), err)
-	}
-	digestAlgorithm := desc.Digest.Algorithm()
-	if !digestAlgorithm.Available() {
-		return nil, fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", desc.Digest.String(), digestAlgorithm.String())
-	}
-
-	reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
-	if err != nil {
-		return nil, err
-	}
-	defer reader.Close()
-	payload, err := iolimits.ReadAtMost(reader, maxSize)
-	if err != nil {
-		return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
-	}
-	actualDigest := digestAlgorithm.FromBytes(payload)
-	if actualDigest != desc.Digest {
-		return nil, fmt.Errorf("digest mismatch, expected %q, got %q", desc.Digest.String(), actualDigest.String())
-	}
-	return payload, nil
-}
-
-// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error.
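-// For example, a distribution-spec-compliant registry answers a missing manifest with a 404 whose
-// body contains {"errors": [{"code": "MANIFEST_UNKNOWN", ...}]}; the checks below also accept
-// several registry-specific variants of that reply.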
-func isManifestUnknownError(err error) bool { - // docker/distribution, and as defined in the spec - var ec errcode.ErrorCoder - if errors.As(err, &ec) && ec.ErrorCode() == v2.ErrorCodeManifestUnknown { - return true - } - // registry.redhat.io as of October 2022 - var e errcode.Error - if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" { - return true - } - // Harbor v2.10.2 - if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && strings.Contains(strings.ToLower(e.Message), "not found") { - return true - } - - // opencontainers/distribution-spec does not require the errcode.Error payloads to be used, - // but specifies that the HTTP status must be 404. - var unexpected *unexpectedHTTPResponseError - if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound { - return true - } - return false -} - -// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for -// digest in ref. -// It returns (nil, nil) if the manifest does not exist. -func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) { - tag, err := sigstoreAttachmentTag(digest) - if err != nil { - return nil, err - } - sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag) - if err != nil { - return nil, err - } - logrus.Debugf("Looking for sigstore attachments in %s", sigstoreRef.String()) - manifestBlob, mimeType, err := c.fetchManifest(ctx, ref, tag) - if err != nil { - // FIXME: Are we going to need better heuristics?? - // This alone is probably a good enough reason for sigstore to be opt-in only, - // otherwise we would just break ordinary copies. - if isManifestUnknownError(err) { - logrus.Debugf("Fetching sigstore attachment manifest failed, assuming it does not exist: %v", err) - return nil, nil - } - logrus.Debugf("Fetching sigstore attachment manifest failed: %v", err) - return nil, err - } - if mimeType != imgspecv1.MediaTypeImageManifest { - // FIXME: Try anyway?? - return nil, fmt.Errorf("unexpected MIME type for sigstore attachment manifest %s: %q", - sigstoreRef.String(), mimeType) - } - res, err := manifest.OCI1FromManifest(manifestBlob) - if err != nil { - return nil, fmt.Errorf("parsing manifest %s: %w", sigstoreRef.String(), err) - } - return res, nil -} - -// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, -// using the original data structures. 
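getSigstoreAttachmentManifest above leans on cosign's tag convention: the attachments for a manifest digest sha256:&lt;hex&gt; are published under the tag sha256-&lt;hex&gt;.sig in the same repository, which is exactly what sigstoreAttachmentTag (defined further below) computes. The derivation in isolation, with attachmentTag as an illustrative stand-in:

package sketch

import (
	"strings"

	"github.com/opencontainers/go-digest"
)

// attachmentTag converts a manifest digest into the cosign-style tag that
// holds its sigstore attachments, e.g. sha256:<hex> -> sha256-<hex>.sig.
func attachmentTag(d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil { // keep ":" and other junk out of the tag
		return "", err
	}
	return strings.Replace(d.String(), ":", "-", 1) + ".sig", nil
}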
-func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { - if err := manifestDigest.Validate(); err != nil { // Make sure manifestDigest.String() does not contain any unexpected characters - return nil, err - } - path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) - res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), registryHTTPResponseToError(res)) - } - - body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize) - if err != nil { - return nil, err - } - - var parsedBody extensionSignatureList - if err := json.Unmarshal(body, &parsedBody); err != nil { - return nil, fmt.Errorf("decoding signature list: %w", err) - } - return &parsedBody, nil -} - -// sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest. -func sigstoreAttachmentTag(d digest.Digest) (string, error) { - if err := d.Validate(); err != nil { // Make sure d.String() doesn’t contain any unexpected characters - return "", err - } - return strings.Replace(d.String(), ":", "-", 1) + ".sig", nil -} - -// Close removes resources associated with an initialized dockerClient, if any. -func (c *dockerClient) Close() error { - if c.client != nil { - c.client.CloseIdleConnections() - } - return nil -} diff --git a/vendor/go.podman.io/image/v5/docker/docker_image.go b/vendor/go.podman.io/image/v5/docker/docker_image.go deleted file mode 100644 index 1e5de65a7..000000000 --- a/vendor/go.podman.io/image/v5/docker/docker_image.go +++ /dev/null @@ -1,186 +0,0 @@ -package docker - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/image" - "go.podman.io/image/v5/manifest" - "go.podman.io/image/v5/types" -) - -// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods -// which are specific to Docker. -type Image struct { - types.ImageCloser - src *dockerImageSource -} - -// newImage returns a new Image interface type after setting up -// a client to the registry hosting the given image. -// The caller must call .Close() on the returned Image. -func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) { - s, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, sys, s) - if err != nil { - return nil, err - } - return &Image{ImageCloser: img, src: s}, nil -} - -// SourceRefFullName returns a fully expanded name for the repository this image is in. -func (i *Image) SourceRefFullName() string { - return i.src.logicalRef.ref.Name() -} - -// GetRepositoryTags list all tags available in the repository. The tag -// provided inside the ImageReference will be ignored. (This is a -// backward-compatible shim method which calls the module-level -// GetRepositoryTags) -func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) { - return GetRepositoryTags(ctx, i.src.c.sys, i.src.logicalRef) -} - -// GetRepositoryTags list all tags available in the repository. 
The tag -// provided inside the ImageReference will be ignored. -func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { - dr, ok := ref.(dockerReference) - if !ok { - return nil, errors.New("ref must be a dockerReference") - } - - registryConfig, err := loadRegistryConfiguration(sys) - if err != nil { - return nil, err - } - path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) - client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull") - if err != nil { - return nil, fmt.Errorf("failed to create client: %w", err) - } - defer client.Close() - - tags := make([]string, 0) - - for { - res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("fetching tags list: %w", registryHTTPResponseToError(res)) - } - - var tagsHolder struct { - Tags []string - } - if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { - return nil, err - } - for _, tag := range tagsHolder.Tags { - if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values - // Per https://github.com/containers/skopeo/issues/2409 , Sonatype Nexus 3.58, contrary - // to the spec, may include JSON null values in the list; and Go silently parses them as "". - if tag == "" { - logrus.Debugf("Ignoring invalid empty tag") - continue - } - // Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory, - // contrary to the tag format specified in - // https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 , - // include digests in the list. - if _, err := digest.Parse(tag); err == nil { - logrus.Debugf("Ignoring invalid tag %q matching a digest format", tag) - continue - } - return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err) - } - tags = append(tags, tag) - } - - link := res.Header.Get("Link") - if link == "" { - break - } - - linkURLPart, _, _ := strings.Cut(link, ";") - linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>")) - if err != nil { - return tags, err - } - - // can be relative or absolute, but we only want the path (and I - // guess we're in trouble if it forwards to a new place...) - path = linkURL.Path - if linkURL.RawQuery != "" { - path += "?" - path += linkURL.RawQuery - } - } - return tags, nil -} - -// GetDigest returns the image's digest -// Use this to optimize and avoid use of an ImageSource based on the returned digest; -// if you are going to use an ImageSource anyway, it’s more efficient to create it first -// and compute the digest from the value returned by GetManifest. 
-// NOTE: Implemented to avoid Docker Hub API limits, and mirror configuration may be -// ignored (but may be implemented in the future) -func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) { - dr, ok := ref.(dockerReference) - if !ok { - return "", errors.New("ref must be a dockerReference") - } - if dr.isUnknownDigest { - return "", fmt.Errorf("docker: reference %q is for unknown digest case; cannot get digest", dr.StringWithinTransport()) - } - - tagOrDigest, err := dr.tagOrDigest() - if err != nil { - return "", err - } - - registryConfig, err := loadRegistryConfiguration(sys) - if err != nil { - return "", err - } - client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull") - if err != nil { - return "", fmt.Errorf("failed to create client: %w", err) - } - defer client.Close() - - path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest) - headers := map[string][]string{ - "Accept": manifest.DefaultRequestedManifestMIMETypes, - } - - res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil) - if err != nil { - return "", err - } - - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return "", fmt.Errorf("reading digest %s in %s: %w", tagOrDigest, dr.ref.Name(), registryHTTPResponseToError(res)) - } - - dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest")) - if err != nil { - return "", err - } - - return dig, nil -} diff --git a/vendor/go.podman.io/image/v5/docker/docker_image_dest.go b/vendor/go.podman.io/image/v5/docker/docker_image_dest.go deleted file mode 100644 index 86077fe93..000000000 --- a/vendor/go.podman.io/image/v5/docker/docker_image_dest.go +++ /dev/null @@ -1,937 +0,0 @@ -package docker - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "errors" - "fmt" - "io" - "maps" - "net/http" - "net/url" - "os" - "path/filepath" - "slices" - "strings" - - "github.com/docker/distribution/registry/api/errcode" - v2 "github.com/docker/distribution/registry/api/v2" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/blobinfocache" - "go.podman.io/image/v5/internal/imagedestination/impl" - "go.podman.io/image/v5/internal/imagedestination/stubs" - "go.podman.io/image/v5/internal/iolimits" - "go.podman.io/image/v5/internal/private" - "go.podman.io/image/v5/internal/putblobdigest" - "go.podman.io/image/v5/internal/set" - "go.podman.io/image/v5/internal/signature" - "go.podman.io/image/v5/internal/streamdigest" - "go.podman.io/image/v5/internal/uploadreader" - "go.podman.io/image/v5/manifest" - "go.podman.io/image/v5/pkg/blobinfocache/none" - compressiontypes "go.podman.io/image/v5/pkg/compression/types" - "go.podman.io/image/v5/types" -) - -type dockerImageDestination struct { - impl.Compat - impl.PropertyMethodsInitialize - stubs.IgnoresOriginalOCIConfig - stubs.NoPutBlobPartialInitialize - - ref dockerReference - c *dockerClient - // State - manifestDigest digest.Digest // or "" if not yet known. -} - -// newImageDestination creates a new ImageDestination for the specified image reference. 
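The deleted GetDigest boils down to one HEAD request against the manifest endpoint plus trust in the returned Docker-Content-Digest header, which is presumably what the "avoid Docker Hub API limits" note refers to. A bare-bones sketch assuming a registry that permits anonymous pulls; headDigest and the hard-coded URL layout are illustrative only, and real code must also handle token authentication:

package sketch

import (
	"context"
	"fmt"
	"net/http"

	"github.com/opencontainers/go-digest"
)

// headDigest resolves a tag to its manifest digest with a single HEAD
// request, reading the digest from the Docker-Content-Digest header rather
// than downloading and hashing the manifest body.
func headDigest(ctx context.Context, registry, repo, tag string) (digest.Digest, error) {
	u := fmt.Sprintf("https://%s/v2/%s/manifests/%s", registry, repo, tag)
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, u, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Accept", "application/vnd.oci.image.manifest.v1+json")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return "", fmt.Errorf("HEAD %s: %s", u, res.Status)
	}
	return digest.Parse(res.Header.Get("Docker-Content-Digest"))
}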
-func newImageDestination(sys *types.SystemContext, ref dockerReference) (private.ImageDestination, error) { - registryConfig, err := loadRegistryConfiguration(sys) - if err != nil { - return nil, err - } - c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "pull,push") - if err != nil { - return nil, err - } - mimeTypes := []string{ - imgspecv1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - imgspecv1.MediaTypeImageIndex, - manifest.DockerV2ListMediaType, - } - if c.sys == nil || !c.sys.DockerDisableDestSchema1MIMETypes { - mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType) - } - - dest := &dockerImageDestination{ - PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ - SupportedManifestMIMETypes: mimeTypes, - DesiredLayerCompression: types.Compress, - MustMatchRuntimeOS: false, - IgnoresEmbeddedDockerReference: false, // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. - HasThreadSafePutBlob: true, - }), - NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), - - ref: ref, - c: c, - } - dest.Compat = impl.AddCompat(dest) - return dest, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *dockerImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *dockerImageDestination) Close() error { - return d.c.Close() -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { - if err := d.c.detectProperties(ctx); err != nil { - return err - } - switch { - case d.c.supportsSignatures: - return nil - case d.c.signatureBase != nil: - return nil - default: - return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") - } -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// sizeCounter is an io.Writer which only counts the total size of its input. -type sizeCounter struct{ size int64 } - -func (c *sizeCounter) Write(p []byte) (n int, err error) { - c.size += int64(len(p)) - return len(p), nil -} - -// PutBlobWithOptions writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. 
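The PutBlobWithOptions body that follows is dense, but the wire protocol underneath is just the distribution spec's three-request upload dance. A stripped-down sketch assuming an unauthenticated registry; uploadBlob is a hypothetical helper, and the deleted code additionally streams, digests on the fly, and short-circuits on blobs that already exist:

package sketch

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/opencontainers/go-digest"
)

// uploadBlob pushes a blob with the distribution spec's three-step flow:
// POST to open an upload session, PATCH the bytes, then PUT ?digest=... to
// seal the blob under its content address.
func uploadBlob(base, repo string, blob []byte) error {
	res, err := http.Post(base+"/v2/"+repo+"/blobs/uploads/", "application/octet-stream", nil)
	if err != nil {
		return err
	}
	res.Body.Close()
	loc, err := res.Location() // upload session URL
	if err != nil {
		return err
	}

	req, err := http.NewRequest(http.MethodPatch, loc.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	res, err = http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	res.Body.Close()
	if loc, err = res.Location(); err != nil { // the session URL may move between requests
		return err
	}

	q := loc.Query()
	q.Set("digest", digest.FromBytes(blob).String())
	loc.RawQuery = q.Encode()
	req, err = http.NewRequest(http.MethodPut, loc.String(), nil)
	if err != nil {
		return err
	}
	res, err = http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	res.Body.Close()
	if res.StatusCode != http.StatusCreated {
		return fmt.Errorf("uploading blob to %s: unexpected status %s", repo, res.Status)
	}
	return nil
}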
-func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { - // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry. - // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests, - // the source blob is uncompressed, and the destination blob is being compressed "on the fly". - if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests { - logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref)) - streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo) - if err != nil { - return private.UploadedBlob{}, err - } - defer cleanup() - stream = streamCopy - } - - if inputInfo.Digest != "" { - // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. - // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. - haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache) - if err != nil { - return private.UploadedBlob{}, err - } - if haveBlob { - return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil - } - } - - // FIXME? Chunked upload, progress reporting, etc. - uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) - logrus.Debugf("Uploading %s", uploadPath) - res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil) - if err != nil { - return private.UploadedBlob{}, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusAccepted { - logrus.Debugf("Error initiating layer upload, response %#v", *res) - return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res)) - } - uploadLocation, err := res.Location() - if err != nil { - return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err) - } - - digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) - sizeCounter := &sizeCounter{} - stream = io.TeeReader(stream, sizeCounter) - - uploadLocation, err = func() (*url.URL, error) { // A scope for defer - uploadReader := uploadreader.NewUploadReader(stream) - // This error text should never be user-visible, we terminate only after makeRequestToResolvedURL - // returns, so there isn’t a way for the error text to be provided to any of our callers. 
- defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload")) - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) - if err != nil { - logrus.Debugf("Error uploading layer chunked %v", err) - return nil, err - } - defer res.Body.Close() - if !successStatus(res.StatusCode) { - return nil, fmt.Errorf("uploading layer chunked: %w", registryHTTPResponseToError(res)) - } - uploadLocation, err := res.Location() - if err != nil { - return nil, fmt.Errorf("determining upload URL: %w", err) - } - return uploadLocation, nil - }() - if err != nil { - return private.UploadedBlob{}, err - } - blobDigest := digester.Digest() - - // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) - - locationQuery := uploadLocation.Query() - locationQuery.Set("digest", blobDigest.String()) - uploadLocation.RawQuery = locationQuery.Encode() - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) - if err != nil { - return private.UploadedBlob{}, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusCreated { - logrus.Debugf("Error uploading layer, response %#v", *res) - return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res)) - } - - logrus.Debugf("Upload of layer %s complete", blobDigest) - options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref)) - return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil -} - -// blobExists returns true iff repo contains a blob with digest, and if so, also its size. -// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) { - if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters - return false, -1, err - } - checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String()) - logrus.Debugf("Checking %s", checkPath) - res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope) - if err != nil { - return false, -1, err - } - defer res.Body.Close() - switch res.StatusCode { - case http.StatusOK: - size, err := getBlobSize(res) - if err != nil { - return false, -1, fmt.Errorf("determining size of blob %s in %s: %w", digest, repo.Name(), err) - } - logrus.Debugf("... already exists") - return true, size, nil - case http.StatusUnauthorized: - logrus.Debugf("... not authorized") - return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) - case http.StatusNotFound: - logrus.Debugf("... not present") - return false, -1, nil - default: - return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) - } -} - -// mountBlob tries to mount blob srcDigest from srcRepo to the current destination. 
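mountBlob below is the cross-repository mount optimization in action: a single POST with mount= and from= parameters either registers the blob in the destination repository (201 Created) or silently degrades into an ordinary upload session (202 Accepted) that then has to be cancelled. The request shape in isolation; mountURL is a hypothetical helper:

package sketch

import (
	"fmt"
	"net/url"
)

// mountURL builds the request path that asks a registry to mount an existing
// blob from srcRepo into destRepo instead of re-uploading its bytes.
func mountURL(destRepo, srcRepo, dgst string) string {
	u := url.URL{
		Path: fmt.Sprintf("/v2/%s/blobs/uploads/", destRepo),
		RawQuery: url.Values{
			"mount": {dgst},
			"from":  {srcRepo},
		}.Encode(),
	}
	return u.String()
}

For example, mountURL("me/app", "library/alpine", "sha256:<hex>") yields /v2/me/app/blobs/uploads/?from=library%2Falpine&mount=sha256%3A<hex> (url.Values.Encode sorts the keys, so "from" precedes "mount").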
-func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error { - u := url.URL{ - Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)), - RawQuery: url.Values{ - "mount": {srcDigest.String()}, - "from": {reference.Path(srcRepo)}, - }.Encode(), - } - logrus.Debugf("Trying to mount %s", u.Redacted()) - res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope) - if err != nil { - return err - } - defer res.Body.Close() - switch res.StatusCode { - case http.StatusCreated: - logrus.Debugf("... mount OK") - return nil - case http.StatusAccepted: - // Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process. - // Abort, and let the ultimate caller do an upload when its ready, instead. - // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested. - uploadLocation, err := res.Location() - if err != nil { - return fmt.Errorf("determining upload URL after a mount attempt: %w", err) - } - logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted()) - res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope) - if err != nil { - logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err) - } else { - defer res2.Body.Close() - if res2.StatusCode != http.StatusNoContent { - logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode)) - } - } - // Anyway, if canceling the upload fails, ignore it and return the more important error: - return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name()) - default: - logrus.Debugf("Error mounting, response %#v", *res) - return fmt.Errorf("mounting %s from %s to %s: %w", srcDigest, srcRepo.Name(), d.ref.ref.Name(), registryHTTPResponseToError(res)) - } -} - -// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified -// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read. -// The caller must ensure info.Digest is set. -func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) { - exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil) - if err != nil { - return false, private.ReusedBlob{}, err - } - if exists { - cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref)) - return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil - } - return false, private.ReusedBlob{}, nil -} - -func optionalCompressionName(algo *compressiontypes.Algorithm) string { - if algo != nil { - return algo.Name() - } - return "nil" -} - -// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil). 
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
- if info.Digest == "" {
- return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
- }
-
- originalCandidateKnownToBeMissing := false
- if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
- // First, check whether the blob happens to already exist at the destination.
- haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
- if err != nil {
- return false, private.ReusedBlob{}, err
- }
- if haveBlob {
- return true, reusedInfo, nil
- }
- originalCandidateKnownToBeMissing = true
- } else {
- logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
- optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
- // We can get here with a blob detected to be zstd when the user wants a zstd:chunked.
- // In that case we keep originalCandidateKnownToBeMissing = false, so that if we find
- // a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
- // with the BIC’s annotations.
- // This is not quite correct, it only works if the BIC also contains an acceptable _location_.
- // Ideally, we could look up just the compression algorithm/annotations for info.digest,
- // and use it even if no location candidate exists and the original candidate is present.
- }
-
- // Then try reusing blobs from other locations.
- candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, blobinfocache.CandidateLocations2Options{
- CanSubstitute: options.CanSubstitute,
- PossibleManifestFormats: options.PossibleManifestFormats,
- RequiredCompression: options.RequiredCompression,
- })
- for _, candidate := range candidates {
- var candidateRepo reference.Named
- if !candidate.UnknownLocation {
- var err error
- candidateRepo, err = parseBICLocationReference(candidate.Location)
- if err != nil {
- logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
- continue
- }
- if candidate.CompressionAlgorithm != nil {
- logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name())
- } else {
- logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
- }
- // Sanity checks:
- if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
- // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
- // (the "from" parameter); in that case we might try to use these candidates as well.
- //
- // OTOH that would mean we can’t do the “blobExists” check, and if there is no match
- // we could get an upload request that we would have to cancel.
- logrus.Debugf("... 
Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref)) - continue - } - } else { - if candidate.CompressionAlgorithm != nil { - logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressionAlgorithm.Name()) - } else { - logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String()) - } - // This digest is a known variant of this blob but we don’t - // have a recorded location in this registry, let’s try looking - // for it in the current repo. - candidateRepo = reference.TrimNamed(d.ref.ref) - } - if originalCandidateKnownToBeMissing && - candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest { - logrus.Debug("... Already tried the primary destination") - continue - } - - // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway. - - // Checking candidateRepo, and mounting from it, requires an - // expanded token scope. - extraScope := &authScope{ - resourceType: "repository", - remoteName: reference.Path(candidateRepo), - actions: "pull", - } - // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead. - // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel. - // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure. - // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly. - // Even worse, docker/distribution does not actually reasonably implement canceling uploads - // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask); - // so, be a nice client and don't create unnecessary upload sessions on the server. - exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope) - if err != nil { - logrus.Debugf("... Failed: %v", err) - continue - } - if !exists { - // FIXME? Should we drop the blob from cache here (and elsewhere?)? - continue // logrus.Debug() already happened in blobExists - } - if candidateRepo.Name() != d.ref.ref.Name() { - if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil { - logrus.Debugf("... Mount failed: %v", err) - continue - } - } - - options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) - - return true, private.ReusedBlob{ - Digest: candidate.Digest, - Size: size, - CompressionOperation: candidate.CompressionOperation, - CompressionAlgorithm: candidate.CompressionAlgorithm, - CompressionAnnotations: candidate.CompressionAnnotations, - }, nil - } - - return false, private.ReusedBlob{}, nil -} - -// PutManifest writes manifest to the destination. -// When the primary manifest is a manifest list, if instanceDigest is nil, we're saving the list -// itself, else instanceDigest contains a digest of the specific manifest instance to overwrite the -// manifest for; when the primary manifest is not a manifest list, instanceDigest should always be nil. -// FIXME? 
This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { - var refTail string - // If d.ref.isUnknownDigest=true, then we push without a tag, so get the - // digest that will be used - if d.ref.isUnknownDigest { - digest, err := manifest.Digest(m) - if err != nil { - return err - } - refTail = digest.String() - } else if instanceDigest != nil { - // If the instanceDigest is provided, then use it as the refTail, because the reference, - // whether it includes a tag or a digest, refers to the list as a whole, and not this - // particular instance. - refTail = instanceDigest.String() - // Double-check that the manifest we've been given matches the digest we've been given. - // This also validates the format of instanceDigest. - matches, err := manifest.MatchesDigest(m, *instanceDigest) - if err != nil { - return fmt.Errorf("digesting manifest in PutManifest: %w", err) - } - if !matches { - manifestDigest, merr := manifest.Digest(m) - if merr != nil { - return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest: %w", instanceDigest.String(), merr) - } - return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String()) - } - } else { - // Compute the digest of the main manifest, or the list if it's a list, so that we - // have a digest value to use if we're asked to save a signature for the manifest. - digest, err := manifest.Digest(m) - if err != nil { - return err - } - d.manifestDigest = digest - // The refTail should be either a digest (which we expect to match the value we just - // computed) or a tag name. - refTail, err = d.ref.tagOrDigest() - if err != nil { - return err - } - } - - return d.uploadManifest(ctx, m, refTail) -} - -// uploadManifest writes manifest to tagOrDigest. -func (d *dockerImageDestination) uploadManifest(ctx context.Context, m []byte, tagOrDigest string) error { - path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), tagOrDigest) - - headers := map[string][]string{} - mimeType := manifest.GuessMIMEType(m) - if mimeType != "" { - headers["Content-Type"] = []string{mimeType} - } - res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil) - if err != nil { - return err - } - defer res.Body.Close() - if !successStatus(res.StatusCode) { - rawErr := registryHTTPResponseToError(res) - err := fmt.Errorf("uploading manifest %s to %s: %w", tagOrDigest, d.ref.ref.Name(), rawErr) - if isManifestInvalidError(rawErr) { - err = types.ManifestTypeRejectedError{Err: err} - } - return err - } - // A HTTP server may not be a registry at all, and just return 200 OK to everything - // (in particular that can fairly easily happen after tearing down a website and - // replacing it with a global 302 redirect to a new website, completely ignoring the - // path in the request); in that case we could “succeed” uploading a whole image. 
- // With docker/distribution we could rely on a Docker-Content-Digest header being present - // (because docker/distribution/registry/client has been failing uploads if it was missing), - // but that has been defined as explicitly optional by - // https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers - // So, just note the missing header in a debug log. - if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 { - logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry") - } - return nil -} - -// successStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). -func successStatus(status int) bool { - return status >= 200 && status <= 399 -} - -// isManifestInvalidError returns true iff err from registryHTTPResponseToError is a “manifest invalid” error. -func isManifestInvalidError(err error) bool { - var ec errcode.ErrorCoder - if ok := errors.As(err, &ec); !ok { - return false - } - - switch ec.ErrorCode() { - // ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false. - case v2.ErrorCodeManifestInvalid: - return true - // ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd) - // when uploading to a tag (because it can’t find a matching tag inside the manifest) - case v2.ErrorCodeTagInvalid: - return true - // ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when - // uploading an OCI manifest that is (correctly, according to the spec) missing - // a top-level media type. See libpod issue #1719 - // FIXME: remove this case when ECR behavior is fixed - case errcode.ErrorCodeUnsupported: - return strings.Contains(err.Error(), "Invalid JSON syntax") - default: - return false - } -} - -// PutSignaturesWithFormat writes a set of signatures to the destination. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for -// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. -// MUST be called after PutManifest (signatures may reference manifest contents). -func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { - if instanceDigest == nil { - if d.manifestDigest == "" { - // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures - return errors.New("Unknown manifest digest, can't add signatures") - } - instanceDigest = &d.manifestDigest - } - - sigstoreSignatures := []signature.Sigstore{} - otherSignatures := []signature.Signature{} - for _, sig := range signatures { - if sigstoreSig, ok := sig.(signature.Sigstore); ok { - sigstoreSignatures = append(sigstoreSignatures, sigstoreSig) - } else { - otherSignatures = append(otherSignatures, sig) - } - } - - // Only write sigstores signatures to sigstores attachments. We _could_ store them to lookaside - // instead, but that would probably be rather surprising. - // FIXME: So should we enable sigstores in all cases? Or write in all cases, but opt-in to read? 
- - if len(sigstoreSignatures) != 0 { - if err := d.putSignaturesToSigstoreAttachments(ctx, sigstoreSignatures, *instanceDigest); err != nil { - return err - } - } - - if len(otherSignatures) != 0 { - if err := d.c.detectProperties(ctx); err != nil { - return err - } - switch { - case d.c.supportsSignatures: - if err := d.putSignaturesToAPIExtension(ctx, otherSignatures, *instanceDigest); err != nil { - return err - } - case d.c.signatureBase != nil: - if err := d.putSignaturesToLookaside(otherSignatures, *instanceDigest); err != nil { - return err - } - default: - return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") - } - } - - return nil -} - -// putSignaturesToLookaside implements PutSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, -// which is not nil, for a manifest with manifestDigest. -func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature.Signature, manifestDigest digest.Digest) error { - // FIXME? This overwrites files one at a time, definitely not atomic. - // A failure when updating signatures with a reordered copy could lose some of them. - - // Skip dealing with the manifest digest if not necessary. - if len(signatures) == 0 { - return nil - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! - for i, signature := range signatures { - sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) - if err != nil { - return err - } - if err := d.putOneSignature(sigURL, signature); err != nil { - return err - } - } - // Remove any other signatures, if present. - // We stop at the first missing signature; if a previous deleting loop aborted - // prematurely, this may not clean up all of them, but one missing signature - // is enough for dockerImageSource to stop looking for other signatures, so that - // is sufficient. - for i := len(signatures); ; i++ { - sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) - if err != nil { - return err - } - missing, err := d.c.deleteOneSignature(sigURL) - if err != nil { - return err - } - if missing { - break - } - } - - return nil -} - -// putOneSignature stores sig to sigURL. -// NOTE: Keep this in sync with docs/signature-protocols.md! -func (d *dockerImageDestination) putOneSignature(sigURL *url.URL, sig signature.Signature) error { - switch sigURL.Scheme { - case "file": - logrus.Debugf("Writing to %s", sigURL.Path) - err := os.MkdirAll(filepath.Dir(sigURL.Path), 0755) - if err != nil { - return err - } - blob, err := signature.Blob(sig) - if err != nil { - return err - } - err = os.WriteFile(sigURL.Path, blob, 0644) - if err != nil { - return err - } - return nil - - case "http", "https": - return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. 
Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted()) - default: - return fmt.Errorf("Unsupported scheme when writing signature to %s", sigURL.Redacted()) - } -} - -func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.Context, signatures []signature.Sigstore, manifestDigest digest.Digest) error { - if !d.c.useSigstoreAttachments { - return errors.New("writing sigstore attachments is disabled by configuration") - } - - ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest) - if err != nil { - return err - } - var ociConfig imgspecv1.Image // Most fields empty by default - if ociManifest == nil { - ociManifest = manifest.OCI1FromComponents(imgspecv1.Descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Digest: "", // We will fill this in later. - Size: 0, - }, nil) - ociConfig.RootFS.Type = "layers" - } else { - logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String()) - // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs. - configBlob, err := d.c.getOCIDescriptorContents(ctx, d.ref, ociManifest.Config, iolimits.MaxConfigBodySize, - none.NoCache) - if err != nil { - return err - } - if err := json.Unmarshal(configBlob, &ociConfig); err != nil { - return fmt.Errorf("parsing sigstore attachment config %s in %s: %w", ociManifest.Config.Digest.String(), - d.ref.ref.Name(), err) - } - } - - // To make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it. - ociManifest.Layers = slices.Clone(ociManifest.Layers) - // We don’t need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to - // the slice in the original object (or in a newly allocated object). - for _, sig := range signatures { - mimeType := sig.UntrustedMIMEType() - payloadBlob := sig.UntrustedPayload() - annotations := sig.UntrustedAnnotations() - - alreadyOnRegistry := false - for _, layer := range ociManifest.Layers { - if layerMatchesSigstoreSignature(layer, mimeType, payloadBlob, annotations) { - logrus.Debugf("Signature with digest %s already exists on the registry", layer.Digest.String()) - alreadyOnRegistry = true - break - } - } - if alreadyOnRegistry { - continue - } - - // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads. - // That might eventually need to change if payloads grow to be not just signatures, but something - // significantly large. - sigDesc, err := d.putBlobBytesAsOCI(ctx, payloadBlob, mimeType, private.PutBlobOptions{ - Cache: none.NoCache, - IsConfig: false, - EmptyLayer: false, - LayerIndex: nil, - }) - if err != nil { - return err - } - sigDesc.Annotations = annotations - ociManifest.Layers = append(ociManifest.Layers, sigDesc) - ociConfig.RootFS.DiffIDs = append(ociConfig.RootFS.DiffIDs, sigDesc.Digest) - logrus.Debugf("Adding new signature, digest %s", sigDesc.Digest.String()) - } - - configBlob, err := json.Marshal(ociConfig) - if err != nil { - return err - } - logrus.Debugf("Uploading updated sigstore attachment config") - // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs. 
- configDesc, err := d.putBlobBytesAsOCI(ctx, configBlob, imgspecv1.MediaTypeImageConfig, private.PutBlobOptions{ - Cache: none.NoCache, - IsConfig: true, - EmptyLayer: false, - LayerIndex: nil, - }) - if err != nil { - return err - } - ociManifest.Config = configDesc - - manifestBlob, err := ociManifest.Serialize() - if err != nil { - return err - } - attachmentTag, err := sigstoreAttachmentTag(manifestDigest) - if err != nil { - return err - } - logrus.Debugf("Uploading sigstore attachment manifest") - return d.uploadManifest(ctx, manifestBlob, attachmentTag) -} - -func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string, - payloadBlob []byte, annotations map[string]string) bool { - if layer.MediaType != mimeType || - layer.Size != int64(len(payloadBlob)) || - // This is not quite correct, we should use the layer’s digest algorithm. - // But right now we don’t want to deal with corner cases like bad digest formats - // or unavailable algorithms; in the worst case we end up with duplicate signature - // entries. - layer.Digest.String() != digest.FromBytes(payloadBlob).String() || - !maps.Equal(layer.Annotations, annotations) { - return false - } - return true -} - -// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate -// OCI descriptor. -func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) { - blobDigest := digest.FromBytes(contents) - info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents), - types.BlobInfo{ - Digest: blobDigest, - Size: int64(len(contents)), - MediaType: mimeType, - }, options) - if err != nil { - return imgspecv1.Descriptor{}, fmt.Errorf("writing blob %s: %w", blobDigest.String(), err) - } - return imgspecv1.Descriptor{ - MediaType: mimeType, - Digest: info.Digest, - Size: info.Size, - }, nil -} - -// deleteOneSignature deletes a signature from sigURL, if it exists. -// If it successfully determines that the signature does not exist, returns (true, nil) -// NOTE: Keep this in sync with docs/signature-protocols.md! -func (c *dockerClient) deleteOneSignature(sigURL *url.URL) (missing bool, err error) { - switch sigURL.Scheme { - case "file": - logrus.Debugf("Deleting %s", sigURL.Path) - err := os.Remove(sigURL.Path) - if err != nil && os.IsNotExist(err) { - return true, nil - } - return false, err - - case "http", "https": - return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted()) - default: - return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", sigURL.Redacted()) - } -} - -// putSignaturesToAPIExtension implements PutSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension, -// for a manifest with manifestDigest. -func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures []signature.Signature, manifestDigest digest.Digest) error { - // Skip dealing with the manifest digest, or reading the old state, if not necessary. - if len(signatures) == 0 { - return nil - } - - // Because image signatures are a shared resource in Atomic Registry, the default upload - // always adds signatures. Eventually we should also allow removing signatures, - // but the X-Registry-Supports-Signatures API extension does not support that yet. 
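The name-generation loop further down in putSignaturesToAPIExtension is worth isolating: the extension requires every signature to carry a unique name, so the code invents "<manifest digest>@<16 random bytes in hex>" and retries on the (unlikely) collision. A sketch with newSignatureName as a hypothetical helper:

package sketch

import (
	"crypto/rand"
	"fmt"
)

// newSignatureName produces "<manifest digest>@<32 hex chars>" and retries
// until the name is not already taken, mirroring the deleted loop.
func newSignatureName(manifestDigest string, taken map[string]bool) (string, error) {
	for {
		randBytes := make([]byte, 16)
		if _, err := rand.Read(randBytes); err != nil {
			return "", fmt.Errorf("generating random signature name: %w", err)
		}
		name := fmt.Sprintf("%s@%032x", manifestDigest, randBytes)
		if !taken[name] {
			return name, nil
		}
	}
}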
- - existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, manifestDigest) - if err != nil { - return err - } - existingSigNames := set.New[string]() - for _, sig := range existingSignatures.Signatures { - existingSigNames.Add(sig.Name) - } - - for _, newSigWithFormat := range signatures { - newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning) - if !ok { - return signature.UnsupportedFormatError(newSigWithFormat) - } - newSig := newSigSimple.UntrustedSignature() - - if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool { - return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) - }) { - continue - } - - // The API expect us to invent a new unique name. This is racy, but hopefully good enough. - var signatureName string - for { - randBytes := make([]byte, 16) - n, err := rand.Read(randBytes) - if err != nil || n != 16 { - return fmt.Errorf("generating random signature len %d: %w", n, err) - } - signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes) - if !existingSigNames.Contains(signatureName) { - break - } - } - sig := extensionSignature{ - Version: extensionSignatureSchemaVersion, - Name: signatureName, - Type: extensionSignatureTypeAtomic, - Content: newSig, - } - body, err := json.Marshal(sig) - if err != nil { - return err - } - - // manifestDigest is known to be valid because it was not rejected by getExtensionsSignatures above. - path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String()) - res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil) - if err != nil { - return err - } - defer res.Body.Close() - if res.StatusCode != http.StatusCreated { - logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res) - return fmt.Errorf("uploading signature to %s in %s: %w", path, d.c.registry, registryHTTPResponseToError(res)) - } - } - - return nil -} - -// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before CommitWithOptions() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. 
rollback is allowed but not guaranteed) -func (d *dockerImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { - return nil -} diff --git a/vendor/go.podman.io/image/v5/docker/docker_image_src.go b/vendor/go.podman.io/image/v5/docker/docker_image_src.go deleted file mode 100644 index 553dddeef..000000000 --- a/vendor/go.podman.io/image/v5/docker/docker_image_src.go +++ /dev/null @@ -1,863 +0,0 @@ -package docker - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "mime" - "mime/multipart" - "net/http" - "net/url" - "os" - "os/exec" - "strings" - "sync" - - digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/imagesource/impl" - "go.podman.io/image/v5/internal/imagesource/stubs" - "go.podman.io/image/v5/internal/iolimits" - "go.podman.io/image/v5/internal/private" - "go.podman.io/image/v5/internal/signature" - "go.podman.io/image/v5/manifest" - "go.podman.io/image/v5/pkg/blobinfocache/none" - "go.podman.io/image/v5/pkg/sysregistriesv2" - "go.podman.io/image/v5/types" - "go.podman.io/storage/pkg/regexp" -) - -// maxLookasideSignatures is an arbitrary limit for the total number of signatures we would try to read from a lookaside server, -// even if it were broken or malicious and it continued serving an enormous number of items. -const maxLookasideSignatures = 128 - -type dockerImageSource struct { - impl.Compat - impl.PropertyMethodsInitialize - impl.DoesNotAffectLayerInfosForCopy - stubs.ImplementsGetBlobAt - - logicalRef dockerReference // The reference the user requested. This must satisfy !isUnknownDigest - physicalRef dockerReference // The actual reference we are accessing (possibly a mirror). This must satisfy !isUnknownDigest - c *dockerClient - // State - cachedManifest []byte // nil if not loaded yet - cachedManifestMIMEType string // Only valid if cachedManifest != nil -} - -// newImageSource creates a new ImageSource for the specified image reference. -// The caller must call .Close() on the returned ImageSource. -// The caller must ensure !ref.isUnknownDigest. -func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { - if ref.isUnknownDigest { - return nil, fmt.Errorf("reading images from docker: reference %q without a tag or digest is not supported", ref.StringWithinTransport()) - } - - registryConfig, err := loadRegistryConfiguration(sys) - if err != nil { - return nil, err - } - registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name()) - if err != nil { - return nil, fmt.Errorf("loading registries configuration: %w", err) - } - if registry == nil { - // No configuration was found for the provided reference, so use the - // equivalent of a default configuration. - registry = &sysregistriesv2.Registry{ - Endpoint: sysregistriesv2.Endpoint{ - Location: ref.ref.String(), - }, - Prefix: ref.ref.String(), - } - } - - // Check all endpoints for the manifest availability. If we find one that does - // contain the image, it will be used for all future pull actions. Always try the - // non-mirror original location last; this both transparently handles the case - // of no mirrors configured, and ensures we return the error encountered when - // accessing the upstream location if all endpoints fail. 
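The endpoint loop that follows implements the comment above: try each mirror in order, try the primary location last, and if everything fails, keep the primary's typed error intact while summarizing the mirror failures. The aggregation shape in isolation; pullFromFirstWorking and tryPull are illustrative names:

package sketch

import (
	"fmt"
	"strings"
)

type attempt struct {
	endpoint string
	err      error
}

// pullFromFirstWorking mirrors the deleted newImageSource strategy: return on
// the first success; with a single endpoint, surface its error unchanged so
// callers can still match it with errors.As/Is; with mirrors, wrap the
// primary error (the last attempt) and summarize the mirror failures.
func pullFromFirstWorking(endpoints []string, tryPull func(string) error) error {
	attempts := []attempt{}
	for _, e := range endpoints {
		err := tryPull(e)
		if err == nil {
			return nil
		}
		attempts = append(attempts, attempt{endpoint: e, err: err})
	}
	switch len(attempts) {
	case 0:
		return fmt.Errorf("internal error: no endpoints to try")
	case 1:
		return attempts[0].err
	default:
		primary := attempts[len(attempts)-1]
		extras := []string{}
		for _, a := range attempts[:len(attempts)-1] {
			extras = append(extras, fmt.Sprintf("[%s: %v]", a.endpoint, a.err))
		}
		return fmt.Errorf("(Mirrors also failed: %s): %s: %w",
			strings.Join(extras, "\n"), primary.endpoint, primary.err)
	}
}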
- pullSources, err := registry.PullSourcesFromReference(ref.ref) - if err != nil { - return nil, err - } - type attempt struct { - ref reference.Named - err error - } - attempts := []attempt{} - for _, pullSource := range pullSources { - if sys != nil && sys.DockerLogMirrorChoice { - logrus.Infof("Trying to access %q", pullSource.Reference) - } else { - logrus.Debugf("Trying to access %q", pullSource.Reference) - } - s, err := newImageSourceAttempt(ctx, sys, ref, pullSource, registryConfig) - if err == nil { - return s, nil - } - logrus.Debugf("Accessing %q failed: %v", pullSource.Reference, err) - attempts = append(attempts, attempt{ - ref: pullSource.Reference, - err: err, - }) - } - switch len(attempts) { - case 0: - return nil, errors.New("Internal error: newImageSource returned without trying any endpoint") - case 1: - return nil, attempts[0].err // If no mirrors are used, perfectly preserve the error type and add no noise. - default: - // Don’t just build a string, try to preserve the typed error. - primary := &attempts[len(attempts)-1] - extras := []string{} - for _, attempt := range attempts[:len(attempts)-1] { - // This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use. - // The paired [] at least have some chance of being unambiguous. - extras = append(extras, fmt.Sprintf("[%s: %v]", attempt.ref.String(), attempt.err)) - } - return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err) - } -} - -// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource. -// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable. -// The caller must call .Close() on the returned ImageSource. -func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource, - registryConfig *registryConfiguration) (*dockerImageSource, error) { - physicalRef, err := newReference(pullSource.Reference, false) - if err != nil { - return nil, err - } - - endpointSys := sys - // sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors. 
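The guard that follows this comment is easy to skim past, so here it is in isolation: credentials configured for the logical reference are dropped whenever the physical (mirror) reference resolves to a different registry host. The sketch uses github.com/distribution/reference, the package this patch series standardizes on; scrubAuthForMirror and authConfig are illustrative stand-ins for the SystemContext fields involved:

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

type authConfig struct{ Username, Password string }

// scrubAuthForMirror drops statically-configured credentials whenever the
// physical (mirror) reference lives on a different registry host than the
// logical reference the user asked for.
func scrubAuthForMirror(auth *authConfig, logical, physical reference.Named) *authConfig {
	if auth != nil && reference.Domain(physical) != reference.Domain(logical) {
		return nil // never forward primary-endpoint credentials to a mirror
	}
	return auth
}

func main() {
	logical, _ := reference.ParseNormalizedNamed("docker.io/library/alpine")
	physical, _ := reference.ParseNormalizedNamed("mirror.example.com/library/alpine")
	fmt.Println(scrubAuthForMirror(&authConfig{"u", "p"}, logical, physical)) // <nil>
}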
- if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(physicalRef.ref) != reference.Domain(logicalRef.ref) {
- copy := *endpointSys
- copy.DockerAuthConfig = nil
- copy.DockerBearerRegistryToken = ""
- endpointSys = &copy
- }
-
- client, err := newDockerClientFromRef(endpointSys, physicalRef, registryConfig, false, "pull")
- if err != nil {
- return nil, err
- }
- client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
-
- s := &dockerImageSource{
- PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
- HasThreadSafeGetBlob: true,
- }),
-
- logicalRef: logicalRef,
- physicalRef: physicalRef,
- c: client,
- }
- s.Compat = impl.AddCompat(s)
-
- if err := s.ensureManifestIsLoaded(ctx); err != nil {
- client.Close()
- return nil, err
- }
-
- if h, err := sysregistriesv2.AdditionalLayerStoreAuthHelper(endpointSys); err == nil && h != "" {
- acf := map[string]struct {
- Username string `json:"username,omitempty"`
- Password string `json:"password,omitempty"`
- IdentityToken string `json:"identityToken,omitempty"`
- }{
- physicalRef.ref.String(): {
- Username: client.auth.Username,
- Password: client.auth.Password,
- IdentityToken: client.auth.IdentityToken,
- },
- }
- acfD, err := json.Marshal(acf)
- if err != nil {
- logrus.Warnf("failed to marshal auth config: %v", err)
- } else {
- cmd := exec.Command(h)
- cmd.Stdin = bytes.NewReader(acfD)
- if err := cmd.Run(); err != nil {
- var stderr string
- if ee, ok := err.(*exec.ExitError); ok {
- stderr = string(ee.Stderr)
- }
- logrus.Warnf("Failed to call additional-layer-store-auth-helper (stderr:%s): %v", stderr, err)
- }
- }
- }
- return s, nil
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (s *dockerImageSource) Reference() types.ImageReference {
- return s.logicalRef
-}
-
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *dockerImageSource) Close() error {
- return s.c.Close()
-}
-
-// simplifyContentType drops parameters from an HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
-// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
-func simplifyContentType(contentType string) string {
- if contentType == "" {
- return contentType
- }
- mimeType, _, err := mime.ParseMediaType(contentType)
- if err != nil {
- return ""
- }
- return mimeType
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
-func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters - return nil, "", err - } - return s.fetchManifest(ctx, instanceDigest.String()) - } - err := s.ensureManifestIsLoaded(ctx) - if err != nil { - return nil, "", err - } - return s.cachedManifest, s.cachedManifestMIMEType, nil -} - -// fetchManifest fetches a manifest for tagOrDigest. -// The caller is responsible for ensuring tagOrDigest uses the expected format. -func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { - return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest) -} - -// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType -// -// ImageSource implementations are not required or expected to do any caching, -// but because our signatures are “attached” to the manifest digest, -// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil) -// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious -// signature verification failures when pulling while a tag is being updated. -func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { - if s.cachedManifest != nil { - return nil - } - - reference, err := s.physicalRef.tagOrDigest() - if err != nil { - return err - } - - manblob, mt, err := s.fetchManifest(ctx, reference) - if err != nil { - return err - } - // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. - s.cachedManifest = manblob - s.cachedManifestMIMEType = mt - return nil -} - -// splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks -func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) { - defer close(streams) - defer close(errs) - currentOffset := uint64(0) - - body = makeBufferedNetworkReader(body, 64, 16384) - defer body.Close() - for _, c := range chunks { - if c.Offset != currentOffset { - if c.Offset < currentOffset { - errs <- fmt.Errorf("invalid chunk offset specified %v (expected >= %v)", c.Offset, currentOffset) - break - } - toSkip := c.Offset - currentOffset - if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil { - errs <- err - break - } - currentOffset += toSkip - } - var reader io.Reader - if c.Length == math.MaxUint64 { - reader = body - } else { - reader = io.LimitReader(body, int64(c.Length)) - } - s := signalCloseReader{ - closed: make(chan struct{}), - stream: io.NopCloser(reader), - consumeStream: true, - } - streams <- s - - // Wait until the stream is closed before going to the next chunk - <-s.closed - currentOffset += c.Length - } -} - -// handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan. 
-func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) {
- defer close(streams)
- defer close(errs)
- if !strings.HasPrefix(mediaType, "multipart/") {
- streams <- body
- return
- }
- boundary, found := params["boundary"]
- if !found {
- errs <- errors.New("could not find boundary")
- body.Close()
- return
- }
- buffered := makeBufferedNetworkReader(body, 64, 16384)
- defer buffered.Close()
- mr := multipart.NewReader(buffered, boundary)
- parts := 0
- for {
- p, err := mr.NextPart()
- if err != nil {
- if err != io.EOF {
- errs <- err
- }
- if parts != len(chunks) {
- errs <- errors.New("invalid number of chunks returned by the server")
- }
- return
- }
- if parts >= len(chunks) {
- errs <- errors.New("too many parts returned by the server")
- break
- }
- s := signalCloseReader{
- closed: make(chan struct{}),
- stream: p,
- }
- streams <- s
- // NextPart() cannot be called while the current part
- // is being read, so wait until it is closed
- <-s.closed
- parts++
- }
-}
-
-var multipartByteRangesRe = regexp.Delayed("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")
-
-func parseMediaType(contentType string) (string, map[string]string, error) {
- mediaType, params, err := mime.ParseMediaType(contentType)
- if err != nil {
- if err == mime.ErrInvalidMediaParameter {
- // CloudFront returns an invalid MIME type, that contains an unquoted ":" in the boundary
- // param, let's handle it here.
- matches := multipartByteRangesRe.FindStringSubmatch(contentType)
- if len(matches) == 2 {
- mediaType = "multipart/byteranges"
- params = map[string]string{
- "boundary": matches[1],
- }
- err = nil
- }
- }
- if err != nil {
- return "", nil, err
- }
- }
- return mediaType, params, err
-}
-
-// GetBlobAt returns a sequential channel of readers that contain data for the requested
-// blob chunks, and a channel that might get a single error value.
-// The specified chunks must be not overlapping and sorted by their offset.
-// The readers must be fully consumed, in the order they are returned, before blocking
-// to read the next chunk.
-// If the Length for the last chunk is set to math.MaxUint64, then it
-// fully fetches the remaining data from the offset to the end of the blob.
-func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
- headers := make(map[string][]string)
-
- rangeVals := make([]string, 0, len(chunks))
- lastFound := false
- for _, c := range chunks {
- if lastFound {
- return nil, nil, fmt.Errorf("internal error: another chunk requested after an until-EOF chunk")
- }
- // If the Length is set to math.MaxUint64, request everything after the specified offset.
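Editor's note (illustration, not in the original): with chunks {Offset: 0, Length: 100} and {Offset: 100, Length: math.MaxUint64}, the loop below emits the range values "0-99" and "100-", producing the header "Range: bytes=0-99,100-".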
- if c.Length == math.MaxUint64 { - lastFound = true - rangeVals = append(rangeVals, fmt.Sprintf("%d-", c.Offset)) - } else { - rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1)) - } - } - - headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))} - - if len(info.URLs) != 0 { - return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt") - } - - if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters - return nil, nil, err - } - path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) - logrus.Debugf("Downloading %s", path) - res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil) - if err != nil { - return nil, nil, err - } - - switch res.StatusCode { - case http.StatusOK: - // if the server replied with a 200 status code, convert the full body response to a series of - // streams as it would have been done with 206. - streams := make(chan io.ReadCloser) - errs := make(chan error) - go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks) - return streams, errs, nil - case http.StatusPartialContent: - mediaType, params, err := parseMediaType(res.Header.Get("Content-Type")) - if err != nil { - return nil, nil, err - } - - streams := make(chan io.ReadCloser) - errs := make(chan error) - - go handle206Response(streams, errs, res.Body, chunks, mediaType, params) - return streams, errs, nil - case http.StatusBadRequest: - res.Body.Close() - return nil, nil, private.BadPartialRequestError{Status: res.Status} - default: - err := registryHTTPResponseToError(res) - res.Body.Close() - return nil, nil, fmt.Errorf("fetching partial blob: %w", err) - } -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - return s.c.getBlob(ctx, s.physicalRef, info, cache) -} - -// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). 
-func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { - if err := s.c.detectProperties(ctx); err != nil { - return nil, err - } - var res []signature.Signature - switch { - case s.c.supportsSignatures: - if err := s.appendSignaturesFromAPIExtension(ctx, &res, instanceDigest); err != nil { - return nil, err - } - case s.c.signatureBase != nil: - if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil { - return nil, err - } - default: - return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") - } - - if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil { - return nil, err - } - return res, nil -} - -// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, -// or finally, from a fetched manifest. -func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { - if instanceDigest != nil { - return *instanceDigest, nil - } - if digested, ok := s.physicalRef.ref.(reference.Digested); ok { - d := digested.Digest() - if d.Algorithm() == digest.Canonical { - return d, nil - } - } - if err := s.ensureManifestIsLoaded(ctx); err != nil { - return "", err - } - return manifest.Digest(s.cachedManifest) -} - -// appendSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, -// which is not nil, storing the signatures to *dest. -// On error, the contents of *dest are undefined. -func (s *dockerImageSource) appendSignaturesFromLookaside(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return err - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! - for i := 0; ; i++ { - if i >= maxLookasideSignatures { - return fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) - } - - sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i) - if err != nil { - return err - } - signature, missing, err := s.getOneSignature(ctx, sigURL) - if err != nil { - return err - } - if missing { - break - } - *dest = append(*dest, signature) - } - return nil -} - -// getOneSignature downloads one signature from sigURL, and returns (signature, false, nil) -// If it successfully determines that the signature does not exist, returns (nil, true, nil). -// NOTE: Keep this in sync with docs/signature-protocols.md! 
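Editor's aside before the function itself (a generic sketch under stated assumptions, not part of this patch): the enumeration contract used by appendSignaturesFromLookaside above is "fetch signature i = 0, 1, 2, ... until the first missing index"; fetch below is a stand-in for getOneSignature, whose real URL layout lives in lookasideStorageURL.

package example

import "fmt"

// enumerate sketches the loop's contract: collect consecutively numbered
// signatures and stop cleanly at the first absent index.
func enumerate(fetch func(i int) (sig []byte, missing bool, err error), max int) ([][]byte, error) {
	var sigs [][]byte
	for i := 0; ; i++ {
		if i >= max {
			return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", max)
		}
		sig, missing, err := fetch(i)
		if err != nil {
			return nil, err
		}
		if missing {
			return sigs, nil
		}
		sigs = append(sigs, sig)
	}
}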
-func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL) (signature.Signature, bool, error) { - switch sigURL.Scheme { - case "file": - logrus.Debugf("Reading %s", sigURL.Path) - sigBlob, err := os.ReadFile(sigURL.Path) - if err != nil { - if os.IsNotExist(err) { - return nil, true, nil - } - return nil, false, err - } - sig, err := signature.FromBlob(sigBlob) - if err != nil { - return nil, false, fmt.Errorf("parsing signature %q: %w", sigURL.Path, err) - } - return sig, false, nil - - case "http", "https": - logrus.Debugf("GET %s", sigURL.Redacted()) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, sigURL.String(), nil) - if err != nil { - return nil, false, err - } - res, err := s.c.client.Do(req) - if err != nil { - return nil, false, err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - logrus.Debugf("... got status 404, as expected = end of signatures") - return nil, true, nil - } else if res.StatusCode != http.StatusOK { - return nil, false, fmt.Errorf("reading signature from %s: %w", sigURL.Redacted(), newUnexpectedHTTPStatusError(res)) - } - - contentType := res.Header.Get("Content-Type") - if mimeType := simplifyContentType(contentType); mimeType == "text/html" { - logrus.Warnf("Signature %q has Content-Type %q, unexpected for a signature", sigURL.Redacted(), contentType) - // Don’t immediately fail; the lookaside spec does not place any requirements on Content-Type. - // If the content really is HTML, it’s going to fail in signature.FromBlob. - } - - sigBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) - if err != nil { - return nil, false, err - } - sig, err := signature.FromBlob(sigBlob) - if err != nil { - return nil, false, fmt.Errorf("parsing signature %s: %w", sigURL.Redacted(), err) - } - return sig, false, nil - - default: - return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", sigURL.Redacted()) - } -} - -// appendSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension, -// storing the signatures to *dest. -// On error, the contents of *dest are undefined. -func (s *dockerImageSource) appendSignaturesFromAPIExtension(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return err - } - - parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest) - if err != nil { - return err - } - - for _, sig := range parsedBody.Signatures { - if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { - *dest = append(*dest, signature.SimpleSigningFromBlob(sig.Content)) - } - } - return nil -} - -// appendSignaturesFromSigstoreAttachments implements GetSignaturesWithFormat() using the sigstore tag convention, -// storing the signatures to *dest. -// On error, the contents of *dest are undefined. 
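Editor's aside (assumption: this reflects the cosign-style tag convention that getSigstoreAttachmentManifest consumes; the helper name is hypothetical): a manifest digest maps to the attachment tag by replacing the ":" with "-" and appending ".sig".

package example

import (
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// sigstoreAttachmentTag sketches the tag convention:
// "sha256:abc..." -> "sha256-abc....sig".
func sigstoreAttachmentTag(d digest.Digest) string {
	return strings.Replace(d.String(), ":", "-", 1) + ".sig"
}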
-func (s *dockerImageSource) appendSignaturesFromSigstoreAttachments(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { - if !s.c.useSigstoreAttachments { - logrus.Debugf("Not looking for sigstore attachments: disabled by configuration") - return nil - } - - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return err - } - - ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest) - if err != nil { - return err - } - if ociManifest == nil { - return nil - } - - logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers)) - for layerIndex, layer := range ociManifest.Layers { - // Note that this copies all kinds of attachments: attestations, and whatever else is there, - // not just signatures. We leave the signature consumers to decide based on the MIME type. - logrus.Debugf("Fetching sigstore attachment %d/%d: %s", layerIndex+1, len(ociManifest.Layers), layer.Digest.String()) - // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads. - // That might eventually need to change if payloads grow to be not just signatures, but something - // significantly large. - payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize, - none.NoCache) - if err != nil { - return err - } - *dest = append(*dest, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations)) - } - return nil -} - -// deleteImage deletes the named image from the registry, if supported. -func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { - if ref.isUnknownDigest { - return fmt.Errorf("Docker reference without a tag or digest cannot be deleted") - } - - registryConfig, err := loadRegistryConfiguration(sys) - if err != nil { - return err - } - // docker/distribution does not document what action should be used for deleting images. - // - // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it. - // quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included. - // OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user). - // - // We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything". - c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "*") - if err != nil { - return err - } - defer c.Close() - - headers := map[string][]string{ - "Accept": manifest.DefaultRequestedManifestMIMETypes, - } - refTail, err := ref.tagOrDigest() - if err != nil { - return err - } - getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) - get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil) - if err != nil { - return err - } - defer get.Body.Close() - switch get.StatusCode { - case http.StatusOK: - case http.StatusNotFound: - return fmt.Errorf("Unable to delete %v. 
Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) - default: - return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(get)) - } - manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize) - if err != nil { - return err - } - - manifestDigest, err := manifest.Digest(manifestBody) - if err != nil { - return fmt.Errorf("computing manifest digest: %w", err) - } - deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest) - - // When retrieving the digest from a registry >= 2.3 use the following header: - // "Accept": "application/vnd.docker.distribution.manifest.v2+json" - delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil) - if err != nil { - return err - } - defer delete.Body.Close() - if delete.StatusCode != http.StatusAccepted { - return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(delete)) - } - - for i := 0; ; i++ { - sigURL, err := lookasideStorageURL(c.signatureBase, manifestDigest, i) - if err != nil { - return err - } - missing, err := c.deleteOneSignature(sigURL) - if err != nil { - return err - } - if missing { - break - } - } - - return nil -} - -type bufferedNetworkReaderBuffer struct { - data []byte - len int - consumed int - err error -} - -type bufferedNetworkReader struct { - stream io.ReadCloser - emptyBuffer chan *bufferedNetworkReaderBuffer - readyBuffer chan *bufferedNetworkReaderBuffer - terminate chan bool - current *bufferedNetworkReaderBuffer - mutex sync.Mutex - gotEOF bool -} - -// handleBufferedNetworkReader runs in a goroutine -func handleBufferedNetworkReader(br *bufferedNetworkReader) { - defer close(br.readyBuffer) - for { - select { - case b := <-br.emptyBuffer: - b.len, b.err = br.stream.Read(b.data) - br.readyBuffer <- b - if b.err != nil { - return - } - case <-br.terminate: - return - } - } -} - -func (n *bufferedNetworkReader) Close() error { - close(n.terminate) - close(n.emptyBuffer) - return n.stream.Close() -} - -func (n *bufferedNetworkReader) read(p []byte) (int, error) { - if n.current != nil { - copied := copy(p, n.current.data[n.current.consumed:n.current.len]) - n.current.consumed += copied - if n.current.consumed == n.current.len { - n.emptyBuffer <- n.current - n.current = nil - } - if copied > 0 { - return copied, nil - } - } - if n.gotEOF { - return 0, io.EOF - } - - var b *bufferedNetworkReaderBuffer - - select { - case b = <-n.readyBuffer: - if b.err != nil { - if b.err != io.EOF { - return b.len, b.err - } - n.gotEOF = true - } - b.consumed = 0 - n.current = b - return n.read(p) - case <-n.terminate: - return 0, io.EOF - } -} - -func (n *bufferedNetworkReader) Read(p []byte) (int, error) { - n.mutex.Lock() - defer n.mutex.Unlock() - - return n.read(p) -} - -func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) *bufferedNetworkReader { - br := bufferedNetworkReader{ - stream: stream, - emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers), - readyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers), - terminate: make(chan bool), - } - - go func() { - handleBufferedNetworkReader(&br) - }() - - for range nBuffers { - b := bufferedNetworkReaderBuffer{ - data: make([]byte, bufferSize), - } - br.emptyBuffer <- &b - } - - return &br -} - -type signalCloseReader struct { - closed chan struct{} - stream io.ReadCloser - consumeStream bool -} - -func (s signalCloseReader) Read(p []byte) (int, error) { - return s.stream.Read(p) -} - -func (s 
signalCloseReader) Close() error {
- defer close(s.closed)
- if s.consumeStream {
- if _, err := io.Copy(io.Discard, s.stream); err != nil {
- s.stream.Close()
- return err
- }
- }
- return s.stream.Close()
-}
diff --git a/vendor/go.podman.io/image/v5/docker/docker_transport.go b/vendor/go.podman.io/image/v5/docker/docker_transport.go
deleted file mode 100644
index 5831dc3ce..000000000
--- a/vendor/go.podman.io/image/v5/docker/docker_transport.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package docker
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
-
- "go.podman.io/image/v5/docker/policyconfiguration"
- "go.podman.io/image/v5/docker/reference"
- "go.podman.io/image/v5/transports"
- "go.podman.io/image/v5/types"
-)
-
-// UnknownDigestSuffix can be appended to a reference when the caller
-// wants to push an image without a tag or digest.
-// NewReferenceUnknownDigest() is called when this const is detected.
-const UnknownDigestSuffix = "@@unknown-digest@@"
-
-func init() {
- transports.Register(Transport)
-}
-
-// Transport is an ImageTransport for container registry-hosted images.
-var Transport = dockerTransport{}
-
-type dockerTransport struct{}
-
-func (t dockerTransport) Name() string {
- return "docker"
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
- return ParseReference(reference)
-}
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
-// scope passed to this function will not be "", that value is always allowed.
-func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
- // FIXME? We could be verifying the various character set and length restrictions
- // from docker/distribution/reference.regexp.go, but other than that there
- // are few semantically invalid strings.
- return nil
-}
-
-// dockerReference is an ImageReference for Docker images.
-type dockerReference struct {
- ref reference.Named // By construction we know that !reference.IsNameOnly(ref) unless isUnknownDigest=true
- isUnknownDigest bool
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
-func ParseReference(refString string) (types.ImageReference, error) {
- refString, ok := strings.CutPrefix(refString, "//")
- if !ok {
- return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
- }
- refString, unknownDigest := strings.CutSuffix(refString, UnknownDigestSuffix)
- ref, err := reference.ParseNormalizedNamed(refString)
- if err != nil {
- return nil, err
- }
-
- if unknownDigest {
- if !reference.IsNameOnly(ref) {
- return nil, fmt.Errorf("docker: image reference %q has unknown digest set but it contains either a tag or digest", ref.String()+UnknownDigestSuffix)
- }
- return NewReferenceUnknownDigest(ref)
- }
-
- ref = reference.TagNameOnly(ref)
- return NewReference(ref)
-}
-
-// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
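Editor's aside (hypothetical usage, not part of this patch): the transport round-trip for a name-only reference fills in the default tag, as in this sketch.

package main

import (
	"fmt"

	"go.podman.io/image/v5/docker"
	"go.podman.io/image/v5/transports"
)

func main() {
	// The "//" prefix is required; the name-only reference gets ":latest"
	// via reference.TagNameOnly before NewReference is called.
	ref, err := docker.ParseReference("//busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(transports.ImageName(ref)) // docker://busybox:latest
}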
-func NewReference(ref reference.Named) (types.ImageReference, error) { - return newReference(ref, false) -} - -// NewReferenceUnknownDigest returns a Docker reference for a named reference, which can be used to write images without setting -// a tag on the registry. The reference must satisfy reference.IsNameOnly() -func NewReferenceUnknownDigest(ref reference.Named) (types.ImageReference, error) { - return newReference(ref, true) -} - -// newReference returns a dockerReference for a named reference. -func newReference(ref reference.Named, unknownDigest bool) (dockerReference, error) { - if reference.IsNameOnly(ref) && !unknownDigest { - return dockerReference{}, fmt.Errorf("Docker reference %s is not for an unknown digest case; tag or digest is needed", reference.FamiliarString(ref)) - } - if !reference.IsNameOnly(ref) && unknownDigest { - return dockerReference{}, fmt.Errorf("Docker reference %s is for an unknown digest case but reference has a tag or digest", reference.FamiliarString(ref)) - } - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // The docker/distribution API does not really support that (we can’t ask for an image with a specific - // tag and digest), so fail. This MAY be accepted in the future. - // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop - // the tag or the digest first?) - _, isTagged := ref.(reference.NamedTagged) - _, isDigested := ref.(reference.Canonical) - if isTagged && isDigested { - return dockerReference{}, errors.New("Docker references with both a tag and digest are currently not supported") - } - - return dockerReference{ - ref: ref, - isUnknownDigest: unknownDigest, - }, nil -} - -func (ref dockerReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref dockerReference) StringWithinTransport() string { - famString := "//" + reference.FamiliarString(ref.ref) - if ref.isUnknownDigest { - return famString + UnknownDigestSuffix - } - return famString -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref dockerReference) DockerReference() reference.Named { - return ref.ref -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). 
-// Returns "" if configuration identities for these references are not supported. -func (ref dockerReference) PolicyConfigurationIdentity() string { - if ref.isUnknownDigest { - return ref.ref.Name() - } - res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) - if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref dockerReference) PolicyConfigurationNamespaces() []string { - namespaces := policyconfiguration.DockerReferenceNamespaces(ref.ref) - if ref.isUnknownDigest { - if len(namespaces) != 0 && namespaces[0] == ref.ref.Name() { - namespaces = namespaces[1:] - } - } - return namespaces -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - return newImage(ctx, sys, ref) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, sys, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(sys, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return deleteImage(ctx, sys, ref) -} - -// tagOrDigest returns a tag or digest from the reference. -func (ref dockerReference) tagOrDigest() (string, error) { - if ref, ok := ref.ref.(reference.Canonical); ok { - return ref.Digest().String(), nil - } - if ref, ok := ref.ref.(reference.NamedTagged); ok { - return ref.Tag(), nil - } - - if ref.isUnknownDigest { - return "", fmt.Errorf("Docker reference %q is for an unknown digest case, has neither a digest nor a tag", reference.FamiliarString(ref.ref)) - } - // This should not happen, NewReference above refuses reference.IsNameOnly values. 
- return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) -} diff --git a/vendor/go.podman.io/image/v5/docker/errors.go b/vendor/go.podman.io/image/v5/docker/errors.go deleted file mode 100644 index 1ed40b87f..000000000 --- a/vendor/go.podman.io/image/v5/docker/errors.go +++ /dev/null @@ -1,102 +0,0 @@ -package docker - -import ( - "errors" - "fmt" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/sirupsen/logrus" -) - -var ( - // ErrV1NotSupported is returned when we're trying to talk to a - // docker V1 registry. - // Deprecated: The V1 container registry detection is no longer performed, so this error is never returned. - ErrV1NotSupported = errors.New("can't talk to a V1 container registry") - // ErrTooManyRequests is returned when the status code returned is 429 - ErrTooManyRequests = errors.New("too many requests to registry") -) - -// ErrUnauthorizedForCredentials is returned when the status code returned is 401 -type ErrUnauthorizedForCredentials struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise. - Err error -} - -func (e ErrUnauthorizedForCredentials) Error() string { - return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error()) -} - -// httpResponseToError translates the https.Response into an error, possibly prefixing it with the supplied context. It returns -// nil if the response is not considered an error. -// NOTE: Almost all callers in this package should use registryHTTPResponseToError instead. -func httpResponseToError(res *http.Response, context string) error { - switch res.StatusCode { - case http.StatusOK: - return nil - case http.StatusTooManyRequests: - return ErrTooManyRequests - case http.StatusUnauthorized: - err := registryHTTPResponseToError(res) - return ErrUnauthorizedForCredentials{Err: err} - default: - if context == "" { - return newUnexpectedHTTPStatusError(res) - } - return fmt.Errorf("%s: %w", context, newUnexpectedHTTPStatusError(res)) - } -} - -// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution -// registry. -// -// WARNING: The OCI distribution spec says -// “A `4XX` response code from the registry MAY return a body in any format.”; but if it is -// JSON, it MUST use the errcode.Error structure. -// So, callers should primarily decide based on HTTP StatusCode, not based on error type here. -func registryHTTPResponseToError(res *http.Response) error { - err := handleErrorResponse(res) - // len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is. - if errs, ok := err.(errcode.Errors); ok && len(errs) > 0 { - // The docker/distribution registry implementation almost never returns - // more than one error in the HTTP body; it seems there is only one - // possible instance, where the second error reports a cleanup failure - // we don't really care about. - // - // The only _common_ case where a multi-element error is returned is - // created by the handleErrorResponse parser when OAuth authorization fails: - // the first element contains errors from a WWW-Authenticate header, the second - // element contains errors from the response body. 
- //
- // In that case the first one is currently _slightly_ more informative (ErrorCodeUnauthorized
- // for invalid tokens, ErrorCodeDenied for permission denied with a valid token
- // for the first error, vs. ErrorCodeUnauthorized for both cases for the second error.)
- //
- // Also, docker/docker similarly only logs the other errors and returns the
- // first one.
- if len(errs) > 1 {
- logrus.Debugf("Discarding non-primary errors:")
- for _, err := range errs[1:] {
- logrus.Debugf(" %s", err.Error())
- }
- }
- err = errs[0]
- }
- switch e := err.(type) {
- case *unexpectedHTTPResponseError:
- response := string(e.Response)
- if len(response) > 50 {
- response = response[:50] + "..."
- }
- // %.0w makes e visible to error.Unwrap() without including any text
- err = fmt.Errorf("StatusCode: %d, %q%.0w", e.StatusCode, response, e)
- case errcode.Error:
- // e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message), which is usually
- // rather redundant. So reword it without using e.Code.Error() if e.Message is the default.
- if e.Message == e.Code.Message() {
- // %.0w makes e visible to error.Unwrap() without including any text
- err = fmt.Errorf("%s%.0w", e.Message, e)
- }
- }
- return err
-}
diff --git a/vendor/go.podman.io/image/v5/docker/paths_common.go b/vendor/go.podman.io/image/v5/docker/paths_common.go
deleted file mode 100644
index d9993630b..000000000
--- a/vendor/go.podman.io/image/v5/docker/paths_common.go
+++ /dev/null
@@ -1,5 +0,0 @@
-//go:build !freebsd
-
-package docker
-
-const etcDir = "/etc"
diff --git a/vendor/go.podman.io/image/v5/docker/paths_freebsd.go b/vendor/go.podman.io/image/v5/docker/paths_freebsd.go
deleted file mode 100644
index 8f0f2eee8..000000000
--- a/vendor/go.podman.io/image/v5/docker/paths_freebsd.go
+++ /dev/null
@@ -1,5 +0,0 @@
-//go:build freebsd
-
-package docker
-
-const etcDir = "/usr/local/etc"
diff --git a/vendor/go.podman.io/image/v5/docker/policyconfiguration/naming.go b/vendor/go.podman.io/image/v5/docker/policyconfiguration/naming.go
deleted file mode 100644
index ddb0bce12..000000000
--- a/vendor/go.podman.io/image/v5/docker/policyconfiguration/naming.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package policyconfiguration
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "go.podman.io/image/v5/docker/reference"
-)
-
-// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
-// as a backend for ImageReference.PolicyConfigurationIdentity.
-// The reference must satisfy !reference.IsNameOnly().
-func DockerReferenceIdentity(ref reference.Named) (string, error) {
- res := ref.Name()
- tagged, isTagged := ref.(reference.NamedTagged)
- digested, isDigested := ref.(reference.Canonical)
- switch {
- case isTagged && isDigested: // Note that this CAN actually happen.
- return "", fmt.Errorf("Unexpected Docker reference %s with both a tag and a digest", reference.FamiliarString(ref))
- case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
- return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
- case isTagged:
- res = res + ":" + tagged.Tag()
- case isDigested:
- res = res + "@" + digested.Digest().String()
- default: // Coverage: The above was supposed to be exhaustive.
- return "", errors.New("Internal inconsistency, unexpected default branch") - } - return res, nil -} - -// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search, -// as a backend for ImageReference.PolicyConfigurationIdentity. -// The reference must satisfy !reference.IsNameOnly(). -func DockerReferenceNamespaces(ref reference.Named) []string { - // Look for a match of the repository, and then of the possible parent - // namespaces. Note that this only happens on the expanded host names - // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", - // then in its parent "docker.io/library"; in none of "busybox", - // un-namespaced "library" nor in "" supposedly implicitly representing "library/". - // - // ref.Name() == ref.Domain() + "/" + ref.Path(), so the last - // iteration matches the host name (for any namespace). - res := []string{} - name := ref.Name() - for { - res = append(res, name) - - lastSlash := strings.LastIndex(name, "/") - if lastSlash == -1 { - break - } - name = name[:lastSlash] - } - - // Strip port number if any, before appending to res slice. - // Currently, the most compatible behavior is to return - // example.com:8443/ns, example.com:8443, *.com. - // If a port number is not specified, the expected behavior would be - // example.com/ns, example.com, *.com - portNumColon := strings.Index(name, ":") - if portNumColon != -1 { - name = name[:portNumColon] - } - - // Append wildcarded domains to res slice - for { - firstDot := strings.Index(name, ".") - if firstDot == -1 { - break - } - name = name[firstDot+1:] - - res = append(res, "*."+name) - } - return res -} diff --git a/vendor/go.podman.io/image/v5/docker/reference/README.md b/vendor/go.podman.io/image/v5/docker/reference/README.md deleted file mode 100644 index 3c4d74eb4..000000000 --- a/vendor/go.podman.io/image/v5/docker/reference/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, -except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/go.podman.io/image/v5/docker/reference/helpers.go b/vendor/go.podman.io/image/v5/docker/reference/helpers.go deleted file mode 100644 index 978df7eab..000000000 --- a/vendor/go.podman.io/image/v5/docker/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. 
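Editor's note (illustration, not in the original): for the normalized reference docker.io/library/busybox:latest, FamiliarString yields "busybox:latest" and FamiliarName yields "busybox"; FamiliarMatch("busy*", ref) therefore reports true, and it falls back to the name-only familiar form whenever the full familiar string does not match the pattern.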
-func FamiliarMatch(pattern string, ref Reference) (bool, error) {
- matched, err := path.Match(pattern, FamiliarString(ref))
- if namedRef, isNamed := ref.(Named); isNamed && !matched {
- matched, _ = path.Match(pattern, FamiliarName(namedRef))
- }
- return matched, err
-}
diff --git a/vendor/go.podman.io/image/v5/docker/reference/normalize.go b/vendor/go.podman.io/image/v5/docker/reference/normalize.go
deleted file mode 100644
index d3f47d210..000000000
--- a/vendor/go.podman.io/image/v5/docker/reference/normalize.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package reference
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/opencontainers/go-digest"
-)
-
-var (
- legacyDefaultDomain = "index.docker.io"
- defaultDomain = "docker.io"
- officialRepoName = "library"
- defaultTag = "latest"
-)
-
-// normalizedNamed represents a name which has been
-// normalized and has a familiar form. A familiar name
-// is what is used in Docker UI. An example normalized
-// name is "docker.io/library/ubuntu" and corresponding
-// familiar name of "ubuntu".
-type normalizedNamed interface {
- Named
- Familiar() Named
-}
-
-// ParseNormalizedNamed parses a string into a named reference
-// transforming a familiar name from Docker UI to a fully
-// qualified reference. If the value may be an identifier
-// use ParseAnyReference.
-func ParseNormalizedNamed(s string) (Named, error) {
- if ok := anchoredIdentifierRegexp.MatchString(s); ok {
- return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
- }
- domain, remainder := splitDockerDomain(s)
- var remoteName string
- if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
- remoteName = remainder[:tagSep]
- } else {
- remoteName = remainder
- }
- if strings.ToLower(remoteName) != remoteName {
- return nil, errors.New("invalid reference format: repository name must be lowercase")
- }
-
- ref, err := Parse(domain + "/" + remainder)
- if err != nil {
- return nil, err
- }
- named, isNamed := ref.(Named)
- if !isNamed {
- return nil, fmt.Errorf("reference %s has no name", ref.String())
- }
- return named, nil
-}
-
-// ParseDockerRef normalizes the image reference following the docker convention. This is added
-// mainly for backward compatibility.
-// The reference returned can only be either tagged or digested. If the reference contains both a tag
-// and a digest, the function returns a digested reference; e.g. docker.io/library/busybox:latest@
-// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
-// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
-func ParseDockerRef(ref string) (Named, error) {
- named, err := ParseNormalizedNamed(ref)
- if err != nil {
- return nil, err
- }
- if _, ok := named.(NamedTagged); ok {
- if canonical, ok := named.(Canonical); ok {
- // The reference is both tagged and digested, only
- // return digested.
- newNamed, err := WithName(canonical.Name())
- if err != nil {
- return nil, err
- }
- newCanonical, err := WithDigest(newNamed, canonical.Digest())
- if err != nil {
- return nil, err
- }
- return newCanonical, nil
- }
- }
- return TagNameOnly(named), nil
-}
-
-// splitDockerDomain splits a repository name to domain and remotename string.
-// If no valid domain is found, the default domain is used. Repository name
-// needs to be already validated before.
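Editor's note (illustrative expectations derived from the function below, not in the original):
  splitDockerDomain("ubuntu")                 -> ("docker.io", "library/ubuntu")
  splitDockerDomain("quay.io/app/web")        -> ("quay.io", "app/web")
  splitDockerDomain("localhost/img")          -> ("localhost", "img")
  splitDockerDomain("index.docker.io/alpine") -> ("docker.io", "library/alpine")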
-func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/go.podman.io/image/v5/docker/reference/reference.go b/vendor/go.podman.io/image/v5/docker/reference/reference.go deleted file mode 100644 index 6c5484c06..000000000 --- a/vendor/go.podman.io/image/v5/docker/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' 
domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alphanumeric [separator alphanumeric]* -// alphanumeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. 
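Editor's note (illustration, not in the original): Field round-trips a reference through its string form, so AsField(ref).MarshalText() returns []byte(ref.String()), and UnmarshalText(p) re-parses p with Parse to restore an equivalently typed reference.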
-func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// Deprecated: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. -func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. 
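Editor's note (illustrative expectations, not in the original):
  ParseNamed("busybox")                   -> error (ErrNameNotCanonical; it normalizes to "docker.io/library/busybox")
  ParseNamed("docker.io/library/busybox") -> ok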
-func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. 
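Editor's note (illustration, not in the original): TrimNamed applied to a reference such as "docker.io/library/busybox:latest" yields the bare repository "docker.io/library/busybox", dropping any tag and digest.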
-func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/go.podman.io/image/v5/docker/reference/regexp-additions.go b/vendor/go.podman.io/image/v5/docker/reference/regexp-additions.go deleted file mode 100644 index 7b15871f7..000000000 --- a/vendor/go.podman.io/image/v5/docker/reference/regexp-additions.go +++ /dev/null @@ -1,6 +0,0 @@ -package reference - -// Return true if the specified string fully matches `IdentifierRegexp`. -func IsFullIdentifier(s string) bool { - return anchoredIdentifierRegexp.MatchString(s) -} diff --git a/vendor/go.podman.io/image/v5/docker/reference/regexp.go b/vendor/go.podman.io/image/v5/docker/reference/regexp.go deleted file mode 100644 index db656fe6a..000000000 --- a/vendor/go.podman.io/image/v5/docker/reference/regexp.go +++ /dev/null @@ -1,156 +0,0 @@ -package reference - -import ( - "regexp" - "strings" - - storageRegexp "go.podman.io/storage/pkg/regexp" -) - -const ( - // alphaNumeric defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumeric = `[a-z0-9]+` - - // separator defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. Repeated dashes and underscores are intentionally treated - // differently. In order to support valid hostnames as name components, - // supporting repeated dash was added. Additionally double underscore is - // now allowed as a separator to loosen the restriction for previously - // supported names. 
- separator = `(?:[._]|__|[-]*)` - - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` - - // The string counterpart for TagRegexp. - tag = `[\w][\w.-]{0,127}` - - // The string counterpart for DigestRegexp. - digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` - - // The string counterpart for IdentifierRegexp. - identifier = `([a-f0-9]{64})` - - // The string counterpart for ShortIdentifierRegexp. - shortIdentifier = `([a-f0-9]{6,64})` -) - -var ( - // nameComponent restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponent = expression( - alphaNumeric, - optional(repeated(separator, alphaNumeric))) - - domain = expression( - domainComponent, - optional(repeated(literal(`.`), domainComponent)), - optional(literal(`:`), `[0-9]+`)) - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - DomainRegexp = re(domain) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = re(tag) - - anchoredTag = anchored(tag) - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = storageRegexp.Delayed(anchoredTag) - - // DigestRegexp matches valid digests. - DigestRegexp = re(digestPat) - - anchoredDigest = anchored(digestPat) - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = storageRegexp.Delayed(anchoredDigest) - - namePat = expression( - optional(domain, literal(`/`)), - nameComponent, - optional(repeated(literal(`/`), nameComponent))) - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = re(namePat) - - anchoredName = anchored( - optional(capture(domain), literal(`/`)), - capture(nameComponent, - optional(repeated(literal(`/`), nameComponent)))) - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = storageRegexp.Delayed(anchoredName) - - referencePat = anchored(capture(namePat), - optional(literal(":"), capture(tag)), - optional(literal("@"), capture(digestPat))) - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = re(referencePat) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = re(identifier) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = re(shortIdentifier) - - anchoredIdentifier = anchored(identifier) - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. 
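The patterns above are assembled from small string combinators (defined just below). The replacement package, github.com/distribution/reference, exports equivalent compiled patterns, which a short sketch can exercise directly (registry and tag here are made up):

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// ReferenceRegexp has capturing groups for name, tag and digest, in that order.
	m := reference.ReferenceRegexp.FindStringSubmatch("quay.io/crazymax/diun:4.29.0")
	if m == nil {
		panic("no match")
	}
	fmt.Println(m[1]) // quay.io/crazymax/diun
	fmt.Println(m[2]) // 4.29.0
	fmt.Println(m[3]) // "" (no digest component)
}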
- anchoredIdentifierRegexp = storageRegexp.Delayed(anchoredIdentifier) -) - -// re compiles the string to a regular expression. -var re = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) string { - return regexp.QuoteMeta(s) -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...string) string { - return strings.Join(res, "") -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...string) string { - return group(expression(res...)) + `?` -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...string) string { - return group(expression(res...)) + `+` -} - -// group wraps the regexp in a non-capturing group. -func group(res ...string) string { - return `(?:` + expression(res...) + `)` -} - -// capture wraps the expression in a capturing group. -func capture(res ...string) string { - return `(` + expression(res...) + `)` -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...string) string { - return `^` + expression(res...) + `$` -} diff --git a/vendor/go.podman.io/image/v5/docker/registries_d.go b/vendor/go.podman.io/image/v5/docker/registries_d.go deleted file mode 100644 index 53bbb53cb..000000000 --- a/vendor/go.podman.io/image/v5/docker/registries_d.go +++ /dev/null @@ -1,303 +0,0 @@ -package docker - -import ( - "errors" - "fmt" - "io/fs" - "net/url" - "os" - "path" - "path/filepath" - "strings" - - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/rootless" - "go.podman.io/image/v5/types" - "go.podman.io/storage/pkg/fileutils" - "go.podman.io/storage/pkg/homedir" - "gopkg.in/yaml.v3" -) - -// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. -// You can override this at build time with -// -ldflags '-X go.podman.io/image/v5/docker.systemRegistriesDirPath=$your_path' -var systemRegistriesDirPath = builtinRegistriesDirPath - -// builtinRegistriesDirPath is the path to registries.d. -// DO NOT change this, instead see systemRegistriesDirPath above. -const builtinRegistriesDirPath = etcDir + "/containers/registries.d" - -// userRegistriesDirPath is the path to the per user registries.d. -var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d") - -// defaultUserDockerDir is the default lookaside directory for unprivileged user -var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore") - -// defaultDockerDir is the default lookaside directory for root -var defaultDockerDir = "/var/lib/containers/sigstore" - -// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. -// NOTE: Keep this in sync with docs/registries.d.md! -type registryConfiguration struct { - DefaultDocker *registryNamespace `yaml:"default-docker"` - // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), - Docker map[string]registryNamespace `yaml:"docker"` -} - -// registryNamespace defines lookaside locations for a single namespace. 
-type registryNamespace struct { - Lookaside string `yaml:"lookaside"` // For reading, and if LookasideStaging is not present, for writing. - LookasideStaging string `yaml:"lookaside-staging"` // For writing only. - SigStore string `yaml:"sigstore"` // For compatibility, deprecated in favor of Lookaside. - SigStoreStaging string `yaml:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging. - UseSigstoreAttachments *bool `yaml:"use-sigstore-attachments,omitempty"` -} - -// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage. -// Users outside of this file should use SignatureStorageBaseURL and lookasideStorageURL below. -type lookasideStorageBase *url.URL - -// SignatureStorageBaseURL reads configuration to find an appropriate lookaside storage URL for ref, for write access if “write”. -// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md -// Warning: This function only exposes configuration in registries.d; -// just because this function returns an URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S). -func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) { - dr, ok := ref.(dockerReference) - if !ok { - return nil, errors.New("ref must be a dockerReference") - } - config, err := loadRegistryConfiguration(sys) - if err != nil { - return nil, err - } - - return config.lookasideStorageBaseURL(dr, write) -} - -// loadRegistryConfiguration returns a registryConfiguration appropriate for sys. -func loadRegistryConfiguration(sys *types.SystemContext) (*registryConfiguration, error) { - dirPath := registriesDirPath(sys) - logrus.Debugf(`Using registries.d directory %s`, dirPath) - return loadAndMergeConfig(dirPath) -} - -// registriesDirPath returns a path to registries.d -func registriesDirPath(sys *types.SystemContext) string { - return registriesDirPathWithHomeDir(sys, homedir.Get()) -} - -// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath, -// it exists only to allow testing it with an artificial home directory. 
-func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string { - if sys != nil && sys.RegistriesDirPath != "" { - return sys.RegistriesDirPath - } - userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir) - if err := fileutils.Exists(userRegistriesDirPath); err == nil { - return userRegistriesDirPath - } - if sys != nil && sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) - } - - return systemRegistriesDirPath -} - -// loadAndMergeConfig loads configuration files in dirPath -// FIXME: Probably rename to loadRegistryConfigurationForPath -func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { - mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} - dockerDefaultMergedFrom := "" - nsMergedFrom := map[string]string{} - - dir, err := os.Open(dirPath) - if err != nil { - if os.IsNotExist(err) { - return &mergedConfig, nil - } - return nil, err - } - configNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - for _, configName := range configNames { - if !strings.HasSuffix(configName, ".yaml") { - continue - } - configPath := filepath.Join(dirPath, configName) - configBytes, err := os.ReadFile(configPath) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - // file must have been removed between the directory listing - // and the open call, ignore that as it is a expected race - continue - } - return nil, err - } - - var config registryConfiguration - err = yaml.Unmarshal(configBytes, &config) - if err != nil { - return nil, fmt.Errorf("parsing %s: %w", configPath, err) - } - - if config.DefaultDocker != nil { - if mergedConfig.DefaultDocker != nil { - return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in %q and %q`, - dockerDefaultMergedFrom, configPath) - } - mergedConfig.DefaultDocker = config.DefaultDocker - dockerDefaultMergedFrom = configPath - } - - for nsName, nsConfig := range config.Docker { // includes config.Docker == nil - if _, ok := mergedConfig.Docker[nsName]; ok { - return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace %q defined both in %q and %q`, - nsName, nsMergedFrom[nsName], configPath) - } - mergedConfig.Docker[nsName] = nsConfig - nsMergedFrom[nsName] = configPath - } - } - - return &mergedConfig, nil -} - -// lookasideStorageBaseURL returns an appropriate signature storage URL for ref, for write access if “write”. -// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md -func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, write bool) (*url.URL, error) { - topLevel := config.signatureTopLevel(dr, write) - var baseURL *url.URL - if topLevel != "" { - u, err := url.Parse(topLevel) - if err != nil { - return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err) - } - baseURL = u - } else { - // returns default directory if no lookaside specified in configuration file - baseURL = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID()) - logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), baseURL.Redacted()) - } - // NOTE: Keep this in sync with docs/signature-protocols.md! - // FIXME? Restrict to explicitly supported schemes? - repo := reference.Path(dr.ref) // Note that this is without a tag or digest. 
- if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references - return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String()) - } - baseURL.Path = baseURL.Path + "/" + repo - return baseURL, nil -} - -// builtinDefaultLookasideStorageDir returns default signature storage URL as per euid -func builtinDefaultLookasideStorageDir(euid int) *url.URL { - if euid != 0 { - return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)} - } - return &url.URL{Scheme: "file", Path: defaultDockerDir} -} - -// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. -// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured. -func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { - if config.Docker != nil { - // Look for a full match. - identity := ref.PolicyConfigurationIdentity() - if ns, ok := config.Docker[identity]; ok { - logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, identity) - if ret := ns.signatureTopLevel(write); ret != "" { - return ret - } - } - - // Look for a match of the possible parent namespaces. - for _, name := range ref.PolicyConfigurationNamespaces() { - if ns, ok := config.Docker[name]; ok { - logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, name) - if ret := ns.signatureTopLevel(write); ret != "" { - return ret - } - } - } - } - // Look for a default location - if config.DefaultDocker != nil { - logrus.Debugf(` Lookaside configuration: using "default-docker" configuration`) - if ret := config.DefaultDocker.signatureTopLevel(write); ret != "" { - return ret - } - } - return "" -} - -// config.useSigstoreAttachments returns whether we should look for and write sigstore attachments. -// for ref. -func (config *registryConfiguration) useSigstoreAttachments(ref dockerReference) bool { - if config.Docker != nil { - // Look for a full match. - identity := ref.PolicyConfigurationIdentity() - if ns, ok := config.Docker[identity]; ok { - logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, identity) - if ns.UseSigstoreAttachments != nil { - return *ns.UseSigstoreAttachments - } - } - - // Look for a match of the possible parent namespaces. - for _, name := range ref.PolicyConfigurationNamespaces() { - if ns, ok := config.Docker[name]; ok { - logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, name) - if ns.UseSigstoreAttachments != nil { - return *ns.UseSigstoreAttachments - } - } - } - } - // Look for a default location - if config.DefaultDocker != nil { - logrus.Debugf(` Sigstore attachments: using "default-docker" configuration`) - if config.DefaultDocker.UseSigstoreAttachments != nil { - return *config.DefaultDocker.UseSigstoreAttachments - } - } - return false -} - -// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”. -// or "" if nothing has been configured. 
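For orientation, a minimal sketch of the kind of registries.d file this configuration loading consumes, using locally re-declared mirror structs and made-up URLs (not part of this patch):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Local mirrors of the deleted structs above, reduced for illustration.
type registryNamespace struct {
	Lookaside        string `yaml:"lookaside"`
	LookasideStaging string `yaml:"lookaside-staging"`
}

type registryConfiguration struct {
	DefaultDocker *registryNamespace           `yaml:"default-docker"`
	Docker        map[string]registryNamespace `yaml:"docker"`
}

func main() {
	// A plausible registries.d fragment; all URLs are invented.
	data := []byte(`
default-docker:
  lookaside: https://sigstore.example.com
docker:
  registry.example.com/ns:
    lookaside: https://sigstore.example.com/ns
    lookaside-staging: file:///var/lib/sigstore-staging
`)
	var cfg registryConfiguration
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	// Lookup order per the deleted signatureTopLevel: exact namespace first,
	// then parent namespaces, then default-docker.
	if ns, ok := cfg.Docker["registry.example.com/ns"]; ok {
		fmt.Println(ns.Lookaside) // https://sigstore.example.com/ns
	}
	fmt.Println(cfg.DefaultDocker.Lookaside) // https://sigstore.example.com
}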
-func (ns registryNamespace) signatureTopLevel(write bool) string { - if write { - if ns.LookasideStaging != "" { - logrus.Debugf(` Using "lookaside-staging" %s`, ns.LookasideStaging) - return ns.LookasideStaging - } - if ns.SigStoreStaging != "" { - logrus.Debugf(` Using "sigstore-staging" %s`, ns.SigStoreStaging) - return ns.SigStoreStaging - } - } - if ns.Lookaside != "" { - logrus.Debugf(` Using "lookaside" %s`, ns.Lookaside) - return ns.Lookaside - } - if ns.SigStore != "" { - logrus.Debugf(` Using "sigstore" %s`, ns.SigStore) - return ns.SigStore - } - return "" -} - -// lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest. -// base is not nil from the caller -// NOTE: Keep this in sync with docs/signature-protocols.md! -func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) { - if err := manifestDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly. - return nil, err - } - sigURL := *base - sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1) - return &sigURL, nil -} diff --git a/vendor/go.podman.io/image/v5/docker/wwwauthenticate.go b/vendor/go.podman.io/image/v5/docker/wwwauthenticate.go deleted file mode 100644 index f5fed07b8..000000000 --- a/vendor/go.podman.io/image/v5/docker/wwwauthenticate.go +++ /dev/null @@ -1,175 +0,0 @@ -package docker - -// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies. - -import ( - "fmt" - "iter" - "net/http" - "strings" -) - -// challenge carries information from a WWW-Authenticate response header. -// See RFC 7235. -type challenge struct { - // Scheme is the auth-scheme according to RFC 7235 - Scheme string - - // Parameters are the auth-params according to RFC 7235 - Parameters map[string]string -} - -// Octet types from RFC 7230. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) - if strings.ContainsRune(" \t\r\n", rune(c)) { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -func iterateAuthHeader(header http.Header) iter.Seq[challenge] { - return func(yield func(challenge) bool) { - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - if !yield(challenge{Scheme: v, Parameters: p}) { - return - } - } - } - } -} - -// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions` -func parseAuthScope(scopeStr string) (*authScope, error) { - if parts := strings.Split(scopeStr, ":"); len(parts) == 3 { - return &authScope{ - resourceType: parts[0], - remoteName: parts[1], - actions: parts[2], - }, nil - } - return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr) -} - -// NOTE: This is not a fully compliant parser per RFC 7235: -// Most notably it does not support more than one challenge within a single header -// Some of the whitespace parsing also seems noncompliant. -// But it is clearly better than what we used to have… -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i++; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/go.podman.io/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/go.podman.io/image/v5/internal/blobinfocache/blobinfocache.go deleted file mode 100644 index 5399c2961..000000000 --- a/vendor/go.podman.io/image/v5/internal/blobinfocache/blobinfocache.go +++ /dev/null @@ -1,55 +0,0 @@ -package blobinfocache - -import ( - digest "github.com/opencontainers/go-digest" - "go.podman.io/image/v5/types" -) - -// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original -// object if it implements BlobInfoCache2, or a wrapper which discards compression information -// if it only implements BlobInfoCache. 
-func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 { - if bic2, ok := bic.(BlobInfoCache2); ok { - return bic2 - } - return &v1OnlyBlobInfoCache{ - BlobInfoCache: bic, - } -} - -type v1OnlyBlobInfoCache struct { - types.BlobInfoCache -} - -func (bic *v1OnlyBlobInfoCache) Open() { -} - -func (bic *v1OnlyBlobInfoCache) Close() { -} - -func (bic *v1OnlyBlobInfoCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { - return "" -} - -func (bic *v1OnlyBlobInfoCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { -} - -func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) { -} - -func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 { - return nil -} - -// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of -// types.BICReplacementCandidate, dropping compression information. -func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate { - candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates)) - for _, c := range v2candidates { - candidates = append(candidates, types.BICReplacementCandidate{ - Digest: c.Digest, - Location: c.Location, - }) - } - return candidates -} diff --git a/vendor/go.podman.io/image/v5/internal/blobinfocache/types.go b/vendor/go.podman.io/image/v5/internal/blobinfocache/types.go deleted file mode 100644 index d9d27ec95..000000000 --- a/vendor/go.podman.io/image/v5/internal/blobinfocache/types.go +++ /dev/null @@ -1,81 +0,0 @@ -package blobinfocache - -import ( - digest "github.com/opencontainers/go-digest" - compressiontypes "go.podman.io/image/v5/pkg/compression/types" - "go.podman.io/image/v5/types" -) - -const ( - // Uncompressed is the value we store in a blob info cache to indicate that we know that - // the blob in the corresponding location is not compressed. - Uncompressed = "uncompressed" - // UnknownCompression is the value we store in a blob info cache to indicate that we don't - // know if the blob in the corresponding location is compressed (and if so, how) or not. - UnknownCompression = "unknown" -) - -// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind -// of compression was applied to the blobs it keeps information about. -type BlobInfoCache2 interface { - types.BlobInfoCache - - // Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close(). - // Note that public callers may call the types.BlobInfoCache operations without Open()/Close(). - Open() - // Close destroys state created by Open(). - Close() - - // UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. - // Returns "" if the uncompressed digest is unknown. - UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest - // RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. - // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. - // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. - // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) 
- RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) - - // RecordDigestCompressorData records data for the blob with the specified digest. - // WARNING: Only call this with LOCALLY VERIFIED data: - // - don’t record a compressor for a digest just because some remote author claims so - // (e.g. because a manifest says so); - // - don’t record the non-base variant or annotations if we are not _sure_ that the base variant - // and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them - // in a manifest) - // otherwise the cache could be poisoned and cause us to make incorrect edits to type - // information in a manifest. - RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) - // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) - // that could possibly be reused within the specified (transport scope) (if they still - // exist, which is not guaranteed). - CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 -} - -// DigestCompressorData is information known about how a blob is compressed. -// (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.) -type DigestCompressorData struct { - BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression. - // The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression: - SpecificVariantCompressor string // A non-base variant compressor (or UnknownCompression if the true format is just the base variant) - SpecificVariantAnnotations map[string]string // Annotations required to benefit from the base variant. -} - -// CandidateLocations2Options are used in CandidateLocations2. -type CandidateLocations2Options struct { - // If !CanSubstitute, the returned candidates will match the submitted digest exactly; if - // CanSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look - // up variants of the blob which have the same uncompressed digest. - CanSubstitute bool - PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer - RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm -} - -// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2. 
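The FromBlobInfoCache upgrade above follows a common Go pattern: type-assert to the richer interface and fall back to a no-op wrapper otherwise. A toy reduction of that pattern (all names hypothetical):

package main

import "fmt"

type Cache interface {
	Get(key string) string
}

type Cache2 interface {
	Cache
	GetCompressor(key string) string
}

// v1Only wraps a plain Cache and discards the v2-only information,
// mirroring v1OnlyBlobInfoCache above.
type v1Only struct{ Cache }

func (v1Only) GetCompressor(string) string { return "" }

func FromCache(c Cache) Cache2 {
	if c2, ok := c.(Cache2); ok {
		return c2
	}
	return v1Only{c}
}

type mapCache map[string]string

func (m mapCache) Get(k string) string { return m[k] }

func main() {
	c2 := FromCache(mapCache{"a": "1"})
	fmt.Println(c2.Get("a"))           // 1
	fmt.Println(c2.GetCompressor("a")) // "" (v1-only fallback)
}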
-type BICReplacementCandidate2 struct { - Digest digest.Digest - CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed - CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed - CompressionAnnotations map[string]string // If necessary, annotations necessary to use CompressionAlgorithm - UnknownLocation bool // is true when `Location` for this blob is not set - Location types.BICLocationReference // not set if UnknownLocation is set to `true` -} diff --git a/vendor/go.podman.io/image/v5/internal/image/docker_list.go b/vendor/go.podman.io/image/v5/internal/image/docker_list.go deleted file mode 100644 index 2b49964a1..000000000 --- a/vendor/go.podman.io/image/v5/internal/image/docker_list.go +++ /dev/null @@ -1,34 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "go.podman.io/image/v5/internal/manifest" - "go.podman.io/image/v5/types" -) - -func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { - list, err := manifest.Schema2ListFromManifest(manblob) - if err != nil { - return nil, fmt.Errorf("parsing schema2 manifest list: %w", err) - } - targetManifestDigest, err := list.ChooseInstance(sys) - if err != nil { - return nil, fmt.Errorf("choosing image instance: %w", err) - } - manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) - if err != nil { - return nil, fmt.Errorf("fetching target platform image selected from manifest list: %w", err) - } - - matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) - if err != nil { - return nil, fmt.Errorf("computing manifest digest: %w", err) - } - if !matches { - return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) - } - - return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) -} diff --git a/vendor/go.podman.io/image/v5/internal/image/docker_schema1.go b/vendor/go.podman.io/image/v5/internal/image/docker_schema1.go deleted file mode 100644 index da7a943b3..000000000 --- a/vendor/go.podman.io/image/v5/internal/image/docker_schema1.go +++ /dev/null @@ -1,257 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/manifest" - "go.podman.io/image/v5/types" -) - -type manifestSchema1 struct { - m *manifest.Schema1 -} - -func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema1{m: m}, nil -} - -// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. 
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { - m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) - if err != nil { - return nil, err - } - return &manifestSchema1{m: m}, nil -} - -func (m *manifestSchema1) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema1) manifestMIMEType() string { - return manifest.DockerV2Schema1SignedMediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestSchema1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { - return nil, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - v2s2, err := m.convertToManifestSchema2(ctx, &types.ManifestUpdateOptions{}) - if err != nil { - return nil, err - } - return v2s2.OCIConfig(ctx) -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestSchema1) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - // This is a bit convoluted: We can’t just have a "get embedded docker reference" method - // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually - // embed a full docker/distribution reference, but only the repo name and tag (without the host name). - // So we would have to provide a “return repo without host name, and tag” getter for the generic code, - // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the - // generic copy code needs to know about is reference.Named and that a manifest may need updating - // for some destinations. - name := reference.Path(ref) - var tag string - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - tag = tagged.Tag() - } else { - tag = "" - } - return m.m.Name != name || m.m.Tag != tag -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
-func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) { - return m.m.Inspect(nil) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest) -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} - - // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, - // handle conversions between them by doing nothing. - if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType { - converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ - imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, - manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic, - }) - if err != nil { - return nil, err - } - - if converted != nil { - return converted, nil - } - } - - // No conversion required, update manifest - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - if options.EmbeddedDockerReference != nil { - copy.m.Name = reference.Path(options.EmbeddedDockerReference) - if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { - copy.m.Tag = tagged.Tag() - } else { - copy.m.Tag = "" - } - } - - return memoryImageFromManifest(©), nil -} - -// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. -// -// We need this function just because a function returning an implementation of the genericManifest -// interface is not automatically assignable to a function type returning the genericManifest interface -func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - return m.convertToManifestSchema2(ctx, options) -} - -// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. 
-// -// Based on github.com/docker/docker/distribution/pull_v2.go -func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { - uploadedLayerInfos := options.InformationOnly.LayerInfos - layerDiffIDs := options.InformationOnly.LayerDiffIDs - - if len(m.m.ExtractedV1Compatibility) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. - return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) - } - if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { - return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) - } - if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { - return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) - } - if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { - return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) - } - - var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil - if options.LayerInfos != nil { - if len(options.LayerInfos) != len(m.m.FSLayers) { - return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", - len(options.LayerInfos), len(m.m.FSLayers)) - } - convertedLayerUpdates = []types.BlobInfo{} - } - - // Build a list of the diffIDs for the non-empty layers. - diffIDs := []digest.Digest{} - var layers []manifest.Schema2Descriptor - for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index - - if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { - var size int64 - if uploadedLayerInfos != nil { - size = uploadedLayerInfos[v2Index].Size - } - var d digest.Digest - if layerDiffIDs != nil { - d = layerDiffIDs[v2Index] - } - layers = append(layers, manifest.Schema2Descriptor{ - MediaType: manifest.DockerV2Schema2LayerMediaType, - Size: size, - Digest: m.m.FSLayers[v1Index].BlobSum, - }) - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[v2Index]) - } - diffIDs = append(diffIDs, d) - } - } - configJSON, err := m.m.ToSchema2Config(diffIDs) - if err != nil { - return nil, err - } - configDescriptor := manifest.Schema2Descriptor{ - MediaType: manifest.DockerV2Schema2ConfigMediaType, - Size: int64(len(configJSON)), - Digest: digest.FromBytes(configJSON), - } - - if options.LayerInfos != nil { - options.LayerInfos = convertedLayerUpdates - } - return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil -} - -// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. 
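One step of the conversion above is deriving the schema2 config descriptor from the serialized config bytes: size and digest are both computed from the raw JSON. A self-contained sketch with a toy descriptor type standing in for manifest.Schema2Descriptor (config contents invented):

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// descriptor mirrors manifest.Schema2Descriptor for illustration only.
type descriptor struct {
	MediaType string
	Size      int64
	Digest    digest.Digest
}

func main() {
	configJSON := []byte(`{"architecture":"amd64","os":"linux"}`)
	d := descriptor{
		MediaType: "application/vnd.docker.container.image.v1+json",
		Size:      int64(len(configJSON)),
		Digest:    digest.FromBytes(configJSON),
	}
	fmt.Printf("%+v\n", d)
}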
-func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest - m2, err := m.convertToManifestSchema2(ctx, options) - if err != nil { - return nil, err - } - - return m2.convertToManifestOCI1(ctx, options) -} - -// SupportsEncryption returns if encryption is supported for the manifest type -func (m *manifestSchema1) SupportsEncryption(context.Context) bool { - return false -} - -// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image -// (and the code can handle that). -// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted -// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts -// to a different manifest format). -func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool { - return true // There are no MIME types in the manifest, so we must assume a valid image. -} diff --git a/vendor/go.podman.io/image/v5/internal/image/docker_schema2.go b/vendor/go.podman.io/image/v5/internal/image/docker_schema2.go deleted file mode 100644 index 1586d6790..000000000 --- a/vendor/go.podman.io/image/v5/internal/image/docker_schema2.go +++ /dev/null @@ -1,415 +0,0 @@ -package image - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/iolimits" - "go.podman.io/image/v5/manifest" - "go.podman.io/image/v5/pkg/blobinfocache/none" - "go.podman.io/image/v5/types" -) - -// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) -// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is -// a non-zero embedded timestamp; we could zero that, but that would just waste storage space -// in registries, so let’s use the same values. -// -// This is publicly visible as c/image/image.GzippedEmptyLayer. -var GzippedEmptyLayer = []byte{ - 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, - 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, -} - -// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer -// -// This is publicly visible as c/image/image.GzippedEmptyLayerDigest. -const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - -type manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
- m *manifest.Schema2 -} - -func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema2FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema2{ - src: src, - m: m, - }, nil -} - -// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 { - return &manifestSchema2{ - src: src, - configBlob: configBlob, - m: manifest.Schema2FromComponents(config, layers), - } -} - -func (m *manifestSchema2) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema2) manifestMIMEType() string { - return m.m.MediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestSchema2) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - configBlob, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields - // than OCI v1. This unmarshal makes sure we drop docker v2s2 - // fields that aren't needed in OCI v1. - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(configBlob, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") - } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.ConfigDescriptor.Digest { - return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. 
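The digest comparison in ConfigBlob above is the standard verify-before-trust pattern for downloaded blobs: recompute the digest over the received bytes and reject on mismatch. Reduced to a sketch (blob contents invented):

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	blob := []byte(`{"architecture":"amd64"}`)
	expected := digest.FromBytes(blob) // normally taken from the manifest

	// Recompute over what was actually downloaded before trusting it.
	if computed := digest.FromBytes(blob); computed != expected {
		panic(fmt.Sprintf("digest %s does not match expected %s", computed, expected))
	}
	fmt.Println(expected) // sha256:...
}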
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError -// if the CompressionOperation and CompressionAlgorithm specified in one or more -// options.LayerInfos items is anything other than gzip. -func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.Schema2Clone(m.m), - } - - converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ - manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, - manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, - imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, - }) - if err != nil { - return nil, err - } - - if converted != nil { - return converted, nil - } - - // No conversion required, update manifest - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. - - return memoryImageFromManifest(©), nil -} - -func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { - return imgspecv1.Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema2 object. 
-func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) { - configOCI, err := m.OCIConfig(ctx) - if err != nil { - return nil, err - } - configOCIBytes, err := json.Marshal(configOCI) - if err != nil { - return nil, err - } - - config := imgspecv1.Descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - } - - layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) - for idx := range layers { - layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) - switch m.m.LayersDescriptors[idx].MediaType { - case manifest.DockerV2Schema2ForeignLayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - case manifest.DockerV2SchemaLayerMediaTypeUncompressed: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayer - case manifest.DockerV2Schema2LayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip - case manifest.DockerV2SchemaLayerMediaTypeZstd: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerZstd - default: - return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) - } - } - - return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil -} - -// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema2 object. -// -// Based on docker/distribution/manifest/schema1/config_builder.go -func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - dest := options.InformationOnly.Destination - - var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil - if options.LayerInfos != nil { - if len(options.LayerInfos) != len(m.m.LayersDescriptors) { - return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", - len(options.LayerInfos), len(m.m.LayersDescriptors)) - } - convertedLayerUpdates = []types.BlobInfo{} - } - - configBytes, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - imageConfig := &manifest.Schema2Image{} - if err := json.Unmarshal(configBytes, imageConfig); err != nil { - return nil, err - } - - // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. - fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) - history := make([]manifest.Schema1History, len(imageConfig.History)) - nonemptyLayerIndex := 0 - var parentV1ID string // Set in the loop - v1ID := "" - haveGzippedEmptyLayer := false - if len(imageConfig.History) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. 
- return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) - } - for v2Index, historyEntry := range imageConfig.History { - parentV1ID = v1ID - v1Index := len(imageConfig.History) - 1 - v2Index - - var blobDigest digest.Digest - if historyEntry.EmptyLayer { - emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))} - - if !haveGzippedEmptyLayer { - logrus.Debugf("Uploading empty layer during conversion to schema 1") - // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, - // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. - info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false) - if err != nil { - return nil, fmt.Errorf("uploading empty layer: %w", err) - } - if info.Digest != emptyLayerBlobInfo.Digest { - return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest) - } - haveGzippedEmptyLayer = true - } - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo) - } - blobDigest = emptyLayerBlobInfo.Digest - } else { - if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { - return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) - } - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex]) - } - blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest - nonemptyLayerIndex++ - } - - // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. - v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) - if err != nil { - return nil, err - } - v1ID = v - - fakeImage := manifest.Schema1V1Compatibility{ - ID: v1ID, - Parent: parentV1ID, - Comment: historyEntry.Comment, - Created: historyEntry.Created, - Author: historyEntry.Author, - ThrowAway: historyEntry.EmptyLayer, - } - fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} - v1CompatibilityBytes, err := json.Marshal(&fakeImage) - if err != nil { - return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) - } - - fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} - history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} - // Note that parentV1ID of the top layer is preserved when exiting this loop - } - - // Now patch in real configuration for the top layer (v1Index == 0) - v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. 
- if err != nil { - return nil, err - } - v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) - if err != nil { - return nil, err - } - history[0].V1Compatibility = string(v1Config) - - if options.LayerInfos != nil { - options.LayerInfos = convertedLayerUpdates - } - m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) - if err != nil { - return nil, err // This should never happen, we should have created all the components correctly. - } - return m1, nil -} - -func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { - if err := blobDigest.Validate(); err != nil { - return "", err - } - parts := append([]string{blobDigest.Encoded()}, others...) - v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) - return hex.EncodeToString(v1IDHash[:]), nil -} - -func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) - rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! - return nil, err - } - delete(rawContents, "rootfs") - delete(rawContents, "history") - - updates := map[string]any{"id": v1ID} - if parentV1ID != "" { - updates["parent"] = parentV1ID - } - if throwaway { - updates["throwaway"] = throwaway - } - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) -} - -// SupportsEncryption returns if encryption is supported for the manifest type -func (m *manifestSchema2) SupportsEncryption(context.Context) bool { - return false -} - -// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image -// (and the code can handle that). -// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted -// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts -// to a different manifest format). -func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool { - return m.m.CanChangeLayerCompression(mimeType) -} diff --git a/vendor/go.podman.io/image/v5/internal/image/manifest.go b/vendor/go.podman.io/image/v5/internal/image/manifest.go deleted file mode 100644 index d6ae8b6fa..000000000 --- a/vendor/go.podman.io/image/v5/internal/image/manifest.go +++ /dev/null @@ -1,121 +0,0 @@ -package image - -import ( - "context" - "fmt" - - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/manifest" - "go.podman.io/image/v5/types" -) - -// genericManifest is an interface for parsing, modifying image manifests and related data. -// The public methods are related to types.Image so that embedding a genericManifest implements most of it, -// but there are also public methods that are only visible by packages that can import c/image/internal/image. 
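v1IDFromBlobDigestAndComponents above is small enough to restate as a runnable sketch, assuming only github.com/opencontainers/go-digest (the inputs are invented):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// v1ID mirrors the deleted helper: the fake v1 ID is a hex sha256 over the
// blob digest's encoded part plus any extra components, joined by spaces.
func v1ID(blobDigest digest.Digest, others ...string) (string, error) {
	if err := blobDigest.Validate(); err != nil {
		return "", err
	}
	parts := append([]string{blobDigest.Encoded()}, others...)
	sum := sha256.Sum256([]byte(strings.Join(parts, " ")))
	return hex.EncodeToString(sum[:]), nil
}

func main() {
	d := digest.FromString("layer contents")
	id, err := v1ID(d, "parent-id")
	if err != nil {
		panic(err)
	}
	fmt.Println(id)
}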
-type genericManifest interface { - serialize() ([]byte, error) - manifestMIMEType() string - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. - ConfigInfo() types.BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. - // The result is cached; it is OK to call this however often you need. - ConfigBlob(context.Context) ([]byte, error) - // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about - // layers in the resulting configuration isn't guaranteed to be returned to due how - // old image manifests work (docker v2s1 especially). - OCIConfig(context.Context) (*imgspecv1.Image, error) - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []types.BlobInfo - // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. - // It returns false if the manifest does not embed a Docker reference. - // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) - EmbeddedDockerReferenceConflicts(ref reference.Named) bool - // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. - Inspect(context.Context) (*types.ImageInspectInfo, error) - // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. - // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute - // (most importantly it forces us to download the full layers even if they are already present at the destination). - UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool - // UpdatedImage returns a types.Image modified according to options. - // This does not change the state of the original Image object. - UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) - // SupportsEncryption returns if encryption is supported for the manifest type - // - // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since - // the process of updating a manifest between different manifest types was to update then convert. - // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836 - SupportsEncryption(ctx context.Context) bool - - // The following methods are not a part of types.Image: - // === - - // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image - // (and the code can handle that). - // NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted - // algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts - // to a different manifest format). 
- CanChangeLayerCompression(mimeType string) bool -} - -// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. -// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. -func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch manifest.NormalizedMIMEType(mt) { - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - return manifestSchema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return manifestOCI1FromManifest(src, manblob) - case manifest.DockerV2Schema2MediaType: - return manifestSchema2FromManifest(src, manblob) - case manifest.DockerV2ListMediaType: - return manifestSchema2FromManifestList(ctx, sys, src, manblob) - case imgspecv1.MediaTypeImageIndex: - return manifestOCI1FromImageIndex(ctx, sys, src, manblob) - default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %q", mt) - } -} - -// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. -func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { - blobs := make([]types.BlobInfo, len(layers)) - for i, layer := range layers { - blobs[i] = layer.BlobInfo - } - return blobs -} - -// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation -// converted to a specific manifest MIME type. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original genericManifest object. -type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) - -// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if -// required and re-apply the options to the converted type. -// It returns (nil, nil) if no conversion was requested. -func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) { - if options.ManifestMIMEType == "" { - return nil, nil - } - - converter, ok := converters[options.ManifestMIMEType] - if !ok { - return nil, fmt.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType) - } - - optionsCopy := options - convertedManifest, err := converter(ctx, &optionsCopy) - if err != nil { - return nil, err - } - convertedImage := memoryImageFromManifest(convertedManifest) - - optionsCopy.ManifestMIMEType = "" - return convertedImage.UpdatedImage(ctx, optionsCopy) -} diff --git a/vendor/go.podman.io/image/v5/internal/image/memory.go b/vendor/go.podman.io/image/v5/internal/image/memory.go deleted file mode 100644 index 9dff39197..000000000 --- a/vendor/go.podman.io/image/v5/internal/image/memory.go +++ /dev/null @@ -1,64 +0,0 @@ -package image - -import ( - "context" - "errors" - - "go.podman.io/image/v5/types" -) - -// memoryImage is a mostly-implementation of types.Image assembled from data -// created in memory, used primarily as a return value of types.Image.UpdatedImage -// as a way to carry various structured information in a type-safe and easy-to-use way. -// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone -// collection of all related information, e.g. 
there is no way to get layer blobs -// from a memoryImage. -type memoryImage struct { - genericManifest - serializedManifest []byte // A private cache for Manifest() -} - -func memoryImageFromManifest(m genericManifest) types.Image { - return &memoryImage{ - genericManifest: m, - serializedManifest: nil, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *memoryImage) Reference() types.ImageReference { - // It would really be inappropriate to return the ImageReference of the image this was based on. - return nil -} - -// Size returns the size of the image as stored, if known, or -1 if not. -func (i *memoryImage) Size() (int64, error) { - return -1, nil -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { - if i.serializedManifest == nil { - m, err := i.genericManifest.serialize() - if err != nil { - return nil, "", err - } - i.serializedManifest = m - } - return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { - // Modifying an image invalidates signatures; a caller asking the updated image for signatures - // is probably confused. - return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") -} - -// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/go.podman.io/image/v5/internal/image/oci.go b/vendor/go.podman.io/image/v5/internal/image/oci.go deleted file mode 100644 index 56a1a6d64..000000000 --- a/vendor/go.podman.io/image/v5/internal/image/oci.go +++ /dev/null @@ -1,336 +0,0 @@ -package image - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "slices" - - ociencspec "github.com/containers/ocicrypt/spec" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/iolimits" - internalManifest "go.podman.io/image/v5/internal/manifest" - "go.podman.io/image/v5/manifest" - "go.podman.io/image/v5/pkg/blobinfocache/none" - "go.podman.io/image/v5/types" -) - -type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of m.Config. 
- m *manifest.OCI1 -} - -func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.OCI1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestOCI1{ - src: src, - m: m, - }, nil -} - -// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { - return &manifestOCI1{ - src: src, - configBlob: configBlob, - m: manifest.OCI1FromComponents(config, layers), - } -} - -func (m *manifestOCI1) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestOCI1) manifestMIMEType() string { - return imgspecv1.MediaTypeImageManifest -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.New("Internal error: neither src nor configBlob set in manifestOCI1") - } - stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.Config.Digest { - return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { - if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) - } - - cb, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(cb, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) 
-func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError -// if the combination of CompressionOperation and CompressionAlgorithm specified -// in one or more options.LayerInfos items indicates that a layer is compressed using -// an algorithm that is not allowed in OCI. -func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.OCI1Clone(m.m), - } - - converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ - manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic, - manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, - manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, - }) - if err != nil { - return nil, err - } - - if converted != nil { - return converted, nil - } - - // No conversion required, update manifest - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. - - return memoryImageFromManifest(©), nil -} - -func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { - return manifest.Schema2Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. 
-// -// We need this function just because a function returning an implementation of the genericManifest -// interface is not automatically assignable to a function type returning the genericManifest interface -func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - return m.convertToManifestSchema2(ctx, options) -} - -// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format. -// If not, it returns (nil, nil). -// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos, -// and edits *options to not try decryption again. -func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) { - if options == nil || options.LayerInfos == nil { - return nil, nil - } - - originalInfos := m.LayerInfos() - if len(originalInfos) != len(options.LayerInfos) { - return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos)) - } - - ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate. - laterEdits := slices.Clone(options.LayerInfos) - needsOCIOnlyEdits := false - for i, edit := range options.LayerInfos { - // Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit. - ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal - ociOnlyEdits[i].CompressionAlgorithm = nil - - if edit.CryptoOperation == types.Decrypt { - needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can’t be represented in Docker schemas - ociOnlyEdits[i].CryptoOperation = types.Decrypt - laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail. - } - - if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd || - originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can’t be represented in Docker schemas. - ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation - ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm - laterEdits[i].CompressionOperation = types.PreserveOriginal - laterEdits[i].CompressionAlgorithm = nil - } - } - if !needsOCIOnlyEdits { - return nil, nil - } - - options.LayerInfos = laterEdits - return ociOnlyEdits, nil -} - -// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestOCI1 object. -func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { - if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) - } - - // Mostly we first make a format conversion, and _afterwards_ do layer edits. 
But first we need to do the layer edits - // which remove OCI-specific features, because trying to convert those layers would fail. - // So, do the layer updates for decryption, and for conversions from Zstd. - ociManifest := m.m - ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options) - if err != nil { - return nil, err - } - if ociOnlyEdits != nil { - ociManifest = manifest.OCI1Clone(ociManifest) - if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil { - return nil, err - } - } - - // Create a copy of the descriptor. - config := schema2DescriptorFromOCI1Descriptor(ociManifest.Config) - - // Above, we have already checked that this manifest refers to an image, not an OCI artifact, - // so the only difference between OCI and DockerSchema2 is the mediatypes. The - // media type of the manifest is handled by manifestSchema2FromComponents. - config.MediaType = manifest.DockerV2Schema2ConfigMediaType - - layers := make([]manifest.Schema2Descriptor, len(ociManifest.Layers)) - for idx := range layers { - layers[idx] = schema2DescriptorFromOCI1Descriptor(ociManifest.Layers[idx]) - switch layers[idx].MediaType { - case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType - case imgspecv1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip - case imgspecv1.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) - case imgspecv1.MediaTypeImageLayer: - layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed - case imgspecv1.MediaTypeImageLayerGzip: - layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType - case imgspecv1.MediaTypeImageLayerZstd: - return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not officially supported for docker images", layers[idx].MediaType) - case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc, - ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc: - return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType) - default: - return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) - } - } - - // Rather than copying the ConfigBlob now, we just pass m.src to the - // translated manifest, since the only difference is the mediatype of - // descriptors there is no change to any blob stored in m.src. - return manifestSchema2FromComponents(config, m.src, nil, layers), nil -} - -// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestOCI1 object. 
-func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) - } - - // We can't directly convert images to V1, but we can transitively convert via a V2 image - m2, err := m.convertToManifestSchema2(ctx, options) - if err != nil { - return nil, err - } - - return m2.convertToManifestSchema1(ctx, options) -} - -// SupportsEncryption returns if encryption is supported for the manifest type -func (m *manifestOCI1) SupportsEncryption(context.Context) bool { - return true -} - -// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image -// (and the code can handle that). -// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted -// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts -// to a different manifest format). -func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool { - return m.m.CanChangeLayerCompression(mimeType) -} diff --git a/vendor/go.podman.io/image/v5/internal/image/oci_index.go b/vendor/go.podman.io/image/v5/internal/image/oci_index.go deleted file mode 100644 index b9071b55d..000000000 --- a/vendor/go.podman.io/image/v5/internal/image/oci_index.go +++ /dev/null @@ -1,34 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "go.podman.io/image/v5/internal/manifest" - "go.podman.io/image/v5/types" -) - -func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { - index, err := manifest.OCI1IndexFromManifest(manblob) - if err != nil { - return nil, fmt.Errorf("parsing OCI1 index: %w", err) - } - targetManifestDigest, err := index.ChooseInstance(sys) - if err != nil { - return nil, fmt.Errorf("choosing image instance: %w", err) - } - manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) - if err != nil { - return nil, fmt.Errorf("fetching target platform image selected from image index: %w", err) - } - - matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) - if err != nil { - return nil, fmt.Errorf("computing manifest digest: %w", err) - } - if !matches { - return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) - } - - return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) -} diff --git a/vendor/go.podman.io/image/v5/internal/image/sourced.go b/vendor/go.podman.io/image/v5/internal/image/sourced.go deleted file mode 100644 index ba2eaa0c9..000000000 --- a/vendor/go.podman.io/image/v5/internal/image/sourced.go +++ /dev/null @@ -1,134 +0,0 @@ -// Package image consolidates knowledge about various container image formats -// (as opposed to image storage mechanisms, which are handled by types.ImageSource) -// and exposes all of them using an unified interface. -package image - -import ( - "context" - - "go.podman.io/image/v5/types" -) - -// FromReference returns a types.ImageCloser implementation for the default instance reading from reference. -// If reference points to a manifest list, .Manifest() still returns the manifest list, -// but other methods transparently return data from an appropriate image instance. -// -// The caller must call .Close() on the returned ImageCloser. 
-// -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. -func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) { - src, err := ref.NewImageSource(ctx, sys) - if err != nil { - return nil, err - } - img, err := FromSource(ctx, sys, src) - if err != nil { - src.Close() - return nil, err - } - return img, nil -} - -// imageCloser implements types.ImageCloser, perhaps allowing simple users -// to use a single object without having keep a reference to a types.ImageSource -// only to call types.ImageSource.Close(). -type imageCloser struct { - types.Image - src types.ImageSource -} - -// FromSource returns a types.ImageCloser implementation for the default instance of source. -// If source is a manifest list, .Manifest() still returns the manifest list, -// but other methods transparently return data from an appropriate image instance. -// -// The caller must call .Close() on the returned ImageCloser. -// -// FromSource “takes ownership” of the input ImageSource and will call src.Close() -// when the image is closed. (This does not prevent callers from using both the -// Image and ImageSource objects simultaneously, but it means that they only need to -// the Image.) -// -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. -// -// Most callers can use either FromUnparsedImage or FromReference instead. -// -// This is publicly visible as c/image/image.FromSource. -func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { - img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) - if err != nil { - return nil, err - } - return &imageCloser{ - Image: img, - src: src, - }, nil -} - -func (ic *imageCloser) Close() error { - return ic.src.Close() -} - -// SourcedImage is a general set of utilities for working with container images, -// whatever is their underlying transport (i.e. ImageSource-independent). -// Note the existence of docker.Image and image.memoryImage: various instances -// of a types.Image may not be a SourcedImage directly. -// -// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do. -// -// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image. -type SourcedImage struct { - *UnparsedImage - ManifestBlob []byte // The manifest of the relevant instance - ManifestMIMEType string // MIME type of ManifestBlob - // genericManifest contains data corresponding to manifestBlob. - // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest - // if you want to preserve the original manifest; use manifestBlob directly. - genericManifest -} - -// FromUnparsedImage returns a types.Image implementation for unparsed. -// If unparsed represents a manifest list, .Manifest() still returns the manifest list, -// but other methods transparently return data from an appropriate single image. -// -// The Image must not be used after the underlying ImageSource is Close()d. -// -// This is publicly visible as c/image/image.FromUnparsedImage. 
-func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) { - // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: - // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, - // this is the only UnparsedImage implementation around, anyway. - - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). - manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) - if err != nil { - return nil, err - } - - parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) - if err != nil { - return nil, err - } - - return &SourcedImage{ - UnparsedImage: unparsed, - ManifestBlob: manifestBlob, - ManifestMIMEType: manifestMIMEType, - genericManifest: parsedManifest, - }, nil -} - -// Size returns the size of the image as stored, if it's known, or -1 if it isn't. -func (i *SourcedImage) Size() (int64, error) { - return -1, nil -} - -// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. -func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { - return i.ManifestBlob, i.ManifestMIMEType, nil -} - -func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest) -} diff --git a/vendor/go.podman.io/image/v5/internal/image/unparsed.go b/vendor/go.podman.io/image/v5/internal/image/unparsed.go deleted file mode 100644 index 45c372383..000000000 --- a/vendor/go.podman.io/image/v5/internal/image/unparsed.go +++ /dev/null @@ -1,125 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "github.com/opencontainers/go-digest" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/imagesource" - "go.podman.io/image/v5/internal/private" - "go.podman.io/image/v5/internal/signature" - "go.podman.io/image/v5/manifest" - "go.podman.io/image/v5/types" -) - -// UnparsedImage implements types.UnparsedImage . -// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. -// -// This is publicly visible as c/image/image.UnparsedImage. -type UnparsedImage struct { - src private.ImageSource - instanceDigest *digest.Digest - cachedManifest []byte // A private cache for Manifest(); nil if not yet known. - // A private cache for Manifest(), may be the empty string if guessing failed. - // Valid iff cachedManifest is not nil. - cachedManifestMIMEType string - cachedSignatures []signature.Signature // A private cache for Signatures(); nil if not yet known. -} - -// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). -// -// This implementation of [types.UnparsedImage] ensures that [types.UnparsedImage.Manifest] validates the image -// against instanceDigest if set, or, if not, a digest implied by src.Reference, if any. -// -// The UnparsedImage must not be used after the underlying ImageSource is Close()d. -// -// This is publicly visible as c/image/image.UnparsedInstance. 
-func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { - return &UnparsedImage{ - src: imagesource.FromPublic(src), - instanceDigest: instanceDigest, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *UnparsedImage) Reference() types.ImageReference { - // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. - return i.src.Reference() -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -// -// Users of UnparsedImage are promised that this validates the image -// against either i.instanceDigest if set, or against a digest included in i.src.Reference. -func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { - if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) - if err != nil { - return nil, "", err - } - - // ImageSource.GetManifest does not do digest verification, but we do; - // this immediately protects also any user of types.Image. - if digest, haveDigest := i.expectedManifestDigest(); haveDigest { - matches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return nil, "", fmt.Errorf("computing manifest digest: %w", err) - } - if !matches { - return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest) - } - } - - i.cachedManifest = m - i.cachedManifestMIMEType = mt - } - return i.cachedManifest, i.cachedManifestMIMEType, nil -} - -// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known. -// The bool return value seems redundant with digest != ""; it is used explicitly -// to refuse (unexpected) situations when the digest exists but is "". -func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { - if i.instanceDigest != nil { - return *i.instanceDigest, true - } - ref := i.Reference().DockerReference() - if ref != nil { - if canonical, ok := ref.(reference.Canonical); ok { - return canonical.Digest(), true - } - } - return "", false -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { - // It would be consistent to make this an internal/unparsedimage/impl.Compat wrapper, - // but this is very likely to be the only implementation ever. - sigs, err := i.UntrustedSignatures(ctx) - if err != nil { - return nil, err - } - simpleSigs := [][]byte{} - for _, sig := range sigs { - if sig, ok := sig.(signature.SimpleSigning); ok { - simpleSigs = append(simpleSigs, sig.UntrustedSignature()) - } - } - return simpleSigs, nil -} - -// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need. 
-func (i *UnparsedImage) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) { - if i.cachedSignatures == nil { - sigs, err := i.src.GetSignaturesWithFormat(ctx, i.instanceDigest) - if err != nil { - return nil, err - } - i.cachedSignatures = sigs - } - return i.cachedSignatures, nil -} diff --git a/vendor/go.podman.io/image/v5/internal/imagedestination/impl/compat.go b/vendor/go.podman.io/image/v5/internal/imagedestination/impl/compat.go deleted file mode 100644 index 9a8d18713..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagedestination/impl/compat.go +++ /dev/null @@ -1,114 +0,0 @@ -package impl - -import ( - "context" - "io" - - "github.com/opencontainers/go-digest" - "go.podman.io/image/v5/internal/blobinfocache" - "go.podman.io/image/v5/internal/private" - "go.podman.io/image/v5/internal/signature" - "go.podman.io/image/v5/types" -) - -// Compat implements the obsolete parts of types.ImageDestination -// for implementations of private.ImageDestination. -// See AddCompat below. -type Compat struct { - dest private.ImageDestinationInternalOnly -} - -// AddCompat initializes Compat to implement the obsolete parts of types.ImageDestination -// for implementations of private.ImageDestination. -// -// Use it like this: -// -// type yourDestination struct { -// impl.Compat -// … -// } -// -// dest := &yourDestination{…} -// dest.Compat = impl.AddCompat(dest) -func AddCompat(dest private.ImageDestinationInternalOnly) Compat { - return Compat{dest} -} - -// PutBlob writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// May update cache. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{ - Cache: blobinfocache.FromBlobInfoCache(cache), - IsConfig: isConfig, - }) - if err != nil { - return types.BlobInfo{}, err - } - return types.BlobInfo{ - Digest: res.Digest, - Size: res.Size, - }, nil -} - -// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. 
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{ - Cache: blobinfocache.FromBlobInfoCache(cache), - CanSubstitute: canSubstitute, - }) - if !reused || err != nil { - return reused, types.BlobInfo{}, err - } - res := types.BlobInfo{ - Digest: blob.Digest, - Size: blob.Size, - CompressionOperation: blob.CompressionOperation, - CompressionAlgorithm: blob.CompressionAlgorithm, - } - // This is probably not necessary; we preserve MediaType to decrease risks of breaking for external callers. - // Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution; - // provide the value in cases where it is likely to be correct. - if blob.Digest == info.Digest { - res.MediaType = info.MediaType - } - return true, res, nil -} - -// PutSignatures writes a set of signatures to the destination. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for -// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. -// MUST be called after PutManifest (signatures may reference manifest contents). -func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - withFormat := []signature.Signature{} - for _, sig := range signatures { - withFormat = append(withFormat, signature.SimpleSigningFromBlob(sig)) - } - return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest) -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (c *Compat) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - return c.dest.CommitWithOptions(ctx, private.CommitOptions{ - UnparsedToplevel: unparsedToplevel, - }) -} diff --git a/vendor/go.podman.io/image/v5/internal/imagedestination/impl/helpers.go b/vendor/go.podman.io/image/v5/internal/imagedestination/impl/helpers.go deleted file mode 100644 index b12beff07..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagedestination/impl/helpers.go +++ /dev/null @@ -1,15 +0,0 @@ -package impl - -import ( - "go.podman.io/image/v5/internal/manifest" - "go.podman.io/image/v5/internal/private" -) - -// OriginalCandidateMatchesTryReusingBlobOptions returns true if the original blob passed to TryReusingBlobWithOptions -// is acceptable based on opts. 
-func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool { - return manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ - PossibleManifestFormats: opts.PossibleManifestFormats, - RequiredCompression: opts.RequiredCompression, - }, opts.OriginalCompression) -} diff --git a/vendor/go.podman.io/image/v5/internal/imagedestination/impl/properties.go b/vendor/go.podman.io/image/v5/internal/imagedestination/impl/properties.go deleted file mode 100644 index 1aab4b061..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagedestination/impl/properties.go +++ /dev/null @@ -1,72 +0,0 @@ -package impl - -import "go.podman.io/image/v5/types" - -// Properties collects properties of an ImageDestination that are constant throughout its lifetime -// (but might differ across instances). -type Properties struct { - // SupportedManifestMIMETypes tells which manifest MIME types the destination supports. - // A empty slice or nil means any MIME type can be tried to upload. - SupportedManifestMIMETypes []string - // DesiredLayerCompression indicates the kind of compression to apply on layers - DesiredLayerCompression types.LayerCompression - // AcceptsForeignLayerURLs is false if foreign layers in manifest should be actually - // uploaded to the image destination, true otherwise. - AcceptsForeignLayerURLs bool - // MustMatchRuntimeOS is set to true if the destination can store only images targeted for the current runtime architecture and OS. - MustMatchRuntimeOS bool - // IgnoresEmbeddedDockerReference is set to true if the destination does not care about Image.EmbeddedDockerReferenceConflicts(), - // and would prefer to receive an unmodified manifest instead of one modified for the destination. - // Does not make a difference if Reference().DockerReference() is nil. - IgnoresEmbeddedDockerReference bool - // HasThreadSafePutBlob indicates that PutBlob can be executed concurrently. - HasThreadSafePutBlob bool -} - -// PropertyMethodsInitialize implements parts of private.ImageDestination corresponding to Properties. -type PropertyMethodsInitialize struct { - // We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name. - - vals Properties -} - -// PropertyMethods creates an PropertyMethodsInitialize for vals. -func PropertyMethods(vals Properties) PropertyMethodsInitialize { - return PropertyMethodsInitialize{ - vals: vals, - } -} - -// SupportedManifestMIMETypes tells which manifest mime types the destination supports -// If an empty slice or nil it's returned, then any mime type can be tried to upload -func (o PropertyMethodsInitialize) SupportedManifestMIMETypes() []string { - return o.vals.SupportedManifestMIMETypes -} - -// DesiredLayerCompression indicates the kind of compression to apply on layers -func (o PropertyMethodsInitialize) DesiredLayerCompression() types.LayerCompression { - return o.vals.DesiredLayerCompression -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (o PropertyMethodsInitialize) AcceptsForeignLayerURLs() bool { - return o.vals.AcceptsForeignLayerURLs -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. 
-func (o PropertyMethodsInitialize) MustMatchRuntimeOS() bool { - return o.vals.MustMatchRuntimeOS -} - -// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (o PropertyMethodsInitialize) IgnoresEmbeddedDockerReference() bool { - return o.vals.IgnoresEmbeddedDockerReference -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (o PropertyMethodsInitialize) HasThreadSafePutBlob() bool { - return o.vals.HasThreadSafePutBlob -} diff --git a/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/original_oci_config.go b/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/original_oci_config.go deleted file mode 100644 index c4536e933..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/original_oci_config.go +++ /dev/null @@ -1,16 +0,0 @@ -package stubs - -import ( - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing. -type IgnoresOriginalOCIConfig struct{} - -// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, -// or an error obtaining that value (e.g. if the image is an artifact and not a container image). -// The destination can use it in its TryReusingBlob/PutBlob implementations -// (otherwise it only obtains the final config after all layers are written). -func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { - return nil -} diff --git a/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/put_blob_partial.go deleted file mode 100644 index 573ad832e..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/put_blob_partial.go +++ /dev/null @@ -1,52 +0,0 @@ -package stubs - -import ( - "context" - "fmt" - - "go.podman.io/image/v5/internal/private" - "go.podman.io/image/v5/types" -) - -// NoPutBlobPartialInitialize implements parts of private.ImageDestination -// for transports that don’t support PutBlobPartial(). -// See NoPutBlobPartial() below. -type NoPutBlobPartialInitialize struct { - transportName string -} - -// NoPutBlobPartial creates a NoPutBlobPartialInitialize for ref. -func NoPutBlobPartial(ref types.ImageReference) NoPutBlobPartialInitialize { - return NoPutBlobPartialRaw(ref.Transport().Name()) -} - -// NoPutBlobPartialRaw is the same thing as NoPutBlobPartial, but it can be used -// in situations where no ImageReference is available. -func NoPutBlobPartialRaw(transportName string) NoPutBlobPartialInitialize { - return NoPutBlobPartialInitialize{ - transportName: transportName, - } -} - -// SupportsPutBlobPartial returns true if PutBlobPartial is supported. -func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool { - return false -} - -// PutBlobPartial attempts to create a blob using the data that is already present -// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. -// It is available only if SupportsPutBlobPartial(). -// Even if SupportsPutBlobPartial() returns true, the call can fail. 
-// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. -// The fallback _must not_ be done otherwise. -func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { - return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) -} - -// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true. -type ImplementsPutBlobPartial struct{} - -// SupportsPutBlobPartial returns true if PutBlobPartial is supported. -func (stub ImplementsPutBlobPartial) SupportsPutBlobPartial() bool { - return true -} diff --git a/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/signatures.go b/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/signatures.go deleted file mode 100644 index c046449b1..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/signatures.go +++ /dev/null @@ -1,50 +0,0 @@ -package stubs - -import ( - "context" - "errors" - - "github.com/opencontainers/go-digest" - "go.podman.io/image/v5/internal/signature" -) - -// NoSignaturesInitialize implements parts of private.ImageDestination -// for transports that don’t support storing signatures. -// See NoSignatures() below. -type NoSignaturesInitialize struct { - message string -} - -// NoSignatures creates a NoSignaturesInitialize, failing with message. -func NoSignatures(message string) NoSignaturesInitialize { - return NoSignaturesInitialize{ - message: message, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (stub NoSignaturesInitialize) SupportsSignatures(ctx context.Context) error { - return errors.New(stub.message) -} - -// PutSignaturesWithFormat writes a set of signatures to the destination. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for -// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. -// MUST be called after PutManifest (signatures may reference manifest contents). -func (stub NoSignaturesInitialize) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { - if len(signatures) != 0 { - return errors.New(stub.message) - } - return nil -} - -// SupportsSignatures implements SupportsSignatures() that returns nil. -// Note that it might be even more useful to return a value dynamically detected based on -type AlwaysSupportsSignatures struct{} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. 
-func (stub AlwaysSupportsSignatures) SupportsSignatures(ctx context.Context) error { - return nil -} diff --git a/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/stubs.go b/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/stubs.go deleted file mode 100644 index ab233406a..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagedestination/stubs/stubs.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package stubs contains trivial stubs for parts of private.ImageDestination. -// It can be used from internal/wrapper, so it should not drag in any extra dependencies. -// Compare with imagedestination/impl, which might require non-trivial implementation work. -// -// There are two kinds of stubs: -// -// First, there are pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination -// implementation: -// -// type yourDestination struct { -// stubs.ImplementsPutBlobPartial -// … -// } -// -// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker -// means that a constructor must be called: -// -// type yourDestination struct { -// stubs.NoPutBlobPartialInitialize -// … -// } -// -// dest := &yourDestination{ -// … -// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), -// } -package stubs diff --git a/vendor/go.podman.io/image/v5/internal/imagesource/impl/compat.go b/vendor/go.podman.io/image/v5/internal/imagesource/impl/compat.go deleted file mode 100644 index 4d56f4359..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagesource/impl/compat.go +++ /dev/null @@ -1,55 +0,0 @@ -package impl - -import ( - "context" - - "github.com/opencontainers/go-digest" - "go.podman.io/image/v5/internal/private" - "go.podman.io/image/v5/internal/signature" -) - -// Compat implements the obsolete parts of types.ImageSource -// for implementations of private.ImageSource. -// See AddCompat below. -type Compat struct { - src private.ImageSourceInternalOnly -} - -// AddCompat initializes Compat to implement the obsolete parts of types.ImageSource -// for implementations of private.ImageSource. -// -// Use it like this: -// -// type yourSource struct { -// impl.Compat -// … -// } -// -// src := &yourSource{…} -// src.Compat = impl.AddCompat(src) -func AddCompat(src private.ImageSourceInternalOnly) Compat { - return Compat{src} -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (c *Compat) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - // Silently ignore signatures with other formats; the caller can’t handle them. - // Admittedly callers that want to sync all of the image might want to fail instead; this - // way an upgrade of c/image neither breaks them nor adds new functionality. - // Alternatively, we could possibly define the old GetSignatures to use the multi-format - // signature.Blob representation now, in general, but that could silently break them as well. 
- sigs, err := c.src.GetSignaturesWithFormat(ctx, instanceDigest) - if err != nil { - return nil, err - } - simpleSigs := [][]byte{} - for _, sig := range sigs { - if sig, ok := sig.(signature.SimpleSigning); ok { - simpleSigs = append(simpleSigs, sig.UntrustedSignature()) - } - } - return simpleSigs, nil -} diff --git a/vendor/go.podman.io/image/v5/internal/imagesource/impl/layer_infos.go b/vendor/go.podman.io/image/v5/internal/imagesource/impl/layer_infos.go deleted file mode 100644 index 63016f333..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagesource/impl/layer_infos.go +++ /dev/null @@ -1,23 +0,0 @@ -package impl - -import ( - "context" - - "github.com/opencontainers/go-digest" - "go.podman.io/image/v5/types" -) - -// DoesNotAffectLayerInfosForCopy implements LayerInfosForCopy() that returns nothing. -type DoesNotAffectLayerInfosForCopy struct{} - -// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer -// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() -// to read the image's layers. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (stub DoesNotAffectLayerInfosForCopy) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/go.podman.io/image/v5/internal/imagesource/impl/properties.go b/vendor/go.podman.io/image/v5/internal/imagesource/impl/properties.go deleted file mode 100644 index 73e8c78e9..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagesource/impl/properties.go +++ /dev/null @@ -1,27 +0,0 @@ -package impl - -// Properties collects properties of an ImageSource that are constant throughout its lifetime -// (but might differ across instances). -type Properties struct { - // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. - HasThreadSafeGetBlob bool -} - -// PropertyMethodsInitialize implements parts of private.ImageSource corresponding to Properties. -type PropertyMethodsInitialize struct { - // We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name. - - vals Properties -} - -// PropertyMethods creates an PropertyMethodsInitialize for vals. -func PropertyMethods(vals Properties) PropertyMethodsInitialize { - return PropertyMethodsInitialize{ - vals: vals, - } -} - -// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. -func (o PropertyMethodsInitialize) HasThreadSafeGetBlob() bool { - return o.vals.HasThreadSafeGetBlob -} diff --git a/vendor/go.podman.io/image/v5/internal/imagesource/impl/signatures.go b/vendor/go.podman.io/image/v5/internal/imagesource/impl/signatures.go deleted file mode 100644 index 749e42a0c..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagesource/impl/signatures.go +++ /dev/null @@ -1,19 +0,0 @@ -package impl - -import ( - "context" - - "github.com/opencontainers/go-digest" - "go.podman.io/image/v5/internal/signature" -) - -// NoSignatures implements GetSignatures() that returns nothing. 
-type NoSignatures struct{} - -// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (stub NoSignatures) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { - return nil, nil -} diff --git a/vendor/go.podman.io/image/v5/internal/imagesource/stubs/get_blob_at.go b/vendor/go.podman.io/image/v5/internal/imagesource/stubs/get_blob_at.go deleted file mode 100644 index b1fe9b9a6..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagesource/stubs/get_blob_at.go +++ /dev/null @@ -1,54 +0,0 @@ -package stubs - -import ( - "context" - "fmt" - "io" - - "go.podman.io/image/v5/internal/private" - "go.podman.io/image/v5/types" -) - -// NoGetBlobAtInitialize implements parts of private.ImageSource -// for transports that don’t support GetBlobAt(). -// See NoGetBlobAt() below. -type NoGetBlobAtInitialize struct { - transportName string -} - -// NoGetBlobAt() creates a NoGetBlobAtInitialize for ref. -func NoGetBlobAt(ref types.ImageReference) NoGetBlobAtInitialize { - return NoGetBlobAtRaw(ref.Transport().Name()) -} - -// NoGetBlobAtRaw is the same thing as NoGetBlobAt, but it can be used -// in situations where no ImageReference is available. -func NoGetBlobAtRaw(transportName string) NoGetBlobAtInitialize { - return NoGetBlobAtInitialize{ - transportName: transportName, - } -} - -// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. -func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool { - return false -} - -// GetBlobAt returns a sequential channel of readers that contain data for the requested -// blob chunks, and a channel that might get a single error value. -// The specified chunks must be not overlapping and sorted by their offset. -// The readers must be fully consumed, in the order they are returned, before blocking -// to read the next chunk. -// If the Length for the last chunk is set to math.MaxUint64, then it -// fully fetches the remaining data from the offset to the end of the blob. -func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName) -} - -// ImplementsGetBlobAt implements SupportsGetBlobAt() that returns true. -type ImplementsGetBlobAt struct{} - -// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. -func (stub ImplementsGetBlobAt) SupportsGetBlobAt() bool { - return true -} diff --git a/vendor/go.podman.io/image/v5/internal/imagesource/stubs/stubs.go b/vendor/go.podman.io/image/v5/internal/imagesource/stubs/stubs.go deleted file mode 100644 index cb345395e..000000000 --- a/vendor/go.podman.io/image/v5/internal/imagesource/stubs/stubs.go +++ /dev/null @@ -1,28 +0,0 @@ -// Package stubs contains trivial stubs for parts of private.ImageSource. -// It can be used from internal/wrapper, so it should not drag in any extra dependencies. -// Compare with imagesource/impl, which might require non-trivial implementation work. 
-//
-// There are two kinds of stubs:
-//
-// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
-//
-// implementation:
-//
-// type yourSource struct {
-// stubs.ImplementsGetBlobAt
-// …
-// }
-//
-// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
-// means that a constructor must be called:
-//
-// type yourSource struct {
-// stubs.NoGetBlobAtInitialize
-// …
-// }
-//
-// dest := &yourSource{
-// …
-// NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
-// }
-package stubs
diff --git a/vendor/go.podman.io/image/v5/internal/imagesource/wrapper.go b/vendor/go.podman.io/image/v5/internal/imagesource/wrapper.go
deleted file mode 100644
index 00bf8893f..000000000
--- a/vendor/go.podman.io/image/v5/internal/imagesource/wrapper.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package imagesource
-
-import (
- "context"
-
- "github.com/opencontainers/go-digest"
- "go.podman.io/image/v5/internal/imagesource/stubs"
- "go.podman.io/image/v5/internal/private"
- "go.podman.io/image/v5/internal/signature"
- "go.podman.io/image/v5/types"
-)
-
-// wrapped provides the private.ImageSource operations
-// for a source that only implements types.ImageSource
-type wrapped struct {
- stubs.NoGetBlobAtInitialize
-
- types.ImageSource
-}
-
-// FromPublic(src) returns an object that provides the private.ImageSource API
-//
-// Eventually, we might want to expose this function, and methods of the returned object,
-// as a public API (or rather, a variant that does not include the already-superseded
-// methods of types.ImageSource, and has added more future-proofing), and more strongly
-// deprecate direct use of types.ImageSource.
-//
-// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
-// with public methods, or perhaps a private interface), so that we can add methods
-// without breaking any external implementers of a public interface.
-func FromPublic(src types.ImageSource) private.ImageSource {
- if src2, ok := src.(private.ImageSource); ok {
- return src2
- }
- return &wrapped{
- NoGetBlobAtInitialize: stubs.NoGetBlobAt(src.Reference()),
-
- ImageSource: src,
- }
-}
-
-// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (w *wrapped) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
- sigs, err := w.GetSignatures(ctx, instanceDigest)
- if err != nil {
- return nil, err
- }
- res := []signature.Signature{}
- for _, sig := range sigs {
- res = append(res, signature.SimpleSigningFromBlob(sig))
- }
- return res, nil
-}
diff --git a/vendor/go.podman.io/image/v5/internal/iolimits/iolimits.go b/vendor/go.podman.io/image/v5/internal/iolimits/iolimits.go
deleted file mode 100644
index f17d00246..000000000
--- a/vendor/go.podman.io/image/v5/internal/iolimits/iolimits.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package iolimits
-
-import (
- "fmt"
- "io"
-)
-
-// All constants below are intended to be used as limits for `ReadAtMost`. The
-// immediate use-case for limiting the size of in-memory copied data is to
-// protect against OOM DOS attacks as described in CVE-2020-1702.
Instead of -// copying data until running out of memory, we error out after hitting the -// specified limit. -const ( - // megaByte denotes one megabyte and is intended to be used as a limit in - // `ReadAtMost`. - megaByte = 1 << 20 - // MaxManifestBodySize is the maximum allowed size of a manifest. The limit - // of 4 MB aligns with the one of a Docker registry: - // https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30 - MaxManifestBodySize = 4 * megaByte - // MaxAuthTokenBodySize is the maximum allowed size of an auth token. - // The limit of 1 MB is considered to be greatly sufficient. - MaxAuthTokenBodySize = megaByte - // MaxSignatureListBodySize is the maximum allowed size of a signature list. - // The limit of 4 MB is considered to be greatly sufficient. - MaxSignatureListBodySize = 4 * megaByte - // MaxSignatureBodySize is the maximum allowed size of a signature. - // The limit of 4 MB is considered to be greatly sufficient. - MaxSignatureBodySize = 4 * megaByte - // MaxErrorBodySize is the maximum allowed size of an error-response body. - // The limit of 1 MB is considered to be greatly sufficient. - MaxErrorBodySize = megaByte - // MaxConfigBodySize is the maximum allowed size of a config blob. - // The limit of 4 MB is considered to be greatly sufficient. - MaxConfigBodySize = 4 * megaByte - // MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body. - // The limit of 4 MB is considered to be greatly sufficient. - MaxOpenShiftStatusBody = 4 * megaByte - // MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images) - // The limit of 1 MB is considered to be greatly sufficient. - MaxTarFileManifestSize = megaByte -) - -// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded. -func ReadAtMost(reader io.Reader, limit int) ([]byte, error) { - limitedReader := io.LimitReader(reader, int64(limit+1)) - - res, err := io.ReadAll(limitedReader) - if err != nil { - return nil, err - } - - if len(res) > limit { - return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit) - } - - return res, nil -} diff --git a/vendor/go.podman.io/image/v5/internal/manifest/common.go b/vendor/go.podman.io/image/v5/internal/manifest/common.go deleted file mode 100644 index 1f2ccb528..000000000 --- a/vendor/go.podman.io/image/v5/internal/manifest/common.go +++ /dev/null @@ -1,72 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" -) - -// AllowedManifestFields is a bit mask of “essential” manifest fields that ValidateUnambiguousManifestFormat -// can expect to be present. -type AllowedManifestFields int - -const ( - AllowedFieldConfig AllowedManifestFields = 1 << iota - AllowedFieldFSLayers - AllowedFieldHistory - AllowedFieldLayers - AllowedFieldManifests - AllowedFieldFirstUnusedBit // Keep this at the end! -) - -// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than -// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields -// other than the ones the caller specifically allows. -// expectedMIMEType is used only for diagnostics. -// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format -// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous -// data that just isn’t what was expected, as opposed to actually ambiguous data. 
-func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, - allowed AllowedManifestFields) error { - if allowed >= AllowedFieldFirstUnusedBit { - return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed) - } - // Use a private type to decode, not just a map[string]any, because we want - // to also reject case-insensitive matches (which would be used by Go when really decoding - // the manifest). - // (It is expected that as manifest formats are added or extended over time, more fields will be added - // here.) - detectedFields := struct { - Config any `json:"config"` - FSLayers any `json:"fsLayers"` - History any `json:"history"` - Layers any `json:"layers"` - Manifests any `json:"manifests"` - }{} - if err := json.Unmarshal(manifest, &detectedFields); err != nil { - // The caller was supposed to already validate version numbers, so this should not happen; - // let’s not bother with making this error “nice”. - return err - } - unexpected := []string{} - // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste. - if detectedFields.Config != nil && (allowed&AllowedFieldConfig) == 0 { - unexpected = append(unexpected, "config") - } - if detectedFields.FSLayers != nil && (allowed&AllowedFieldFSLayers) == 0 { - unexpected = append(unexpected, "fsLayers") - } - if detectedFields.History != nil && (allowed&AllowedFieldHistory) == 0 { - unexpected = append(unexpected, "history") - } - if detectedFields.Layers != nil && (allowed&AllowedFieldLayers) == 0 { - unexpected = append(unexpected, "layers") - } - if detectedFields.Manifests != nil && (allowed&AllowedFieldManifests) == 0 { - unexpected = append(unexpected, "manifests") - } - if len(unexpected) != 0 { - return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`, - unexpected, expectedMIMEType) - } - return nil -} diff --git a/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2.go b/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2.go deleted file mode 100644 index 68d079697..000000000 --- a/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2.go +++ /dev/null @@ -1,15 +0,0 @@ -package manifest - -import ( - "github.com/opencontainers/go-digest" -) - -// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. -// -// This is publicly visible as c/image/manifest.Schema2Descriptor. -type Schema2Descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} diff --git a/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2_list.go b/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2_list.go deleted file mode 100644 index fdcc42083..000000000 --- a/vendor/go.podman.io/image/v5/internal/manifest/docker_schema2_list.go +++ /dev/null @@ -1,311 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - "slices" - - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - platform "go.podman.io/image/v5/internal/pkg/platform" - compression "go.podman.io/image/v5/pkg/compression/types" - "go.podman.io/image/v5/types" -) - -// Schema2PlatformSpec describes the platform which a particular manifest is -// specialized for. -// This is publicly visible as c/image/manifest.Schema2PlatformSpec. 
-type Schema2PlatformSpec struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` // removed in OCI -} - -// Schema2ManifestDescriptor references a platform-specific manifest. -// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor. -type Schema2ManifestDescriptor struct { - Schema2Descriptor - Platform Schema2PlatformSpec `json:"platform"` -} - -// Schema2ListPublic is a list of platform-specific manifests. -// This is publicly visible as c/image/manifest.Schema2List. -// Internal users should usually use Schema2List instead. -type Schema2ListPublic struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - Manifests []Schema2ManifestDescriptor `json:"manifests"` -} - -// MIMEType returns the MIME type of this particular manifest list. -func (list *Schema2ListPublic) MIMEType() string { - return list.MediaType -} - -// Instances returns a slice of digests of the manifests that this list knows of. -func (list *Schema2ListPublic) Instances() []digest.Digest { - results := make([]digest.Digest, len(list.Manifests)) - for i, m := range list.Manifests { - results[i] = m.Digest - } - return results -} - -// Instance returns the ListUpdate of a particular instance in the list. -func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) { - for _, manifest := range list.Manifests { - if manifest.Digest == instanceDigest { - ret := ListUpdate{ - Digest: manifest.Digest, - Size: manifest.Size, - MediaType: manifest.MediaType, - } - ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName} - platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform) - ret.ReadOnly.Platform = &platform - return ret, nil - } - } - return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) -} - -// UpdateInstances updates the sizes, digests, and media types of the manifests -// which the list catalogs. 
-func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { - editInstances := []ListEdit{} - for i, instance := range updates { - editInstances = append(editInstances, ListEdit{ - UpdateOldDigest: list.Manifests[i].Digest, - UpdateDigest: instance.Digest, - UpdateSize: instance.Size, - UpdateMediaType: instance.MediaType, - ListOperation: ListOpUpdate}) - } - return list.editInstances(editInstances) -} - -func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error { - addedEntries := []Schema2ManifestDescriptor{} - for i, editInstance := range editInstances { - switch editInstance.ListOperation { - case ListOpUpdate: - if err := editInstance.UpdateOldDigest.Validate(); err != nil { - return fmt.Errorf("Schema2List.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err) - } - if err := editInstance.UpdateDigest.Validate(); err != nil { - return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) - } - targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool { - return m.Digest == editInstance.UpdateOldDigest - }) - if targetIndex == -1 { - return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest) - } - list.Manifests[targetIndex].Digest = editInstance.UpdateDigest - if editInstance.UpdateSize < 0 { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) - } - list.Manifests[targetIndex].Size = editInstance.UpdateSize - if editInstance.UpdateMediaType == "" { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType) - } - list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType - case ListOpAdd: - if editInstance.AddPlatform == nil { - // Should we create a struct with empty fields instead? - // Right now ListOpAdd is only called when an instance with the same platform value - // already exists in the manifest, so this should not be reached in practice. - return fmt.Errorf("adding a schema2 list instance with no platform specified is not supported") - } - addedEntries = append(addedEntries, Schema2ManifestDescriptor{ - Schema2Descriptor{ - Digest: editInstance.AddDigest, - Size: editInstance.AddSize, - MediaType: editInstance.AddMediaType, - }, - schema2PlatformSpecFromOCIPlatform(*editInstance.AddPlatform), - }) - default: - return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) - } - } - if len(addedEntries) != 0 { - // slices.Clone() here to ensure a private backing array; - // an external caller could have manually created Schema2ListPublic with a slice with extra capacity. - list.Manifests = append(slices.Clone(list.Manifests), addedEntries...) - } - return nil -} - -func (list *Schema2List) EditInstances(editInstances []ListEdit) error { - return list.editInstances(editInstances) -} - -func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { - // ChooseInstanceByCompression is same as ChooseInstance for schema2 manifest list. - return list.ChooseInstance(ctx) -} - -// ChooseInstance parses blob as a schema2 manifest list, and returns the digest -// of the image which is appropriate for the current environment. 
-func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedPlatforms := platform.WantedPlatforms(ctx) - for _, wantedPlatform := range wantedPlatforms { - for _, d := range list.Manifests { - imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform) - if platform.MatchesPlatform(imagePlatform, wantedPlatform) { - return d.Digest, nil - } - } - } - return "", fmt.Errorf("no image found in manifest list for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) -} - -// Serialize returns the list in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (list *Schema2ListPublic) Serialize() ([]byte, error) { - buf, err := json.Marshal(list) - if err != nil { - return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err) - } - return buf, nil -} - -// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the -// supplied data. -// This is publicly visible as c/image/manifest.Schema2ListFromComponents. -func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic { - list := Schema2ListPublic{ - SchemaVersion: 2, - MediaType: DockerV2ListMediaType, - Manifests: make([]Schema2ManifestDescriptor, len(components)), - } - for i, component := range components { - m := Schema2ManifestDescriptor{ - Schema2Descriptor{ - MediaType: component.MediaType, - Size: component.Size, - Digest: component.Digest, - URLs: slices.Clone(component.URLs), - }, - Schema2PlatformSpec{ - Architecture: component.Platform.Architecture, - OS: component.Platform.OS, - OSVersion: component.Platform.OSVersion, - OSFeatures: slices.Clone(component.Platform.OSFeatures), - Variant: component.Platform.Variant, - Features: slices.Clone(component.Platform.Features), - }, - } - list.Manifests[i] = m - } - return &list -} - -// Schema2ListPublicClone creates a deep copy of the passed-in list. -// This is publicly visible as c/image/manifest.Schema2ListClone. -func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic { - return Schema2ListPublicFromComponents(list.Manifests) -} - -// ToOCI1Index returns the list encoded as an OCI1 index. -func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) { - components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) - for _, manifest := range list.Manifests { - platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform) - components = append(components, imgspecv1.Descriptor{ - MediaType: manifest.MediaType, - Size: manifest.Size, - Digest: manifest.Digest, - URLs: slices.Clone(manifest.URLs), - Platform: &platform, - }) - } - oci := OCI1IndexPublicFromComponents(components, nil) - return oci, nil -} - -// ToSchema2List returns the list encoded as a Schema2 list. -func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) { - return Schema2ListPublicClone(list), nil -} - -// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled -// JSON, presumably generated by encoding a Schema2 manifest list. -// This is publicly visible as c/image/manifest.Schema2ListFromManifest. 
-func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) {
- list := Schema2ListPublic{
- Manifests: []Schema2ManifestDescriptor{},
- }
- if err := json.Unmarshal(manifest, &list); err != nil {
- return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
- }
- if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
- AllowedFieldManifests); err != nil {
- return nil, err
- }
- return &list, nil
-}
-
-// Clone returns a deep copy of this list and its contents.
-func (list *Schema2ListPublic) Clone() ListPublic {
- return Schema2ListPublicClone(list)
-}
-
-// ConvertToMIMEType converts the passed-in manifest list to a manifest
-// list of the specified type.
-func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
- switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
- case DockerV2ListMediaType:
- return list.Clone(), nil
- case imgspecv1.MediaTypeImageIndex:
- return list.ToOCI1Index()
- case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
- return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
- default:
- // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
- return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
- }
-}
-
-// Schema2List is a list of platform-specific manifests.
-type Schema2List struct {
- Schema2ListPublic
-}
-
-func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
- return &Schema2List{*public}
-}
-
-func (list *Schema2List) CloneInternal() List {
- return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic))
-}
-
-func (list *Schema2List) Clone() ListPublic {
- return list.CloneInternal()
-}
-
-// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
-// JSON, presumably generated by encoding a Schema2 manifest list.
-func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
- public, err := Schema2ListPublicFromManifest(manifest)
- if err != nil {
- return nil, err
- }
- return schema2ListFromPublic(public), nil
-}
-
-// ociPlatformFromSchema2PlatformSpec converts a schema2 platform p to the OCI structure.
-func ociPlatformFromSchema2PlatformSpec(p Schema2PlatformSpec) imgspecv1.Platform {
- return imgspecv1.Platform{
- Architecture: p.Architecture,
- OS: p.OS,
- OSVersion: p.OSVersion,
- OSFeatures: slices.Clone(p.OSFeatures),
- Variant: p.Variant,
- // Features is not supported in OCI, and discarded.
- }
-}
diff --git a/vendor/go.podman.io/image/v5/internal/manifest/errors.go b/vendor/go.podman.io/image/v5/internal/manifest/errors.go
deleted file mode 100644
index 6c8e233d9..000000000
--- a/vendor/go.podman.io/image/v5/internal/manifest/errors.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package manifest
-
-import (
- "fmt"
-
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// FIXME: This is a duplicate of c/image/manifest.DockerV2Schema2ConfigMediaType.
-// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
-const dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
-
-// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
-// on an object which is not a “container image” in the standard sense (e.g.
an OCI artifact) -// -// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don’t provide a public constructor) -type NonImageArtifactError struct { - // Callers should not be blindly calling image-specific operations and only checking MIME types - // on failure; if they care about the artifact type, they should check before using it. - // If they blindly assume an image, they don’t really need this value; just a type check - // is sufficient for basic "we can only pull images" UI. - // - // Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig, - // e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers - // to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of - // type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions - // based on that kind of data. - // - // So, let’s not expose this until a specific need is identified. - mimeType string -} - -// NewNonImageArtifactError returns a NonImageArtifactError about an artifact manifest. -// -// This is typically called if manifest.Config.MediaType != imgspecv1.MediaTypeImageConfig . -func NewNonImageArtifactError(manifest *imgspecv1.Manifest) error { - // Callers decide based on manifest.Config.MediaType that this is not an image; - // in that case manifest.ArtifactType can be optionally defined, and if it is, it is typically - // more relevant because config may be ~absent with imgspecv1.MediaTypeEmptyJSON. - // - // If ArtifactType and Config.MediaType are both defined and non-trivial, presumably - // ArtifactType is the “top-level” one, although that’s not defined by the spec. - mimeType := manifest.ArtifactType - if mimeType == "" { - mimeType = manifest.Config.MediaType - } - return NonImageArtifactError{mimeType: mimeType} -} - -func (e NonImageArtifactError) Error() string { - // Special-case these invalid mixed images, which show up from time to time: - if e.mimeType == dockerV2Schema2ConfigMediaType { - return fmt.Sprintf("invalid mixed OCI image with Docker v2s2 config (%q)", e.mimeType) - } - return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType) -} diff --git a/vendor/go.podman.io/image/v5/internal/manifest/list.go b/vendor/go.podman.io/image/v5/internal/manifest/list.go deleted file mode 100644 index 100d1c86b..000000000 --- a/vendor/go.podman.io/image/v5/internal/manifest/list.go +++ /dev/null @@ -1,133 +0,0 @@ -package manifest - -import ( - "fmt" - - digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - compression "go.podman.io/image/v5/pkg/compression/types" - "go.podman.io/image/v5/types" -) - -// ListPublic is a subset of List which is a part of the public API; -// so no methods can be added, removed or changed. -// -// Internal users should usually use List instead. -type ListPublic interface { - // MIMEType returns the MIME type of this particular manifest list. - MIMEType() string - - // Instances returns a list of the manifests that this list knows of, other than its own. - Instances() []digest.Digest - - // Update information about the list's instances. The length of the passed-in slice must - // match the length of the list of instances which the list already contains, and every field - // must be specified. 
- UpdateInstances([]ListUpdate) error
-
- // Instance returns the size and MIME type of a particular instance in the list.
- Instance(digest.Digest) (ListUpdate, error)
-
- // ChooseInstance selects which manifest is most appropriate for the platform described by the
- // SystemContext, or for the current platform if the SystemContext doesn't specify any details.
- ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
-
- // Serialize returns the list in a blob format.
- // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
- // from one, even if no modifications were made!
- Serialize() ([]byte, error)
-
- // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
- ConvertToMIMEType(mimeType string) (ListPublic, error)
-
- // Clone returns a deep copy of this list and its contents.
- Clone() ListPublic
-}
-
-// List is an interface for parsing and modifying lists of image manifests.
-// Callers can either use this abstract interface without understanding the details of the formats,
-// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
-// directly.
-type List interface {
- ListPublic
- // CloneInternal returns a deep copy of this list and its contents.
- CloneInternal() List
- // ChooseInstanceByCompression selects which manifest is most appropriate for the platform and compression described by the
- // SystemContext (or for the current platform if the SystemContext doesn't specify any details). preferGzip steers the
- // choice: OptionalBoolTrue prefers gzip, while OptionalBoolFalse or OptionalBoolUndefined chooses the best available compression.
- ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
- // EditInstances edits information about the list's instances. It takes a slice of ListEdit, where each element
- // either updates an existing instance or adds a new instance to the manifest. The operation is
- // selected on the basis of the configured ListOperation field.
- EditInstances([]ListEdit) error
-}
-
-// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
-// This is publicly visible as c/image/manifest.ListUpdate.
-type ListUpdate struct {
- Digest digest.Digest
- Size int64
- MediaType string
- // ReadOnly fields: may be set by Instance(), ignored by UpdateInstances()
- ReadOnly struct {
- Platform *imgspecv1.Platform
- Annotations map[string]string
- CompressionAlgorithmNames []string
- ArtifactType string
- }
-}
-
-type ListOp int
-
-const (
- listOpInvalid ListOp = iota
- ListOpAdd
- ListOpUpdate
-)
-
-// ListEdit includes the fields which a List's EditInstances() method will modify.
-type ListEdit struct {
- ListOperation ListOp
-
- // If Op == ListOpUpdate (basically the previous UpdateInstances). All fields must be set.
- UpdateOldDigest digest.Digest
- UpdateDigest digest.Digest
- UpdateSize int64
- UpdateMediaType string
- UpdateAffectAnnotations bool
- UpdateAnnotations map[string]string
- UpdateCompressionAlgorithms []compression.Algorithm
-
- // If Op == ListOpAdd. All fields must be set.
- AddDigest digest.Digest
- AddSize int64
- AddMediaType string
- AddArtifactType string
- AddPlatform *imgspecv1.Platform
- AddAnnotations map[string]string
- AddCompressionAlgorithms []compression.Algorithm
-}
-
-// ListPublicFromBlob parses a list of manifests.
-// This is publicly visible as c/image/manifest.ListFromBlob.
-func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
- list, err := ListFromBlob(manifest, manifestMIMEType)
- if err != nil {
- return nil, err
- }
- return list, nil
-}
-
-// ListFromBlob parses a list of manifests.
-func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
- normalized := NormalizedMIMEType(manifestMIMEType)
- switch normalized {
- case DockerV2ListMediaType:
- return Schema2ListFromManifest(manifest)
- case imgspecv1.MediaTypeImageIndex:
- return OCI1IndexFromManifest(manifest)
- case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
- return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
- }
- return nil, fmt.Errorf("Unimplemented manifest list MIME type %q (normalized as %q)", manifestMIMEType, normalized)
-}
diff --git a/vendor/go.podman.io/image/v5/internal/manifest/manifest.go b/vendor/go.podman.io/image/v5/internal/manifest/manifest.go
deleted file mode 100644
index 687b37fb0..000000000
--- a/vendor/go.podman.io/image/v5/internal/manifest/manifest.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package manifest
-
-import (
- "encoding/json"
- "slices"
-
- "github.com/containers/libtrust"
- digest "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- compressiontypes "go.podman.io/image/v5/pkg/compression/types"
-)
-
-// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
-
-// FIXME(runcom, mitr): should we have a mediatype pkg??
-const (
- // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
- DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
- // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
- DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
- // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
- DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
- // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
- DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
- // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
- DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
- // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
- DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
- // DockerV2SchemaLayerMediaTypeZstd is the MIME type used for zstd-compressed schema 2 layers.
- DockerV2SchemaLayerMediaTypeZstd = "application/vnd.docker.image.rootfs.diff.tar.zstd"
- // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
- DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
- // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
- DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
- // DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
- DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
-)
-
-// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
-// FIXME?
We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
-// but we may not have such metadata available (e.g. when the manifest is a local file).
-// This is publicly visible as c/image/manifest.GuessMIMEType.
-func GuessMIMEType(manifest []byte) string {
- // A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
- // Also docker/distribution/manifest.Versioned.
- meta := struct {
- MediaType string `json:"mediaType"`
- SchemaVersion int `json:"schemaVersion"`
- Signatures any `json:"signatures"`
- }{}
- if err := json.Unmarshal(manifest, &meta); err != nil {
- return ""
- }
-
- switch meta.MediaType {
- case DockerV2Schema2MediaType, DockerV2ListMediaType,
- imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
- return meta.MediaType
- }
- // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
- switch meta.SchemaVersion {
- case 1:
- if meta.Signatures != nil {
- return DockerV2Schema1SignedMediaType
- }
- return DockerV2Schema1MediaType
- case 2:
- // Best effort to understand if this is an OCI image since mediaType
- // wasn't in the manifest for OCI image-spec < 1.0.2.
- // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
- ociMan := struct {
- Config struct {
- MediaType string `json:"mediaType"`
- } `json:"config"`
- }{}
- if err := json.Unmarshal(manifest, &ociMan); err != nil {
- return ""
- }
- switch ociMan.Config.MediaType {
- case imgspecv1.MediaTypeImageConfig:
- return imgspecv1.MediaTypeImageManifest
- case DockerV2Schema2ConfigMediaType:
- // This case should not happen since a Docker image
- // must declare a top-level media type and
- // `meta.MediaType` has already been checked.
- return DockerV2Schema2MediaType
- }
- // Maybe an image index or an OCI artifact.
- ociIndex := struct {
- Manifests []imgspecv1.Descriptor `json:"manifests"`
- }{}
- if err := json.Unmarshal(manifest, &ociIndex); err != nil {
- return ""
- }
- if len(ociIndex.Manifests) != 0 {
- if ociMan.Config.MediaType == "" {
- return imgspecv1.MediaTypeImageIndex
- }
- // FIXME: this is mixing media types of manifests and configs.
- return ociMan.Config.MediaType
- }
- // It's most likely an OCI artifact with a custom config media
- // type which is not (and cannot be) covered by the media-type
- // checks above.
- return imgspecv1.MediaTypeImageManifest
- }
- return ""
-}
-
-// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
-// This is publicly visible as c/image/manifest.Digest.
-func Digest(manifest []byte) (digest.Digest, error) {
- if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
- sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
- if err != nil {
- return "", err
- }
- manifest, err = sig.Payload()
- if err != nil {
- // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
- // that libtrust itself has josebase64UrlEncode()d
- return "", err
- }
- }
-
- return digest.FromBytes(manifest), nil
-}
-
-// MatchesDigest returns true iff the manifest matches expectedDigest.
-// Error may be set if this returns false.
-// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, -// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. -// This is publicly visible as c/image/manifest.MatchesDigest. -func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { - // This should eventually support various digest types. - actualDigest, err := Digest(manifest) - if err != nil { - return false, err - } - return expectedDigest == actualDigest, nil -} - -// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, -// centralizing various workarounds. -// This is publicly visible as c/image/manifest.NormalizedMIMEType. -func NormalizedMIMEType(input string) string { - switch input { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case "application/json": - return DockerV2Schema1SignedMediaType - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, - imgspecv1.MediaTypeImageManifest, - imgspecv1.MediaTypeImageIndex, - DockerV2Schema2MediaType, - DockerV2ListMediaType: - return input - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return DockerV2Schema1SignedMediaType - } -} - -// CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values. -func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool { - // Compare the discussion about BaseVariantName in MIMETypeSupportsCompressionAlgorithm(). - switch algo.Name() { - case compressiontypes.GzipAlgorithmName: - return true - default: - return false - } -} - -// MIMETypeSupportsCompressionAlgorithm returns true if mimeType can represent algo. -func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes.Algorithm) bool { - if CompressionAlgorithmIsUniversallySupported(algo) { - return true - } - // This does not use BaseVariantName: Plausibly a manifest format might support zstd but not have annotation fields. - // The logic might have to be more complex (and more ad-hoc) if more manifest formats, with more capabilities, emerge. 
- switch algo.Name() { - case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName: - return mimeType == imgspecv1.MediaTypeImageManifest - default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere - return false - } -} - -// ReuseConditions are an input to CandidateCompressionMatchesReuseConditions; -// it is a struct to allow longer and better-documented field names. -type ReuseConditions struct { - PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer - RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm -} - -// CandidateCompressionMatchesReuseConditions returns true if a layer with candidateCompression -// (which can be nil to represent uncompressed or unknown) matches reuseConditions. -func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool { - if c.RequiredCompression != nil { - if candidateCompression == nil || - (c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) { - return false - } - } - - // For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”; - // and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1), - // so don’t impose any restrictions if candidateCompression == nil - if c.PossibleManifestFormats != nil && candidateCompression != nil { - if !slices.ContainsFunc(c.PossibleManifestFormats, func(mt string) bool { - return MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression) - }) { - return false - } - } - - return true -} diff --git a/vendor/go.podman.io/image/v5/internal/manifest/oci_index.go b/vendor/go.podman.io/image/v5/internal/manifest/oci_index.go deleted file mode 100644 index 046d8e607..000000000 --- a/vendor/go.podman.io/image/v5/internal/manifest/oci_index.go +++ /dev/null @@ -1,466 +0,0 @@ -package manifest - -import ( - "bytes" - "encoding/json" - "fmt" - "maps" - "math" - "runtime" - "slices" - - "github.com/opencontainers/go-digest" - imgspec "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - platform "go.podman.io/image/v5/internal/pkg/platform" - compression "go.podman.io/image/v5/pkg/compression/types" - "go.podman.io/image/v5/types" -) - -const ( - // OCI1InstanceAnnotationCompressionZSTD is an annotation name that can be placed on a manifest descriptor in an OCI index. - // The value of the annotation must be the string "true". - // If this annotation is present on a manifest, consuming that image instance requires support for Zstd compression. - // That also suggests that this instance benefits from - // Zstd compression, so it can be preferred by compatible consumers over instances that - // use gzip, depending on their local policy. - OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd" - OCI1InstanceAnnotationCompressionZSTDValue = "true" -) - -// OCI1IndexPublic is just an alias for the OCI index type, but one which we can -// provide methods for. -// This is publicly visible as c/image/manifest.OCI1Index -// Internal users should usually use OCI1Index instead. -type OCI1IndexPublic struct { - imgspecv1.Index -} - -// MIMEType returns the MIME type of this particular manifest index. 
-func (index *OCI1IndexPublic) MIMEType() string {
- return imgspecv1.MediaTypeImageIndex
-}
-
-// Instances returns a slice of digests of the manifests that this index knows of.
-func (index *OCI1IndexPublic) Instances() []digest.Digest {
- results := make([]digest.Digest, len(index.Manifests))
- for i, m := range index.Manifests {
- results[i] = m.Digest
- }
- return results
-}
-
-// Instance returns the ListUpdate of a particular instance in the index.
-func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
- for _, manifest := range index.Manifests {
- if manifest.Digest == instanceDigest {
- ret := ListUpdate{
- Digest: manifest.Digest,
- Size: manifest.Size,
- MediaType: manifest.MediaType,
- }
- ret.ReadOnly.Platform = manifest.Platform
- ret.ReadOnly.Annotations = manifest.Annotations
- ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations)
- ret.ReadOnly.ArtifactType = manifest.ArtifactType
- return ret, nil
- }
- }
- return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
-}
-
-// UpdateInstances updates the sizes, digests, and media types of the manifests
-// which the list catalogs.
-func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
- editInstances := []ListEdit{}
- for i, instance := range updates {
- editInstances = append(editInstances, ListEdit{
- UpdateOldDigest: index.Manifests[i].Digest,
- UpdateDigest: instance.Digest,
- UpdateSize: instance.Size,
- UpdateMediaType: instance.MediaType,
- ListOperation: ListOpUpdate})
- }
- return index.editInstances(editInstances)
-}
-
-func annotationsToCompressionAlgorithmNames(annotations map[string]string) []string {
- result := make([]string, 0, 1)
- if annotations[OCI1InstanceAnnotationCompressionZSTD] == OCI1InstanceAnnotationCompressionZSTDValue {
- result = append(result, compression.ZstdAlgorithmName)
- }
- // No compression was detected, hence assume instance has default compression `Gzip`
- if len(result) == 0 {
- result = append(result, compression.GzipAlgorithmName)
- }
- return result
-}
-
-func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap *map[string]string) {
- // TODO: This should also delete the algorithm if the map already contains an algorithm and the compressionAlgorithms
- // list has a different algorithm. To do that, we would need to modify the callers to always provide a reliable
- // and full compressionAlgorithms list.
- if *annotationsMap == nil && len(compressionAlgorithms) > 0 { - *annotationsMap = map[string]string{} - } - for _, algo := range compressionAlgorithms { - switch algo.BaseVariantName() { - case compression.ZstdAlgorithmName: - (*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue - default: - continue - } - } -} - -func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { - addedEntries := []imgspecv1.Descriptor{} - updatedAnnotations := false - for i, editInstance := range editInstances { - switch editInstance.ListOperation { - case ListOpUpdate: - if err := editInstance.UpdateOldDigest.Validate(); err != nil { - return fmt.Errorf("OCI1Index.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err) - } - if err := editInstance.UpdateDigest.Validate(); err != nil { - return fmt.Errorf("OCI1Index.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) - } - targetIndex := slices.IndexFunc(index.Manifests, func(m imgspecv1.Descriptor) bool { - return m.Digest == editInstance.UpdateOldDigest - }) - if targetIndex == -1 { - return fmt.Errorf("OCI1Index.EditInstances: digest %s not found", editInstance.UpdateOldDigest) - } - index.Manifests[targetIndex].Digest = editInstance.UpdateDigest - if editInstance.UpdateSize < 0 { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) - } - index.Manifests[targetIndex].Size = editInstance.UpdateSize - if editInstance.UpdateMediaType == "" { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) - } - index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType - if editInstance.UpdateAnnotations != nil { - updatedAnnotations = true - if editInstance.UpdateAffectAnnotations { - index.Manifests[targetIndex].Annotations = maps.Clone(editInstance.UpdateAnnotations) - } else { - if index.Manifests[targetIndex].Annotations == nil { - index.Manifests[targetIndex].Annotations = map[string]string{} - } - maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations) - } - } - addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, &index.Manifests[targetIndex].Annotations) - case ListOpAdd: - annotations := map[string]string{} - if editInstance.AddAnnotations != nil { - annotations = maps.Clone(editInstance.AddAnnotations) - } - addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations) - addedEntries = append(addedEntries, imgspecv1.Descriptor{ - MediaType: editInstance.AddMediaType, - ArtifactType: editInstance.AddArtifactType, - Size: editInstance.AddSize, - Digest: editInstance.AddDigest, - Platform: editInstance.AddPlatform, - Annotations: annotations, - }) - default: - return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) - } - } - if len(addedEntries) != 0 { - // slices.Clone() here to ensure the slice uses a private backing array; - // an external caller could have manually created OCI1IndexPublic with a slice with extra capacity. - index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) - } - if len(addedEntries) != 0 || updatedAnnotations { - slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int { - // FIXME? 
With Go 1.21 and cmp.Compare available, turn instanceIsZstd into an integer score that can be compared, and generalize
- // into more algorithms?
- aZstd := instanceIsZstd(a)
- bZstd := instanceIsZstd(b)
- switch {
- case aZstd == bZstd:
- return 0
- case !aZstd: // Implies bZstd
- return -1
- default: // aZstd && !bZstd
- return 1
- }
- })
- }
- return nil
-}
-
-func (index *OCI1Index) EditInstances(editInstances []ListEdit) error {
- return index.editInstances(editInstances)
-}
-
-// instanceIsZstd returns true if instance is a zstd instance, otherwise false.
-func instanceIsZstd(manifest imgspecv1.Descriptor) bool {
- if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" {
- return true
- }
- return false
-}
-
-type instanceCandidate struct {
- platformIndex int // Index of the candidate in platform.WantedPlatforms: lower numbers are preferred; or math.MaxInt if the candidate doesn’t have a platform
- isZstd bool // tells whether this particular instance is a zstd instance
- manifestPosition int // A zero-based index of the instance in the manifest list
- digest digest.Digest // Instance digest
-}
-
-func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip types.OptionalBool) bool {
- switch {
- case ic.platformIndex != other.platformIndex:
- return ic.platformIndex < other.platformIndex
- case ic.isZstd != other.isZstd:
- if preferGzip != types.OptionalBoolTrue {
- return ic.isZstd
- } else {
- return !ic.isZstd
- }
- case ic.manifestPosition != other.manifestPosition:
- return ic.manifestPosition < other.manifestPosition
- }
- panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
-}
-
-// chooseInstance is a private equivalent to ChooseInstanceByCompression,
-// shared by ChooseInstance and ChooseInstanceByCompression.
-func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
- wantedPlatforms := platform.WantedPlatforms(ctx)
- var bestMatch *instanceCandidate
- bestMatch = nil
- for manifestIndex, d := range index.Manifests {
- candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
- if d.Platform != nil {
- imagePlatform := ociPlatformClone(*d.Platform)
- platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
- return platform.MatchesPlatform(imagePlatform, wantedPlatform)
- })
- if platformIndex == -1 {
- continue
- }
- candidate.platformIndex = platformIndex
- }
- if bestMatch == nil || candidate.isPreferredOver(bestMatch, preferGzip) {
- bestMatch = &candidate
- }
- }
- if bestMatch != nil {
- return bestMatch.digest, nil
- }
- return "", fmt.Errorf("no image found in image index for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
-}
-
-func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
- return index.chooseInstance(ctx, preferGzip)
-}
-
-// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
-// of the image which is appropriate for the current environment.
-func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - return index.chooseInstance(ctx, types.OptionalBoolFalse) -} - -// Serialize returns the index in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (index *OCI1IndexPublic) Serialize() ([]byte, error) { - buf, err := json.Marshal(index) - if err != nil { - return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err) - } - return buf, nil -} - -// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the -// supplied data. -// This is publicly visible as c/image/manifest.OCI1IndexFromComponents. -func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic { - index := OCI1IndexPublic{ - imgspecv1.Index{ - Versioned: imgspec.Versioned{SchemaVersion: 2}, - MediaType: imgspecv1.MediaTypeImageIndex, - Manifests: make([]imgspecv1.Descriptor, len(components)), - Annotations: maps.Clone(annotations), - }, - } - for i, component := range components { - index.Manifests[i] = oci1DescriptorClone(component) - } - return &index -} - -func oci1DescriptorClone(d imgspecv1.Descriptor) imgspecv1.Descriptor { - var platform *imgspecv1.Platform - if d.Platform != nil { - platformCopy := ociPlatformClone(*d.Platform) - platform = &platformCopy - } - return imgspecv1.Descriptor{ - MediaType: d.MediaType, - Digest: d.Digest, - Size: d.Size, - URLs: slices.Clone(d.URLs), - Annotations: maps.Clone(d.Annotations), - Data: bytes.Clone(d.Data), - Platform: platform, - ArtifactType: d.ArtifactType, - } -} - -// OCI1IndexPublicClone creates a deep copy of the passed-in index. -// This is publicly visible as c/image/manifest.OCI1IndexClone. -func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic { - var subject *imgspecv1.Descriptor - if index.Subject != nil { - s := oci1DescriptorClone(*index.Subject) - subject = &s - } - manifests := make([]imgspecv1.Descriptor, len(index.Manifests)) - for i, m := range index.Manifests { - manifests[i] = oci1DescriptorClone(m) - } - return &OCI1IndexPublic{ - Index: imgspecv1.Index{ - Versioned: index.Versioned, - MediaType: index.MediaType, - ArtifactType: index.ArtifactType, - Manifests: manifests, - Subject: subject, - Annotations: maps.Clone(index.Annotations), - }, - } -} - -// ToOCI1Index returns the index encoded as an OCI1 index. -func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) { - return OCI1IndexPublicClone(index), nil -} - -// ToSchema2List returns the index encoded as a Schema2 list. -func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) { - components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests)) - for _, manifest := range index.Manifests { - platform := manifest.Platform - if platform == nil { - platform = &imgspecv1.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - } - } - components = append(components, Schema2ManifestDescriptor{ - Schema2Descriptor{ - MediaType: manifest.MediaType, - Size: manifest.Size, - Digest: manifest.Digest, - URLs: slices.Clone(manifest.URLs), - }, - schema2PlatformSpecFromOCIPlatform(*platform), - }) - } - s2 := Schema2ListPublicFromComponents(components) - return s2, nil -} - -// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled -// JSON, presumably generated by encoding a OCI1 manifest index. 
-// This is publicly visible as c/image/manifest.OCI1IndexFromManifest. -func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) { - index := OCI1IndexPublic{ - Index: imgspecv1.Index{ - Versioned: imgspec.Versioned{SchemaVersion: 2}, - MediaType: imgspecv1.MediaTypeImageIndex, - Manifests: []imgspecv1.Descriptor{}, - Annotations: make(map[string]string), - }, - } - if err := json.Unmarshal(manifest, &index); err != nil { - return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err) - } - if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, - AllowedFieldManifests); err != nil { - return nil, err - } - return &index, nil -} - -// Clone returns a deep copy of this list and its contents. -func (index *OCI1IndexPublic) Clone() ListPublic { - return OCI1IndexPublicClone(index) -} - -// ConvertToMIMEType converts the passed-in image index to a manifest list of -// the specified type. -func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) { - switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { - case DockerV2ListMediaType: - return index.ToSchema2List() - case imgspecv1.MediaTypeImageIndex: - return index.Clone(), nil - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: - return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) - default: - // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) - } -} - -type OCI1Index struct { - OCI1IndexPublic -} - -func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index { - return &OCI1Index{*public} -} - -func (index *OCI1Index) CloneInternal() List { - return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic)) -} - -func (index *OCI1Index) Clone() ListPublic { - return index.CloneInternal() -} - -// OCI1IndexFromManifest creates a OCI1 manifest list instance from marshalled -// JSON, presumably generated by encoding a OCI1 manifest list. -func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { - public, err := OCI1IndexPublicFromManifest(manifest) - if err != nil { - return nil, err - } - return oci1IndexFromPublic(public), nil -} - -// ociPlatformClone returns an independent copy of p. -func ociPlatformClone(p imgspecv1.Platform) imgspecv1.Platform { - // The only practical way in Go to give read-only access to an array is to copy it. - // The only practical way in Go to copy a deep structure is to either do it manually field by field, - // or to use reflection (incl. a round-trip through JSON, which uses reflection). - // - // The combination of the two is just sad, and leads to code like this, which will - // need to be updated with every new Platform field. - return imgspecv1.Platform{ - Architecture: p.Architecture, - OS: p.OS, - OSVersion: p.OSVersion, - OSFeatures: slices.Clone(p.OSFeatures), - Variant: p.Variant, - } -} - -// schema2PlatformSpecFromOCIPlatform converts an OCI platform p to the schema2 structure. 
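ociPlatformClone above copies field by field because Go slices and maps are reference types; a plain struct copy would share their backing storage. A small self-contained illustration of that pattern (the platform type here is a trimmed stand-in, not the real imgspecv1.Platform):

package main

import (
	"fmt"
	"maps"
	"slices"
)

type platform struct {
	OS          string
	OSFeatures  []string
	Annotations map[string]string
}

// clonePlatform copies value fields directly and explicitly clones every
// slice and map field, so the result shares no storage with the input.
func clonePlatform(p platform) platform {
	return platform{
		OS:          p.OS,
		OSFeatures:  slices.Clone(p.OSFeatures),
		Annotations: maps.Clone(p.Annotations),
	}
}

func main() {
	orig := platform{OS: "linux", OSFeatures: []string{"sse4"}, Annotations: map[string]string{"a": "1"}}
	cp := clonePlatform(orig)
	cp.OSFeatures[0] = "avx2" // does not affect orig
	fmt.Println(orig.OSFeatures, cp.OSFeatures) // [sse4] [avx2]
}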
-func schema2PlatformSpecFromOCIPlatform(p imgspecv1.Platform) Schema2PlatformSpec { - return Schema2PlatformSpec{ - Architecture: p.Architecture, - OS: p.OS, - OSVersion: p.OSVersion, - OSFeatures: slices.Clone(p.OSFeatures), - Variant: p.Variant, - Features: nil, - } -} diff --git a/vendor/go.podman.io/image/v5/internal/multierr/multierr.go b/vendor/go.podman.io/image/v5/internal/multierr/multierr.go deleted file mode 100644 index 1341925c1..000000000 --- a/vendor/go.podman.io/image/v5/internal/multierr/multierr.go +++ /dev/null @@ -1,34 +0,0 @@ -package multierr - -import ( - "fmt" - "strings" -) - -// Format creates an error value from the input array (which should not be empty) -// If the input contains a single error value, it is returned as is. -// If there are multiple, they are formatted as a multi-error (with Unwrap() []error) with the provided initial, separator, and ending strings. -// -// Typical usage: -// -// var errs []error -// // … -// errs = append(errs, …) -// // … -// if errs != nil { return multierr.Format("Failures doing $FOO", "\n* ", "", errs)} -func Format(first, middle, last string, errs []error) error { - switch len(errs) { - case 0: - return fmt.Errorf("internal error: multierr.Format called with 0 errors") - case 1: - return errs[0] - default: - // We have to do this — and this function only really exists — because fmt.Errorf(format, errs...) is invalid: - // []error is not a valid parameter to a function expecting []any - anyErrs := make([]any, 0, len(errs)) - for _, e := range errs { - anyErrs = append(anyErrs, e) - } - return fmt.Errorf(first+"%w"+strings.Repeat(middle+"%w", len(errs)-1)+last, anyErrs...) - } -} diff --git a/vendor/go.podman.io/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/go.podman.io/image/v5/internal/pkg/platform/platform_matcher.go deleted file mode 100644 index 171438891..000000000 --- a/vendor/go.podman.io/image/v5/internal/pkg/platform/platform_matcher.go +++ /dev/null @@ -1,223 +0,0 @@ -package platform - -// Largely based on -// https://github.com/moby/moby/blob/bc846d2e8fe5538220e0c31e9d0e8446f6fbc022/distribution/cpuinfo_unix.go -// Copyright 2012-2017 Docker, Inc. -// -// https://github.com/containerd/containerd/blob/726dcaea50883e51b2ec6db13caff0e7936b711d/platforms/cpuinfo.go -// Copyright The containerd Authors. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// https://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bufio" - "fmt" - "os" - "runtime" - "slices" - "strings" - - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - "go.podman.io/image/v5/types" -) - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. 
We can just parse this information from /proc/cpuinfo
-func getCPUInfo(pattern string) (info string, err error) {
- if runtime.GOOS != "linux" {
- return "", fmt.Errorf("getCPUInfo for OS %s not implemented", runtime.GOOS)
- }
-
- cpuinfo, err := os.Open("/proc/cpuinfo")
- if err != nil {
- return "", err
- }
- defer cpuinfo.Close()
-
- // Parse /proc/cpuinfo line by line. For an SMP SoC, parsing
- // the first core is enough.
- scanner := bufio.NewScanner(cpuinfo)
- for scanner.Scan() {
- newline := scanner.Text()
- list := strings.Split(newline, ":")
-
- if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
- return strings.TrimSpace(list[1]), nil
- }
- }
-
- // Check whether the scanner encountered errors
- err = scanner.Err()
- if err != nil {
- return "", err
- }
-
- return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
-}
-
-func getCPUVariantDarwinWindows(arch string) string {
- // Darwin and Windows only support v7 for ARM32 and v8 for ARM64, so we can use
- // runtime.GOARCH to determine the variant
- var variant string
- switch arch {
- case "arm64":
- variant = "v8"
- case "arm":
- variant = "v7"
- default:
- variant = ""
- }
-
- return variant
-}
-
-func getCPUVariantArm() string {
- variant, err := getCPUInfo("Cpu architecture")
- if err != nil {
- logrus.Errorf("Couldn't get cpu architecture: %v", err)
- return ""
- }
-
- switch strings.ToLower(variant) {
- case "8", "aarch64":
- variant = "v8"
- case "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
- variant = "v7"
- case "7":
- // handle RPi Zero variant mismatch due to wrong variant from kernel
- // https://github.com/containerd/containerd/pull/4530
- // https://www.raspberrypi.org/forums/viewtopic.php?t=12614
- // https://github.com/moby/moby/pull/36121#issuecomment-398328286
- model, err := getCPUInfo("model name")
- if err != nil {
- logrus.Errorf("Couldn't get cpu model name, it may be the corner case where variant is 6: %v", err)
- return ""
- }
- // model name is NOT a value provided by the CPU; it is another outcome of Linux CPU detection,
- // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L178C35-L178C35
- // (matching happens based on value + mask at https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L273-L274 )
- // ARM CPU ID starts with a “main” ID register https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/System-Control-Registers-in-a-VMSA-implementation/VMSA-System-control-registers-descriptions--in-register-order/MIDR--Main-ID-Register--VMSA?lang=en ,
- // but the ARMv6/ARMv7 differences are not a single dimension, https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-CPUID-Identification-Scheme?lang=en .
- // The Linux "cpu architecture" is determined by a “memory model” feature.
- //
- // So, the "armv6-compatible" check basically checks for a v6 or v7 CPU that is not listed as a known v7 one in the .proc.info.init tables of
- // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v7.S .
- if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { - logrus.Debugf("Detected corner case, setting cpu variant to v6") - variant = "v6" - } else { - variant = "v7" - } - case "6", "6tej": - variant = "v6" - case "5", "5t", "5te", "5tej": - variant = "v5" - case "4", "4t": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "" - } - - return variant -} - -func getCPUVariant(os string, arch string) string { - if os == "darwin" || os == "windows" { - return getCPUVariantDarwinWindows(arch) - } - if arch == "arm" || arch == "arm64" { - return getCPUVariantArm() - } - return "" -} - -// compatibility contains, for a specified architecture, a list of known variants, in the -// order from most capable (most restrictive) to least capable (most compatible). -// Architectures that don’t have variants should not have an entry here. -var compatibility = map[string][]string{ - "arm": {"v8", "v7", "v6", "v5"}, - "arm64": {"v8"}, -} - -// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user, -// the most compatible platform is first. -// If some option (arch, os, variant) is not present, a value from current platform is detected. -func WantedPlatforms(ctx *types.SystemContext) []imgspecv1.Platform { - // Note that this does not use Platform.OSFeatures and Platform.OSVersion at all. - // The fields are not specified by the OCI specification, as of version 1.1, usefully enough - // to be interoperable, anyway. - - wantedArch := runtime.GOARCH - wantedVariant := "" - if ctx != nil && ctx.ArchitectureChoice != "" { - wantedArch = ctx.ArchitectureChoice - } else { - // Only auto-detect the variant if we are using the default architecture. - // If the user has specified the ArchitectureChoice, don't autodetect, even if - // ctx.ArchitectureChoice == runtime.GOARCH, because we have no idea whether the runtime.GOARCH - // value is relevant to the use case, and if we do autodetect a variant, - // ctx.VariantChoice can't be used to override it back to "". - wantedVariant = getCPUVariant(runtime.GOOS, runtime.GOARCH) - } - if ctx != nil && ctx.VariantChoice != "" { - wantedVariant = ctx.VariantChoice - } - - wantedOS := runtime.GOOS - if ctx != nil && ctx.OSChoice != "" { - wantedOS = ctx.OSChoice - } - - var variants []string = nil - if wantedVariant != "" { - // If the user requested a specific variant, we'll walk down - // the list from most to least compatible. - if variantOrder := compatibility[wantedArch]; variantOrder != nil { - if i := slices.Index(variantOrder, wantedVariant); i != -1 { - variants = variantOrder[i:] - } - } - if variants == nil { - // user wants a variant which we know nothing about - not even compatibility - variants = []string{wantedVariant} - } - // Make sure to have a candidate with an empty variant as well. - variants = append(variants, "") - } else { - // Make sure to have a candidate with an empty variant as well. - variants = append(variants, "") - // If available add the entire compatibility matrix for the specific architecture. - if possibleVariants, ok := compatibility[wantedArch]; ok { - variants = append(variants, possibleVariants...) - } - } - - res := make([]imgspecv1.Platform, 0, len(variants)) - for _, v := range variants { - res = append(res, imgspecv1.Platform{ - OS: wantedOS, - Architecture: wantedArch, - Variant: v, - }) - } - return res -} - -// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches -// an item from the return value of WantedPlatforms. 
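The variant fallback in WantedPlatforms above condenses to a small rule: walk the compatibility table from the requested variant down, and always end with an empty-variant candidate. A standalone sketch under those assumptions; wantedVariants is an invented helper, not part of this patch:

package main

import (
	"fmt"
	"slices"
)

var compatibility = map[string][]string{
	"arm":   {"v8", "v7", "v6", "v5"},
	"arm64": {"v8"},
}

func wantedVariants(arch, variant string) []string {
	if variant == "" {
		// No explicit variant: try the empty variant first, then everything
		// the compatibility table knows for this architecture.
		return append([]string{""}, compatibility[arch]...)
	}
	var variants []string
	if order, ok := compatibility[arch]; ok {
		if i := slices.Index(order, variant); i != -1 {
			variants = slices.Clone(order[i:]) // from most to least capable
		}
	}
	if variants == nil {
		variants = []string{variant} // unknown variant: no compatibility data
	}
	return append(variants, "") // always include an empty-variant candidate
}

func main() {
	fmt.Printf("%q\n", wantedVariants("arm", "v7")) // ["v7" "v6" "v5" ""]
	fmt.Printf("%q\n", wantedVariants("arm", ""))   // ["" "v8" "v7" "v6" "v5"]
}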
-func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool { - return image.Architecture == wanted.Architecture && - image.OS == wanted.OS && - image.Variant == wanted.Variant -} diff --git a/vendor/go.podman.io/image/v5/internal/private/private.go b/vendor/go.podman.io/image/v5/internal/private/private.go deleted file mode 100644 index a5d2057ae..000000000 --- a/vendor/go.podman.io/image/v5/internal/private/private.go +++ /dev/null @@ -1,239 +0,0 @@ -package private - -import ( - "context" - "io" - "time" - - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/blobinfocache" - "go.podman.io/image/v5/internal/signature" - compression "go.podman.io/image/v5/pkg/compression/types" - "go.podman.io/image/v5/types" -) - -// ImageSourceInternalOnly is the part of private.ImageSource that is not -// a part of types.ImageSource. -type ImageSourceInternalOnly interface { - // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. - SupportsGetBlobAt() bool - // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt(). - BlobChunkAccessor - - // GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. - // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for - // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list - // (e.g. if the source never returns manifest lists). - GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) -} - -// ImageSource is an internal extension to the types.ImageSource interface. -type ImageSource interface { - types.ImageSource - ImageSourceInternalOnly -} - -// ImageDestinationInternalOnly is the part of private.ImageDestination that is not -// a part of types.ImageDestination. -type ImageDestinationInternalOnly interface { - // SupportsPutBlobPartial returns true if PutBlobPartial is supported. - SupportsPutBlobPartial() bool - // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures - // on unsupported formats. - - // NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, - // or an error obtaining that value (e.g. if the image is an artifact and not a container image). - // The destination can use it in its TryReusingBlob/PutBlob implementations - // (otherwise it only obtains the final config after all layers are written). - NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error - - // PutBlobWithOptions writes contents of stream and returns data representing the result. - // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. - // inputInfo.Size is the expected length of stream, if known. - // inputInfo.MediaType describes the blob format, if known. - // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available - // to any other readers for download using the supplied digest. - // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. 
- PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error)
-
- // PutBlobPartial attempts to create a blob using the data that is already present
- // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
- // It is available only if SupportsPutBlobPartial().
- // Even if SupportsPutBlobPartial() returns true, the call can fail.
- // If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
- // The fallback _must not_ be done otherwise.
- PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error)
-
- // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
- // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
- // info.Digest must not be empty.
- // If the blob has been successfully reused, returns (true, info, nil).
- // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
- TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error)
-
- // PutSignaturesWithFormat writes a set of signatures to the destination.
- // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
- // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
- // MUST be called after PutManifest (signatures may reference manifest contents).
- PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error
-
- // CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
- // WARNING: This does not have any transactional semantics:
- // - Uploaded data MAY be visible to others before CommitWithOptions() is called
- // - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
- CommitWithOptions(ctx context.Context, options CommitOptions) error
-}
-
-// ImageDestination is an internal extension to the types.ImageDestination
-// interface.
-type ImageDestination interface {
- types.ImageDestination
- ImageDestinationInternalOnly
-}
-
-// UploadedBlob is information about a blob written to a destination.
-// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided.
-type UploadedBlob struct {
- Digest digest.Digest
- Size int64
-}
-
-// PutBlobOptions are used in PutBlobWithOptions.
-type PutBlobOptions struct {
- Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob / look up blob infos.
- IsConfig bool // True if the blob is a config
-
- // The following fields are new to internal/private. Users of internal/private MUST fill them in,
- // but they also must expect that they will be ignored by types.ImageDestination transports.
- // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers - // if they use internal/imagedestination/impl.Compat; - // in that case, they will all be consistently zero-valued. - - EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. - LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. -} - -// PutBlobPartialOptions are used in PutBlobPartial. -type PutBlobPartialOptions struct { - Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. - EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. - LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs) -} - -// TryReusingBlobOptions are used in TryReusingBlobWithOptions. -type TryReusingBlobOptions struct { - Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. - // If true, it is allowed to use an equivalent of the desired blob; - // in that case the returned info may not match the input. - CanSubstitute bool - - // The following fields are new to internal/private. Users of internal/private MUST fill them in, - // but they also must expect that they will be ignored by types.ImageDestination transports. - // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers - // if they use internal/imagedestination/impl.Compat; - // in that case, they will all be consistently zero-valued. - EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. - LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. - SrcRef reference.Named // A reference to the source image that contains the input blob. - PossibleManifestFormats []string // A set of possible manifest formats; at least one should support the reused layer blob. - RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go - OriginalCompression *compression.Algorithm // May be nil to indicate “uncompressed” or “unknown”. - TOCDigest digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest. -} - -// ReusedBlob is information about a blob reused in a destination. -// It is the subset of types.BlobInfo fields the transport is responsible for setting. -type ReusedBlob struct { - Digest digest.Digest // Must be provided - Size int64 // Must be provided - // The following compression fields should be set when the reuse substitutes - // a differently-compressed blob. - // They may be set also to change from a base variant to a specific variant of an algorithm. - CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A - CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A - - // Annotations that should be added, for CompressionAlgorithm. Note that they might need to be - // added even if the digest doesn’t change (if we found the annotations in a cache). - CompressionAnnotations map[string]string - - MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes. 
-} - -// CommitOptions are used in CommitWithOptions -type CommitOptions struct { - // UnparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list - // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the - // original manifest list digest, if desired. - UnparsedToplevel types.UnparsedImage - // ReportResolvedReference, if set, asks the transport to store a “resolved” (more detailed) reference to the created image - // into the value this option points to. - // What “resolved” means is transport-specific. - // Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value. - ReportResolvedReference *types.ImageReference - // Timestamp, if set, will force timestamps of content created in the destination to this value. - // Most transports don't support this. - // - // In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry - // (but not a timestamp of the created archive file). - Timestamp *time.Time -} - -// ImageSourceChunk is a portion of a blob. -// This API is experimental and can be changed without bumping the major version number. -type ImageSourceChunk struct { - // Offset specifies the starting position of the chunk within the source blob. - Offset uint64 - - // Length specifies the size of the chunk. If it is set to math.MaxUint64, - // then it refers to all the data from Offset to the end of the blob. - Length uint64 -} - -// BlobChunkAccessor allows fetching discontiguous chunks of a blob. -type BlobChunkAccessor interface { - // GetBlobAt returns a sequential channel of readers that contain data for the requested - // blob chunks, and a channel that might get a single error value. - // The specified chunks must be not overlapping and sorted by their offset. - // The readers must be fully consumed, in the order they are returned, before blocking - // to read the next chunk. - // If the Length for the last chunk is set to math.MaxUint64, then it - // fully fetches the remaining data from the offset to the end of the blob. - GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) -} - -// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request. -type BadPartialRequestError struct { - Status string -} - -func (e BadPartialRequestError) Error() string { - return e.Status -} - -// UnparsedImage is an internal extension to the types.UnparsedImage interface. -type UnparsedImage interface { - types.UnparsedImage - // UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need. - UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) -} - -// ErrFallbackToOrdinaryLayerDownload is a custom error type returned by PutBlobPartial. -// It suggests to the caller that a fallback mechanism can be used instead of a hard failure; -// otherwise the caller of PutBlobPartial _must not_ fall back to PutBlob. 
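The GetBlobAt contract above (each reader fully consumed, in order, with the error channel checked afterwards) is easiest to see with a toy producer and consumer. Everything here is invented for illustration; a real BlobChunkAccessor would fetch byte ranges from a registry:

package main

import (
	"fmt"
	"io"
	"strings"
)

// getChunks is a hypothetical in-memory producer: one reader per requested
// (offset, length) chunk on the first channel, at most one error on the second.
func getChunks(blob string, chunks [][2]int) (chan io.ReadCloser, chan error) {
	readers := make(chan io.ReadCloser)
	errs := make(chan error, 1)
	go func() {
		defer close(errs)
		defer close(readers)
		for _, c := range chunks {
			readers <- io.NopCloser(strings.NewReader(blob[c[0] : c[0]+c[1]]))
		}
	}()
	return readers, errs
}

func main() {
	readers, errs := getChunks("hello, chunked world", [][2]int{{0, 5}, {7, 7}})
	for r := range readers {
		data, err := io.ReadAll(r) // fully consume before taking the next reader
		r.Close()
		if err != nil {
			panic(err)
		}
		fmt.Printf("chunk: %q\n", data)
	}
	if err := <-errs; err != nil { // closed channel yields nil on success
		panic(err)
	}
}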
-type ErrFallbackToOrdinaryLayerDownload struct { - err error -} - -func (c ErrFallbackToOrdinaryLayerDownload) Error() string { - return c.err.Error() -} - -func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error { - return c.err -} - -func NewErrFallbackToOrdinaryLayerDownload(err error) error { - return ErrFallbackToOrdinaryLayerDownload{err: err} -} diff --git a/vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go b/vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go deleted file mode 100644 index ce5054275..000000000 --- a/vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go +++ /dev/null @@ -1,57 +0,0 @@ -package putblobdigest - -import ( - "io" - - "github.com/opencontainers/go-digest" - "go.podman.io/image/v5/types" -) - -// Digester computes a digest of the provided stream, if not known yet. -type Digester struct { - knownDigest digest.Digest // Or "" - digester digest.Digester // Or nil -} - -// newDigester initiates computation of a digest.Canonical digest of stream, -// if !validDigest; otherwise it just records knownDigest to be returned later. -// The caller MUST use the returned stream instead of the original value. -func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) { - if validDigest { - return Digester{knownDigest: knownDigest}, stream - } else { - res := Digester{ - digester: digest.Canonical.Digester(), - } - stream = io.TeeReader(stream, res.digester.Hash()) - return res, stream - } -} - -// DigestIfUnknown initiates computation of a digest.Canonical digest of stream, -// if no digest is supplied in the provided blobInfo; otherwise blobInfo.Digest will -// be used (accepting any algorithm). -// The caller MUST use the returned stream instead of the original value. -func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { - d := blobInfo.Digest - return newDigester(stream, d, d != "") -} - -// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream, -// if a digest.Canonical digest is not supplied in the provided blobInfo; -// otherwise blobInfo.Digest will be used. -// The caller MUST use the returned stream instead of the original value. -func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { - d := blobInfo.Digest - return newDigester(stream, d, d != "" && d.Algorithm() == digest.Canonical) -} - -// Digest() returns a digest value possibly computed by Digester. -// This must be called only after all of the stream returned by a Digester constructor -// has been successfully read. -func (d Digester) Digest() digest.Digest { - if d.digester != nil { - return d.digester.Digest() - } - return d.knownDigest -} diff --git a/vendor/go.podman.io/image/v5/internal/rootless/rootless.go b/vendor/go.podman.io/image/v5/internal/rootless/rootless.go deleted file mode 100644 index 80623bfbc..000000000 --- a/vendor/go.podman.io/image/v5/internal/rootless/rootless.go +++ /dev/null @@ -1,25 +0,0 @@ -package rootless - -import ( - "os" - "strconv" -) - -// GetRootlessEUID returns the UID of the current user (in the parent userNS, if any) -// -// Podman and similar software, in “rootless” configuration, when run as a non-root -// user, very early switches to a user namespace, where Geteuid() == 0 (but does not -// switch to a limited mount namespace); so, code relying on Geteuid() would use -// system-wide paths in e.g. 
/var, when the user is actually not privileged to write to -// them, and expects state to be stored in the home directory. -// -// If Podman is setting up such a user namespace, it records the original UID in an -// environment variable, allowing us to make choices based on the actual user’s identity. -func GetRootlessEUID() int { - euidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") - if euidEnv != "" { - euid, _ := strconv.Atoi(euidEnv) - return euid - } - return os.Geteuid() -} diff --git a/vendor/go.podman.io/image/v5/internal/set/set.go b/vendor/go.podman.io/image/v5/internal/set/set.go deleted file mode 100644 index 7716b12d5..000000000 --- a/vendor/go.podman.io/image/v5/internal/set/set.go +++ /dev/null @@ -1,55 +0,0 @@ -package set - -import ( - "iter" - "maps" -) - -// FIXME: -// - Docstrings -// - This should be in a public library somewhere - -type Set[E comparable] struct { - m map[E]struct{} -} - -func New[E comparable]() *Set[E] { - return &Set[E]{ - m: map[E]struct{}{}, - } -} - -func NewWithValues[E comparable](values ...E) *Set[E] { - s := New[E]() - for _, v := range values { - s.Add(v) - } - return s -} - -func (s *Set[E]) Add(v E) { - s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. -} - -func (s *Set[E]) AddSeq(seq iter.Seq[E]) { - for v := range seq { - s.Add(v) - } -} - -func (s *Set[E]) Delete(v E) { - delete(s.m, v) -} - -func (s *Set[E]) Contains(v E) bool { - _, ok := s.m[v] - return ok -} - -func (s *Set[E]) Empty() bool { - return len(s.m) == 0 -} - -func (s *Set[E]) All() iter.Seq[E] { - return maps.Keys(s.m) -} diff --git a/vendor/go.podman.io/image/v5/internal/signature/signature.go b/vendor/go.podman.io/image/v5/internal/signature/signature.go deleted file mode 100644 index 6f95115a1..000000000 --- a/vendor/go.podman.io/image/v5/internal/signature/signature.go +++ /dev/null @@ -1,102 +0,0 @@ -package signature - -import ( - "bytes" - "errors" - "fmt" -) - -// FIXME FIXME: MIME type? Int? String? -// An interface with a name, parse methods? -type FormatID string - -const ( - SimpleSigningFormat FormatID = "simple-signing" - SigstoreFormat FormatID = "sigstore-json" - // Update also UnsupportedFormatError below -) - -// Signature is an image signature of some kind. -type Signature interface { - FormatID() FormatID - // blobChunk returns a representation of signature as a []byte, suitable for long-term storage. - // Almost everyone should use signature.Blob() instead. - blobChunk() ([]byte, error) -} - -// Blob returns a representation of sig as a []byte, suitable for long-term storage. -func Blob(sig Signature) ([]byte, error) { - chunk, err := sig.blobChunk() - if err != nil { - return nil, err - } - - format := sig.FormatID() - switch format { - case SimpleSigningFormat: - // For compatibility with old dir formats: - return chunk, nil - default: - res := []byte{0} // Start with a zero byte to clearly mark this is a binary format, and disambiguate from random text. - res = append(res, []byte(format)...) - res = append(res, '\n') - res = append(res, chunk...) - return res, nil - } -} - -// FromBlob returns a signature from parsing a blob created by signature.Blob. -func FromBlob(blob []byte) (Signature, error) { - if len(blob) == 0 { - return nil, errors.New("empty signature blob") - } - // Historically we’ve just been using GPG with no identification; try to auto-detect that. 
- switch blob[0] { - // OpenPGP "compressed data" wrapping the message - case 0xA0, 0xA1, 0xA2, 0xA3, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 8 (tag: compressed data packet); bits 1…0 = length-type (any) - 0xC8, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 8 (tag: compressed data packet) - // OpenPGP “one-pass signature” starting a signature - 0x90, 0x91, 0x92, 0x3d, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 4 (tag: one-pass signature packet); bits 1…0 = length-type (any) - 0xC4, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 4 (tag: one-pass signature packet) - // OpenPGP signature packet signing the following data - 0x88, 0x89, 0x8A, 0x8B, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 2 (tag: signature packet); bits 1…0 = length-type (any) - 0xC2: // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 2 (tag: signature packet) - return SimpleSigningFromBlob(blob), nil - - // The newer format: binary 0, format name, newline, data - case 0x00: - blob = blob[1:] - formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'}) - if !foundNewline { - return nil, fmt.Errorf("invalid signature format, missing newline") - } - for _, b := range formatBytes { - if b < 32 || b >= 0x7F { - return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b) - } - } - switch { - case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)): - return SimpleSigningFromBlob(blobChunk), nil - case bytes.Equal(formatBytes, []byte(SigstoreFormat)): - return sigstoreFromBlobChunk(blobChunk) - default: - return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes)) - } - - default: - return nil, fmt.Errorf("unrecognized signature format, starting with binary %#x", blob[0]) - } - -} - -// UnsupportedFormatError returns an error complaining about sig having an unsupported format. -func UnsupportedFormatError(sig Signature) error { - formatID := sig.FormatID() - switch formatID { - case SimpleSigningFormat, SigstoreFormat: - return fmt.Errorf("unsupported signature format %s", string(formatID)) - default: - return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID)) - } -} diff --git a/vendor/go.podman.io/image/v5/internal/signature/sigstore.go b/vendor/go.podman.io/image/v5/internal/signature/sigstore.go deleted file mode 100644 index 8025cd270..000000000 --- a/vendor/go.podman.io/image/v5/internal/signature/sigstore.go +++ /dev/null @@ -1,86 +0,0 @@ -package signature - -import ( - "bytes" - "encoding/json" - "maps" -) - -const ( - // from sigstore/cosign/pkg/types.SimpleSigningMediaType - SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json" - // from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey - SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature" - // from sigstore/cosign/pkg/oci/static.BundleAnnotationKey - SigstoreSETAnnotationKey = "dev.sigstore.cosign/bundle" - // from sigstore/cosign/pkg/oci/static.CertificateAnnotationKey - SigstoreCertificateAnnotationKey = "dev.sigstore.cosign/certificate" - // from sigstore/cosign/pkg/oci/static.ChainAnnotationKey - SigstoreIntermediateCertificateChainAnnotationKey = "dev.sigstore.cosign/chain" -) - -// Sigstore is a github.com/cosign/cosign signature. 
-// For the persistent-storage format used for blobChunk(), we want -// a degree of forward compatibility against unexpected field changes -// (as has happened before), which is why this data type -// contains just a payload + annotations (including annotations -// that we don’t recognize or support), instead of individual fields -// for the known annotations. -type Sigstore struct { - untrustedMIMEType string - untrustedPayload []byte - untrustedAnnotations map[string]string -} - -// sigstoreJSONRepresentation needs the files to be public, which we don’t want for -// the main Sigstore type. -type sigstoreJSONRepresentation struct { - UntrustedMIMEType string `json:"mimeType"` - UntrustedPayload []byte `json:"payload"` - UntrustedAnnotations map[string]string `json:"annotations"` -} - -// SigstoreFromComponents returns a Sigstore object from its components. -func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore { - return Sigstore{ - untrustedMIMEType: untrustedMimeType, - untrustedPayload: bytes.Clone(untrustedPayload), - untrustedAnnotations: maps.Clone(untrustedAnnotations), - } -} - -// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object. -func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) { - var v sigstoreJSONRepresentation - if err := json.Unmarshal(blobChunk, &v); err != nil { - return Sigstore{}, err - } - return SigstoreFromComponents(v.UntrustedMIMEType, - v.UntrustedPayload, - v.UntrustedAnnotations), nil -} - -func (s Sigstore) FormatID() FormatID { - return SigstoreFormat -} - -// blobChunk returns a representation of signature as a []byte, suitable for long-term storage. -// Almost everyone should use signature.Blob() instead. -func (s Sigstore) blobChunk() ([]byte, error) { - return json.Marshal(sigstoreJSONRepresentation{ - UntrustedMIMEType: s.UntrustedMIMEType(), - UntrustedPayload: s.UntrustedPayload(), - UntrustedAnnotations: s.UntrustedAnnotations(), - }) -} - -func (s Sigstore) UntrustedMIMEType() string { - return s.untrustedMIMEType -} -func (s Sigstore) UntrustedPayload() []byte { - return bytes.Clone(s.untrustedPayload) -} - -func (s Sigstore) UntrustedAnnotations() map[string]string { - return maps.Clone(s.untrustedAnnotations) -} diff --git a/vendor/go.podman.io/image/v5/internal/signature/simple.go b/vendor/go.podman.io/image/v5/internal/signature/simple.go deleted file mode 100644 index 76f270b48..000000000 --- a/vendor/go.podman.io/image/v5/internal/signature/simple.go +++ /dev/null @@ -1,29 +0,0 @@ -package signature - -import "bytes" - -// SimpleSigning is a “simple signing” signature. -type SimpleSigning struct { - untrustedSignature []byte -} - -// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object. -func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning { - return SimpleSigning{ - untrustedSignature: bytes.Clone(blobChunk), - } -} - -func (s SimpleSigning) FormatID() FormatID { - return SimpleSigningFormat -} - -// blobChunk returns a representation of signature as a []byte, suitable for long-term storage. -// Almost everyone should use signature.Blob() instead. 
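The forward-compatibility point above, that the persisted form is just mimeType + payload + an open-ended annotations map, can be checked with a round-trip: annotations this code does not recognize survive a decode/encode cycle. sigstoreJSON below mirrors the representation shown, as a standalone sketch:

package main

import (
	"encoding/json"
	"fmt"
)

type sigstoreJSON struct {
	MIMEType    string            `json:"mimeType"`
	Payload     []byte            `json:"payload"`
	Annotations map[string]string `json:"annotations"`
}

func main() {
	// "dev.example/future-field" stands in for an annotation added by a
	// newer producer that this code knows nothing about.
	in := `{"mimeType":"application/vnd.dev.cosign.simplesigning.v1+json",` +
		`"payload":"e30=","annotations":{"dev.example/future-field":"kept"}}`
	var v sigstoreJSON
	if err := json.Unmarshal([]byte(in), &v); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(v)
	fmt.Println(string(out)) // the unknown annotation is preserved
}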
-func (s SimpleSigning) blobChunk() ([]byte, error) { - return bytes.Clone(s.untrustedSignature), nil -} - -func (s SimpleSigning) UntrustedSignature() []byte { - return bytes.Clone(s.untrustedSignature) -} diff --git a/vendor/go.podman.io/image/v5/internal/streamdigest/stream_digest.go b/vendor/go.podman.io/image/v5/internal/streamdigest/stream_digest.go deleted file mode 100644 index 83608e04a..000000000 --- a/vendor/go.podman.io/image/v5/internal/streamdigest/stream_digest.go +++ /dev/null @@ -1,40 +0,0 @@ -package streamdigest - -import ( - "fmt" - "io" - "os" - - "go.podman.io/image/v5/internal/putblobdigest" - "go.podman.io/image/v5/internal/tmpdir" - "go.podman.io/image/v5/types" -) - -// ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo. -// The temporary file is returned as an io.Reader along with a cleanup function. -// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. -// If an error occurs, inputInfo is not modified. -func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) { - diskBlob, err := tmpdir.CreateBigFileTemp(sys, "stream-blob") - if err != nil { - return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err) - } - cleanup := func() { - diskBlob.Close() - os.Remove(diskBlob.Name()) - } - digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo) - written, err := io.Copy(diskBlob, stream) - if err != nil { - cleanup() - return nil, nil, fmt.Errorf("writing to temporary on-disk layer: %w", err) - } - _, err = diskBlob.Seek(0, io.SeekStart) - if err != nil { - cleanup() - return nil, nil, fmt.Errorf("rewinding temporary on-disk layer: %w", err) - } - inputInfo.Digest = digester.Digest() - inputInfo.Size = written - return diskBlob, cleanup, nil -} diff --git a/vendor/go.podman.io/image/v5/internal/tmpdir/tmpdir.go b/vendor/go.podman.io/image/v5/internal/tmpdir/tmpdir.go deleted file mode 100644 index 634b2d062..000000000 --- a/vendor/go.podman.io/image/v5/internal/tmpdir/tmpdir.go +++ /dev/null @@ -1,44 +0,0 @@ -package tmpdir - -import ( - "os" - "runtime" - - "go.podman.io/image/v5/types" -) - -// unixTempDirForBigFiles is the directory path to store big files on non Windows systems. -// You can override this at build time with -// -ldflags '-X go.podman.io/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path' -var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles - -// builtinUnixTempDirForBigFiles is the directory path to store big files. -// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. -// DO NOT change this, instead see unixTempDirForBigFiles above. -const builtinUnixTempDirForBigFiles = "/var/tmp" - -const prefix = "container_images_" - -// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. -// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp -// which on systemd based systems could be the unsuitable tmpfs filesystem. 
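ComputeBlobInfo above follows a spool-and-digest pattern: tee the stream into a digest while writing it to a temporary file, then rewind so the caller can re-read the now-measured blob. A simplified, self-contained sketch using plain SHA-256 (spool is an invented helper, not the real API):

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"strings"
)

// spool writes stream to a temp file while hashing it, then rewinds the file.
// The caller owns the file and must close and remove it.
func spool(stream io.Reader) (*os.File, string, int64, error) {
	f, err := os.CreateTemp("", "stream-blob")
	if err != nil {
		return nil, "", 0, err
	}
	h := sha256.New()
	n, err := io.Copy(f, io.TeeReader(stream, h))
	if err == nil {
		_, err = f.Seek(0, io.SeekStart) // rewind for re-reading
	}
	if err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, "", 0, err
	}
	return f, fmt.Sprintf("sha256:%x", h.Sum(nil)), n, nil
}

func main() {
	f, digest, size, err := spool(strings.NewReader("layer data"))
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	fmt.Println(digest, size)
}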
-func temporaryDirectoryForBigFiles(sys *types.SystemContext) string { - if sys != nil && sys.BigFilesTemporaryDir != "" { - return sys.BigFilesTemporaryDir - } - var temporaryDirectoryForBigFiles string - if runtime.GOOS == "windows" { - temporaryDirectoryForBigFiles = os.TempDir() - } else { - temporaryDirectoryForBigFiles = unixTempDirForBigFiles - } - return temporaryDirectoryForBigFiles -} - -func CreateBigFileTemp(sys *types.SystemContext, name string) (*os.File, error) { - return os.CreateTemp(temporaryDirectoryForBigFiles(sys), prefix+name) -} - -func MkDirBigFileTemp(sys *types.SystemContext, name string) (string, error) { - return os.MkdirTemp(temporaryDirectoryForBigFiles(sys), prefix+name) -} diff --git a/vendor/go.podman.io/image/v5/internal/uploadreader/upload_reader.go b/vendor/go.podman.io/image/v5/internal/uploadreader/upload_reader.go deleted file mode 100644 index b95370af7..000000000 --- a/vendor/go.podman.io/image/v5/internal/uploadreader/upload_reader.go +++ /dev/null @@ -1,61 +0,0 @@ -package uploadreader - -import ( - "io" - "sync" -) - -// UploadReader is a pass-through reader for use in sending non-trivial data using the net/http -// package (http.NewRequest, http.Post and the like). -// -// The net/http package uses a separate goroutine to upload data to a HTTP connection, -// and it is possible for the server to return a response (typically an error) before consuming -// the full body of the request. In that case http.Client.Do can return with an error while -// the body is still being read — regardless of the cancellation, if any, of http.Request.Context(). -// -// As a result, any data used/updated by the io.Reader() provided as the request body may be -// used/updated even after http.Client.Do returns, causing races. -// -// To fix this, UploadReader provides a synchronized Terminate() method, which can block for -// a not-completely-negligible time (for a duration of the underlying Read()), but guarantees that -// after Terminate() returns, the underlying reader is never used any more (unlike calling -// the cancellation callback of context.WithCancel, which returns before any recipients may have -// reacted to the cancellation). -type UploadReader struct { - mutex sync.Mutex - // The following members can only be used with mutex held - reader io.Reader - terminationError error // nil if not terminated yet -} - -// NewUploadReader returns an UploadReader for an "underlying" reader. -func NewUploadReader(underlying io.Reader) *UploadReader { - return &UploadReader{ - reader: underlying, - terminationError: nil, - } -} - -// Read returns the error set by Terminate, if any, or calls the underlying reader. -// It is safe to call this from a different goroutine than Terminate. -func (ur *UploadReader) Read(p []byte) (int, error) { - ur.mutex.Lock() - defer ur.mutex.Unlock() - - if ur.terminationError != nil { - return 0, ur.terminationError - } - return ur.reader.Read(p) -} - -// Terminate waits for in-progress Read calls, if any, to finish, and ensures that after -// this function returns, any Read calls will fail with the provided error, and the underlying -// reader will never be used any more. -// -// It is safe to call this from a different goroutine than Read. 
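UploadReader's Terminate, documented above, relies on an ordinary mutex: acquiring it waits out any in-progress Read, and every later Read observes the stored error. A stripped-down, runnable restatement of that idea (terminableReader is an invented stand-in, not the real type):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
)

type terminableReader struct {
	mu   sync.Mutex
	r    io.Reader
	terr error // nil until Terminate is called
}

func (t *terminableReader) Read(p []byte) (int, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.terr != nil {
		return 0, t.terr
	}
	return t.r.Read(p)
}

func (t *terminableReader) Terminate(err error) {
	t.mu.Lock() // blocks until any in-progress Read finishes
	defer t.mu.Unlock()
	t.terr = err
}

func main() {
	tr := &terminableReader{r: strings.NewReader("abcdef")}
	buf := make([]byte, 3)
	n, _ := tr.Read(buf)
	fmt.Printf("read %q\n", buf[:n])
	tr.Terminate(errors.New("response already received"))
	_, err := tr.Read(buf)
	fmt.Println("after Terminate:", err)
}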
-func (ur *UploadReader) Terminate(err error) { - ur.mutex.Lock() // May block for some time if ur.reader.Read() is in progress - defer ur.mutex.Unlock() - - ur.terminationError = err -} diff --git a/vendor/go.podman.io/image/v5/internal/useragent/useragent.go b/vendor/go.podman.io/image/v5/internal/useragent/useragent.go deleted file mode 100644 index 54d8fcb70..000000000 --- a/vendor/go.podman.io/image/v5/internal/useragent/useragent.go +++ /dev/null @@ -1,6 +0,0 @@ -package useragent - -import "go.podman.io/image/v5/version" - -// DefaultUserAgent is a value that should be used by User-Agent headers, unless the user specifically instructs us otherwise. -var DefaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)" diff --git a/vendor/go.podman.io/image/v5/manifest/common.go b/vendor/go.podman.io/image/v5/manifest/common.go deleted file mode 100644 index dde1bf3c8..000000000 --- a/vendor/go.podman.io/image/v5/manifest/common.go +++ /dev/null @@ -1,152 +0,0 @@ -package manifest - -import ( - "fmt" - - "github.com/sirupsen/logrus" - compressiontypes "go.podman.io/image/v5/pkg/compression/types" - "go.podman.io/image/v5/types" -) - -// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() -// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. -func layerInfosToStrings(infos []LayerInfo) []string { - layers := make([]string, len(infos)) - for i, info := range infos { - layers[i] = info.Digest.String() - } - return layers -} - -// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed -// versions of “the same kind of content”. -// The map key is the return value of compressiontypes.Algorithm.Name(), or mtsUncompressed; -// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported". -type compressionMIMETypeSet map[string]string - -const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant -const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported” - -// findCompressionMIMETypeSet returns a pointer to a compressionMIMETypeSet in variantTable that contains a value of mimeType, or nil if not found -func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet { - for _, variants := range variantTable { - for _, mt := range variants { - if mt == mimeType { - return variants - } - } - } - return nil -} - -// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil -// to mean "no compression"), based on variantTable. -// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants -// that differ only in what type of compression is applied, but it can't be combined with this -// algorithm to produce an updated MIME type that complies with the standard that defines mimeType. -// If the compression algorithm is unrecognized, or mimeType is not known to have variants that -// differ from it only in what type of compression has been applied, the returned error will not be -// a ManifestLayerCompressionIncompatibilityError. 
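The compressionMIMETypeSet lookup described above amounts to: find the variant family containing the current MIME type, then index that family by algorithm name. A toy version under those assumptions (variantSet and variantFor are invented names, and the table holds a single OCI layer family):

package main

import "fmt"

type variantSet map[string]string // algorithm name ("" = uncompressed) -> MIME type

var table = []variantSet{{
	"":     "application/vnd.oci.image.layer.v1.tar",
	"gzip": "application/vnd.oci.image.layer.v1.tar+gzip",
	"zstd": "application/vnd.oci.image.layer.v1.tar+zstd",
}}

// variantFor maps a layer MIME type plus a target algorithm to the
// corresponding variant in the same family, if both are known.
func variantFor(mimeType, algo string) (string, bool) {
	for _, variants := range table {
		for _, mt := range variants {
			if mt == mimeType { // mimeType belongs to this family
				res, ok := variants[algo]
				return res, ok
			}
		}
	}
	return "", false
}

func main() {
	fmt.Println(variantFor("application/vnd.oci.image.layer.v1.tar", "zstd"))
	fmt.Println(variantFor("application/vnd.oci.image.layer.v1.tar+gzip", ""))
}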
-func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compressiontypes.Algorithm) (string, error) { - if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries - return "", fmt.Errorf("cannot update unknown MIME type") - } - variants := findCompressionMIMETypeSet(variantTable, mimeType) - if variants != nil { - name := mtsUncompressed - if algorithm != nil { - name = algorithm.BaseVariantName() - } - if res, ok := variants[name]; ok { - if res != mtsUnsupportedMIMEType { - return res, nil - } - if name != mtsUncompressed { - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)} - } - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)} - } - if name != mtsUncompressed { - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %q", name, mimeType)} - } - // We can't very well say “the idea of no compression is unknown” - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)} - } - if algorithm != nil { - return "", fmt.Errorf("unsupported MIME type for compression: %q", mimeType) - } - return "", fmt.Errorf("unsupported MIME type for decompression: %q", mimeType) -} - -// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to -// mimeType, based on variantTable. It may use updated.Digest for error messages. -// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants -// that differ only in what type of compression is applied, but applying updated.CompressionOperation -// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the -// standard that defines mimeType. -func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) { - // Note that manifests in containers-storage might be reporting the - // wrong media type since the original manifests are stored while layers - // are decompressed in storage. Hence, we need to consider the case - // that an already {de}compressed layer should be {de}compressed; - // compressionVariantMIMEType does that by not caring whether the original is - // {de}compressed. - switch updated.CompressionOperation { - case types.PreserveOriginal: - // Force a change to the media type if we're being told to use a particular compressor, - // since it might be different from the one associated with the media type. Otherwise, - // try to keep the original media type. - if updated.CompressionAlgorithm != nil { - return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) - } - // Keep the original media type. 
- return mimeType, nil - - case types.Decompress: - return compressionVariantMIMEType(variantTable, mimeType, nil) - - case types.Compress: - if updated.CompressionAlgorithm == nil { - logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", updated.Digest) - return mimeType, nil - } - return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) - - default: - return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation) - } -} - -// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm -// could not be applied to a layer MIME type. A caller that receives this should either retry -// the call with a different compression algorithm, or attempt to use a different manifest type. -type ManifestLayerCompressionIncompatibilityError struct { - text string -} - -func (m ManifestLayerCompressionIncompatibilityError) Error() string { - return m.text -} - -// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType -// Note that the caller still needs to worry about a specific algorithm not being supported. -func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool { - if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries - return false - } - variants := findCompressionMIMETypeSet(variantTable, mimeType) - return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm. -} - -// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() -// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. -func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer { - layers := make([]types.ImageInspectLayer, len(infos)) - for i, info := range infos { - layers[i].MIMEType = info.MediaType - layers[i].Digest = info.Digest - layers[i].Size = info.Size - layers[i].Annotations = info.Annotations - } - return layers -} diff --git a/vendor/go.podman.io/image/v5/manifest/docker_schema1.go b/vendor/go.podman.io/image/v5/manifest/docker_schema1.go deleted file mode 100644 index 28c9fea30..000000000 --- a/vendor/go.podman.io/image/v5/manifest/docker_schema1.go +++ /dev/null @@ -1,346 +0,0 @@ -package manifest - -import ( - "encoding/json" - "errors" - "fmt" - "slices" - "strings" - "time" - - "github.com/docker/docker/api/types/versions" - "github.com/opencontainers/go-digest" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/manifest" - "go.podman.io/image/v5/internal/set" - compressiontypes "go.podman.io/image/v5/pkg/compression/types" - "go.podman.io/image/v5/types" - "go.podman.io/storage/pkg/regexp" -) - -// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. -type Schema1FSLayers struct { - BlobSum digest.Digest `json:"blobSum"` -} - -// Schema1History is an entry of the "history" array in docker/distribution schema 1. -type Schema1History struct { - V1Compatibility string `json:"v1Compatibility"` -} - -// Schema1 is a manifest in docker/distribution schema 1. 
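A caller receiving ManifestLayerCompressionIncompatibilityError, per the comment above, should retry with different compression or another manifest type; the usual way to detect it is errors.As. A hedged sketch of that caller-side branch, with the error type re-declared locally so the example stands alone:

package main

import (
	"errors"
	"fmt"
)

type incompatibilityError struct{ text string }

func (e incompatibilityError) Error() string { return e.text }

// updateLayers is hypothetical: it fails with the incompatibility error
// only when asked for a compression the manifest type cannot express.
func updateLayers(useZstd bool) error {
	if useZstd {
		return incompatibilityError{"zstd compression is not supported for this manifest type"}
	}
	return nil
}

func main() {
	err := updateLayers(true)
	var incompat incompatibilityError
	if errors.As(err, &incompat) {
		fmt.Println("falling back to gzip:", incompat)
		err = updateLayers(false)
	}
	fmt.Println("final error:", err)
}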
-type Schema1 struct {
- Name string `json:"name"`
- Tag string `json:"tag"`
- Architecture string `json:"architecture"`
- FSLayers []Schema1FSLayers `json:"fsLayers"`
- History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility!
- ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image)
- SchemaVersion int `json:"schemaVersion"`
-}
-
-type schema1V1CompatibilityContainerConfig struct {
- Cmd []string
-}
-
-// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
-type Schema1V1Compatibility struct {
- ID string `json:"id"`
- Parent string `json:"parent,omitempty"`
- Comment string `json:"comment,omitempty"`
- Created time.Time `json:"created"`
- ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
- Author string `json:"author,omitempty"`
- ThrowAway bool `json:"throwaway,omitempty"`
-}
-
-// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
-// (NOTE: The instance is not necessarily a literal representation of the original blob;
-// layers with duplicate IDs are eliminated.)
-func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) {
- s1 := Schema1{}
- if err := json.Unmarshal(manifestBlob, &s1); err != nil {
- return nil, err
- }
- if s1.SchemaVersion != 1 {
- return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion)
- }
- if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType,
- manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil {
- return nil, err
- }
- if err := s1.initialize(); err != nil {
- return nil, err
- }
- if err := s1.fixManifestLayers(); err != nil {
- return nil, err
- }
- return &s1, nil
-}
-
-// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
-func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
- var name, tag string
- if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
- name = reference.Path(ref)
- if tagged, ok := ref.(reference.NamedTagged); ok {
- tag = tagged.Tag()
- }
- }
- s1 := Schema1{
- Name: name,
- Tag: tag,
- Architecture: architecture,
- FSLayers: fsLayers,
- History: history,
- SchemaVersion: 1,
- }
- if err := s1.initialize(); err != nil {
- return nil, err
- }
- return &s1, nil
-}
-
-// Schema1Clone creates a copy of the supplied Schema1 manifest.
-func Schema1Clone(src *Schema1) *Schema1 {
- copy := *src
- return &copy
-}
-
-// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
-func (m *Schema1) initialize() error {
- if len(m.FSLayers) != len(m.History) {
- return errors.New("length of history not equal to number of layers")
- }
- if len(m.FSLayers) == 0 {
- return errors.New("no FSLayers in manifest")
- }
- m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
- for i, h := range m.History {
- if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
- return fmt.Errorf("parsing v2s1 history entry %d: %w", i, err)
- }
- }
- return nil
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
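-// (Schema1 has no separate config blob; the configuration is embedded in the History[0].V1Compatibility JSON instead.)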
-func (m *Schema1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{} -} - -// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *Schema1) LayerInfos() []LayerInfo { - layers := make([]LayerInfo, 0, len(m.FSLayers)) - for i, layer := range slices.Backward(m.FSLayers) { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers = append(layers, LayerInfo{ - BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, - EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, - }) - } - return layers -} - -const fakeSchema1MIMEType = DockerV2Schema2LayerMediaType // Used only in schema1CompressionMIMETypeSets -var schema1CompressionMIMETypeSets = []compressionMIMETypeSet{ - { - mtsUncompressed: fakeSchema1MIMEType, - compressiontypes.GzipAlgorithmName: fakeSchema1MIMEType, - compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType, - }, -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. - if len(m.FSLayers) != len(layerInfos) { - return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) - } - m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) - for i, info := range layerInfos { - // There are no MIME types in schema1, but we do a “conversion” here to reject unsupported compression algorithms, - // in a way that is consistent with the other schema implementations. - if _, err := updatedMIMEType(schema1CompressionMIMETypeSets, fakeSchema1MIMEType, info); err != nil { - return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err) - } - // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, - // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. - // So, we don't bother recomputing the IDs in m.History.V1Compatibility. - m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest - if info.CryptoOperation != types.PreserveOriginalCrypto { - return fmt.Errorf("encryption change (for layer %q) is not supported in schema1 manifests", info.Digest) - } - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema1) Serialize() ([]byte, error) { - // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. - unsigned, err := json.Marshal(*m) - if err != nil { - return nil, err - } - return AddDummyV2S1Signature(unsigned) -} - -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), -// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, -// both from m.History and m.FSLayers). 
-// Note that even after this succeeds, m.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). -func (m *Schema1) fixManifestLayers() error { - // m.initialize() has verified that len(m.FSLayers) == len(m.History) - for _, compat := range m.ExtractedV1Compatibility { - if err := validateV1ID(compat.ID); err != nil { - return err - } - } - if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image") - } - // check general duplicates to error instead of a deadlock - idmap := set.New[string]() - var lastID string - for _, img := range m.ExtractedV1Compatibility { - // skip IDs that appear after each other, we handle those later - if img.ID != lastID && idmap.Contains(img.ID) { - return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap.Add(lastID) - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { - if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue - m.FSLayers = slices.Delete(m.FSLayers, i, i+1) - m.History = slices.Delete(m.History, i, i+1) - m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1) - } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { - return fmt.Errorf("Invalid parent ID. Expected %v, got %q", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) - } - } - return nil -} - -var validHex = regexp.Delayed(`^([a-f0-9]{64})$`) - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - s1 := &Schema2V1Image{} - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { - return nil, err - } - layerInfos := m.LayerInfos() - i := &types.ImageInspectInfo{ - Tag: m.Tag, - Created: &s1.Created, - DockerVersion: s1.DockerVersion, - Architecture: s1.Architecture, - Variant: s1.Variant, - Os: s1.OS, - Layers: layerInfosToStrings(layerInfos), - LayersData: imgInspectLayersFromLayerInfos(layerInfos), - Author: s1.Author, - } - if s1.Config != nil { - i.Labels = s1.Config.Labels - i.Env = s1.Config.Env - } - return i, nil -} - -// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. -func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { - // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields - // that aren't directly comparable using info from the manifest. - if len(m.History) == 0 { - return nil, errors.New("image has no layers") - } - s1 := Schema2V1Image{} - config := []byte(m.History[0].V1Compatibility) - err := json.Unmarshal(config, &s1) - if err != nil { - return nil, fmt.Errorf("decoding configuration: %w", err) - } - // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, - // adding some fields that aren't "omitempty". 
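- // (Round-tripping through Schema2V1Image makes json.Marshal emit those always-present fields.)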
- if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { - config, err = json.Marshal(&s1) - if err != nil { - return nil, fmt.Errorf("re-encoding compat image config %#v: %w", s1, err) - } - } - // Build the history. - convertedHistory := []Schema2History{} - for _, compat := range slices.Backward(m.ExtractedV1Compatibility) { - hitem := Schema2History{ - Created: compat.Created, - CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), - Author: compat.Author, - Comment: compat.Comment, - EmptyLayer: compat.ThrowAway, - } - convertedHistory = append(convertedHistory, hitem) - } - // Build the rootfs information. We need the decompressed sums that we've been - // calculating to fill in the DiffIDs. It's expected (but not enforced by us) - // that the number of diffIDs corresponds to the number of non-EmptyLayer - // entries in the history. - rootFS := &Schema2RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - // And now for some raw manipulation. - raw := make(map[string]*json.RawMessage) - err = json.Unmarshal(config, &raw) - if err != nil { - return nil, fmt.Errorf("re-decoding compat image config %#v: %w", s1, err) - } - // Drop some fields. - delete(raw, "id") - delete(raw, "parent") - delete(raw, "parent_id") - delete(raw, "layer_id") - delete(raw, "throwaway") - delete(raw, "Size") - // Add the history and rootfs information. - rootfs, err := json.Marshal(rootFS) - if err != nil { - return nil, fmt.Errorf("error encoding rootfs information %#v: %w", rootFS, err) - } - rawRootfs := json.RawMessage(rootfs) - raw["rootfs"] = &rawRootfs - history, err := json.Marshal(convertedHistory) - if err != nil { - return nil, fmt.Errorf("error encoding history information %#v: %w", convertedHistory, err) - } - rawHistory := json.RawMessage(history) - raw["history"] = &rawHistory - // Encode the result. - config, err = json.Marshal(raw) - if err != nil { - return nil, fmt.Errorf("error re-encoding compat image config %#v: %w", s1, err) - } - return config, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { - image, err := m.ToSchema2Config(diffIDs) - if err != nil { - return "", err - } - return digest.FromBytes(image).Encoded(), nil -} diff --git a/vendor/go.podman.io/image/v5/manifest/docker_schema2.go b/vendor/go.podman.io/image/v5/manifest/docker_schema2.go deleted file mode 100644 index b4255d886..000000000 --- a/vendor/go.podman.io/image/v5/manifest/docker_schema2.go +++ /dev/null @@ -1,307 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - "time" - - "github.com/opencontainers/go-digest" - "go.podman.io/image/v5/internal/manifest" - compressiontypes "go.podman.io/image/v5/pkg/compression/types" - "go.podman.io/image/v5/pkg/strslice" - "go.podman.io/image/v5/types" -) - -// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. -type Schema2Descriptor = manifest.Schema2Descriptor - -// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. -func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { - return types.BlobInfo{ - Digest: desc.Digest, - Size: desc.Size, - URLs: desc.URLs, - MediaType: desc.MediaType, - } -} - -// Schema2 is a manifest in docker/distribution schema 2. 
-type Schema2 struct {
- SchemaVersion int `json:"schemaVersion"`
- MediaType string `json:"mediaType"`
- ConfigDescriptor Schema2Descriptor `json:"config"`
- LayersDescriptors []Schema2Descriptor `json:"layers"`
-}
-
-// Schema2Port is a Port, a string containing port number and protocol in the
-// format "80/tcp", from docker/go-connections/nat.
-type Schema2Port string
-
-// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from
-// docker/go-connections/nat.
-type Schema2PortSet map[Schema2Port]struct{}
-
-// Schema2HealthConfig is a HealthConfig, which holds configuration settings
-// for the HEALTHCHECK feature, from docker/docker/api/types/container.
-type Schema2HealthConfig struct {
- // Test is the test to perform to check that the container is healthy.
- // An empty slice means to inherit the default.
- // The options are:
- // {} : inherit healthcheck
- // {"NONE"} : disable healthcheck
- // {"CMD", args...} : exec arguments directly
- // {"CMD-SHELL", command} : run command with system's default shell
- Test []string `json:",omitempty"`
-
- // Zero means to inherit. Durations are expressed as integer nanoseconds.
- StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
- StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period.
- Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
-
- // Retries is the number of consecutive failures needed to consider a container as unhealthy.
- // Zero means inherit.
- Retries int `json:",omitempty"`
-}
-
-// Schema2Config is a Config in docker/docker/api/types/container.
-type Schema2Config struct {
- Hostname string // Hostname
- Domainname string // Domainname
- User string // User that will run the command(s) inside the container, also supports user:group
- AttachStdin bool // Attach the standard input, makes possible user interaction
- AttachStdout bool // Attach the standard output
- AttachStderr bool // Attach the standard error
- ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports
- Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
- OpenStdin bool // Open stdin
- StdinOnce bool // If true, close stdin after the first attached client disconnects.
- Env []string // List of environment variables to set in the container
- Cmd strslice.StrSlice // Command to run when starting the container
- Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
- ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
- Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
- Volumes map[string]struct{} // List of volumes (mounts) used for the container
- WorkingDir string // Current directory (PWD) in which the command will be launched
- Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
- NetworkDisabled bool `json:",omitempty"` // Is network disabled
- MacAddress string `json:",omitempty"` // Mac Address of the container
- OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
- Labels map[string]string // List of labels set to this container
- StopSignal string `json:",omitempty"` // Signal to stop a container
- StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
- Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
-}
-
-// Schema2V1Image is a V1Image in docker/docker/image.
-type Schema2V1Image struct {
- // ID is a unique 64 character identifier of the image
- ID string `json:"id,omitempty"`
- // Parent is the ID of the parent image
- Parent string `json:"parent,omitempty"`
- // Comment is the commit message that was set when committing the image
- Comment string `json:"comment,omitempty"`
- // Created is the timestamp at which the image was created
- Created time.Time `json:"created"`
- // Container is the id of the container used to commit
- Container string `json:"container,omitempty"`
- // ContainerConfig is the configuration of the container that is committed into the image
- ContainerConfig Schema2Config `json:"container_config,omitempty"`
- // DockerVersion specifies the version of Docker that was used to build the image
- DockerVersion string `json:"docker_version,omitempty"`
- // Author is the name of the author that was specified when committing the image
- Author string `json:"author,omitempty"`
- // Config is the configuration of the container received from the client
- Config *Schema2Config `json:"config,omitempty"`
- // Architecture is the hardware that the image is built and runs on
- Architecture string `json:"architecture,omitempty"`
- // Variant is a variant of the CPU that the image is built and runs on
- Variant string `json:"variant,omitempty"`
- // OS is the operating system used to build and run the image
- OS string `json:"os,omitempty"`
- // Size is the total size of the image including all layers it is composed of
- Size int64 `json:",omitempty"`
-}
-
-// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
-type Schema2RootFS struct {
- Type string `json:"type"`
- DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
-}
-
-// Schema2History stores build commands that were used to create an image, from docker/docker/image.
-type Schema2History struct {
- // Created is the timestamp at which the image was created
- Created time.Time `json:"created"`
- // Author is the name of the author that was specified when committing the image
- Author string `json:"author,omitempty"`
- // CreatedBy keeps the Dockerfile command used while building the image
- CreatedBy string `json:"created_by,omitempty"`
- // Comment is the commit message that was set when committing the image
- Comment string `json:"comment,omitempty"`
- // EmptyLayer is set to true if this history item did not generate a
- // layer. Otherwise, the history item is associated with the next
- // layer in the RootFS section.
- EmptyLayer bool `json:"empty_layer,omitempty"`
-}
-
-// Schema2Image is an Image in docker/docker/image.
-type Schema2Image struct {
- Schema2V1Image
- Parent digest.Digest `json:"parent,omitempty"`
- RootFS *Schema2RootFS `json:"rootfs,omitempty"`
- History []Schema2History `json:"history,omitempty"`
- OSVersion string `json:"os.version,omitempty"`
- OSFeatures []string `json:"os.features,omitempty"`
-}
-
-// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
-func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) {
- s2 := Schema2{}
- if err := json.Unmarshal(manifestBlob, &s2); err != nil {
- return nil, err
- }
- if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType,
- manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
- return nil, err
- }
- // Check manifest's and layers' media types.
- if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
- return nil, err
- }
- for _, layer := range s2.LayersDescriptors {
- if err := SupportedSchema2MediaType(layer.MediaType); err != nil {
- return nil, err
- }
- }
- return &s2, nil
-}
-
-// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
-func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
- return &Schema2{
- SchemaVersion: 2,
- MediaType: DockerV2Schema2MediaType,
- ConfigDescriptor: config,
- LayersDescriptors: layers,
- }
-}
-
-// Schema2Clone creates a copy of the supplied Schema2 manifest.
-func Schema2Clone(src *Schema2) *Schema2 {
- copy := *src
- return &copy
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-func (m *Schema2) ConfigInfo() types.BlobInfo {
- return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
-}
-
-// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *Schema2) LayerInfos() []LayerInfo {
- blobs := make([]LayerInfo, 0, len(m.LayersDescriptors))
- for _, layer := range m.LayersDescriptors {
- blobs = append(blobs, LayerInfo{
- BlobInfo: BlobInfoFromSchema2Descriptor(layer),
- EmptyLayer: false,
- })
- }
- return blobs
-}
-
-var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
- {
- mtsUncompressed: DockerV2Schema2ForeignLayerMediaType,
- compressiontypes.GzipAlgorithmName: DockerV2Schema2ForeignLayerMediaTypeGzip,
- compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
- },
- {
- mtsUncompressed: DockerV2SchemaLayerMediaTypeUncompressed,
- compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
- compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
- },
-}
-
-// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
-// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
-// CompressionAlgorithm that would result in anything other than gzip compression.
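-// (schema2 defines only gzip-compressed and uncompressed layer media types, so e.g. a zstd edit is rejected.)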
-func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - if len(m.LayersDescriptors) != len(layerInfos) { - return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) - } - original := m.LayersDescriptors - m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) - for i, info := range layerInfos { - mimeType := original[i].MediaType - // First make sure we support the media type of the original layer. - if err := SupportedSchema2MediaType(mimeType); err != nil { - return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer %q: %q", info.Digest, mimeType) - } - mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info) - if err != nil { - return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err) - } - m.LayersDescriptors[i].MediaType = mimeType - m.LayersDescriptors[i].Digest = info.Digest - m.LayersDescriptors[i].Size = info.Size - m.LayersDescriptors[i].URLs = info.URLs - if info.CryptoOperation != types.PreserveOriginalCrypto { - return fmt.Errorf("encryption change (for layer %q) is not supported in schema2 manifests", info.Digest) - } - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema2) Serialize() ([]byte, error) { - return json.Marshal(*m) -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - config, err := configGetter(m.ConfigInfo()) - if err != nil { - return nil, err - } - s2 := &Schema2Image{} - if err := json.Unmarshal(config, s2); err != nil { - return nil, err - } - layerInfos := m.LayerInfos() - i := &types.ImageInspectInfo{ - Tag: "", - Created: &s2.Created, - DockerVersion: s2.DockerVersion, - Architecture: s2.Architecture, - Variant: s2.Variant, - Os: s2.OS, - Layers: layerInfosToStrings(layerInfos), - LayersData: imgInspectLayersFromLayerInfos(layerInfos), - Author: s2.Author, - } - if s2.Config != nil { - i.Labels = s2.Config.Labels - i.Env = s2.Config.Env - } - return i, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *Schema2) ImageID([]digest.Digest) (string, error) { - if err := m.ConfigDescriptor.Digest.Validate(); err != nil { - return "", err - } - return m.ConfigDescriptor.Digest.Encoded(), nil -} - -// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image -// (and the code can handle that). -// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted -// algorithms depends not on the current format, but possibly on the target of a conversion. 
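-// (For schema2 that effectively means only the gzip and uncompressed variants listed in schema2CompressionMIMETypeSets above.)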
-func (m *Schema2) CanChangeLayerCompression(mimeType string) bool { - return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType) -} diff --git a/vendor/go.podman.io/image/v5/manifest/docker_schema2_list.go b/vendor/go.podman.io/image/v5/manifest/docker_schema2_list.go deleted file mode 100644 index 158f7949e..000000000 --- a/vendor/go.podman.io/image/v5/manifest/docker_schema2_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package manifest - -import ( - "go.podman.io/image/v5/internal/manifest" -) - -// Schema2PlatformSpec describes the platform which a particular manifest is -// specialized for. -type Schema2PlatformSpec = manifest.Schema2PlatformSpec - -// Schema2ManifestDescriptor references a platform-specific manifest. -type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor - -// Schema2List is a list of platform-specific manifests. -type Schema2List = manifest.Schema2ListPublic - -// Schema2ListFromComponents creates a Schema2 manifest list instance from the -// supplied data. -func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List { - return manifest.Schema2ListPublicFromComponents(components) -} - -// Schema2ListClone creates a deep copy of the passed-in list. -func Schema2ListClone(list *Schema2List) *Schema2List { - return manifest.Schema2ListPublicClone(list) -} - -// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled -// JSON, presumably generated by encoding a Schema2 manifest list. -func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) { - return manifest.Schema2ListPublicFromManifest(manifestBlob) -} diff --git a/vendor/go.podman.io/image/v5/manifest/list.go b/vendor/go.podman.io/image/v5/manifest/list.go deleted file mode 100644 index 846ea7d43..000000000 --- a/vendor/go.podman.io/image/v5/manifest/list.go +++ /dev/null @@ -1,35 +0,0 @@ -package manifest - -import ( - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "go.podman.io/image/v5/internal/manifest" -) - -var ( - // SupportedListMIMETypes is a list of the manifest list types that we know how to - // read/manipulate/write. - SupportedListMIMETypes = []string{ - DockerV2ListMediaType, - imgspecv1.MediaTypeImageIndex, - } -) - -// List is an interface for parsing, modifying lists of image manifests. -// Callers can either use this abstract interface without understanding the details of the formats, -// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members -// directly. -type List = manifest.ListPublic - -// ListUpdate includes the fields which a List's UpdateInstances() method will modify. -type ListUpdate = manifest.ListUpdate - -// ListFromBlob parses a list of manifests. -func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) { - return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType) -} - -// ConvertListToMIMEType converts the passed-in manifest list to a manifest -// list of the specified type. 
-func ConvertListToMIMEType(list List, manifestMIMEType string) (List, error) {
- return list.ConvertToMIMEType(manifestMIMEType)
-}
diff --git a/vendor/go.podman.io/image/v5/manifest/manifest.go b/vendor/go.podman.io/image/v5/manifest/manifest.go
deleted file mode 100644
index ed489a5a6..000000000
--- a/vendor/go.podman.io/image/v5/manifest/manifest.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package manifest
-
-import (
- "fmt"
-
- "github.com/containers/libtrust"
- digest "github.com/opencontainers/go-digest"
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "go.podman.io/image/v5/internal/manifest"
- "go.podman.io/image/v5/types"
-)
-
-// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
-
-// FIXME(runcom, mitr): should we have a mediatype pkg??
-const (
- // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
- DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
- // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
- DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
- // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
- DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType
- // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
- DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType
- // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
- DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType
- // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
- DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed
- // DockerV2SchemaLayerMediaTypeZstd is the mediaType used for zstd layers.
- // Warning: This mediaType is not officially supported in https://github.com/distribution/distribution/blob/main/docs/content/spec/manifest-v2-2.md but some images may exhibit it. Support is partial.
- DockerV2SchemaLayerMediaTypeZstd = manifest.DockerV2SchemaLayerMediaTypeZstd
- // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
- DockerV2ListMediaType = manifest.DockerV2ListMediaType
- // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
- DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType
- // DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
- DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
-)
-
-// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
-// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
-type NonImageArtifactError = manifest.NonImageArtifactError
-
-// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
-func SupportedSchema2MediaType(m string) error { - switch m { - case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed, DockerV2SchemaLayerMediaTypeZstd: - return nil - default: - return fmt.Errorf("unsupported docker v2s2 media type: %q", m) - } -} - -// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource -// should request from the backend unless directed otherwise. -var DefaultRequestedManifestMIMETypes = []string{ - imgspecv1.MediaTypeImageManifest, - DockerV2Schema2MediaType, - DockerV2Schema1SignedMediaType, - DockerV2Schema1MediaType, - DockerV2ListMediaType, - imgspecv1.MediaTypeImageIndex, -} - -// Manifest is an interface for parsing, modifying image manifests in isolation. -// Callers can either use this abstract interface without understanding the details of the formats, -// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members -// directly. -// -// See types.Image for functionality not limited to manifests, including format conversions and config parsing. -// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. -type Manifest interface { - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - ConfigInfo() types.BlobInfo - // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []LayerInfo - // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) - UpdateLayerInfos(layerInfos []types.BlobInfo) error - - // ImageID computes an ID which can uniquely identify this image by its contents, irrespective - // of which (of possibly more than one simultaneously valid) reference was used to locate the - // image, and unchanged by whether or how the layers are compressed. The result takes the form - // of the hexadecimal portion of a digest.Digest. - ImageID(diffIDs []digest.Digest) (string, error) - - // Inspect returns various information for (skopeo inspect) parsed from the manifest, - // incorporating information from a configuration blob returned by configGetter, if - // the underlying image format is expected to include a configuration blob. - Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) - - // Serialize returns the manifest in a blob format. - // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! - Serialize() ([]byte, error) -} - -// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos. -type LayerInfo struct { - types.BlobInfo - EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. 
-}
-
-// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
-// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
-// but we may not have such metadata available (e.g. when the manifest is a local file).
-func GuessMIMEType(manifestBlob []byte) string {
- return manifest.GuessMIMEType(manifestBlob)
-}
-
-// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
-func Digest(manifestBlob []byte) (digest.Digest, error) {
- return manifest.Digest(manifestBlob)
-}
-
-// MatchesDigest returns true iff the manifest matches expectedDigest.
-// Error may be set if this returns false.
-// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
-// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
-func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) {
- return manifest.MatchesDigest(manifestBlob, expectedDigest)
-}
-
-// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
-// This is useful to make the manifest acceptable to a docker/distribution registry (even though nothing needs or wants the JWS signature).
-func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
- key, err := libtrust.GenerateECP256PrivateKey()
- if err != nil {
- return nil, err // Coverage: This can fail only if rand.Reader fails.
- }
-
- js, err := libtrust.NewJSONSignature(manifest)
- if err != nil {
- return nil, err
- }
- if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
- return nil, err
- }
- return js.PrettySignature("signatures")
-}
-
-// MIMETypeIsMultiImage returns true if mimeType is a list of images
-func MIMETypeIsMultiImage(mimeType string) bool {
- return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex
-}
-
-// MIMETypeSupportsEncryption returns true if the mimeType supports encryption
-func MIMETypeSupportsEncryption(mimeType string) bool {
- return mimeType == imgspecv1.MediaTypeImageManifest
-}
-
-// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
-// centralizing various workarounds.
-func NormalizedMIMEType(input string) string {
- return manifest.NormalizedMIMEType(input)
-}
-
-// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
-func FromBlob(manblob []byte, mt string) (Manifest, error) {
- nmt := NormalizedMIMEType(mt)
- switch nmt {
- case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
- return Schema1FromManifest(manblob)
- case imgspecv1.MediaTypeImageManifest:
- return OCI1FromManifest(manblob)
- case DockerV2Schema2MediaType:
- return Schema2FromManifest(manblob)
- case DockerV2ListMediaType, imgspecv1.MediaTypeImageIndex:
- return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
- }
- // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
- return nil, fmt.Errorf("Unimplemented manifest MIME type %q (normalized as %q)", mt, nmt) -} diff --git a/vendor/go.podman.io/image/v5/manifest/oci.go b/vendor/go.podman.io/image/v5/manifest/oci.go deleted file mode 100644 index 286d58c42..000000000 --- a/vendor/go.podman.io/image/v5/manifest/oci.go +++ /dev/null @@ -1,276 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - "slices" - "strings" - - ociencspec "github.com/containers/ocicrypt/spec" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "go.podman.io/image/v5/internal/manifest" - compressiontypes "go.podman.io/image/v5/pkg/compression/types" - "go.podman.io/image/v5/types" -) - -// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. -func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo { - return types.BlobInfo{ - Digest: desc.Digest, - Size: desc.Size, - URLs: desc.URLs, - Annotations: desc.Annotations, - MediaType: desc.MediaType, - } -} - -// OCI1 is a manifest.Manifest implementation for OCI images. -// The underlying data from imgspecv1.Manifest is also available. -type OCI1 struct { - imgspecv1.Manifest -} - -// SupportedOCI1MediaType checks if the specified string is a supported OCI1 -// media type. -// -// Deprecated: blindly rejecting unknown MIME types when the consumer does not -// need to process the input just reduces interoperability (and violates the -// standard) with no benefit, and that this function does not check that the -// media type is appropriate for any specific purpose, so it’s not all that -// useful for validation anyway. -func SupportedOCI1MediaType(m string) error { - switch m { - case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, - imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd, - imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - imgspecv1.MediaTypeImageManifest, - imgspecv1.MediaTypeLayoutHeader, - ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: - return nil - default: - return fmt.Errorf("unsupported OCIv1 media type: %q", m) - } -} - -// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. -func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) { - oci1 := OCI1{} - if err := json.Unmarshal(manifestBlob, &oci1); err != nil { - return nil, err - } - if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest, - manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil { - return nil, err - } - return &oci1, nil -} - -// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. -func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { - return &OCI1{ - imgspecv1.Manifest{ - Versioned: specs.Versioned{SchemaVersion: 2}, - MediaType: imgspecv1.MediaTypeImageManifest, - Config: config, - Layers: layers, - }, - } -} - -// OCI1Clone creates a copy of the supplied OCI1 manifest. 
-func OCI1Clone(src *OCI1) *OCI1 {
- return &OCI1{
- Manifest: src.Manifest,
- }
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-func (m *OCI1) ConfigInfo() types.BlobInfo {
- return BlobInfoFromOCI1Descriptor(m.Config)
-}
-
-// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *OCI1) LayerInfos() []LayerInfo {
- blobs := make([]LayerInfo, 0, len(m.Layers))
- for _, layer := range m.Layers {
- blobs = append(blobs, LayerInfo{
- BlobInfo: BlobInfoFromOCI1Descriptor(layer),
- EmptyLayer: false,
- })
- }
- return blobs
-}
-
-var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
- {
- mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
- compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
- compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
- },
- {
- mtsUncompressed: imgspecv1.MediaTypeImageLayer,
- compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerGzip,
- compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerZstd,
- },
-}
-
-// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
-// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
-// CompressionAlgorithm that isn't supported by OCI.
-//
-// It’s generally the caller’s responsibility to determine whether a particular edit is acceptable, rather than relying on
-// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called (because UpdateLayerInfos needs
-// to know the final digest). See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression
-// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently
-// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers.
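-// (The implementation below applies each edit in three steps: strip any "+encrypted" suffix, apply the compression edit, then re-add encryption if requested.)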
-func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
- if len(m.Layers) != len(layerInfos) {
- return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
- }
- original := m.Layers
- m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
- for i, info := range layerInfos {
- mimeType := original[i].MediaType
- if info.CryptoOperation == types.Decrypt {
- decMimeType, err := getDecryptedMediaType(mimeType)
- if err != nil {
- return fmt.Errorf("error preparing updated manifest: decryption specified but original mediatype is not encrypted: %q", mimeType)
- }
- mimeType = decMimeType
- }
- mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
- if err != nil {
- return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
- }
- if info.CryptoOperation == types.Encrypt {
- encMediaType, err := getEncryptedMediaType(mimeType)
- if err != nil {
- return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", mimeType)
- }
- mimeType = encMediaType
- }
-
- m.Layers[i].MediaType = mimeType
- m.Layers[i].Digest = info.Digest
- m.Layers[i].Size = info.Size
- m.Layers[i].Annotations = info.Annotations
- m.Layers[i].URLs = info.URLs
- }
- return nil
-}
-
-// getEncryptedMediaType returns the encrypted counterpart of mediatype, or
-// an error if the mediatype does not support encryption
-func getEncryptedMediaType(mediatype string) (string, error) {
- parts := strings.Split(mediatype, "+")
- if slices.Contains(parts[1:], "encrypted") {
- return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype)
- }
- unsuffixedMediatype := parts[0]
- switch unsuffixedMediatype {
- case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer,
- imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
- return mediatype + "+encrypted", nil
- }
-
- return "", fmt.Errorf("unsupported mediaType to encrypt: %q", mediatype)
-}
-
-// getDecryptedMediaType returns the decrypted counterpart of mediatype, or
-// an error if the mediatype does not support decryption
-func getDecryptedMediaType(mediatype string) (string, error) {
- res, ok := strings.CutSuffix(mediatype, "+encrypted")
- if !ok {
- return "", fmt.Errorf("unsupported mediaType to decrypt: %q", mediatype)
- }
-
- return res, nil
-}
-
-// Serialize returns the manifest in a blob format.
-// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
-func (m *OCI1) Serialize() ([]byte, error) {
- return json.Marshal(*m)
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
- if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
- // We could return at least the layers, but that’s already available in a better format via types.Image.LayerInfos.
- // Most software calling this without human intervention is going to expect the values to be realistic and relevant,
- // and is probably better served by failing; we can always re-visit that later if we fail now, but
- // if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
- return nil, manifest.NewNonImageArtifactError(&m.Manifest)
- }
-
- config, err := configGetter(m.ConfigInfo())
- if err != nil {
- return nil, err
- }
- v1 := &imgspecv1.Image{}
- if err := json.Unmarshal(config, v1); err != nil {
- return nil, err
- }
- d1 := &Schema2V1Image{}
- if err := json.Unmarshal(config, d1); err != nil {
- return nil, err
- }
- layerInfos := m.LayerInfos()
- i := &types.ImageInspectInfo{
- Tag: "",
- Created: v1.Created,
- DockerVersion: d1.DockerVersion,
- Labels: v1.Config.Labels,
- Architecture: v1.Architecture,
- Variant: v1.Variant,
- Os: v1.OS,
- Layers: layerInfosToStrings(layerInfos),
- LayersData: imgInspectLayersFromLayerInfos(layerInfos),
- Env: v1.Config.Env,
- Author: v1.Author,
- }
- return i, nil
-}
-
-// ImageID computes an ID which can uniquely identify this image by its contents.
-func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
- // The way m.Config.Digest “uniquely identifies” an image is
- // by containing RootFS.DiffIDs, which identify the layers of the image.
- // For non-image artifacts, we can’t expect the config to change
- // any time the other layers (semantically) change, so this approach of
- // distinguishing objects only by m.Config.Digest doesn’t work in general.
- //
- // Any caller of this method presumably wants to disambiguate the same
- // images with a different representation, but doesn’t want to disambiguate
- // representations (by using a manifest digest). So, submitting a non-image
- // artifact to such a caller indicates an expectation mismatch.
- // So, we just fail here instead of inventing some other ID value (e.g.
- // by combining the config and blob layer digests). That still
- // gives us the option to not fail, and return some value, in the future,
- // without committing to that approach now.
- // (The only known caller of ImageID is storage/storageImageDestination.computeID,
- // which can’t work with non-image artifacts.)
- if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
- return "", manifest.NewNonImageArtifactError(&m.Manifest)
- }
-
- if err := m.Config.Digest.Validate(); err != nil {
- return "", err
- }
- return m.Config.Digest.Encoded(), nil
-}
-
-// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
-// (and the code can handle that).
-// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
-// algorithms depends not on the current format, but possibly on the target of a conversion.
-func (m *OCI1) CanChangeLayerCompression(mimeType string) bool {
- if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
- return false
- }
- return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType)
-}
diff --git a/vendor/go.podman.io/image/v5/manifest/oci_index.go b/vendor/go.podman.io/image/v5/manifest/oci_index.go
deleted file mode 100644
index 84dae6070..000000000
--- a/vendor/go.podman.io/image/v5/manifest/oci_index.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package manifest
-
-import (
- imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
- "go.podman.io/image/v5/internal/manifest"
-)
-
-// OCI1Index is just an alias for the OCI index type, but one which we can
-// provide methods for.
-type OCI1Index = manifest.OCI1IndexPublic
-
-// OCI1IndexFromComponents creates an OCI1 image index instance from the
-// supplied data.
-func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
- return manifest.OCI1IndexPublicFromComponents(components, annotations)
-}
-
-// OCI1IndexClone creates a deep copy of the passed-in index.
-func OCI1IndexClone(index *OCI1Index) *OCI1Index {
- return manifest.OCI1IndexPublicClone(index)
-}
-
-// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
-// JSON, presumably generated by encoding an OCI1 manifest index.
-func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) {
- return manifest.OCI1IndexPublicFromManifest(manifestBlob)
-}
diff --git a/vendor/go.podman.io/image/v5/pkg/blobinfocache/none/none.go b/vendor/go.podman.io/image/v5/pkg/blobinfocache/none/none.go
deleted file mode 100644
index 88c9024fd..000000000
--- a/vendor/go.podman.io/image/v5/pkg/blobinfocache/none/none.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Package none implements a dummy BlobInfoCache which records no data.
-package none
-
-import (
- "github.com/opencontainers/go-digest"
- "go.podman.io/image/v5/internal/blobinfocache"
- "go.podman.io/image/v5/types"
-)
-
-// noCache implements a dummy BlobInfoCache which records no data.
-type noCache struct {
-}
-
-// NoCache implements BlobInfoCache by not recording any data.
-//
-// This exists primarily for implementations of configGetter for
-// Manifest.Inspect, because configs only have one representation.
-// Any use of BlobInfoCache with blobs should usually use at least a
-// short-lived cache, ideally blobinfocache.DefaultCache.
-var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{})
-
-// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
-// May return anyDigest if it is known to be uncompressed.
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
- return ""
-}
-
-// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
-// It’s allowed for anyDigest == uncompressed.
-// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
-}
-
-// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
-// Returns "" if the uncompressed digest is unknown.
-func (noCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
- return ""
-}
-
-// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
-// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (noCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { -} - -// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, -// and can be reused given the opaque location data. -func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { -} - -// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused -// within the specified (transport scope) (if they still exist, which is not guaranteed). -// -// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, -// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same -// uncompressed digest. -func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { - return nil -} diff --git a/vendor/go.podman.io/image/v5/pkg/compression/internal/types.go b/vendor/go.podman.io/image/v5/pkg/compression/internal/types.go deleted file mode 100644 index e715705b4..000000000 --- a/vendor/go.podman.io/image/v5/pkg/compression/internal/types.go +++ /dev/null @@ -1,80 +0,0 @@ -package internal - -import "io" - -// CompressorFunc writes the compressed stream to the given writer using the specified compression level. -// -// Compressing a stream may create integrity data that allows consuming the compressed byte stream -// while only using subsets of the compressed data (if the compressed data is seekable and most -// of the uncompressed data is already present via other means), while still protecting integrity -// of the compressed stream against unwanted modification. (In OCI container images, this metadata -// is usually carried in manifest annotations.) -// -// If the compression generates such metadata, it is written to the provided metadata map. -// -// The caller must call Close() on the stream (even if the input stream does not need closing!). -type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error) - -// DecompressorFunc returns the decompressed stream, given a compressed stream. -// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). -type DecompressorFunc func(io.Reader) (io.ReadCloser, error) - -// Algorithm is a compression algorithm that can be used for CompressStream. -type Algorithm struct { - name string - baseVariantName string - prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection. - decompressor DecompressorFunc - compressor CompressorFunc -} - -// NewAlgorithm creates an Algorithm instance. -// nontrivialBaseVariantName is typically "". -// This function exists so that Algorithm instances can only be created by code that -// is allowed to import this internal subpackage. 
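-// nontrivialBaseVariantName is non-empty for variants like "zstd:chunked", whose compressed data is also valid "zstd" data.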
-func NewAlgorithm(name, nontrivialBaseVariantName string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { - baseVariantName := name - if nontrivialBaseVariantName != "" { - baseVariantName = nontrivialBaseVariantName - } - return Algorithm{ - name: name, - baseVariantName: baseVariantName, - prefix: prefix, - decompressor: decompressor, - compressor: compressor, - } -} - -// Name returns the name for the compression algorithm. -func (c Algorithm) Name() string { - return c.name -} - -// BaseVariantName returns the name of the “base variant” of the compression algorithm. -// It is either equal to Name() of the same algorithm, or equal to Name() of some other Algorithm (the “base variant”). -// This supports a single level of “is-a” relationship between compression algorithms, e.g. where "zstd:chunked" data is valid "zstd" data. -func (c Algorithm) BaseVariantName() string { - return c.baseVariantName -} - -// AlgorithmCompressor returns the compressor field of algo. -// This is a function instead of a public method so that it is only callable by code -// that is allowed to import this internal subpackage. -func AlgorithmCompressor(algo Algorithm) CompressorFunc { - return algo.compressor -} - -// AlgorithmDecompressor returns the decompressor field of algo. -// This is a function instead of a public method so that it is only callable by code -// that is allowed to import this internal subpackage. -func AlgorithmDecompressor(algo Algorithm) DecompressorFunc { - return algo.decompressor -} - -// AlgorithmPrefix returns the prefix field of algo. -// This is a function instead of a public method so that it is only callable by code -// that is allowed to import this internal subpackage. -func AlgorithmPrefix(algo Algorithm) []byte { - return algo.prefix -} diff --git a/vendor/go.podman.io/image/v5/pkg/compression/types/types.go b/vendor/go.podman.io/image/v5/pkg/compression/types/types.go deleted file mode 100644 index 197122c7b..000000000 --- a/vendor/go.podman.io/image/v5/pkg/compression/types/types.go +++ /dev/null @@ -1,41 +0,0 @@ -package types - -import ( - "go.podman.io/image/v5/pkg/compression/internal" -) - -// DecompressorFunc returns the decompressed stream, given a compressed stream. -// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). -type DecompressorFunc = internal.DecompressorFunc - -// Algorithm is a compression algorithm provided and supported by pkg/compression. -// It can’t be supplied from the outside. -type Algorithm = internal.Algorithm - -const ( - // GzipAlgorithmName is the name used by pkg/compression.Gzip. - // NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm - // will actually be available. (In fact it is intended for this types package not to depend - // on any of the implementations.) - GzipAlgorithmName = "gzip" - // Bzip2AlgorithmName is the name used by pkg/compression.Bzip2. - // NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm - // will actually be available. (In fact it is intended for this types package not to depend - // on any of the implementations.) - Bzip2AlgorithmName = "bzip2" - // XzAlgorithmName is the name used by pkg/compression.Xz. - // NOTE: Importing only this /types package does not inherently guarantee a Xz algorithm - // will actually be available. (In fact it is intended for this types package not to depend - // on any of the implementations.) 
- XzAlgorithmName = "Xz" - // ZstdAlgorithmName is the name used by pkg/compression.Zstd. - // NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm - // will actually be available. (In fact it is intended for this types package not to depend - // on any of the implementations.) - ZstdAlgorithmName = "zstd" - // ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked. - // NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm - // will actually be available. (In fact it is intended for this types package not to depend - // on any of the implementations.) - ZstdChunkedAlgorithmName = "zstd:chunked" -) diff --git a/vendor/go.podman.io/image/v5/pkg/docker/config/config.go b/vendor/go.podman.io/image/v5/pkg/docker/config/config.go deleted file mode 100644 index 56d4eb916..000000000 --- a/vendor/go.podman.io/image/v5/pkg/docker/config/config.go +++ /dev/null @@ -1,950 +0,0 @@ -package config - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/fs" - "iter" - "maps" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - - helperclient "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/sirupsen/logrus" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/multierr" - "go.podman.io/image/v5/internal/set" - "go.podman.io/image/v5/pkg/sysregistriesv2" - "go.podman.io/image/v5/types" - "go.podman.io/storage/pkg/fileutils" - "go.podman.io/storage/pkg/homedir" - "go.podman.io/storage/pkg/ioutils" -) - -type dockerAuthConfig struct { - Auth string `json:"auth,omitempty"` - IdentityToken string `json:"identitytoken,omitempty"` -} - -type dockerConfigFile struct { - AuthConfigs map[string]dockerAuthConfig `json:"auths"` - CredHelpers map[string]string `json:"credHelpers,omitempty"` -} - -var ( - defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json") - xdgConfigHomePath = filepath.FromSlash("containers/auth.json") - xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json") - dockerHomePath = filepath.FromSlash(".docker/config.json") - dockerLegacyHomePath = ".dockercfg" - nonLinuxAuthFilePath = filepath.FromSlash(".config/containers/auth.json") - - // ErrNotLoggedIn is returned for users not logged into a registry - // that they are trying to logout of - ErrNotLoggedIn = errors.New("not logged in") - // ErrNotSupported is returned for unsupported methods - ErrNotSupported = errors.New("not supported") -) - -// authPath combines a path to a file with container registry credentials, -// along with expected properties of that path (currently just whether it's -// legacy format or not). -type authPath struct { - path string - legacyFormat bool -} - -// newAuthPathDefault constructs an authPath in non-legacy format. -func newAuthPathDefault(path string) authPath { - return authPath{path: path, legacyFormat: false} -} - -// GetAllCredentials returns the registry credentials for all registries stored -// in any of the configured credential helpers. -func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) { - // To keep things simple, let's first extract all registries from all - // possible sources, and then call `GetCredentials` on them. That - // prevents us from having to reverse engineer the logic in - // `GetCredentials`. 
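For context, a minimal sketch of how a consumer drove this API before the switch to the docker CLI auth config (assuming the go.podman.io/image/v5 module is still resolvable; illustrative only, not part of the patch):

package main

import (
	"fmt"

	"go.podman.io/image/v5/pkg/docker/config"
)

func main() {
	// A nil SystemContext selects the default auth-file search order
	// (XDG runtime dir, ~/.config/containers/auth.json, ~/.docker/config.json, ...).
	creds, err := config.GetAllCredentials(nil)
	if err != nil {
		panic(err)
	}
	for registry, auth := range creds {
		fmt.Printf("%s: username set: %v\n", registry, auth.Username != "")
	}
}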
- allKeys := set.New[string]() - - // To use GetCredentials, we must at least convert the URL forms into host names. - // While we're at it, we’ll also canonicalize docker.io to the standard format. - normalizedDockerIORegistry := normalizeRegistry("docker.io") - - helpers, err := sysregistriesv2.CredentialHelpers(sys) - if err != nil { - return nil, err - } - for _, helper := range helpers { - switch helper { - // Special-case the built-in helper for auth files. - case sysregistriesv2.AuthenticationFileHelper: - for _, path := range getAuthFilePaths(sys, homedir.Get()) { - // parse returns an empty map in case the path doesn't exist. - fileContents, err := path.parse() - if err != nil { - return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err) - } - // Credential helpers in the auth file have a - // direct mapping to a registry, so we can just - // walk the map. - allKeys.AddSeq(maps.Keys(fileContents.CredHelpers)) - for key := range fileContents.AuthConfigs { - key := normalizeAuthFileKey(key, path.legacyFormat) - if key == normalizedDockerIORegistry { - key = "docker.io" - } - allKeys.Add(key) - } - } - // External helpers. - default: - creds, err := listCredsInCredHelper(helper) - if err != nil { - logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err) - if errors.Is(err, exec.ErrNotFound) { - creds = nil // It's okay if the helper doesn't exist. - } else { - return nil, err - } - } - allKeys.AddSeq(maps.Keys(creds)) - } - } - - // Now use `GetCredentials` to the specific auth configs for each - // previously listed registry. - allCreds := make(map[string]types.DockerAuthConfig) - for key := range allKeys.All() { - creds, err := GetCredentials(sys, key) - if err != nil { - // Note: we rely on the logging in `GetCredentials`. - return nil, err - } - if creds != (types.DockerAuthConfig{}) { - allCreds[key] = creds - } - } - - return allCreds, nil -} - -// getAuthFilePaths returns a slice of authPaths based on the system context -// in the order they should be searched. Note that some paths may not exist. -// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden -// by tests. -func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath { - paths := []authPath{} - pathToAuth, userSpecifiedPath, err := getPathToAuth(sys) - if err == nil { - paths = append(paths, pathToAuth) - } else { - // Error means that the path set for XDG_RUNTIME_DIR does not exist - // but we don't want to completely fail in the case that the user is pulling a public image - // Logging the error as a warning instead and moving on to pulling the image - logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err) - } - if !userSpecifiedPath { - xdgCfgHome := os.Getenv("XDG_CONFIG_HOME") - if xdgCfgHome == "" { - xdgCfgHome = filepath.Join(homeDir, ".config") - } - paths = append(paths, newAuthPathDefault(filepath.Join(xdgCfgHome, xdgConfigHomePath))) - if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" { - paths = append(paths, newAuthPathDefault(filepath.Join(dockerConfig, "config.json"))) - } else { - paths = append(paths, - newAuthPathDefault(filepath.Join(homeDir, dockerHomePath)), - ) - } - paths = append(paths, - authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true}, - ) - } - return paths -} - -// GetCredentials returns the registry credentials matching key, appropriate for -// sys and the users’ configuration. 
-// If an entry is not found, an empty struct is returned. -// A valid key is a repository, a namespace within a registry, or a registry hostname. -// -// GetCredentialsForRef should almost always be used in favor of this API. -func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) { - return getCredentialsWithHomeDir(sys, key, homedir.Get()) -} - -// GetCredentialsForRef returns the registry credentials necessary for -// accessing ref on the registry ref points to, -// appropriate for sys and the users’ configuration. -// If an entry is not found, an empty struct is returned. -func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) { - return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get()) -} - -// getCredentialsWithHomeDir is an internal implementation detail of -// GetCredentialsForRef and GetCredentials. It exists only to allow testing it -// with an artificial home directory. -func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) { - _, err := validateKey(key) - if err != nil { - return types.DockerAuthConfig{}, err - } - - if sys != nil && sys.DockerAuthConfig != nil { - logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key) - return *sys.DockerAuthConfig, nil - } - - var registry string // We compute this once because it is used in several places. - if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 { - registry = key[:firstSlash] - } else { - registry = key - } - - // Anonymous function to query credentials from auth files. - getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) { - for _, path := range getAuthFilePaths(sys, homeDir) { - creds, err := findCredentialsInFile(key, registry, path) - if err != nil { - return types.DockerAuthConfig{}, "", err - } - - if creds != (types.DockerAuthConfig{}) { - return creds, path.path, nil - } - } - return types.DockerAuthConfig{}, "", nil - } - - helpers, err := sysregistriesv2.CredentialHelpers(sys) - if err != nil { - return types.DockerAuthConfig{}, err - } - - var multiErr []error - for _, helper := range helpers { - var ( - creds types.DockerAuthConfig - helperKey string - credHelperPath string - err error - ) - switch helper { - // Special-case the built-in helper for auth files. - case sysregistriesv2.AuthenticationFileHelper: - helperKey = key - creds, credHelperPath, err = getCredentialsFromAuthFiles() - // External helpers. - default: - // This intentionally uses "registry", not "key"; we don't support namespaced - // credentials in helpers, but a "registry" is a valid parent of "key". 
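The registry/key split relied on here, shown in isolation (a sketch; registryOf is a hypothetical helper name, not part of this file):

package main

import (
	"fmt"
	"strings"
)

// registryOf reduces a lookup key (repository, namespace, or bare host)
// to the registry host that credential helpers understand.
func registryOf(key string) string {
	if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 {
		return key[:firstSlash]
	}
	return key
}

func main() {
	fmt.Println(registryOf("quay.io/ns/repo")) // quay.io
	fmt.Println(registryOf("docker.io"))       // docker.io
}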
-			helperKey = registry
-			creds, err = getCredsFromCredHelper(helper, registry)
-		}
-		if err != nil {
-			logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
-			multiErr = append(multiErr, err)
-			continue
-		}
-		if creds != (types.DockerAuthConfig{}) {
-			msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper)
-			if credHelperPath != "" {
-				msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
-			}
-			logrus.Debug(msg)
-			return creds, nil
-		}
-	}
-	if multiErr != nil {
-		return types.DockerAuthConfig{}, multierr.Format("errors looking up credentials:\n\t* ", "\n\t* ", "\n", multiErr)
-	}
-
-	logrus.Debugf("No credentials for %s found", key)
-	return types.DockerAuthConfig{}, nil
-}
-
-// GetAuthentication returns the registry credentials matching key, appropriate for
-// sys and the users’ configuration.
-// If an entry is not found, an empty struct is returned.
-// A valid key is a repository, a namespace within a registry, or a registry hostname.
-//
-// Deprecated: This API only has support for username and password. To get the
-// support for oauth2 in container registry authentication, we added the new
-// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to
-// maintain backward compatibility.
-func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) {
-	return getAuthenticationWithHomeDir(sys, key, homedir.Get())
-}
-
-// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
-// it exists only to allow testing it with an artificial home directory.
-func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
-	creds, err := getCredentialsWithHomeDir(sys, key, homeDir)
-	if err != nil {
-		return "", "", err
-	}
-	if creds.IdentityToken != "" {
-		return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported)
-	}
-	return creds.Username, creds.Password, nil
-}
-
-// SetCredentials stores the username and password in a location
-// appropriate for sys and the users’ configuration.
-// A valid key is a repository, a namespace within a registry, or a registry hostname;
-// using forms other than just a registry may fail depending on configuration.
-// Returns a human-readable description of the location that was updated.
-// NOTE: The return value is only intended to be read by humans; its form is not an API,
-// it may change (or new forms can be added) any time.
-func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
-	helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true)
-	if err != nil {
-		return "", err
-	}
-
-	// Make sure to collect all errors.
-	var multiErr []error
-	for _, helper := range helpers {
-		var desc string
-		var err error
-		switch helper {
-		// Special-case the built-in helpers for auth files.
- case sysregistriesv2.AuthenticationFileHelper: - desc, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { - if ch, exists := fileContents.CredHelpers[key]; exists { - if isNamespaced { - return false, "", unsupportedNamespaceErr(ch) - } - desc, err := setCredsInCredHelper(ch, key, username, password) - if err != nil { - return false, "", err - } - return false, desc, nil - } - creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) - newCreds := dockerAuthConfig{Auth: creds} - fileContents.AuthConfigs[key] = newCreds - return true, "", nil - }) - // External helpers. - default: - if isNamespaced { - err = unsupportedNamespaceErr(helper) - } else { - desc, err = setCredsInCredHelper(helper, key, username, password) - } - } - if err != nil { - multiErr = append(multiErr, err) - logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err) - continue - } - logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper) - return desc, nil - } - return "", multierr.Format("Errors storing credentials\n\t* ", "\n\t* ", "\n", multiErr) -} - -func unsupportedNamespaceErr(helper string) error { - return fmt.Errorf("namespaced key is not supported for credential helper %s", helper) -} - -// SetAuthentication stores the username and password in the credential helper or file -// See the documentation of SetCredentials for format of "key" -func SetAuthentication(sys *types.SystemContext, key, username, password string) error { - _, err := SetCredentials(sys, key, username, password) - return err -} - -// RemoveAuthentication removes credentials for `key` from all possible -// sources such as credential helpers and auth files. -// A valid key is a repository, a namespace within a registry, or a registry hostname; -// using forms other than just a registry may fail depending on configuration. -func RemoveAuthentication(sys *types.SystemContext, key string) error { - helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true) - if err != nil { - return err - } - - isLoggedIn := false - - removeFromCredHelper := func(helper string) error { - if isNamespaced { - logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper) - return nil - } - err := deleteCredsFromCredHelper(helper, key) - if err == nil { - logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper) - isLoggedIn = true - return nil - } - if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { - logrus.Debugf("Not logged in to %s with credential helper %s", key, helper) - return nil - } - return fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err) - } - - var multiErr []error - for _, helper := range helpers { - var err error - switch helper { - // Special-case the built-in helper for auth files. - case sysregistriesv2.AuthenticationFileHelper: - _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { - var helperErr error - if innerHelper, exists := fileContents.CredHelpers[key]; exists { - helperErr = removeFromCredHelper(innerHelper) - } - if _, ok := fileContents.AuthConfigs[key]; ok { - isLoggedIn = true - delete(fileContents.AuthConfigs, key) - } - return true, "", helperErr - }) - if err != nil { - multiErr = append(multiErr, err) - } - // External helpers. 
-		default:
-			if err := removeFromCredHelper(helper); err != nil {
-				multiErr = append(multiErr, err)
-			}
-		}
-	}
-
-	if multiErr != nil {
-		return multierr.Format("errors removing credentials\n\t* ", "\n\t* ", "\n", multiErr)
-	}
-	if !isLoggedIn {
-		return ErrNotLoggedIn
-	}
-
-	return nil
-}
-
-// RemoveAllAuthentication deletes all the credentials stored in credential
-// helpers and auth files.
-func RemoveAllAuthentication(sys *types.SystemContext) error {
-	helpers, jsonEditor, _, _, err := prepareForEdit(sys, "", false)
-	if err != nil {
-		return err
-	}
-
-	var multiErr []error
-	for _, helper := range helpers {
-		var err error
-		switch helper {
-		// Special-case the built-in helper for auth files.
-		case sysregistriesv2.AuthenticationFileHelper:
-			_, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
-				for registry, helper := range fileContents.CredHelpers {
-					// Helpers in auth files are expected
-					// to exist, so no special treatment
-					// for them.
-					if err := deleteCredsFromCredHelper(helper, registry); err != nil {
-						return false, "", err
-					}
-				}
-				fileContents.CredHelpers = make(map[string]string)
-				fileContents.AuthConfigs = make(map[string]dockerAuthConfig)
-				return true, "", nil
-			})
-		// External helpers.
-		default:
-			var creds map[string]string
-			creds, err = listCredsInCredHelper(helper)
-			if err != nil {
-				if errors.Is(err, exec.ErrNotFound) {
-					// It's okay if the helper doesn't exist.
-					continue
-				} else {
-					break
-				}
-			}
-			for registry := range creds {
-				err = deleteCredsFromCredHelper(helper, registry)
-				if err != nil {
-					break
-				}
-			}
-		}
-		if err != nil {
-			logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err)
-			multiErr = append(multiErr, err)
-			continue
-		}
-		logrus.Debugf("All credentials removed from credential helper %s", helper)
-	}
-
-	if multiErr != nil {
-		return multierr.Format("errors removing all credentials:\n\t* ", "\n\t* ", "\n", multiErr)
-	}
-	return nil
-}
-
-// prepareForEdit processes sys and key (if keyRelevant) to return:
-// - a list of credential helpers
-// - a function which can be used to edit the JSON file
-// - the key value to actually use in credential helpers / JSON
-// - a boolean which is true if key is namespaced (and should not be used with credential helpers).
-func prepareForEdit(sys *types.SystemContext, key string, keyRelevant bool) ([]string, func(*types.SystemContext, func(*dockerConfigFile) (bool, string, error)) (string, error), string, bool, error) {
-	var isNamespaced bool
-	if keyRelevant {
-		ns, err := validateKey(key)
-		if err != nil {
-			return nil, nil, "", false, err
-		}
-		isNamespaced = ns
-	}
-
-	if sys != nil && sys.DockerCompatAuthFilePath != "" {
-		if sys.AuthFilePath != "" {
-			return nil, nil, "", false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously")
-		}
-		if keyRelevant {
-			if isNamespaced {
-				return nil, nil, "", false, fmt.Errorf("Credentials cannot be recorded in Docker-compatible format with namespaced key %q", key)
-			}
-			if key == "docker.io" {
-				key = "https://index.docker.io/v1/"
-			}
-		}
-
-		// Do not use helpers defined in sysregistriesv2 because Docker isn’t aware of them.
- return []string{sysregistriesv2.AuthenticationFileHelper}, modifyDockerConfigJSON, key, false, nil - } - - helpers, err := sysregistriesv2.CredentialHelpers(sys) - if err != nil { - return nil, nil, "", false, err - } - - return helpers, modifyJSON, key, isNamespaced, nil -} - -func listCredsInCredHelper(credHelper string) (map[string]string, error) { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - return helperclient.List(p) -} - -// getPathToAuth gets the path of the auth.json file used for reading and writing credentials, -// and a boolean indicating whether the return value came from an explicit user choice (i.e. not defaults) -func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) { - return getPathToAuthWithOS(sys, runtime.GOOS) -} - -// getPathToAuthWithOS is an internal implementation detail of getPathToAuth, -// it exists only to allow testing it with an artificial runtime.GOOS. -func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) { - if sys != nil { - if sys.AuthFilePath != "" && sys.DockerCompatAuthFilePath != "" { - return authPath{}, false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously") - } - if sys.AuthFilePath != "" { - return newAuthPathDefault(sys.AuthFilePath), true, nil - } - // When reading, we can process auth.json and Docker’s config.json with the same code. - // When writing, prepareForEdit chooses an appropriate jsonEditor implementation. - if sys.DockerCompatAuthFilePath != "" { - return newAuthPathDefault(sys.DockerCompatAuthFilePath), true, nil - } - if sys.LegacyFormatAuthFilePath != "" { - return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil - } - // Note: RootForImplicitAbsolutePaths should not affect paths starting with $HOME - if sys.RootForImplicitAbsolutePaths != "" && goOS == "linux" { - return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil - } - } - if goOS != "linux" { - return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil - } - - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - if runtimeDir != "" { - // This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway. - // We are checking for fs.ErrNotExist here only to give the user better guidance what to do in this special case. - err := fileutils.Exists(runtimeDir) - if errors.Is(err, fs.ErrNotExist) { - // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory - // or made a typo while setting the environment variable, - // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. - return authPath{}, false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err) - } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. - return newAuthPathDefault(filepath.Join(runtimeDir, xdgRuntimeDirPath)), false, nil - } - return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil -} - -// parse unmarshals the credentials stored in the auth.json file and returns it -// or returns an empty dockerConfigFile data structure if auth.json does not exist -// if the file exists and is empty, this function returns an error. 
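A self-contained sketch of the non-legacy on-disk shape that parse() below consumes, with the "auth" field decoded the way decodeDockerAuth later in this file does (the structs mirror the ones above; the sample credentials are made up):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

type dockerAuthConfig struct {
	Auth          string `json:"auth,omitempty"`
	IdentityToken string `json:"identitytoken,omitempty"`
}

type dockerConfigFile struct {
	AuthConfigs map[string]dockerAuthConfig `json:"auths"`
	CredHelpers map[string]string           `json:"credHelpers,omitempty"`
}

func main() {
	raw := []byte(`{
  "auths": {"quay.io": {"auth": "dXNlcjpwYXNz"}},
  "credHelpers": {"gcr.io": "gcloud"}
}`)
	var f dockerConfigFile
	if err := json.Unmarshal(raw, &f); err != nil {
		panic(err)
	}
	decoded, err := base64.StdEncoding.DecodeString(f.AuthConfigs["quay.io"].Auth)
	if err != nil {
		panic(err)
	}
	user, pass, _ := strings.Cut(string(decoded), ":") // "user", "pass"
	fmt.Println(user, pass, f.CredHelpers["gcr.io"])
}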
-func (path authPath) parse() (dockerConfigFile, error) { - var fileContents dockerConfigFile - - raw, err := os.ReadFile(path.path) - if err != nil { - if os.IsNotExist(err) { - fileContents.AuthConfigs = map[string]dockerAuthConfig{} - return fileContents, nil - } - return dockerConfigFile{}, err - } - - if path.legacyFormat { - if err = json.Unmarshal(raw, &fileContents.AuthConfigs); err != nil { - return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) - } - return fileContents, nil - } - - if err = json.Unmarshal(raw, &fileContents); err != nil { - return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) - } - - if fileContents.AuthConfigs == nil { - fileContents.AuthConfigs = map[string]dockerAuthConfig{} - } - if fileContents.CredHelpers == nil { - fileContents.CredHelpers = make(map[string]string) - } - - return fileContents, nil -} - -// modifyJSON finds an auth.json file, calls editor on the contents, and -// writes it back if editor returns true. -// Returns a human-readable description of the file, to be returned by SetCredentials. -// -// The editor may also return a human-readable description of the updated location; if it is "", -// the file itself is used. -func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) { - path, _, err := getPathToAuth(sys) - if err != nil { - return "", err - } - if path.legacyFormat { - return "", fmt.Errorf("writes to %s using legacy format are not supported", path.path) - } - - dir := filepath.Dir(path.path) - if err = os.MkdirAll(dir, 0700); err != nil { - return "", err - } - - fileContents, err := path.parse() - if err != nil { - return "", fmt.Errorf("reading JSON file %q: %w", path.path, err) - } - - updated, description, err := editor(&fileContents) - if err != nil { - return "", fmt.Errorf("updating %q: %w", path.path, err) - } - if updated { - newData, err := json.MarshalIndent(fileContents, "", "\t") - if err != nil { - return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err) - } - - if err = ioutils.AtomicWriteFile(path.path, newData, 0600); err != nil { - return "", fmt.Errorf("writing to file %q: %w", path.path, err) - } - } - - if description == "" { - description = path.path - } - return description, nil -} - -// modifyDockerConfigJSON finds a docker config.json file, calls editor on the contents, and -// writes it back if editor returns true. -// Returns a human-readable description of the file, to be returned by SetCredentials. -// -// The editor may also return a human-readable description of the updated location; if it is "", -// the file itself is used. -func modifyDockerConfigJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) { - if sys == nil || sys.DockerCompatAuthFilePath == "" { - return "", errors.New("internal error: modifyDockerConfigJSON called with DockerCompatAuthFilePath not set") - } - path := sys.DockerCompatAuthFilePath - - dir := filepath.Dir(path) - if err := os.MkdirAll(dir, 0700); err != nil { - return "", err - } - - // Try hard not to clobber fields we don’t understand, even fields which may be added in future Docker versions. 
- var rawContents map[string]json.RawMessage - originalBytes, err := os.ReadFile(path) - switch { - case err == nil: - if err := json.Unmarshal(originalBytes, &rawContents); err != nil { - return "", fmt.Errorf("unmarshaling JSON at %q: %w", path, err) - } - case errors.Is(err, fs.ErrNotExist): - rawContents = map[string]json.RawMessage{} - default: // err != nil - return "", err - } - - syntheticContents := dockerConfigFile{ - AuthConfigs: map[string]dockerAuthConfig{}, - CredHelpers: map[string]string{}, - } - // json.Unmarshal also falls back to case-insensitive field matching; this code does not do that. Presumably - // config.json is mostly maintained by machines doing `docker login`, so the files should, hopefully, not contain field names with - // unexpected case. - if rawAuths, ok := rawContents["auths"]; ok { - // This conversion will lose fields we don’t know about; when updating an entry, we can’t tell whether an unknown field - // should be preserved or discarded (because it is made obsolete/unwanted with the new credentials). - // It might make sense to track which entries of "auths" we actually modified, and to not touch any others. - if err := json.Unmarshal(rawAuths, &syntheticContents.AuthConfigs); err != nil { - return "", fmt.Errorf(`unmarshaling "auths" in JSON at %q: %w`, path, err) - } - } - if rawCH, ok := rawContents["credHelpers"]; ok { - if err := json.Unmarshal(rawCH, &syntheticContents.CredHelpers); err != nil { - return "", fmt.Errorf(`unmarshaling "credHelpers" in JSON at %q: %w`, path, err) - - } - } - - updated, description, err := editor(&syntheticContents) - if err != nil { - return "", fmt.Errorf("updating %q: %w", path, err) - } - if updated { - rawAuths, err := json.MarshalIndent(syntheticContents.AuthConfigs, "", "\t") - if err != nil { - return "", fmt.Errorf("marshaling JSON %q: %w", path, err) - } - rawContents["auths"] = rawAuths - // We never modify syntheticContents.CredHelpers, so we don’t need to update it. - newData, err := json.MarshalIndent(rawContents, "", "\t") - if err != nil { - return "", fmt.Errorf("marshaling JSON %q: %w", path, err) - } - - if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil { - return "", fmt.Errorf("writing to file %q: %w", path, err) - } - } - - if description == "" { - description = path - } - return description, nil -} - -func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds, err := helperclient.Get(p, registry) - if err != nil { - if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { - logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper) - err = nil - } - return types.DockerAuthConfig{}, err - } - - switch creds.Username { - case "": - return types.DockerAuthConfig{ - IdentityToken: creds.Secret, - }, nil - default: - return types.DockerAuthConfig{ - Username: creds.Username, - Password: creds.Secret, - }, nil - } -} - -// setCredsInCredHelper stores (username, password) for registry in credHelper. -// Returns a human-readable description of the destination, to be returned by SetCredentials. 
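For reference, how the docker-credential-* helper binaries are driven through github.com/docker/docker-credential-helpers/client, matching the get/store/erase wrappers in this file (a sketch; it assumes a docker-credential-pass binary on $PATH):

package main

import (
	"fmt"

	helperclient "github.com/docker/docker-credential-helpers/client"
)

func main() {
	// Helper binaries are resolved by name: "pass" -> docker-credential-pass.
	p := helperclient.NewShellProgramFunc("docker-credential-pass")
	creds, err := helperclient.Get(p, "quay.io")
	if err != nil {
		panic(err) // a cache miss surfaces as a "credentials not found" error
	}
	// By the convention used above, an empty Username means Secret is an identity token.
	fmt.Println(creds.Username, creds.Secret != "")
}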
-func setCredsInCredHelper(credHelper, registry, username, password string) (string, error) { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds := &credentials.Credentials{ - ServerURL: registry, - Username: username, - Secret: password, - } - if err := helperclient.Store(p, creds); err != nil { - return "", err - } - return fmt.Sprintf("credential helper: %s", credHelper), nil -} - -func deleteCredsFromCredHelper(credHelper, registry string) error { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - return helperclient.Erase(p, registry) -} - -// findCredentialsInFile looks for credentials matching "key" -// (which is "registry" or a namespace in "registry") in "path". -func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) { - fileContents, err := path.parse() - if err != nil { - return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err) - } - - // First try cred helpers. They should always be normalized. - // This intentionally uses "registry", not "key"; we don't support namespaced - // credentials in helpers. - if ch, exists := fileContents.CredHelpers[registry]; exists { - logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path) - return getCredsFromCredHelper(ch, registry) - } - - // Support sub-registry namespaces in auth. - // (This is not a feature of ~/.docker/config.json; we support it even for - // those files as an extension.) - // - // Repo or namespace keys are only supported as exact matches. For registry - // keys we prefer exact matches as well. - for key := range authKeyLookupOrder(key, registry, path.legacyFormat) { - if val, exists := fileContents.AuthConfigs[key]; exists { - return decodeDockerAuth(path.path, key, val) - } - } - - // bad luck; let's normalize the entries first - // This primarily happens for legacyFormat, which for a time used API URLs - // (http[s:]//…/v1/) as keys. - // Secondarily, (docker login) accepted URLs with no normalization for - // several years, and matched registry hostnames against that, so support - // those entries even in non-legacyFormat ~/.docker/config.json. - // The docker.io registry still uses the /v1/ key with a special host name, - // so account for that as well. - registry = normalizeRegistry(registry) - for k, v := range fileContents.AuthConfigs { - if normalizeAuthFileKey(k, path.legacyFormat) == registry { - return decodeDockerAuth(path.path, k, v) - } - } - - // Only log this if we found nothing; getCredentialsWithHomeDir logs the - // source of found data. - logrus.Debugf("No credentials matching %s found in %s", key, path.path) - return types.DockerAuthConfig{}, nil -} - -// authKeyLookupOrder returns a sequence for lookup keys matching (key or registry) -// in file with legacyFormat, in order from the best match to worst. 
-// For example, in a non-legacy file, -// when given a repository key "quay.io/repo/ns/image", it returns -// - quay.io/repo/ns/image -// - quay.io/repo/ns -// - quay.io/repo -// - quay.io -func authKeyLookupOrder(key, registry string, legacyFormat bool) iter.Seq[string] { - return func(yield func(string) bool) { - if legacyFormat { - _ = yield(registry) // We stop in any case - return - } - - for { - if !yield(key) { - return - } - - lastSlash := strings.LastIndex(key, "/") - if lastSlash == -1 { - break - } - key = key[:lastSlash] - } - } -} - -// decodeDockerAuth decodes the username and password from conf, -// which is entry key in path. -func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) { - decoded, err := base64.StdEncoding.DecodeString(conf.Auth) - if err != nil { - return types.DockerAuthConfig{}, err - } - - user, passwordPart, valid := strings.Cut(string(decoded), ":") - if !valid { - // if it's invalid just skip, as docker does - if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those - logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing semicolon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log. - } else { - logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path) - } - return types.DockerAuthConfig{}, nil - } - - password := strings.Trim(passwordPart, "\x00") - return types.DockerAuthConfig{ - Username: user, - Password: password, - IdentityToken: conf.IdentityToken, - }, nil -} - -// normalizeAuthFileKey takes a key, converts it to a host name and normalizes -// the resulting registry. -func normalizeAuthFileKey(key string, legacyFormat bool) string { - stripped := strings.TrimPrefix(key, "http://") - stripped = strings.TrimPrefix(stripped, "https://") - - if legacyFormat || stripped != key { - stripped, _, _ = strings.Cut(stripped, "/") - } - - return normalizeRegistry(stripped) -} - -// normalizeRegistry converts the provided registry if a known docker.io host -// is provided. -func normalizeRegistry(registry string) string { - switch registry { - case "registry-1.docker.io", "docker.io": - return "index.docker.io" - } - return registry -} - -// validateKey verifies that the input key does not have a prefix that is not -// allowed and returns an indicator if the key is namespaced. -func validateKey(key string) (bool, error) { - if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") { - return false, fmt.Errorf("key %s contains http[s]:// prefix", key) - } - - // Ideally this should only accept explicitly valid keys, compare - // validateIdentityRemappingPrefix. For now, just reject values that look - // like tagged or digested values. - if strings.ContainsRune(key, '@') { - return false, fmt.Errorf(`key %s contains a '@' character`, key) - } - - firstSlash := strings.IndexRune(key, '/') - isNamespaced := firstSlash != -1 - // Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo. - if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') { - return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key) - } - // check if the provided key contains one or more subpaths. 
- return isNamespaced, nil -} diff --git a/vendor/go.podman.io/image/v5/pkg/strslice/README.md b/vendor/go.podman.io/image/v5/pkg/strslice/README.md deleted file mode 100644 index ae6097e82..000000000 --- a/vendor/go.podman.io/image/v5/pkg/strslice/README.md +++ /dev/null @@ -1 +0,0 @@ -This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice). diff --git a/vendor/go.podman.io/image/v5/pkg/strslice/strslice.go b/vendor/go.podman.io/image/v5/pkg/strslice/strslice.go deleted file mode 100644 index bad493fb8..000000000 --- a/vendor/go.podman.io/image/v5/pkg/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_common.go b/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_common.go deleted file mode 100644 index c9e8ac5cb..000000000 --- a/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_common.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !freebsd - -package sysregistriesv2 - -// builtinRegistriesConfPath is the path to the registry configuration file. -// DO NOT change this, instead see systemRegistriesConfPath above. -const builtinRegistriesConfPath = "/etc/containers/registries.conf" - -// builtinRegistriesConfDirPath is the path to the registry configuration directory. -// DO NOT change this, instead see systemRegistriesConfDirectoryPath above. -const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d" diff --git a/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_freebsd.go b/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_freebsd.go deleted file mode 100644 index 7dada4b77..000000000 --- a/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/paths_freebsd.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build freebsd - -package sysregistriesv2 - -// builtinRegistriesConfPath is the path to the registry configuration file. -// DO NOT change this, instead see systemRegistriesConfPath above. -const builtinRegistriesConfPath = "/usr/local/etc/containers/registries.conf" - -// builtinRegistriesConfDirPath is the path to the registry configuration directory. -// DO NOT change this, instead see systemRegistriesConfDirectoryPath above. 
-const builtinRegistriesConfDirPath = "/usr/local/etc/containers/registries.conf.d" diff --git a/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/shortnames.go deleted file mode 100644 index 8c72ce7ff..000000000 --- a/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/shortnames.go +++ /dev/null @@ -1,353 +0,0 @@ -package sysregistriesv2 - -import ( - "fmt" - "maps" - "os" - "path/filepath" - "reflect" - "strings" - - "github.com/BurntSushi/toml" - "github.com/sirupsen/logrus" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/internal/multierr" - "go.podman.io/image/v5/internal/rootless" - "go.podman.io/image/v5/types" - "go.podman.io/storage/pkg/homedir" - "go.podman.io/storage/pkg/lockfile" -) - -// defaultShortNameMode is the default mode of registries.conf files if the -// corresponding field is left empty. -const defaultShortNameMode = types.ShortNameModePermissive - -// userShortNamesFile is the user-specific config file to store aliases. -var userShortNamesFile = filepath.FromSlash("containers/short-name-aliases.conf") - -// shortNameAliasesConfPath returns the path to the machine-generated -// short-name-aliases.conf file. -func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) { - if ctx != nil && len(ctx.UserShortNameAliasConfPath) > 0 { - return ctx.UserShortNameAliasConfPath, nil - } - - if rootless.GetRootlessEUID() == 0 { - // Root user or in a non-conforming user NS - return filepath.Join("/var/cache", userShortNamesFile), nil - } - - // Rootless user - cacheRoot, err := homedir.GetCacheHome() - if err != nil { - return "", err - } - - return filepath.Join(cacheRoot, userShortNamesFile), nil -} - -// shortNameAliasConf is a subset of the `V2RegistriesConf` format. It's used in the -// software-maintained `userShortNamesFile`. -type shortNameAliasConf struct { - // A map for aliasing short names to their fully-qualified image - // reference counter parts. - // Note that Aliases is niled after being loaded from a file. - Aliases map[string]string `toml:"aliases"` - - // If you add any field, make sure to update nonempty() below. -} - -// nonempty returns true if config contains at least one configuration entry. -func (c *shortNameAliasConf) nonempty() bool { - copy := *c // A shallow copy - if copy.Aliases != nil && len(copy.Aliases) == 0 { - copy.Aliases = nil - } - return !reflect.DeepEqual(copy, shortNameAliasConf{}) -} - -// alias combines the parsed value of an alias with the config file it has been -// specified in. The config file is crucial for an improved user experience -// such that users are able to resolve potential pull errors. -type alias struct { - // The parsed value of an alias. May be nil if set to "" in a config. - value reference.Named - // The config file the alias originates from. - configOrigin string -} - -// shortNameAliasCache is the result of parsing shortNameAliasConf, -// pre-processed for faster usage. -type shortNameAliasCache struct { - // Note that an alias value may be nil iff it's set as an empty string - // in the config. - namedAliases map[string]alias -} - -// ResolveShortNameAlias performs an alias resolution of the specified name. -// The user-specific short-name-aliases.conf has precedence over aliases in the -// assembled registries.conf. It returns the possibly resolved alias or nil, a -// human-readable description of the config where the alias is specified, and -// an error. 
The origin of the config file is crucial for an improved user -// experience such that users are able to resolve potential pull errors. -// Almost all callers should use pkg/shortnames instead. -// -// Note that it’s the caller’s responsibility to pass only a repository -// (reference.IsNameOnly) as the short name. -func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Named, string, error) { - if err := validateShortName(name); err != nil { - return nil, "", err - } - confPath, lock, err := shortNameAliasesConfPathAndLock(ctx) - if err != nil { - return nil, "", err - } - - // Acquire the lock as a reader to allow for multiple routines in the - // same process space to read simultaneously. - lock.RLock() - defer lock.Unlock() - - _, aliasCache, err := loadShortNameAliasConf(confPath) - if err != nil { - return nil, "", err - } - - // First look up the short-name-aliases.conf. Note that a value may be - // nil iff it's set as an empty string in the config. - alias, resolved := aliasCache.namedAliases[name] - if resolved { - return alias.value, alias.configOrigin, nil - } - - config, err := getConfig(ctx) - if err != nil { - return nil, "", err - } - alias, resolved = config.aliasCache.namedAliases[name] - if resolved { - return alias.value, alias.configOrigin, nil - } - return nil, "", nil -} - -// editShortNameAlias loads the aliases.conf file and changes it. If value is -// set, it adds the name-value pair as a new alias. Otherwise, it will remove -// name from the config. -func editShortNameAlias(ctx *types.SystemContext, name string, value *string) (retErr error) { - if err := validateShortName(name); err != nil { - return err - } - if value != nil { - if _, err := parseShortNameValue(*value); err != nil { - return err - } - } - - confPath, lock, err := shortNameAliasesConfPathAndLock(ctx) - if err != nil { - return err - } - - // Acquire the lock as a writer to prevent data corruption. - lock.Lock() - defer lock.Unlock() - - // Load the short-name-alias.conf, add the specified name-value pair, - // and write it back to the file. - conf, _, err := loadShortNameAliasConf(confPath) - if err != nil { - return err - } - - if conf.Aliases == nil { // Ensure we have a map to update. - conf.Aliases = make(map[string]string) - } - if value != nil { - conf.Aliases[name] = *value - } else { - // If the name does not exist, throw an error. - if _, exists := conf.Aliases[name]; !exists { - return fmt.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath) - } - - delete(conf.Aliases, name) - } - - f, err := os.OpenFile(confPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - // since we are writing to this file, make sure we handle err on Close() - defer func() { - closeErr := f.Close() - if retErr == nil { - retErr = closeErr - } - }() - - encoder := toml.NewEncoder(f) - return encoder.Encode(conf) -} - -// AddShortNameAlias adds the specified name-value pair as a new alias to the -// user-specific aliases.conf. It may override an existing alias for `name`. -// -// Note that it’s the caller’s responsibility to pass only a repository -// (reference.IsNameOnly) as the short name. -func AddShortNameAlias(ctx *types.SystemContext, name string, value string) error { - return editShortNameAlias(ctx, name, &value) -} - -// RemoveShortNameAlias clears the alias for the specified name. It throws an -// error in case name does not exist in the machine-generated -// short-name-alias.conf. 
In such case, the alias must be specified in one of -// the registries.conf files, which is the users' responsibility. -// -// Note that it’s the caller’s responsibility to pass only a repository -// (reference.IsNameOnly) as the short name. -func RemoveShortNameAlias(ctx *types.SystemContext, name string) error { - return editShortNameAlias(ctx, name, nil) -} - -// parseShortNameValue parses the specified alias into a reference.Named. The alias is -// expected to not be tagged or carry a digest and *must* include a -// domain/registry. -// -// Note that the returned reference is always normalized. -func parseShortNameValue(alias string) (reference.Named, error) { - ref, err := reference.Parse(alias) - if err != nil { - return nil, fmt.Errorf("parsing alias %q: %w", alias, err) - } - - if _, ok := ref.(reference.Digested); ok { - return nil, fmt.Errorf("invalid alias %q: must not contain digest", alias) - } - - if _, ok := ref.(reference.Tagged); ok { - return nil, fmt.Errorf("invalid alias %q: must not contain tag", alias) - } - - named, ok := ref.(reference.Named) - if !ok { - return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias) - } - - registry := reference.Domain(named) - if !strings.ContainsAny(registry, ".:") && registry != "localhost" { - return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias) - } - - // A final parse to make sure that docker.io references are correctly - // normalized (e.g., docker.io/alpine to docker.io/library/alpine. - named, err = reference.ParseNormalizedNamed(alias) - return named, err -} - -// validateShortName parses the specified `name` of an alias (i.e., the left-hand -// side) and checks if it's a short name and does not include a tag or digest. -func validateShortName(name string) error { - repo, err := reference.Parse(name) - if err != nil { - return fmt.Errorf("cannot parse short name: %q: %w", name, err) - } - - if _, ok := repo.(reference.Digested); ok { - return fmt.Errorf("invalid short name %q: must not contain digest", name) - } - - if _, ok := repo.(reference.Tagged); ok { - return fmt.Errorf("invalid short name %q: must not contain tag", name) - } - - named, ok := repo.(reference.Named) - if !ok { - return fmt.Errorf("invalid short name %q: no name", name) - } - - registry := reference.Domain(named) - if strings.ContainsAny(registry, ".:") || registry == "localhost" { - return fmt.Errorf("invalid short name %q: must not contain registry", name) - } - return nil -} - -// newShortNameAliasCache parses shortNameAliasConf and returns the corresponding internal -// representation. -func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAliasCache, error) { - res := shortNameAliasCache{ - namedAliases: make(map[string]alias), - } - errs := []error{} - for name, value := range conf.Aliases { - if err := validateShortName(name); err != nil { - errs = append(errs, err) - } - - // Empty right-hand side values in config files allow to reset - // an alias in a previously loaded config. This way, drop-in - // config files from registries.conf.d can reset potentially - // malconfigured aliases. - if value == "" { - res.namedAliases[name] = alias{nil, path} - continue - } - - named, err := parseShortNameValue(value) - if err != nil { - // We want to report *all* malformed entries to avoid a - // whack-a-mole for the user. 
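For orientation, the TOML shape that shortNameAliasConf decodes, exercised with github.com/BurntSushi/toml as the file above does (a sketch with a made-up alias):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type shortNameAliasConf struct {
	Aliases map[string]string `toml:"aliases"`
}

func main() {
	var conf shortNameAliasConf
	if _, err := toml.Decode(`
[aliases]
"alpine" = "docker.io/library/alpine"
`, &conf); err != nil {
		panic(err)
	}
	fmt.Println(conf.Aliases["alpine"]) // docker.io/library/alpine
}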
- errs = append(errs, err) - } else { - res.namedAliases[name] = alias{named, path} - } - } - if len(errs) > 0 { - return nil, multierr.Format("", "\n", "", errs) - } - return &res, nil -} - -// updateWithConfigurationFrom updates c with configuration from updates. -// In case of conflict, updates is preferred. -func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) { - maps.Copy(c.namedAliases, updates.namedAliases) -} - -func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) { - conf := shortNameAliasConf{} - - meta, err := toml.DecodeFile(confPath, &conf) - if err != nil && !os.IsNotExist(err) { - // It's okay if the config doesn't exist. Other errors are not. - return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err) - } - if keys := meta.Undecoded(); len(keys) > 0 { - logrus.Debugf("Failed to decode keys %q from %q", keys, confPath) - } - - // Even if we don’t always need the cache, doing so validates the machine-generated config. The - // file could still be corrupted by another process or user. - cache, err := newShortNameAliasCache(confPath, &conf) - if err != nil { - return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err) - } - - return &conf, cache, nil -} - -func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, *lockfile.LockFile, error) { - shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx) - if err != nil { - return "", nil, err - } - // Make sure the path to file exists. - if err := os.MkdirAll(filepath.Dir(shortNameAliasesConfPath), 0700); err != nil { - return "", nil, err - } - - lockPath := shortNameAliasesConfPath + ".lock" - locker, err := lockfile.GetLockFile(lockPath) - return shortNameAliasesConfPath, locker, err -} diff --git a/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/system_registries_v2.go deleted file mode 100644 index 1a1fcccf8..000000000 --- a/vendor/go.podman.io/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ /dev/null @@ -1,1083 +0,0 @@ -package sysregistriesv2 - -import ( - "errors" - "fmt" - "io/fs" - "maps" - "os" - "path/filepath" - "reflect" - "slices" - "sort" - "strings" - "sync" - - "github.com/BurntSushi/toml" - "github.com/sirupsen/logrus" - "go.podman.io/image/v5/docker/reference" - "go.podman.io/image/v5/types" - "go.podman.io/storage/pkg/fileutils" - "go.podman.io/storage/pkg/homedir" - "go.podman.io/storage/pkg/regexp" -) - -// systemRegistriesConfPath is the path to the system-wide registry -// configuration file and is used to add/subtract potential registries for -// obtaining images. You can override this at build time with -// -ldflags '-X go.podman.io/image/v5/sysregistries.systemRegistriesConfPath=$your_path' -var systemRegistriesConfPath = builtinRegistriesConfPath - -// systemRegistriesConfDirPath is the path to the system-wide registry -// configuration directory and is used to add/subtract potential registries for -// obtaining images. You can override this at build time with -// -ldflags '-X go.podman.io/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path' -var systemRegistriesConfDirPath = builtinRegistriesConfDirPath - -// AuthenticationFileHelper is a special key for credential helpers indicating -// the usage of consulting containers-auth.json files instead of a credential -// helper. 
-const AuthenticationFileHelper = "containers-auth.json" - -const ( - // configuration values for "pull-from-mirror" - // mirrors will be used for both digest pulls and tag pulls - MirrorAll = "all" - // mirrors will only be used for digest pulls - MirrorByDigestOnly = "digest-only" - // mirrors will only be used for tag pulls - MirrorByTagOnly = "tag-only" -) - -// Endpoint describes a remote location of a registry. -type Endpoint struct { - // The endpoint's remote location. Can be empty iff Prefix contains - // wildcard in the format: "*.example.com" for subdomain matching. - // Please refer to FindRegistry / PullSourcesFromReference instead - // of accessing/interpreting `Location` directly. - Location string `toml:"location,omitempty"` - // If true, certs verification will be skipped and HTTP (non-TLS) - // connections will be allowed. - Insecure bool `toml:"insecure,omitempty"` - // PullFromMirror is used for adding restrictions to image pull through the mirror. - // Set to "all", "digest-only", or "tag-only". - // If "digest-only", mirrors will only be used for digest pulls. Pulling images by - // tag can potentially yield different images, depending on which endpoint - // we pull from. Restricting mirrors to pulls by digest avoids that issue. - // If "tag-only", mirrors will only be used for tag pulls. For a more up-to-date and expensive mirror - // that it is less likely to be out of sync if tags move, it should not be unnecessarily - // used for digest references. - // Default is "all" (or left empty), mirrors will be used for both digest pulls and tag pulls unless the mirror-by-digest-only is set for the primary registry. - // This can only be set in a registry's Mirror field, not in the registry's primary Endpoint. - // This per-mirror setting is allowed only when mirror-by-digest-only is not configured for the primary registry. - PullFromMirror string `toml:"pull-from-mirror,omitempty"` -} - -// userRegistriesFile is the path to the per user registry configuration file. -var userRegistriesFile = filepath.FromSlash(".config/containers/registries.conf") - -// userRegistriesDir is the path to the per user registry configuration file. -var userRegistriesDir = filepath.FromSlash(".config/containers/registries.conf.d") - -// rewriteReference will substitute the provided reference `prefix` to the -// endpoints `location` from the `ref` and creates a new named reference from it. -// The function errors if the newly created reference is not parsable. -func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) { - refString := ref.String() - var newNamedRef string - // refMatchingPrefix returns the length of the match. Everything that - // follows the match gets appended to registries location. - prefixLen := refMatchingPrefix(refString, prefix) - if prefixLen == -1 { - return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString) - } - // In the case of an empty `location` field, simply return the original - // input ref as-is. - // - // FIXME: already validated in postProcessRegistries, so check can probably - // be dropped. 
-	// https://github.com/containers/image/pull/1191#discussion_r610621608
-	if e.Location == "" {
-		if !strings.HasPrefix(prefix, "*.") {
-			return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix)
-		}
-		return ref, nil
-	}
-	newNamedRef = e.Location + refString[prefixLen:]
-	newParsedRef, err := reference.ParseNamed(newNamedRef)
-	if err != nil {
-		return nil, fmt.Errorf("rewriting reference: %w", err)
-	}
-
-	return newParsedRef, nil
-}
-
-// Registry represents a registry.
-type Registry struct {
-	// Prefix is used for matching images, and to translate one namespace to
-	// another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
-	// and we pull from "example.com/bar/myimage:latest", the image will
-	// effectively be pulled from "example.com/foo/bar/myimage:latest".
-	// If no Prefix is specified, it defaults to the specified location.
-	// Prefix can also be in the format: "*.example.com" for matching
-	// subdomains. The wildcard should only be at the beginning and should also
-	// not contain any namespaces or special characters: "/", "@" or ":".
-	// Please refer to FindRegistry / PullSourcesFromReference instead
-	// of accessing/interpreting `Prefix` directly.
-	Prefix string `toml:"prefix"`
-	// A registry is an Endpoint too
-	Endpoint
-	// The registry's mirrors.
-	Mirrors []Endpoint `toml:"mirror,omitempty"`
-	// If true, pulling from the registry will be blocked.
-	Blocked bool `toml:"blocked,omitempty"`
-	// If true, mirrors will only be used for digest pulls. Pulling images by
-	// tag can potentially yield different images, depending on which endpoint
-	// we pull from. Restricting mirrors to pulls by digest avoids that issue.
-	MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"`
-}
-
-// PullSource consists of an Endpoint and a Reference. Note that the reference is
-// rewritten according to the registry's prefix and the Endpoint's location.
-type PullSource struct {
-	Endpoint  Endpoint
-	Reference reference.Named
-}
-
-// PullSourcesFromReference returns a slice of PullSources based on the passed
-// reference.
-func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
-	var endpoints []Endpoint
-	_, isDigested := ref.(reference.Canonical)
-	if r.MirrorByDigestOnly {
-		// Only use mirrors when the reference is a digested one.
-		if isDigested {
-			endpoints = append(endpoints, r.Mirrors...)
-		}
-	} else {
-		for _, mirror := range r.Mirrors {
-			// skip the mirror if a per-mirror setting exists but the reference does not match the restriction
-			switch mirror.PullFromMirror {
-			case MirrorByDigestOnly:
-				if !isDigested {
-					continue
-				}
-			case MirrorByTagOnly:
-				if isDigested {
-					continue
-				}
-			}
-			endpoints = append(endpoints, mirror)
-		}
-	}
-	endpoints = append(endpoints, r.Endpoint)
-
-	sources := []PullSource{}
-	for _, ep := range endpoints {
-		rewritten, err := ep.rewriteReference(ref, r.Prefix)
-		if err != nil {
-			return nil, err
-		}
-		sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten})
-	}
-
-	return sources, nil
-}
-
-// V1TOMLregistries is for backwards compatibility with sysregistries v1
-type V1TOMLregistries struct {
-	Registries []string `toml:"registries"`
-}
-
-// V1TOMLConfig is for backwards compatibility with sysregistries v1
-type V1TOMLConfig struct {
-	Search   V1TOMLregistries `toml:"search"`
-	Insecure V1TOMLregistries `toml:"insecure"`
-	Block    V1TOMLregistries `toml:"block"`
-}
-
-// V1RegistriesConf is the sysregistries v1 configuration format.
-type V1RegistriesConf struct {
-	V1TOMLConfig `toml:"registries"`
-}
-
-// Nonempty returns true if config contains at least one configuration entry.
-// Empty arrays are treated as missing entries.
-func (config *V1RegistriesConf) Nonempty() bool {
-	copy := *config // A shallow copy
-	if copy.V1TOMLConfig.Search.Registries != nil && len(copy.V1TOMLConfig.Search.Registries) == 0 {
-		copy.V1TOMLConfig.Search.Registries = nil
-	}
-	if copy.V1TOMLConfig.Insecure.Registries != nil && len(copy.V1TOMLConfig.Insecure.Registries) == 0 {
-		copy.V1TOMLConfig.Insecure.Registries = nil
-	}
-	if copy.V1TOMLConfig.Block.Registries != nil && len(copy.V1TOMLConfig.Block.Registries) == 0 {
-		copy.V1TOMLConfig.Block.Registries = nil
-	}
-	return copy.hasSetField()
-}
-
-// hasSetField returns true if config contains at least one configuration entry.
-// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
-// is not modified while unmarshaling (in our case, remains nil), while an [] is unmarshaled
-// as a non-nil []string{}.
-func (config *V1RegistriesConf) hasSetField() bool {
-	return !reflect.DeepEqual(*config, V1RegistriesConf{})
-}
-
-// V2RegistriesConf is the sysregistries v2 configuration format.
-type V2RegistriesConf struct {
-	Registries []Registry `toml:"registry"`
-	// An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
-	UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
-	// An array of global credential helpers to use for authentication
-	// (e.g., ["pass", "secretservice"]). The helpers are consulted in the
-	// specified order. Note that "containers-auth.json" is a reserved
-	// value for consulting auth files as specified in
-	// containers-auth.json(5).
-	//
-	// If empty, CredentialHelpers defaults to ["containers-auth.json"].
-	CredentialHelpers []string `toml:"credential-helpers"`
-
-	// ShortNameMode defines how short-name resolution should be handled by
-	// _consumers_ of this package. Depending on the mode, the user should
-	// be prompted with a choice of using one of the unqualified-search
-	// registries when referring to a short name.
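-	//
-	// A hypothetical illustration: with
-	// unqualified-search-registries = ["registry.example.com", "docker.io"],
-	// the short name "alpine" could resolve to either
-	// "registry.example.com/alpine" or "docker.io/library/alpine"; the mode
-	// decides whether consumers pick one automatically or prompt the user.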
-	//
-	// Valid modes are:
-	//  * "prompt": prompt if stdout is a TTY, otherwise use all unqualified-search registries
-	//  * "enforcing": always prompt and error if stdout is not a TTY
-	//  * "disabled": do not prompt and potentially use all unqualified-search registries
-	ShortNameMode string `toml:"short-name-mode"`
-
-	// AdditionalLayerStoreAuthHelper is a helper binary that receives
-	// registry credentials and passes them to the Additional Layer Store for
-	// registry authentication. These credentials are only collected when pulling (not pushing).
-	AdditionalLayerStoreAuthHelper string `toml:"additional-layer-store-auth-helper"`
-
-	shortNameAliasConf
-
-	// If you add any field, make sure to update Nonempty() below.
-}
-
-// Nonempty returns true if config contains at least one configuration entry.
-func (config *V2RegistriesConf) Nonempty() bool {
-	copy := *config // A shallow copy
-	if copy.Registries != nil && len(copy.Registries) == 0 {
-		copy.Registries = nil
-	}
-	if copy.UnqualifiedSearchRegistries != nil && len(copy.UnqualifiedSearchRegistries) == 0 {
-		copy.UnqualifiedSearchRegistries = nil
-	}
-	if copy.CredentialHelpers != nil && len(copy.CredentialHelpers) == 0 {
-		copy.CredentialHelpers = nil
-	}
-	if !copy.shortNameAliasConf.nonempty() {
-		copy.shortNameAliasConf = shortNameAliasConf{}
-	}
-	return copy.hasSetField()
-}
-
-// hasSetField returns true if config contains at least one configuration entry.
-// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
-// is not modified while unmarshaling (in our case, remains nil), while an [] is unmarshaled
-// as a non-nil []string{}.
-func (config *V2RegistriesConf) hasSetField() bool {
-	return !reflect.DeepEqual(*config, V2RegistriesConf{})
-}
-
-// parsedConfig is the result of parsing, and possibly merging, configuration files;
-// it is the boundary between the process of reading+ingesting the files, and
-// later interpreting the configuration based on the caller’s requests.
-type parsedConfig struct {
-	// NOTE: Update also parsedConfig.updateWithConfigurationFrom!
-
-	// partialV2 must continue to exist to maintain the return value of TryUpdatingCache
-	// for compatibility with existing callers.
-	// We store the authoritative Registries and UnqualifiedSearchRegistries values there as well.
-	partialV2 V2RegistriesConf
-	// Absolute path to the configuration file that set the UnqualifiedSearchRegistries.
-	unqualifiedSearchRegistriesOrigin string
-	// Result of parsing of partialV2.ShortNameMode.
-	// NOTE: May be ShortNameModeInvalid to represent ShortNameMode == "" in intermediate values;
-	// the full configuration in configCache / getConfig() always contains a valid value.
-	shortNameMode types.ShortNameMode
-	aliasCache    *shortNameAliasCache
-}
-
-// InvalidRegistries represents an invalid registry configuration. An example
-// is when "registry.com" is defined multiple times in the configuration but
-// with conflicting security settings.
-type InvalidRegistries struct {
-	s string
-}
-
-// Error returns the error string.
-func (e *InvalidRegistries) Error() string {
-	return e.s
-}
-
-// parseLocation parses the input string, performs some sanity checks and returns
-// the sanitized input string. An error is returned if the input string is
-// empty or if it contains an "http{s,}://" prefix.
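-//
-// An illustrative sketch of the intended behavior (inputs are made up):
-//
-//	parseLocation("example.com/")        // "example.com", nil
-//	parseLocation("https://example.com") // "", error: URI schemes are not supported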
-func parseLocation(input string) (string, error) { - trimmed := strings.TrimRight(input, "/") - - // FIXME: This check needs to exist but fails for empty Location field with - // wildcarded prefix. Removal of this check "only" allows invalid input in, - // and does not prevent correct operation. - // https://github.com/containers/image/pull/1191#discussion_r610122617 - // - // if trimmed == "" { - // return "", &InvalidRegistries{s: "invalid location: cannot be empty"} - // } - // - - if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { - msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input) - return "", &InvalidRegistries{s: msg} - } - - return trimmed, nil -} - -// ConvertToV2 returns a v2 config corresponding to a v1 one. -func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) { - regMap := make(map[string]*Registry) - // The order of the registries is not really important, but make it deterministic (the same for the same config file) - // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations. - registryOrder := []string{} - - getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object - var err error - location, err = parseLocation(location) - if err != nil { - return nil, err - } - reg, exists := regMap[location] - if !exists { - reg = &Registry{ - Endpoint: Endpoint{Location: location}, - Mirrors: []Endpoint{}, - Prefix: location, - } - regMap[location] = reg - registryOrder = append(registryOrder, location) - } - return reg, nil - } - - for _, blocked := range config.V1TOMLConfig.Block.Registries { - reg, err := getRegistry(blocked) - if err != nil { - return nil, err - } - reg.Blocked = true - } - for _, insecure := range config.V1TOMLConfig.Insecure.Registries { - reg, err := getRegistry(insecure) - if err != nil { - return nil, err - } - reg.Insecure = true - } - - res := &V2RegistriesConf{ - UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries, - } - for _, location := range registryOrder { - reg := regMap[location] - res.Registries = append(res.Registries, *reg) - } - return res, nil -} - -// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. -var anchoredDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$") - -// postProcess checks the consistency of all the configuration, looks for conflicts, -// and normalizes the configuration (e.g., sets the Prefix to Location if not set). -func (config *V2RegistriesConf) postProcessRegistries() error { - regMap := make(map[string][]*Registry) - - for i := range config.Registries { - reg := &config.Registries[i] - // make sure Location and Prefix are valid - var err error - reg.Location, err = parseLocation(reg.Location) - if err != nil { - return err - } - - if reg.Prefix == "" { - if reg.Location == "" { - return &InvalidRegistries{s: "invalid condition: both location and prefix are unset"} - } - reg.Prefix = reg.Location - } else { - reg.Prefix, err = parseLocation(reg.Prefix) - if err != nil { - return err - } - // FIXME: allow config authors to always use Prefix. 
-			// https://github.com/containers/image/pull/1191#discussion_r610622495
-			if !strings.HasPrefix(reg.Prefix, "*.") && reg.Location == "" {
-				return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"}
-			}
-		}
-
-		// validate that the mirror usage setting does not apply to the primary registry
-		if reg.PullFromMirror != "" {
-			return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix)
-		}
-		// make sure mirrors are valid
-		for j := range reg.Mirrors {
-			mir := &reg.Mirrors[j]
-			mir.Location, err = parseLocation(mir.Location)
-			if err != nil {
-				return err
-			}
-
-			// FIXME: unqualifiedSearchRegistries now also accepts empty values
-			// and shouldn't
-			// https://github.com/containers/image/pull/1191#discussion_r610623216
-			if mir.Location == "" {
-				return &InvalidRegistries{s: "invalid condition: mirror location is unset"}
-			}
-
-			if reg.MirrorByDigestOnly && mir.PullFromMirror != "" {
-				return &InvalidRegistries{s: fmt.Sprintf("cannot set mirror usage mirror-by-digest-only for the registry (%q) and pull-from-mirror for per-mirror (%q) at the same time", reg.Prefix, mir.Location)}
-			}
-			if mir.PullFromMirror != "" && mir.PullFromMirror != MirrorAll &&
-				mir.PullFromMirror != MirrorByDigestOnly && mir.PullFromMirror != MirrorByTagOnly {
-				return &InvalidRegistries{s: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", mir.PullFromMirror, mir.Location)}
-			}
-		}
-		if reg.Location == "" {
-			regMap[reg.Prefix] = append(regMap[reg.Prefix], reg)
-		} else {
-			regMap[reg.Location] = append(regMap[reg.Location], reg)
-		}
-	}
-
-	// Given that a registry can be mentioned multiple times (e.g., to have
-	// multiple prefixes backed by different mirrors), we need to make sure
-	// there are no conflicts among them.
-	//
-	// Note: we need to iterate over the registries array to ensure a
-	// deterministic behavior which is not guaranteed by maps.
-	for _, reg := range config.Registries {
-		var others []*Registry
-		var ok bool
-		if reg.Location == "" {
-			others, ok = regMap[reg.Prefix]
-		} else {
-			others, ok = regMap[reg.Location]
-		}
-		if !ok {
-			return fmt.Errorf("Internal error in V2RegistriesConf.PostProcess: entry in regMap is missing")
-		}
-		for _, other := range others {
-			if reg.Insecure != other.Insecure {
-				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
-				return &InvalidRegistries{s: msg}
-			}
-
-			if reg.Blocked != other.Blocked {
-				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
-				return &InvalidRegistries{s: msg}
-			}
-		}
-	}
-
-	for i := range config.UnqualifiedSearchRegistries {
-		registry, err := parseLocation(config.UnqualifiedSearchRegistries[i])
-		if err != nil {
-			return err
-		}
-		if !anchoredDomainRegexp.MatchString(registry) {
-			return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)}
-		}
-		config.UnqualifiedSearchRegistries[i] = registry
-	}
-
-	// Registries are ordered and the first longest prefix always wins,
-	// rendering later items with the same prefix non-existent. We cannot error
-	// out anymore as this might break existing users, so let's just ignore them
-	// to guarantee that the same prefix exists only once.
-	//
-	// As a side effect of parsedConfig.updateWithConfigurationFrom, the Registries slice
-	// is always sorted. To be consistent in situations where it is not called (no drop-ins),
-	// sort it here as well.
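-	//
-	// (Illustrative example: if two [[registry]] entries both use the prefix
-	// "example.com", only the first one in file order is kept below.)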
- prefixes := []string{} - uniqueRegistries := make(map[string]Registry) - for i := range config.Registries { - // TODO: should we warn if we see the same prefix being used multiple times? - prefix := config.Registries[i].Prefix - if _, exists := uniqueRegistries[prefix]; !exists { - uniqueRegistries[prefix] = config.Registries[i] - prefixes = append(prefixes, prefix) - } - } - sort.Strings(prefixes) - config.Registries = []Registry{} - for _, prefix := range prefixes { - config.Registries = append(config.Registries, uniqueRegistries[prefix]) - } - - return nil -} - -// ConfigPath returns the path to the system-wide registry configuration file. -// Deprecated: This API implies configuration is read from files, and that there is only one. -// Please use ConfigurationSourceDescription to obtain a string usable for error messages. -func ConfigPath(ctx *types.SystemContext) string { - return newConfigWrapper(ctx).configPath -} - -// ConfigDirPath returns the path to the directory for drop-in -// registry configuration files. -// Deprecated: This API implies configuration is read from directories, and that there is only one. -// Please use ConfigurationSourceDescription to obtain a string usable for error messages. -func ConfigDirPath(ctx *types.SystemContext) string { - configWrapper := newConfigWrapper(ctx) - if configWrapper.userConfigDirPath != "" { - return configWrapper.userConfigDirPath - } - return configWrapper.configDirPath -} - -// configWrapper is used to store the paths from ConfigPath and ConfigDirPath -// and acts as a key to the internal cache. -type configWrapper struct { - // path to the registries.conf file - configPath string - // path to system-wide registries.conf.d directory, or "" if not used - configDirPath string - // path to user specified registries.conf.d directory, or "" if not used - userConfigDirPath string -} - -// newConfigWrapper returns a configWrapper for the specified SystemContext. -func newConfigWrapper(ctx *types.SystemContext) configWrapper { - return newConfigWrapperWithHomeDir(ctx, homedir.Get()) -} - -// newConfigWrapperWithHomeDir is an internal implementation detail of newConfigWrapper, -// it exists only to allow testing it with an artificial home directory. 
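-//
-// A hedged illustration (hypothetical paths): with homeDir = "/home/u" and no
-// SystemContext overrides, an existing /home/u/.config/containers/registries.conf
-// takes precedence over the system-wide file, and only the per-user
-// registries.conf.d directory is then consulted.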
-func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) configWrapper {
-	var wrapper configWrapper
-	userRegistriesFilePath := filepath.Join(homeDir, userRegistriesFile)
-	userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
-
-	// decide configPath using per-user path or system file
-	if ctx != nil && ctx.SystemRegistriesConfPath != "" {
-		wrapper.configPath = ctx.SystemRegistriesConfPath
-	} else if err := fileutils.Exists(userRegistriesFilePath); err == nil {
-		// per-user registries.conf exists, not reading system dir
-		// return config dirs from ctx or per-user one
-		wrapper.configPath = userRegistriesFilePath
-		if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
-			wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
-		} else {
-			wrapper.userConfigDirPath = userRegistriesDirPath
-		}
-
-		return wrapper
-	} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
-		wrapper.configPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
-	} else {
-		wrapper.configPath = systemRegistriesConfPath
-	}
-
-	// potentially use both system and per-user dirs if not using per-user config file
-	if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
-		// dir explicitly chosen: use only that one
-		wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
-	} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
-		wrapper.configDirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
-		wrapper.userConfigDirPath = userRegistriesDirPath
-	} else {
-		wrapper.configDirPath = systemRegistriesConfDirPath
-		wrapper.userConfigDirPath = userRegistriesDirPath
-	}
-
-	return wrapper
-}
-
-// ConfigurationSourceDescription returns a string containing the paths of registries.conf and registries.conf.d
-func ConfigurationSourceDescription(ctx *types.SystemContext) string {
-	wrapper := newConfigWrapper(ctx)
-	configSources := []string{wrapper.configPath}
-	if wrapper.configDirPath != "" {
-		configSources = append(configSources, wrapper.configDirPath)
-	}
-	if wrapper.userConfigDirPath != "" {
-		configSources = append(configSources, wrapper.userConfigDirPath)
-	}
-	return strings.Join(configSources, ", ")
-}
-
-// configMutex is used to synchronize concurrent accesses to configCache.
-var configMutex = sync.Mutex{}
-
-// configCache caches already loaded configs with config paths as keys and is
-// used to avoid redundantly parsing configs. Concurrent accesses to the cache
-// are synchronized via configMutex.
-var configCache = make(map[configWrapper]*parsedConfig)
-
-// InvalidateCache invalidates the registry cache. This function is meant to be
-// used for long-running processes that need to reload potential changes made to
-// the cached registry config files.
-func InvalidateCache() {
-	configMutex.Lock()
-	defer configMutex.Unlock()
-	configCache = make(map[configWrapper]*parsedConfig)
-}
-
-// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
-func getConfig(ctx *types.SystemContext) (*parsedConfig, error) {
-	wrapper := newConfigWrapper(ctx)
-	configMutex.Lock()
-	if config, inCache := configCache[wrapper]; inCache {
-		configMutex.Unlock()
-		return config, nil
-	}
-	configMutex.Unlock()
-
-	return tryUpdatingCache(ctx, wrapper)
-}
-
-// dropInConfigs returns a slice of drop-in configs from the registries.conf.d
-// directory.
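-//
-// For example (hypothetical layout): a directory containing
-// 000-shortnames.conf, 001-mirrors.conf and README.md yields only the two
-// *.conf files; sub-directories are not descended into.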
-func dropInConfigs(wrapper configWrapper) ([]string, error) { - var ( - configs []string - dirPaths []string - ) - if wrapper.configDirPath != "" { - dirPaths = append(dirPaths, wrapper.configDirPath) - } - if wrapper.userConfigDirPath != "" { - dirPaths = append(dirPaths, wrapper.userConfigDirPath) - } - for _, dirPath := range dirPaths { - err := filepath.WalkDir(dirPath, - // WalkFunc to read additional configs - func(path string, d fs.DirEntry, err error) error { - switch { - case err != nil: - // return error (could be a permission problem) - return err - case d == nil: - // this should only happen when err != nil but let's be sure - return nil - case d.IsDir(): - if path != dirPath { - // make sure to not recurse into sub-directories - return filepath.SkipDir - } - // ignore directories - return nil - default: - // only add *.conf files - if strings.HasSuffix(path, ".conf") { - configs = append(configs, path) - } - return nil - } - }, - ) - - if err != nil && !os.IsNotExist(err) { - // Ignore IsNotExist errors: most systems won't have a registries.conf.d - // directory. - return nil, fmt.Errorf("reading registries.conf.d: %w", err) - } - } - - return configs, nil -} - -// TryUpdatingCache loads the configuration from the provided `SystemContext` -// without using the internal cache. On success, the loaded configuration will -// be added into the internal registry cache. -// It returns the resulting configuration; this is DEPRECATED and may not correctly -// reflect any future data handled by this package. -func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) { - config, err := tryUpdatingCache(ctx, newConfigWrapper(ctx)) - if err != nil { - return nil, err - } - return &config.partialV2, err -} - -// tryUpdatingCache implements TryUpdatingCache with an additional configWrapper -// argument to avoid redundantly calculating the config paths. -func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedConfig, error) { - configMutex.Lock() - defer configMutex.Unlock() - - // load the config - config, err := loadConfigFile(wrapper.configPath, false) - if err != nil { - // Continue with an empty []Registry if we use the default config, which - // implies that the config path of the SystemContext isn't set. - // - // Note: if ctx.SystemRegistriesConfPath points to the default config, - // we will still return an error. - if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") { - config = &parsedConfig{} - config.partialV2 = V2RegistriesConf{Registries: []Registry{}} - config.aliasCache, err = newShortNameAliasCache("", &shortNameAliasConf{}) - if err != nil { - return nil, err // Should never happen - } - } else { - return nil, fmt.Errorf("loading registries configuration %q: %w", wrapper.configPath, err) - } - } - - // Load the configs from the conf directory path. - dinConfigs, err := dropInConfigs(wrapper) - if err != nil { - return nil, err - } - for _, path := range dinConfigs { - // Enforce v2 format for drop-in-configs. 
-		dropIn, err := loadConfigFile(path, true)
-		if err != nil {
-			if errors.Is(err, fs.ErrNotExist) {
-				// file must have been removed between the directory listing
-				// and the open call; ignore that, as it is an expected race
-				continue
-			}
-			return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
-		}
-		config.updateWithConfigurationFrom(dropIn)
-	}
-
-	if config.shortNameMode == types.ShortNameModeInvalid {
-		config.shortNameMode = defaultShortNameMode
-	}
-
-	if len(config.partialV2.CredentialHelpers) == 0 {
-		config.partialV2.CredentialHelpers = []string{AuthenticationFileHelper}
-	}
-
-	// populate the cache
-	configCache[wrapper] = config
-	return config, nil
-}
-
-// GetRegistries has been deprecated. Use FindRegistry instead.
-//
-// GetRegistries loads and returns the registries specified in the config.
-// Note the parsed content of registry config files is cached. For reloading,
-// use `InvalidateCache` and re-call `GetRegistries`.
-func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
-	config, err := getConfig(ctx)
-	if err != nil {
-		return nil, err
-	}
-	return config.partialV2.Registries, nil
-}
-
-// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
-// for unqualified image search, in the returned order.
-func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
-	registries, _, err := UnqualifiedSearchRegistriesWithOrigin(ctx)
-	return registries, err
-}
-
-// UnqualifiedSearchRegistriesWithOrigin returns a list of host[:port] entries
-// to try for unqualified image search, in the returned order. It also returns
-// a human-readable description of where these entries are specified (e.g., a
-// registries.conf file).
-func UnqualifiedSearchRegistriesWithOrigin(ctx *types.SystemContext) ([]string, string, error) {
-	config, err := getConfig(ctx)
-	if err != nil {
-		return nil, "", err
-	}
-	return config.partialV2.UnqualifiedSearchRegistries, config.unqualifiedSearchRegistriesOrigin, nil
-}
-
-// parseShortNameMode translates the string into a well-typed
-// types.ShortNameMode.
-func parseShortNameMode(mode string) (types.ShortNameMode, error) {
-	switch mode {
-	case "disabled":
-		return types.ShortNameModeDisabled, nil
-	case "enforcing":
-		return types.ShortNameModeEnforcing, nil
-	case "permissive":
-		return types.ShortNameModePermissive, nil
-	default:
-		return types.ShortNameModeInvalid, fmt.Errorf("invalid short-name mode: %q", mode)
-	}
-}
-
-// GetShortNameMode returns the configured types.ShortNameMode.
-func GetShortNameMode(ctx *types.SystemContext) (types.ShortNameMode, error) {
-	if ctx != nil && ctx.ShortNameMode != nil {
-		return *ctx.ShortNameMode, nil
-	}
-	config, err := getConfig(ctx)
-	if err != nil {
-		return -1, err
-	}
-	return config.shortNameMode, err
-}
-
-// CredentialHelpers returns the global top-level credential helpers.
-func CredentialHelpers(sys *types.SystemContext) ([]string, error) {
-	config, err := getConfig(sys)
-	if err != nil {
-		return nil, err
-	}
-	return config.partialV2.CredentialHelpers, nil
-}
-
-// AdditionalLayerStoreAuthHelper returns the helper for passing registry
-// credentials to the Additional Layer Store.
-func AdditionalLayerStoreAuthHelper(sys *types.SystemContext) (string, error) {
-	config, err := getConfig(sys)
-	if err != nil {
-		return "", err
-	}
-	return config.partialV2.AdditionalLayerStoreAuthHelper, nil
-}
-
-// refMatchingSubdomainPrefix returns the length of ref
-// iff ref, which is a registry, repository namespace, repository or image reference (as formatted by
-// reference.Domain(), reference.Named.Name() or reference.Reference.String()
-// — note that this requires the name to start with an explicit hostname!),
-// matches a Registry.Prefix value containing wildcarded subdomains in the
-// format: *.example.com. Wildcards are only accepted at the beginning, so
-// other formats like example.*.com will not work. Wildcarded prefixes also
-// cannot contain port numbers or namespaces in them.
-func refMatchingSubdomainPrefix(ref, prefix string) int {
-	index := strings.Index(ref, prefix[1:])
-	if index == -1 {
-		return -1
-	}
-	if strings.Contains(ref[:index], "/") {
-		return -1
-	}
-	index += len(prefix[1:])
-	if index == len(ref) {
-		return index
-	}
-	switch ref[index] {
-	case ':', '/', '@':
-		return index
-	default:
-		return -1
-	}
-}
-
-// refMatchingPrefix returns the length of the prefix iff ref,
-// which is a registry, repository namespace, repository or image reference (as formatted by
-// reference.Domain(), reference.Named.Name() or reference.Reference.String()
-// — note that this requires the name to start with an explicit hostname!),
-// matches a Registry.Prefix value.
-// (This is split from the caller primarily to make testing easier.)
-func refMatchingPrefix(ref, prefix string) int {
-	switch {
-	case strings.HasPrefix(prefix, "*."):
-		return refMatchingSubdomainPrefix(ref, prefix)
-	case len(ref) < len(prefix):
-		return -1
-	case len(ref) == len(prefix):
-		if ref == prefix {
-			return len(prefix)
-		}
-		return -1
-	case len(ref) > len(prefix):
-		if !strings.HasPrefix(ref, prefix) {
-			return -1
-		}
-		c := ref[len(prefix)]
-		// This allows "example.com:5000" to match "example.com",
-		// which is unintended; that will get fixed eventually, DON'T RELY
-		// ON THE CURRENT BEHAVIOR.
-		if c == ':' || c == '/' || c == '@' {
-			return len(prefix)
-		}
-		return -1
-	default:
-		panic("Internal error: impossible comparison outcome")
-	}
-}
-
-// FindRegistry returns the Registry with the longest prefix for ref,
-// which is a registry, repository namespace, repository or image reference (as formatted by
-// reference.Domain(), reference.Named.Name() or reference.Reference.String()
-// — note that this requires the name to start with an explicit hostname!).
-// If no Registry prefixes the image, nil is returned.
-func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
-	config, err := getConfig(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	return findRegistryWithParsedConfig(config, ref)
-}
-
-// findRegistryWithParsedConfig implements `FindRegistry` with a pre-loaded
-// parsedConfig.
-func findRegistryWithParsedConfig(config *parsedConfig, ref string) (*Registry, error) {
-	reg := Registry{}
-	prefixLen := 0
-	for _, r := range config.partialV2.Registries {
-		if refMatchingPrefix(ref, r.Prefix) != -1 {
-			length := len(r.Prefix)
-			if length > prefixLen {
-				reg = r
-				prefixLen = length
-			}
-		}
-	}
-	if prefixLen != 0 {
-		return &reg, nil
-	}
-	return nil, nil
-}
-
-// loadConfigFile loads and unmarshals a single config file.
-// Use forceV2 if the config must be in the v2 format.
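-//
-// An illustrative sketch of the two accepted formats (values are made up):
-// a v1 file such as
-//
-//	[registries.search]
-//	registries = ["docker.io"]
-//
-// is converted through V1RegistriesConf.ConvertToV2, while a v2 file uses
-//
-//	[[registry]]
-//	prefix = "docker.io"
-//	location = "mirror.example.com"
-//
-// Mixing both formats in a single file is rejected.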
-func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) { - logrus.Debugf("Loading registries configuration %q", path) - - // tomlConfig allows us to unmarshal either V1 or V2 simultaneously. - type tomlConfig struct { - V2RegistriesConf - V1RegistriesConf // for backwards compatibility with sysregistries v1 - } - - // Load the tomlConfig. Note that `DecodeFile` will overwrite set fields. - var combinedTOML tomlConfig - meta, err := toml.DecodeFile(path, &combinedTOML) - if err != nil { - return nil, err - } - if keys := meta.Undecoded(); len(keys) > 0 { - logrus.Debugf("Failed to decode keys %q from %q", keys, path) - } - - if combinedTOML.V1RegistriesConf.hasSetField() { - // Enforce the v2 format if requested. - if forceV2 { - return nil, &InvalidRegistries{s: "registry must be in v2 format but is in v1"} - } - - // Convert a v1 config into a v2 config. - if combinedTOML.V2RegistriesConf.hasSetField() { - return nil, &InvalidRegistries{s: fmt.Sprintf("mixing sysregistry v1/v2 is not supported: %#v", combinedTOML)} - } - converted, err := combinedTOML.V1RegistriesConf.ConvertToV2() - if err != nil { - return nil, err - } - combinedTOML.V1RegistriesConf = V1RegistriesConf{} - combinedTOML.V2RegistriesConf = *converted - } - - res := parsedConfig{partialV2: combinedTOML.V2RegistriesConf} - - // Post process registries, set the correct prefixes, sanity checks, etc. - if err := res.partialV2.postProcessRegistries(); err != nil { - return nil, err - } - - res.unqualifiedSearchRegistriesOrigin = path - - if len(res.partialV2.ShortNameMode) > 0 { - mode, err := parseShortNameMode(res.partialV2.ShortNameMode) - if err != nil { - return nil, err - } - res.shortNameMode = mode - } else { - res.shortNameMode = types.ShortNameModeInvalid - } - - // Valid wildcarded prefixes must be in the format: *.example.com - // FIXME: Move to postProcessRegistries - // https://github.com/containers/image/pull/1191#discussion_r610623829 - for i := range res.partialV2.Registries { - prefix := res.partialV2.Registries[i].Prefix - if strings.HasPrefix(prefix, "*.") && strings.ContainsAny(prefix, "/@:") { - msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix) - return nil, &InvalidRegistries{s: msg} - } - } - - // Parse and validate short-name aliases. - cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf) - if err != nil { - return nil, fmt.Errorf("validating short-name aliases: %w", err) - } - res.aliasCache = cache - // Clear conf.partialV2.shortNameAliasConf to make it available for garbage collection and - // reduce memory consumption. We're consulting aliasCache for lookups. - res.partialV2.shortNameAliasConf = shortNameAliasConf{} - - return &res, nil -} - -// updateWithConfigurationFrom updates c with configuration from updates. -// -// Fields present in updates will typically replace already set fields in c. -// The [[registry]] and alias tables are merged. -func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) { - // == Merge Registries: - registryMap := make(map[string]Registry) - for i := range c.partialV2.Registries { - registryMap[c.partialV2.Registries[i].Prefix] = c.partialV2.Registries[i] - } - // Merge the freshly loaded registries. 
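-	// (Illustrative note: if both c and updates define a [[registry]] table
-	// with the prefix "docker.io", the entry from updates replaces the one
-	// from c wholesale; entries are not deep-merged field by field.)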
-	for i := range updates.partialV2.Registries {
-		registryMap[updates.partialV2.Registries[i].Prefix] = updates.partialV2.Registries[i]
-	}
-
-	// Go maps have a non-deterministic order when iterating the keys, so
-	// we sort the keys to enforce some order in the Registries slice.
-	// Some consumers of c/image (e.g., CRI-O) log the configuration
-	// and a non-deterministic order could easily cause confusion.
-	prefixes := slices.Sorted(maps.Keys(registryMap))
-
-	c.partialV2.Registries = []Registry{}
-	for _, prefix := range prefixes {
-		c.partialV2.Registries = append(c.partialV2.Registries, registryMap[prefix])
-	}
-
-	// == Merge UnqualifiedSearchRegistries:
-	// This depends on a subtlety of the behavior of the TOML decoder, where a missing array field
-	// is not modified while unmarshaling (in our case, remains nil), while an [] is unmarshaled
-	// as a non-nil []string{}.
-	if updates.partialV2.UnqualifiedSearchRegistries != nil {
-		c.partialV2.UnqualifiedSearchRegistries = updates.partialV2.UnqualifiedSearchRegistries
-		c.unqualifiedSearchRegistriesOrigin = updates.unqualifiedSearchRegistriesOrigin
-	}
-
-	// == Merge credential helpers:
-	if updates.partialV2.CredentialHelpers != nil {
-		c.partialV2.CredentialHelpers = updates.partialV2.CredentialHelpers
-	}
-
-	// == Merge shortNameMode:
-	// We don’t maintain c.partialV2.ShortNameMode.
-	if updates.shortNameMode != types.ShortNameModeInvalid {
-		c.shortNameMode = updates.shortNameMode
-	}
-
-	// == Merge AdditionalLayerStoreAuthHelper:
-	if updates.partialV2.AdditionalLayerStoreAuthHelper != "" {
-		c.partialV2.AdditionalLayerStoreAuthHelper = updates.partialV2.AdditionalLayerStoreAuthHelper
-	}
-
-	// == Merge aliasCache:
-	// We don’t maintain (in fact we actively clear) c.partialV2.shortNameAliasConf.
- c.aliasCache.updateWithConfigurationFrom(updates.aliasCache) -} diff --git a/vendor/go.podman.io/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/go.podman.io/image/v5/pkg/tlsclientconfig/tlsclientconfig.go deleted file mode 100644 index 4e0ee57e9..000000000 --- a/vendor/go.podman.io/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ /dev/null @@ -1,101 +0,0 @@ -package tlsclientconfig - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "net" - "net/http" - "os" - "path/filepath" - "slices" - "strings" - "time" - - "github.com/sirupsen/logrus" -) - -// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc -func SetupCertificates(dir string, tlsc *tls.Config) error { - logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := os.ReadDir(dir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - if os.IsPermission(err) { - logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) - return nil - } - return err - } - - for _, f := range fs { - fullPath := filepath.Join(dir, f.Name()) - if strings.HasSuffix(f.Name(), ".crt") { - logrus.Debugf(" crt: %s", fullPath) - data, err := os.ReadFile(fullPath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - // file must have been removed between the directory listing - // and the open call, ignore that as it is a expected race - continue - } - return err - } - if tlsc.RootCAs == nil { - systemPool, err := x509.SystemCertPool() - if err != nil { - return fmt.Errorf("unable to get system cert pool: %w", err) - } - tlsc.RootCAs = systemPool - } - tlsc.RootCAs.AppendCertsFromPEM(data) - } - if base, ok := strings.CutSuffix(f.Name(), ".cert"); ok { - certName := f.Name() - keyName := base + ".key" - logrus.Debugf(" cert: %s", fullPath) - if !hasFile(fs, keyName) { - return fmt.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) - if err != nil { - return err - } - tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert) - } - if base, ok := strings.CutSuffix(f.Name(), ".key"); ok { - keyName := f.Name() - certName := base + ".cert" - logrus.Debugf(" key: %s", fullPath) - if !hasFile(fs, certName) { - return fmt.Errorf("missing client certificate %s for key %s", certName, keyName) - } - } - } - return nil -} - -func hasFile(files []os.DirEntry, name string) bool { - return slices.ContainsFunc(files, func(f os.DirEntry) bool { - return f.Name() == name - }) -} - -// NewTransport Creates a default transport -func NewTransport() *http.Transport { - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - } - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: direct.DialContext, - TLSHandshakeTimeout: 10 * time.Second, - IdleConnTimeout: 90 * time.Second, - MaxIdleConns: 100, - } - return tr -} diff --git a/vendor/go.podman.io/image/v5/transports/stub.go b/vendor/go.podman.io/image/v5/transports/stub.go deleted file mode 100644 index ae44fd428..000000000 --- a/vendor/go.podman.io/image/v5/transports/stub.go +++ /dev/null @@ -1,36 +0,0 @@ -package transports - -import ( - "fmt" - - "go.podman.io/image/v5/types" -) - -// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. -type stubTransport string - -// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. -func NewStubTransport(name string) types.ImageTransport { - return stubTransport(name) -} - -// Name returns the name of the transport, which must be unique among other transports. -func (s stubTransport) Name() string { - return string(s) -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) { - return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s)) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error { - // Allowing any reference in here allows tools with some transports stubbed-out to still - // use signature verification policies which refer to these stubbed-out transports. - // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON . 
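-	// (A hedged usage sketch: a build that compiles out, e.g., the "ostree"
-	// transport can call transports.Register(transports.NewStubTransport("ostree"));
-	// references then fail to parse with a clear error, while policy scopes
-	// mentioning that transport still validate.)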
- return nil -} diff --git a/vendor/go.podman.io/image/v5/transports/transports.go b/vendor/go.podman.io/image/v5/transports/transports.go deleted file mode 100644 index 04f05292b..000000000 --- a/vendor/go.podman.io/image/v5/transports/transports.go +++ /dev/null @@ -1,90 +0,0 @@ -package transports - -import ( - "fmt" - "sort" - "sync" - - "go.podman.io/image/v5/internal/set" - "go.podman.io/image/v5/types" -) - -// knownTransports is a registry of known ImageTransport instances. -type knownTransports struct { - transports map[string]types.ImageTransport - mu sync.Mutex -} - -func (kt *knownTransports) Get(k string) types.ImageTransport { - kt.mu.Lock() - t := kt.transports[k] - kt.mu.Unlock() - return t -} - -func (kt *knownTransports) Remove(k string) { - kt.mu.Lock() - delete(kt.transports, k) - kt.mu.Unlock() -} - -func (kt *knownTransports) Add(t types.ImageTransport) { - kt.mu.Lock() - defer kt.mu.Unlock() - name := t.Name() - if t := kt.transports[name]; t != nil { - panic(fmt.Sprintf("Duplicate image transport name %s", name)) - } - kt.transports[name] = t -} - -var kt *knownTransports - -func init() { - kt = &knownTransports{ - transports: make(map[string]types.ImageTransport), - } -} - -// Get returns the transport specified by name or nil when unavailable. -func Get(name string) types.ImageTransport { - return kt.Get(name) -} - -// Delete deletes a transport from the registered transports. -func Delete(name string) { - kt.Remove(name) -} - -// Register registers a transport. -func Register(t types.ImageTransport) { - kt.Add(t) -} - -// ImageName converts a types.ImageReference into an URL-like image name, which MUST be such that -// ParseImageName(ImageName(reference)) returns an equivalent reference. -// -// This is the generally recommended way to refer to images in the UI. -// -// NOTE: The returned string is not promised to be equal to the original input to ParseImageName; -// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. -func ImageName(ref types.ImageReference) string { - return ref.Transport().Name() + ":" + ref.StringWithinTransport() -} - -var deprecatedTransports = set.NewWithValues("atomic", "ostree") - -// ListNames returns a list of non deprecated transport names. -// Deprecated transports can be used, but are not presented to users. -func ListNames() []string { - kt.mu.Lock() - defer kt.mu.Unlock() - var names []string - for _, transport := range kt.transports { - if !deprecatedTransports.Contains(transport.Name()) { - names = append(names, transport.Name()) - } - } - sort.Strings(names) - return names -} diff --git a/vendor/go.podman.io/image/v5/types/types.go b/vendor/go.podman.io/image/v5/types/types.go deleted file mode 100644 index de25dabcd..000000000 --- a/vendor/go.podman.io/image/v5/types/types.go +++ /dev/null @@ -1,735 +0,0 @@ -package types - -import ( - "context" - "io" - "net/url" - "time" - - digest "github.com/opencontainers/go-digest" - v1 "github.com/opencontainers/image-spec/specs-go/v1" - "go.podman.io/image/v5/docker/reference" - compression "go.podman.io/image/v5/pkg/compression/types" -) - -// ImageTransport is a top-level namespace for ways to store/load an image. -// It should generally correspond to ImageSource/ImageDestination implementations. -// -// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport. 
-// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS -// (or, even, IPv4 or IPv6). -// -// OTOH all images using the same transport should (apart from versions of the image format), be interoperable. -// For example, several different ImageTransport implementations may be based on local filesystem paths, -// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...) -// -// See also transports.KnownTransports. -type ImageTransport interface { - // Name returns the name of the transport, which must be unique among other transports. - Name() string - // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. - ParseReference(reference string) (ImageReference, error) - // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys - // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). - // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. - // scope passed to this function will not be "", that value is always allowed. - ValidatePolicyConfigurationScope(scope string) error -} - -// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport. -// -// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening -// within an ImageTransport.ParseReference() or equivalent API creating the reference object. -// That's also why the various identification/formatting methods of this type do not support returning errors. -// -// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside -// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. -type ImageReference interface { - Transport() ImageTransport - // StringWithinTransport returns a string representation of the reference, which MUST be such that - // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. - // NOTE: The returned string is not promised to be equal to the original input to ParseReference; - // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. - // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; - // instead, see transports.ImageName(). - StringWithinTransport() string - - // DockerReference returns a Docker reference associated with this reference - // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, - // not e.g. after redirect or alias processing), or nil if unknown/not applicable. - DockerReference() reference.Named - - // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. - // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; - // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical - // (i.e. 
various references with exactly the same semantics should return the same configuration identity)
-	// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
-	// not required/guaranteed that it will be a valid input to Transport().ParseReference().
-	// Returns "" if configuration identities for these references are not supported.
-	PolicyConfigurationIdentity() string
-
-	// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
-	// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
-	// in order, terminating on first match, and an implicit "" is always checked at the end.
-	// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
-	// and each following element to be a prefix of the element preceding it.
-	PolicyConfigurationNamespaces() []string
-
-	// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
-	// The caller must call .Close() on the returned ImageCloser.
-	// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-	// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-	// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
-	NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error)
-	// NewImageSource returns a types.ImageSource for this reference.
-	// The caller must call .Close() on the returned ImageSource.
-	NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error)
-	// NewImageDestination returns a types.ImageDestination for this reference.
-	// The caller must call .Close() on the returned ImageDestination.
-	NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error)
-
-	// DeleteImage deletes the named image from the registry, if supported.
-	DeleteImage(ctx context.Context, sys *SystemContext) error
-}
-
-// LayerCompression indicates if layers must be compressed, decompressed or preserved
-type LayerCompression int
-
-const (
-	// PreserveOriginal indicates the layer must be preserved, i.e.
-	// no compression or decompression.
-	PreserveOriginal LayerCompression = iota
-	// Decompress indicates the layer must be decompressed
-	Decompress
-	// Compress indicates the layer must be compressed
-	Compress
-)
-
-// LayerCrypto indicates if layers have been encrypted or decrypted or none
-type LayerCrypto int
-
-const (
-	// PreserveOriginalCrypto indicates the layer must be preserved, i.e.
-	// no encryption/decryption
-	PreserveOriginalCrypto LayerCrypto = iota
-	// Encrypt indicates the layer is encrypted
-	Encrypt
-	// Decrypt indicates the layer is decrypted
-	Decrypt
-)
-
-// BlobInfo collects known information about a blob (layer/config).
-// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
-type BlobInfo struct {
-	Digest      digest.Digest // "" if unknown.
-	Size        int64         // -1 if unknown
-	URLs        []string
-	Annotations map[string]string
-	MediaType   string
-
-	// NOTE: The following fields contain desired _edits_ to blob infos.
-	// Conceptually they don't belong in the BlobInfo object at all;
-	// the edits should be provided specifically as parameters to the edit implementation.
- // We can’t remove the fields without breaking compatibility, but don’t - // add any more. - - // CompressionOperation is used in Image.UpdateLayerInfos to instruct - // whether the original layer's "compressed or not" should be preserved, - // possibly while changing the compression algorithm from one to another, - // or if it should be changed to compressed or decompressed. - // The field defaults to preserve the original layer's compressedness. - // TODO: To remove together with CryptoOperation in re-design to remove - // field out of BlobInfo. - CompressionOperation LayerCompression - // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct - // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be - // set when `CompressionOperation == Compress` and MAY be set when - // `CompressionOperation == PreserveOriginal` and the compression type is - // being changed for an already-compressed layer. - CompressionAlgorithm *compression.Algorithm - // CryptoOperation is used in Image.UpdateLayerInfos to instruct - // whether the original layer was encrypted/decrypted - // TODO: To remove together with CompressionOperation in re-design to - // remove field out of BlobInfo. - CryptoOperation LayerCrypto - // Before adding any fields to this struct, read the NOTE above. -} - -// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present. -// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data about blobs keyed by (scope, digest). -// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable. -// -// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different -// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, -// at least by not failing hard when encountering unknown data. -type BICTransportScope struct { - Opaque string -} - -// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope. -// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob -// can look it up using BlobInfoCache.CandidateLocations. -// -// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different -// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, -// at least by not failing hard when encountering unknown data. -type BICLocationReference struct { - Opaque string -} - -// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations. -type BICReplacementCandidate struct { - Digest digest.Digest - Location BICLocationReference -} - -// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies. -// -// It records two kinds of data: -// -// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs: -// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest. 
-// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
-// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
-//
-// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
-// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
-//
-// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
-// compress/decompress blobs for their own purposes.
-//
-// - Known blob locations, managed by individual transports:
-// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
-// recording transport-specific information that allows the transport to reuse the blob in the future;
-// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
-//
-// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
-// can be directly reused within a registry, or mounted across registries within a registry server.)
-//
-// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
-// users of the cache should just fall back to copying the blobs the usual way.
-//
-// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by
-// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own.
-type BlobInfoCache interface {
-	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
-	// May return anyDigest if it is known to be uncompressed.
-	// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-	UncompressedDigest(anyDigest digest.Digest) digest.Digest
-	// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
-	// It’s allowed for anyDigest == uncompressed.
-	// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-	RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
-
-	// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
-	// and can be reused given the opaque location data.
-	RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
-	// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
-	// within the specified (transport, scope) (if they still exist, which is not guaranteed).
-
-// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
-// This is primarily useful for copying images around; for examining their properties, Image (below)
-// is usually more useful.
-// Each ImageSource should eventually be closed by calling Close().
-//
-// WARNING: Various methods which return an object identified by digest generally do not
-// validate that the returned data actually matches that digest; this is the caller’s responsibility.
-// See the individual methods’ documentation for potentially more details.
-type ImageSource interface {
- // Reference returns the reference used to set up this source, _as specified by the user_
- // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
- Reference() ImageReference
- // Close removes resources associated with an initialized ImageSource, if any.
- Close() error
- // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
- // It may use a remote (= slow) service.
- // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
- // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
- //
- // WARNING: This is a raw access to the data as provided by the source; if the reference contains a digest, or instanceDigest is set,
- // callers must enforce the digest match themselves, typically by using image.UnparsedInstance to access the manifest instead
- // of calling this directly. (Compare the generic warning applicable to all of the [ImageSource] interface.)
- GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
- // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
- // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1, and MediaType may optionally be provided.
- // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
- //
- // WARNING: This is a raw access to the data as provided by the source; callers must validate the contents
- // against the blob’s digest themselves. (Compare the generic warning applicable to all of the [ImageSource] interface.)
- GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
- // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
- HasThreadSafeGetBlob() bool
- // GetSignatures returns the image's signatures. It may use a remote (= slow) service.
- // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
- // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
- // (e.g. if the source never returns manifest lists).
- GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
- // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
- // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
- // to read the image's layers.
- // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
- // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
- // (e.g. if the source never returns manifest lists).
- // The Digest field is guaranteed to be provided; Size may be -1.
- // WARNING: The list may contain duplicates, and they are semantically relevant.
- LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error)
-}
-
-// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
-//
-// There is a specific required order for some of the calls:
-// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (the manifest references blobs, which may be created or compressed only at push time)
-// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
-// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
-//
-// Each ImageDestination should eventually be closed by calling Close().
-type ImageDestination interface {
- // Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
- // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
- Reference() ImageReference
- // Close removes resources associated with an initialized ImageDestination, if any.
- Close() error
-
- // SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
- // If an empty slice or nil is returned, then any MIME type can be tried for upload.
- SupportedManifestMIMETypes() []string
- // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
- // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
- SupportsSignatures(ctx context.Context) error
- // DesiredLayerCompression indicates the kind of compression to apply to layers
- DesiredLayerCompression() LayerCompression
- // AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
- // uploaded to the image destination, true otherwise.
- AcceptsForeignLayerURLs() bool
- // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
- MustMatchRuntimeOS() bool
- // IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
- // and would prefer to receive an unmodified manifest instead of one modified for the destination.
- // Does not make a difference if Reference().DockerReference() is nil.
- IgnoresEmbeddedDockerReference() bool
-
- // PutBlob writes contents of stream and returns data representing the result.
- // inputInfo.Digest can optionally be provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
- // inputInfo.Size is the expected length of stream, if known.
- // inputInfo.MediaType describes the blob format, if known.
- // May update cache.
- // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
- // to any other readers for download using the supplied digest.
- // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
- PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
- // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
- HasThreadSafePutBlob() bool
- // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
- // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
- // info.Digest must not be empty.
- // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
- // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
- // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
- // reflected in the manifest that will be written.
- // If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
- // May use and/or update cache.
- TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
- // PutManifest writes manifest to the destination.
- // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for
- // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
- // It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
- // by `manifest.Digest()`.
- // FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
- // If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
- // and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
- PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error
- // PutSignatures writes a set of signatures to the destination.
- // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
- // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
- // MUST be called after PutManifest (signatures may reference manifest contents).
- PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
- // Commit marks the process of storing the image as successful and asks for the image to be persisted.
- // unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
- // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
- // original manifest list digest, if desired.
- // WARNING: This does not have any transactional semantics:
- // - Uploaded data MAY be visible to others before Commit() is called
- // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
- Commit(ctx context.Context, unparsedToplevel UnparsedImage) error
-}
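To make the required call order concrete, a hedged sketch follows; the blobUpload helper type, and the way blobs, manifest, and signatures are obtained, are illustrative assumptions rather than part of this file:

// Sketch only: the TryReusingBlob/PutBlob -> PutManifest -> PutSignatures -> Commit ordering.
type blobUpload struct {
	info   BlobInfo
	stream io.Reader
}

func pushImage(ctx context.Context, dest ImageDestination, cache BlobInfoCache,
	blobs []blobUpload, manifest []byte, sigs [][]byte, toplevel UnparsedImage) error {
	// 1. Blobs first: TryReusingBlob/PutBlob MUST precede PutManifest.
	for _, b := range blobs {
		reused, _, err := dest.TryReusingBlob(ctx, b.info, cache, true)
		if err != nil {
			return err
		}
		if reused {
			continue // the destination already has an equivalent blob
		}
		if _, err := dest.PutBlob(ctx, b.stream, b.info, cache, false); err != nil {
			return err
		}
	}
	// 2. The manifest next; it references the blobs stored above.
	if err := dest.PutManifest(ctx, manifest, nil); err != nil {
		return err
	}
	// 3. Signatures after the manifest they reference.
	if err := dest.PutSignatures(ctx, sigs, nil); err != nil {
		return err
	}
	// 4. Commit last, to ask for the image to be persisted.
	return dest.Commit(ctx, toplevel)
}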
-
-// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
-// but specifically refuses this manifest type, while it may accept a different manifest type.
-type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
- Err error
-}
-
-func (e ManifestTypeRejectedError) Error() string {
- return e.Err.Error()
-}
-
-// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
-// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
-// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
-// This also makes the UnparsedImage→Image conversion an explicitly visible step.
-//
-// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
-//
-// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
-type UnparsedImage interface {
- // Reference returns the reference used to set up this source, _as specified by the user_
- // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
- Reference() ImageReference
- // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
- Manifest(ctx context.Context) ([]byte, string, error)
- // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
- Signatures(ctx context.Context) ([][]byte, error)
-}
-
-// Image is the primary API for inspecting properties of images.
-// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
-//
-// The Image must not be used after the underlying ImageSource is Close()d.
-type Image interface {
- // Note that Reference may return nil in the return value of UpdatedImage!
- UnparsedImage
- // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
- // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
- ConfigInfo() BlobInfo
- // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
- // The result is cached; it is OK to call this however often you need.
- ConfigBlob(context.Context) ([]byte, error)
- // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
- // layers in the resulting configuration isn't guaranteed to be returned due to how
- // old image manifests work (docker v2s1 especially).
- OCIConfig(context.Context) (*v1.Image, error)
- // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
- // The Digest field is guaranteed to be provided, Size may be -1, and MediaType may optionally be provided.
- // WARNING: The list may contain duplicates, and they are semantically relevant.
- LayerInfos() []BlobInfo
- // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
- // The Digest field is guaranteed to be provided, Size may be -1, and MediaType may optionally be provided.
- // WARNING: The list may contain duplicates, and they are semantically relevant.
- LayerInfosForCopy(context.Context) ([]BlobInfo, error)
- // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref.
- // It returns false if the manifest does not embed a Docker reference.
- // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
- EmbeddedDockerReferenceConflicts(ref reference.Named) bool
- // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
- Inspect(context.Context) (*ImageInspectInfo, error)
- // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
- // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
- // (most importantly, it forces us to download the full layers even if they are already present at the destination).
- UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
- // UpdatedImage returns a types.Image modified according to options.
- // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
- // This does not change the state of the original Image object.
- // The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if
- // manifests of type options.ManifestMIMEType cannot include layers that are compressed
- // in accordance with the CompressionOperation and CompressionAlgorithm specified in one
- // or more options.LayerInfos items, though retrying with a different
- // options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm
- // values might succeed.
- UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
- // SupportsEncryption returns an indicator that the image supports encryption.
- //
- // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
- // the process of updating a manifest between different manifest types was to update then convert.
- // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
- SupportsEncryption(ctx context.Context) bool
- // Size returns an approximation of the amount of disk space which is consumed by the image in its current
- // location. If the size is not known, -1 will be returned.
- Size() (int64, error)
-}
-
-// ImageCloser is an Image with a Close() method which must be called by the user.
-// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
-// to ensure that the ImageSource is closed.
-type ImageCloser interface {
- Image
- // Close removes resources associated with an initialized ImageCloser.
- Close() error
-}
-
-// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
-type ManifestUpdateOptions struct {
- LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
- EmbeddedDockerReference reference.Named
- ManifestMIMEType string
- // The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
- InformationOnly ManifestUpdateInformation
-}
-
-// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
-// only to make writing struct literals possible.
-type ManifestUpdateInformation struct {
- Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
- LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
- LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
-}
-
-// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
-// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
-// for other manifest types.
-type ImageInspectInfo struct {
- Tag string
- Created *time.Time
- DockerVersion string
- Labels map[string]string
- Architecture string
- Variant string
- Os string
- Layers []string
- LayersData []ImageInspectLayer
- Env []string
- Author string
-}
-
-// ImageInspectLayer is a set of metadata describing an image layer's details.
-type ImageInspectLayer struct {
- MIMEType string // "" if unknown.
- Digest digest.Digest
- Size int64 // -1 if unknown.
- Annotations map[string]string
-}
-
-// DockerAuthConfig contains authorization information for connecting to a registry.
-// The value of Username and Password can be empty when accessing the registry anonymously.
-type DockerAuthConfig struct {
- Username string
- Password string
- // IdentityToken can be used as a refresh_token in place of username and
- // password to obtain the bearer/access token in the oauth2 flow. If identity
- // token is set, password should not be set.
- // Ref: https://docs.docker.com/registry/spec/auth/oauth/
- IdentityToken string
-}
-
-// OptionalBool is a boolean with an additional undefined value, which is meant
-// to be used in the context of user input to distinguish between a
-// user-specified value and a default value.
-type OptionalBool byte
-
-const (
- // OptionalBoolUndefined indicates that the OptionalBool hasn't been set.
- OptionalBoolUndefined OptionalBool = iota
- // OptionalBoolTrue represents the boolean true.
- OptionalBoolTrue
- // OptionalBoolFalse represents the boolean false.
- OptionalBoolFalse
-)
-
-// NewOptionalBool converts the input bool into either OptionalBoolTrue or
-// OptionalBoolFalse. The function is meant to save users boilerplate code.
-func NewOptionalBool(b bool) OptionalBool {
- o := OptionalBoolFalse
- if b {
- o = OptionalBoolTrue
- }
- return o
-}
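As a quick illustration (not part of this patch), callers typically resolve an OptionalBool against a caller-supplied default, so that an explicit false can be told apart from "not specified":

// Sketch only: read an OptionalBool back with a fallback default.
func boolOrDefault(v OptionalBool, def bool) bool {
	switch v {
	case OptionalBoolTrue:
		return true
	case OptionalBoolFalse:
		return false
	default:
		return def // OptionalBoolUndefined: no user input, keep the default
	}
}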
-
-// ShortNameMode defines the mode of short-name resolution.
-//
-// The use of unqualified-search registries entails an ambiguity as it's
-// unclear from which registry a given image, referenced by a short name, may
-// be pulled.
-//
-// The ShortNameMode type defines how short names should resolve.
-type ShortNameMode int
-
-const (
- ShortNameModeInvalid ShortNameMode = iota
- // Use all configured unqualified-search registries without prompting
- // the user.
- ShortNameModeDisabled
- // If stdout and stdin are a TTY, prompt the user to select a configured
- // unqualified-search registry. Otherwise, use all configured
- // unqualified-search registries.
- //
- // Note that if only one unqualified-search registry is set, it will be
- // used without prompting.
- ShortNameModePermissive
- // Always prompt the user to select a configured unqualified-search
- // registry. Return an error if stdout or stdin is not a TTY, as
- // prompting isn't possible.
- //
- // Note that if only one unqualified-search registry is set, it will be
- // used without prompting.
- ShortNameModeEnforcing
-)
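For orientation, a rough sketch (not part of this patch) of how these modes could drive registry selection; isTTY, the prompt callback, and the standard errors import are assumptions for illustration:

// Sketch only: choose unqualified-search registries according to ShortNameMode.
func registriesToSearch(mode ShortNameMode, registries []string, isTTY bool,
	prompt func([]string) (string, error)) ([]string, error) {
	if len(registries) == 1 {
		return registries, nil // a single registry is used without prompting in all modes
	}
	switch mode {
	case ShortNameModeDisabled:
		return registries, nil // use all candidates, never prompt
	case ShortNameModePermissive:
		if !isTTY {
			return registries, nil // cannot prompt; fall back to all candidates
		}
	case ShortNameModeEnforcing:
		if !isTTY {
			return nil, errors.New("cannot prompt for a registry without a TTY")
		}
	default:
		return nil, errors.New("invalid short-name mode")
	}
	// Permissive or Enforcing with a TTY: ask the user to pick one registry.
	chosen, err := prompt(registries)
	if err != nil {
		return nil, err
	}
	return []string{chosen}, nil
}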
-
-// SystemContext allows parameterizing access to implicitly-accessed resources,
-// like configuration files in /etc and users' login state in their home directory.
-// Various components can share the same field only if their semantics are exactly
-// the same; if in doubt, add a new field.
-// It is always OK to pass nil instead of a SystemContext.
-type SystemContext struct {
- // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
- // Not used for any of the more specific path overrides available in this struct.
- // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
- // NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths,
- // and there is no need to worry about the environment.)
- // NOTE: This does NOT affect paths starting by $HOME.
- RootForImplicitAbsolutePaths string
-
- // === Global configuration overrides ===
- // If not "", overrides the system's default path for signature.Policy configuration.
- SignaturePolicyPath string
- // If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
- RegistriesDirPath string
- // Path to the system-wide registries configuration file
- SystemRegistriesConfPath string
- // Path to the system-wide registries configuration directory
- SystemRegistriesConfDirPath string
- // Path to the user-specific short-names configuration file
- UserShortNameAliasConfPath string
- // If set, short-name resolution in pkg/shortnames must follow the specified mode
- ShortNameMode *ShortNameMode
- // If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and
- // short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce
- // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
- // specific context.
- PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
- // If not "", overrides the default path for the registry authentication file, but only new format files
- AuthFilePath string
- // If not "", overrides the default path for the registry authentication file, but with the legacy format;
- // the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
- // but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
- // in locations other than the home dir; openshift components should then set this field in those cases;
- // this field is ignored if `AuthFilePath` is set (we favor the newer format);
- // only reading of this data is supported;
- LegacyFormatAuthFilePath string
- // If set, a path to a Docker-compatible "config.json" file containing credentials; no other files are processed.
- // This must not be set if AuthFilePath is set.
- // Only credentials and credential helpers in this file are processed, not any other configuration in this file.
- DockerCompatAuthFilePath string
- // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
- ArchitectureChoice string
- // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
- OSChoice string
- // If not "", overrides the use of detected ARM platform variant when choosing an image or verifying variant match.
- VariantChoice string
- // If not "", overrides the system's default directory containing a blob info cache.
- BlobInfoCacheDir string
- // Additional tags when creating or copying a docker-archive.
- DockerArchiveAdditionalTags []reference.NamedTagged
- // If not "", overrides the temporary directory to use for storing big files
- BigFilesTemporaryDir string
-
- // === OCI.Transport overrides ===
- // If not "", a directory containing a CA certificate (ending with ".crt"),
- // a client certificate (ending with ".cert") and a client certificate key
- // (ending with ".key") used when downloading OCI image layers.
- OCICertPath string
- // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
- OCIInsecureSkipTLSVerify bool
- // If not "", use a shared directory for storing blobs rather than within OCI layouts
- OCISharedBlobDirPath string
- // Accept uncompressed image layers for OCI image layouts
- OCIAcceptUncompressedLayers bool
-
- // === docker.Transport overrides ===
- // If not "", a directory containing a CA certificate (ending with ".crt"),
- // a client certificate (ending with ".cert") and a client certificate key
- // (ending with ".key") used when talking to a container registry.
- DockerCertPath string
- // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
- // Ignored if DockerCertPath is non-empty.
- DockerPerHostCertDirPath string
- // Allow contacting container registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
- DockerInsecureSkipTLSVerify OptionalBool
- // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
- // Ignored if DockerBearerRegistryToken is non-empty.
- DockerAuthConfig *DockerAuthConfig
- // if not "", the library uses this registry token to authenticate to the registry
- DockerBearerRegistryToken string
- // if not "", a User-Agent header is added to each request when contacting a registry.
- DockerRegistryUserAgent string
- // if true, a V1 ping attempt isn't done to give users a better error. Default is false.
- // Note that this field is used mainly to integrate containers/image into projectatomic/docker
- // in order not to break any existing docker integration tests.
- // Deprecated: The V1 container registry detection is no longer performed, so setting this flag has no effect.
- DockerDisableV1Ping bool
- // If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
- DockerDisableDestSchema1MIMETypes bool
- // If true, the physical pull source of docker transport images is logged at info level
- DockerLogMirrorChoice bool
- // Directory to use for OSTree temporary files
- //
- // Deprecated: The OSTree transport has been removed.
- OSTreeTmpDirPath string
- // If true, all blobs will have precomputed digests to ensure that layers which already exist on the registry are not uploaded.
- // Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
- // when the digest for a blob is unknown.
- DockerRegistryPushPrecomputeDigests bool
- // DockerProxyURL specifies the proxy configuration (like socks5://username:password@ip:port)
- DockerProxyURL *url.URL
- // DockerProxy is a function that determines the proxy URL for a given request URL.
- // If set, this takes precedence over DockerProxyURL. The function should return the proxy URL to use,
- // or nil if no proxy should be used for the given request.
- DockerProxy func(reqURL *url.URL) (*url.URL, error)
-
- // === docker/daemon.Transport overrides ===
- // A directory containing a CA certificate (ending with ".crt"),
- // a client certificate (ending with ".cert") and a client certificate key
- // (ending with ".key") used when talking to a Docker daemon.
- DockerDaemonCertPath string
- // The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed.
- DockerDaemonHost string
- // Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well.
- DockerDaemonInsecureSkipTLSVerify bool
-
- // === dir.Transport overrides ===
- // DirForceCompress compresses the image layers if set to true
- DirForceCompress bool
- // DirForceDecompress decompresses the image layers if set to true
- DirForceDecompress bool
-
- // CompressionFormat is the format to use for the compression of the blobs
- CompressionFormat *compression.Algorithm
- // CompressionLevel specifies what compression level is used
- CompressionLevel *int
-}
-
-// ProgressEvent is the type of events a progress reader can produce.
-// Warning: new event types may be added any time.
-type ProgressEvent uint - -const ( - // ProgressEventNewArtifact will be fired on progress reader setup - ProgressEventNewArtifact ProgressEvent = iota - - // ProgressEventRead indicates that the artifact download is currently in - // progress - ProgressEventRead - - // ProgressEventDone is fired when the data transfer has been finished for - // the specific artifact - ProgressEventDone - - // ProgressEventSkipped is fired when the artifact has been skipped because - // its already available at the destination - ProgressEventSkipped -) - -// ProgressProperties is used to pass information from the copy code to a monitor which -// can use the real-time information to produce output or react to changes. -type ProgressProperties struct { - // The event indicating what - Event ProgressEvent - - // The artifact which has been updated in this interval - Artifact BlobInfo - - // The currently downloaded size in bytes - // Increases from 0 to the final Artifact size - Offset uint64 - - // The additional offset which has been downloaded inside the last update - // interval. Will be reset after each ProgressEventRead event. - OffsetUpdate uint64 -} diff --git a/vendor/go.podman.io/image/v5/version/version.go b/vendor/go.podman.io/image/v5/version/version.go deleted file mode 100644 index 8dc2af7d8..000000000 --- a/vendor/go.podman.io/image/v5/version/version.go +++ /dev/null @@ -1,18 +0,0 @@ -package version - -import "fmt" - -const ( - // VersionMajor is for an API incompatible changes - VersionMajor = 5 - // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 39 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 1 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" -) - -// Version is the specification version that the package types support. -var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/go.podman.io/storage/AUTHORS b/vendor/go.podman.io/storage/AUTHORS deleted file mode 100644 index 129dd3969..000000000 --- a/vendor/go.podman.io/storage/AUTHORS +++ /dev/null @@ -1,1523 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Huslage -Aaron Lehmann -Aaron Welch -Abel Muiño -Abhijeet Kasurde -Abhinav Ajgaonkar -Abhishek Chanda -Abin Shahab -Adam Miller -Adam Singer -Aditi Rajagopal -Aditya -Adria Casas -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akihiro Suda -Al Tobey -alambike -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre González -Alexandru Sfirlogea -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Ali Dehghani -Allen Madsen -Allen Sun -almoehi -Alvin Richards -amangoel -Amen Belayneh -Amit Bakshi -Amit Krishnan -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anchal Agrawal -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Luzzardi -Andrea Turli -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew Munsell -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Ankush Agarwal -Anonmily -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -ArikaChen -Arnaud Porterie -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asbjørn Enge -averagehuman -Avi Das -Avi Miller -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Firshman -Ben Golub -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benoit Chesneau -Bernerd Schaefer -Bert Goethals -Bharath Thiruveedula -Bhiraj Butala -Bill W -bin liu -Blake Geno -Boaz Shuster -bobby abbott -boucher -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -buddhamagnet -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Carl Henrik Lunde -Carl X. Su -Carlos Alexandro Becker -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander G -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Hanxiao -cheney90 -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dituri -Chris Fordham -Chris Khoo -Chris McKinnel -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Wahl -Chris Weyl -chrismckinnel -Christian Berendt -Christian Böhme -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -ChristoperBiscardi -Christophe Mehay -Christophe Troestler -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. 
Costa -Clayton Coleman -Clinton Kitson -Coenraad Loubser -Colin Dunklau -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Cory Forsyth -cressie176 -Cristian Staretu -cristiano balducci -Cruceru Calin-Cristian -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Walsh -Dan Williams -Daniel Antlinger -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Hiltgen -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel YC Lin -Daniel Zhang -Daniel, Dao Quang Minh -Danny Berger -Danny Yates -Darren Coxall -Darren Shepherd -Darren Stahl -Dave Barboza -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Calavera -David Corking -David Cramer -David Currie -David Davis -David Gageot -David Gebler -David Lawrence -David Mackey -David Mat -David Mcanulty -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Xia -David Young -Davide Ceretti -Dawn Chen -dcylabs -decadent -deed02392 -Deng Guangxing -Deni Bertovic -Denis Gladkikh -Denis Ollier -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devvyn Murphy -Dharmit Shah -Dieter Reuter -Dima Stopel -Dimitri John Ledkov -Dimitry Andric -Dinesh Subhraveti -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitri Logvinenko -Dmitry Demeshchuk -Dmitry Gusev -Dmitry V. Krivenok -Dmitry Vorobev -Dolph Mathews -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donovan Jones -Doug Davis -Doug MacEachern -Doug Tangren -Dr Nic Williams -dragon788 -Dražen Lučanin -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivind Uggedal -Elan Ruusamäe -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Windisch -Eric Yang -Eric-Olivier Lamey -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Euan -Eugene Yakubovich -eugenkrizo -evalle -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Fabiano Rosas -Fabio Falci -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangyuan Gao <21551127@zju.edu.cn> -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Schindler -Ferenc Szabo -Fernando -Fero Volar -Filipe Brandenburger -Filipe Oliveira -fl0yd -Flavio Castelli -FLGMwt -Florian -Florian Klein -Florian Maier -Florian Weingarten -Florin Asavoaie -Francesc Campoy -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. 
Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -fy2462 -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Monroy -GabrielNicolasAvellaneda -Galen Sampson -Gareth Rushgrove -Garrett Barboza -Gaurav -gautam, prasanna -GennadySpb -Geoffrey Bachelet -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gianluca Borello -Gildas Cuisinier -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Thornton -grossws -grunny -gs11 -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Gurjeet Singh -Guruprasad -gwx296173 -Günter Zöchbauer -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harley Laue -Harold Cooper -Harry Zhang -He Simei -heartlock <21521209@zju.edu.cn> -Hector Castro -Henning Sprang -Hobofan -Hollie Teal -Hong Xu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -hyp3rdino -Hyzhou <1187766782@qq.com> -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Lee -Ian Main -Ian Truslove -Iavael -Icaro Seara -Igor Dolzhikov -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -ILYA Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Isaac Dupree -Isabel Jimenez -Isao Jonas -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -J Bruni -J. Nunn -Jack Danger Canty -Jacob Atzen -Jacob Edelman -Jake Champlin -Jake Moshenko -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nugent -James Turnbull -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Paul Calderone -Jean-Tiare Le Bigot -Jeff Anderson -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -jgeiger -Jhon Honce -Jian Zhang -jianbosun -Jilles Oldenbeuving -Jim Alateras -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -jimmyxian -Jinsoo Park -Jiri Popelka -Jiří Župka -jjy -jmzwcn -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Howard (VM) -John OBrien III -John Starks -John Tims -John Warwick -John Willis -Jon Wedaman -Jonas Pfenniger -Jonathan A. 
Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Dowland -Jonathan Lebon -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Joost Cassee -Jordan -Jordan Arentsen -Jordan Sissel -Jordan Williams -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Josh -Josh Hawn -Josh Poimboeuf -Josiah Kiehl -José Tomás Albornoz -JP -jrabbit -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Pervillé -Julio Montes -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justin Cormack -Justin Force -Justin Plock -Justin Simonelis -Justin Terry -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -Kai Blin -Kai Qiang Wu(Kennan) -Kamil Domański -kamjar gerami -Kanstantsin Shautsou -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -kayrus -Ke Xu -Keli Hu -Ken Cochrane -Ken ICHIKAWA -Kenfe-Mickael Laventure -Kenjiro Nakayama -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Clark -Kevin J. Lynagh -Kevin Menard -Kevin P. Kucharczyk -Kevin Shi -Kevin Wallace -Kevin Yap -kevinmeredith -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin Pelykh -Krasimir Georgiev -Kristian Haugene -Kristina Zabunova -krrg -Kun Zhang -Kunal Kushwaha -Kyle Conroy -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -lalyos -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Laszlo Meszaros -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee, Meng-Han -leeplay -Lei Jitang -Len Weincier -Lennie -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Marshall -Lewis Peckover -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liaoqingwei -limsy -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -LIZAO LI -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Louis Opter -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Luis Martínez de Bartolomé Izquierdo -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -lukemarsden -Lynda O'Leary -Lénaïc Huard -Ma Shimiao -Mabin -Madhav Puri -Madhu Venugopal -Mageee <21521230.zju.edu.cn> -Mahesh Tiyyagura -malnick -Malte Janduda -manchoz -Manfred Touron -Manfred Zabarauskas -mansinahar -Manuel Meurer -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcelo Salazar -Marco Hennings -Marcus Farkas -Marcus Linke -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark McGranaghan -Mark McKinstry -Mark West -Marko Mikulicic -Marko Tibold -Markus Fix -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Redmond -Mary Anthony -Masahito Zembutsu -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Le Marec - Pasquet -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt McCormick -Matt Moore -Matt Robenolt -Matthew Heon -Matthew Mayer -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Hauglustaine -mattymo -mattyw -Mauricio Garavaglia -mauriyouth -Max Shytikov -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mengdi Gao -Mert Yazıcıoğlu -Micah Zoltu -Michael A. 
Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Käufl -Michael Neale -Michael Prokop -Michael Scharf -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minar -Michaël Pailloncy -Michał Czeraszkiewicz -Michiel@unhosted -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Gaffney -Mike Goelzer -Mike Leone -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miloslav Trmač -mingqing -Mingzhen Feng -Mitch Capper -mlarcher -Mohammad Banikazemi -Mohammed Aaqib Ansari -Mohit Soni -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mqliang -Mrunal Patel -msabansal -mschurenko -muge -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Neal McBurnett -Nelson Chen -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Irvine -Nick Parker -Nick Payne -Nick Stenning -Nick Stinemates -Nicolas Borboën -Nicolas De loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolás Hock Isaza -Nigel Poulton -NikolaMandic -nikolas -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -noducks -Nolan Darilek -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -OddBloke -odk- -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -Ole Reifschneider -Oliver Neal -Olivier Gambier -Olle Jonsson -Oriol Francès -Otto Kekäläinen -oyld -ozlerhakan -paetling -pandrew -panticz -Paolo G. Giarrusso -Pascal Borreli -Pascal Hartig -Patrick Devine -Patrick Hemmer -Patrick Stapleton -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Hammond -Paul Jimenez -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Pavel Lobashov -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Peeyush Gupta -Peggy Li -Pei Su -Penghan Wang -perhapszzy@sina.com -Peter Bourgon -Peter Braden -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Phil -Phil Estes -Phil Spitler -Philip Monroe -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Prasanna Gautam -Prayag Verma -Przemek Hejman -pysqz -qg <1373319223@qq.com> -qhuang -Qiang Huang -qq690388648 <690388648@qq.com> -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Rajat Pandit -Rajdeep Dua -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon van Alteren -Ray Tsang -ReadmeCritic -Recursive Madman -Regan McCooey -Remi Rampin -Renato Riccieri Santos Zannon -resouer -rgstephens -Rhys Hiltner -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Stern -Robert Wallis -Roberto G. 
Hashioka -Robin Naundorf -Robin Schneider -Robin Speekenbrink -robpc -Rodolfo Carvalho -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Strashkin -Ron Smits -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Rozhnov Alexandr -rsmoorthy -Rudolph Gottesheim -Rui Lopes -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -RyanDeng -Rémy Greinhofer -s. rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Satnam Singh -satoru -Satoshi Amemiya -scaleoutsean -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean OMeara -Sean P. Kane -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sevki Hasirci -Shane Canon -Shane da Silva -shaunol -Shawn Landden -Shawn Siefkas -Shekhar Gulati -Sheng Yang -Shengbo Song -Shih-Yuan Lee -Shijiang Wei -Shishir Mahajan -shuai-z -Shuwei Hao -Sian Lerk Lau -sidharthamani -Silas Sewell -Simei He -Simon Eskildsen -Simon Leinen -Simon Taranto -Sindhu S -Sjoerd Langkemper -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -srinsriv -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephen Crosby -Stephen Day -Stephen Rust -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Subhajit Ghosh -Sujith Haridasan -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien Luttringer -Sébastien Stormacq -TAGOMORI Satoshi -tang0th -Tangi COLIN -Tatsuki Sugiura -Tatsushi Inagaki -Taylor Jones -tbonza -Ted M. Young -Tehmasp Chaudhri -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Grainger -Thomas Hansen -Thomas Leonard -Thomas LEVEIL -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Tianon Gravi -Tibor Vass -Tiffany Low -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonis Tiigi -Tonny Xu -Tony Daws -Tony Miller -toogley -Torstein Husebø -tpng -tracylihui <793912329@qq.com> -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -trishnaguha -Tristan Carel -Troy Denton -Tyler Brock -Tzu-Jung Lee -Tõnis Tiigi -Ulysse Carion -unknown -vagrant -Vaidas Jablonskis -Veres Lajos -vgeta -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -WANG Chao -Wang Xing -Ward Vandewege -WarheadsSE -Wayne Chang -Wei-Ting Kuo -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wes Morgan -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Riancho -William Thurston -WiseTrem -wlan0 -Wolfgang Powisch -wonderflow -xamyzhao -XiaoBing Jiang -Xiaoxu Chen -xiekeyang -Xinzi Zhou -Xiuming Chen -xlgao-zju -xuzhaokui -Yahya -YAMADA Tsuyoshi -Yan Feng -Yang Bai -yangshukui -Yasunori Mahata -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongzhi Pan -yorkie -Youcef YEKHLEF -Yuan Sun -yuchangchun -yuchengxia -Yurii Rashkovskii -yuzou -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -Zhenan Ye <21551168@zju.edu.cn> -Zhu Guihua -Zhuoyun Wei -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -zqh -Zuhayr Elahi -Zunayed Ali -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -搏通 diff --git a/vendor/go.podman.io/storage/LICENSE b/vendor/go.podman.io/storage/LICENSE deleted file mode 100644 index 8f3fee627..000000000 --- a/vendor/go.podman.io/storage/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.podman.io/storage/NOTICE b/vendor/go.podman.io/storage/NOTICE deleted file mode 100644 index 8a37c1c7b..000000000 --- a/vendor/go.podman.io/storage/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2016 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
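The deletions that follow drop go.podman.io/storage's internal helpers, starting with rawfilelock, a thin wrapper over the OS's advisory file-locking primitives (fcntl on Unix, LockFileEx on Windows). A minimal sketch of the same fcntl-based exclusive lock, assuming Linux and golang.org/x/sys/unix; the lock-file path is illustrative:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Open (or create) the lock file; O_CLOEXEC matches what rawfilelock does.
	fd, err := unix.Open("/tmp/example.lock", unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0o644)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd) // closing the descriptor also releases the lock

	lk := unix.Flock_t{
		Type:   int16(unix.F_WRLCK), // exclusive (write) lock; F_RDLCK for shared
		Whence: int16(unix.SEEK_SET),
		Start:  0,
		Len:    0, // 0 covers the whole file
	}
	// F_SETLKW blocks until the lock is granted; F_SETLK would fail immediately.
	if err := unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk); err != nil {
		panic(err)
	}
	fmt.Println("lock acquired")
}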
diff --git a/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock.go b/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock.go deleted file mode 100644 index 4f340ae3c..000000000 --- a/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock.go +++ /dev/null @@ -1,64 +0,0 @@ -package rawfilelock - -import ( - "os" -) - -type LockType byte - -const ( - ReadLock LockType = iota - WriteLock -) - -type FileHandle = fileHandle - -// OpenLock opens a file for locking -// WARNING: This is the underlying file locking primitive of the OS; -// because closing FileHandle releases the lock, it is not suitable for use -// if there is any chance of two concurrent goroutines attempting to use the same lock. -// Most users should use the higher-level operations from internal/staging_lockfile or pkg/lockfile. -func OpenLock(path string, readOnly bool) (FileHandle, error) { - flags := os.O_CREATE - if readOnly { - flags |= os.O_RDONLY - } else { - flags |= os.O_RDWR - } - - fd, err := openHandle(path, flags) - if err == nil { - return fd, nil - } - - return fd, &os.PathError{Op: "open", Path: path, Err: err} -} - -// TryLockFile attempts to lock a file handle -func TryLockFile(fd FileHandle, lockType LockType) error { - return lockHandle(fd, lockType, true) -} - -// LockFile locks a file handle -func LockFile(fd FileHandle, lockType LockType) error { - return lockHandle(fd, lockType, false) -} - -// UnlockAndClose unlocks and closes a file handle -func UnlockAndCloseHandle(fd FileHandle) { - unlockAndCloseHandle(fd) -} - -// CloseHandle closes a file handle without unlocking -// -// WARNING: This is a last-resort function for error handling only! -// On Unix systems, closing a file descriptor automatically releases any locks, -// so "closing without unlocking" is impossible. This function will release -// the lock as a side effect of closing the file. -// -// This function should only be used in error paths where the lock state -// is already corrupted or when giving up on lock management entirely. -// Normal code should use UnlockAndCloseHandle instead. 
-func CloseHandle(fd FileHandle) { - closeHandle(fd) -} diff --git a/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_unix.go b/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_unix.go deleted file mode 100644 index 268554076..000000000 --- a/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_unix.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build !windows - -package rawfilelock - -import ( - "time" - - "golang.org/x/sys/unix" -) - -type fileHandle uintptr - -func openHandle(path string, mode int) (fileHandle, error) { - mode |= unix.O_CLOEXEC - fd, err := unix.Open(path, mode, 0o644) - return fileHandle(fd), err -} - -func lockHandle(fd fileHandle, lType LockType, nonblocking bool) error { - fType := unix.F_RDLCK - if lType != ReadLock { - fType = unix.F_WRLCK - } - lk := unix.Flock_t{ - Type: int16(fType), - Whence: int16(unix.SEEK_SET), - Start: 0, - Len: 0, - } - cmd := unix.F_SETLKW - if nonblocking { - cmd = unix.F_SETLK - } - for { - err := unix.FcntlFlock(uintptr(fd), cmd, &lk) - if err == nil || nonblocking { - return err - } - time.Sleep(10 * time.Millisecond) - } -} - -func unlockAndCloseHandle(fd fileHandle) { - unix.Close(int(fd)) -} - -func closeHandle(fd fileHandle) { - unix.Close(int(fd)) -} diff --git a/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_windows.go b/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_windows.go deleted file mode 100644 index 9c0d692f8..000000000 --- a/vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -//go:build windows - -package rawfilelock - -import ( - "golang.org/x/sys/windows" -) - -const ( - reserved = 0 - allBytes = ^uint32(0) -) - -type fileHandle windows.Handle - -func openHandle(path string, mode int) (fileHandle, error) { - mode |= windows.O_CLOEXEC - fd, err := windows.Open(path, mode, windows.S_IWRITE) - return fileHandle(fd), err -} - -func lockHandle(fd fileHandle, lType LockType, nonblocking bool) error { - flags := 0 - if lType != ReadLock { - flags = windows.LOCKFILE_EXCLUSIVE_LOCK - } - if nonblocking { - flags |= windows.LOCKFILE_FAIL_IMMEDIATELY - } - ol := new(windows.Overlapped) - if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil { - if nonblocking { - return err - } - panic(err) - } - return nil -} - -func unlockAndCloseHandle(fd fileHandle) { - ol := new(windows.Overlapped) - windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol) - closeHandle(fd) -} - -func closeHandle(fd fileHandle) { - windows.Close(windows.Handle(fd)) -} diff --git a/vendor/go.podman.io/storage/pkg/fileutils/exists_freebsd.go b/vendor/go.podman.io/storage/pkg/fileutils/exists_freebsd.go deleted file mode 100644 index eeecc9f75..000000000 --- a/vendor/go.podman.io/storage/pkg/fileutils/exists_freebsd.go +++ /dev/null @@ -1,38 +0,0 @@ -package fileutils - -import ( - "errors" - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -// Exists checks whether a file or directory exists at the given path. -// If the path is a symlink, the symlink is followed. -func Exists(path string) error { - // It uses unix.Faccessat which is a faster operation compared to os.Stat for - // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0) - if err != nil { - return &os.PathError{Op: "faccessat", Path: path, Err: err} - } - return nil -} - -// Lexists checks whether a file or directory exists at the given path. 
-// If the path is a symlink, the symlink itself is checked. -func Lexists(path string) error { - // FreeBSD before 15.0 does not support the AT_SYMLINK_NOFOLLOW flag for - // faccessat. In this case, the call to faccessat will return EINVAL and - // we fall back to using Lstat. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW) - if err != nil { - if errors.Is(err, syscall.EINVAL) { - _, err = os.Lstat(path) - return err - } - return &os.PathError{Op: "faccessat", Path: path, Err: err} - } - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/fileutils/exists_unix.go b/vendor/go.podman.io/storage/pkg/fileutils/exists_unix.go deleted file mode 100644 index 04cfafcd5..000000000 --- a/vendor/go.podman.io/storage/pkg/fileutils/exists_unix.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !windows && !freebsd - -package fileutils - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// Exists checks whether a file or directory exists at the given path. -// If the path is a symlink, the symlink is followed. -func Exists(path string) error { - // It uses unix.Faccessat which is a faster operation compared to os.Stat for - // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS) - if err != nil { - return &os.PathError{Op: "faccessat", Path: path, Err: err} - } - return nil -} - -// Lexists checks whether a file or directory exists at the given path. -// If the path is a symlink, the symlink itself is checked. -func Lexists(path string) error { - // It uses unix.Faccessat which is a faster operation compared to os.Stat for - // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW|unix.AT_EACCESS) - if err != nil { - return &os.PathError{Op: "faccessat", Path: path, Err: err} - } - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/fileutils/exists_windows.go b/vendor/go.podman.io/storage/pkg/fileutils/exists_windows.go deleted file mode 100644 index 355cf0464..000000000 --- a/vendor/go.podman.io/storage/pkg/fileutils/exists_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package fileutils - -import ( - "os" -) - -// Exists checks whether a file or directory exists at the given path. -func Exists(path string) error { - _, err := os.Stat(path) - return err -} - -// Lexists checks whether a file or directory exists at the given path, without -// resolving symlinks -func Lexists(path string) error { - _, err := os.Lstat(path) - return err -} diff --git a/vendor/go.podman.io/storage/pkg/fileutils/fileutils.go b/vendor/go.podman.io/storage/pkg/fileutils/fileutils.go deleted file mode 100644 index 434979825..000000000 --- a/vendor/go.podman.io/storage/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,369 +0,0 @@ -package fileutils - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "strings" - "text/scanner" - - "github.com/sirupsen/logrus" -) - -// PatternMatcher allows checking paths against a list of patterns -type PatternMatcher struct { - patterns []*Pattern - exclusions bool -} - -// NewPatternMatcher creates a new matcher object for specific patterns that can -// be used later to match against patterns against paths -func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { - pm := &PatternMatcher{ - patterns: make([]*Pattern, 0, len(patterns)), - } - for _, p := range patterns { - // Eliminate leading and trailing whitespace. 
- p = strings.TrimSpace(p) - if p == "" { - continue - } - p = filepath.Clean(p) - newp := &Pattern{} - if p[0] == '!' { - if len(p) == 1 { - return nil, errors.New("illegal exclusion pattern: \"!\"") - } - newp.exclusion = true - p = strings.TrimPrefix(filepath.Clean(p[1:]), "/") - pm.exclusions = true - } - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(p, "."); err != nil { - return nil, err - } - newp.cleanedPattern = p - pm.patterns = append(pm.patterns, newp) - } - return pm, nil -} - -// Deprecated: Please use the `MatchesResult` method instead. -// Matches matches path against all the patterns. Matches is not safe to be -// called concurrently -func (pm *PatternMatcher) Matches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - - for _, pattern := range pm.patterns { - negative := false - - if pattern.exclusion { - negative = true - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if match { - matched = !negative - } - } - - if matched { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return matched, nil -} - -type MatchResult struct { - isMatched bool - matches, excludes uint -} - -// Excludes returns true if the overall result is matched -func (m *MatchResult) IsMatched() bool { - return m.isMatched -} - -// Excludes returns the amount of matches of an MatchResult -func (m *MatchResult) Matches() uint { - return m.matches -} - -// Excludes returns the amount of excludes of an MatchResult -func (m *MatchResult) Excludes() uint { - return m.excludes -} - -// MatchesResult verifies the provided filepath against all patterns. -// It returns the `*MatchResult` result for the patterns on success, otherwise -// an error. This method is not safe to be called concurrently. -func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) { - file = filepath.FromSlash(file) - res = &MatchResult{false, 0, 0} - - for _, pattern := range pm.patterns { - negative := false - - if pattern.exclusion { - negative = true - } - - match, err := pattern.match(file) - if err != nil { - return nil, err - } - - if match { - res.isMatched = !negative - if negative { - res.excludes++ - } else { - res.matches++ - } - } - } - - if res.matches > 0 { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return res, nil -} - -// IsMatch verifies the provided filepath against all patterns and returns true -// if it matches. A match is valid if the last match is a positive one. -// It returns an error on failure and is not safe to be called concurrently. -func (pm *PatternMatcher) IsMatch(file string) (matched bool, err error) { - res, err := pm.MatchesResult(file) - if err != nil { - return false, err - } - return res.isMatched, nil -} - -// Exclusions returns true if any of the patterns define exclusions -func (pm *PatternMatcher) Exclusions() bool { - return pm.exclusions -} - -// Patterns returns array of active patterns -func (pm *PatternMatcher) Patterns() []*Pattern { - return pm.patterns -} - -// Pattern defines a single regexp used to filter file paths. 
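For context on the matcher being removed: patterns are evaluated in order and the last match wins, with a leading "!" re-including a previously excluded path (the same semantics as .dockerignore). A minimal usage sketch against the deleted API, using its pre-removal import path:

package main

import (
	"fmt"

	"go.podman.io/storage/pkg/fileutils"
)

func main() {
	// Last match wins: docs/** excludes the tree, !docs/README.md re-includes one file.
	pm, err := fileutils.NewPatternMatcher([]string{"docs/**", "!docs/README.md"})
	if err != nil {
		panic(err)
	}
	for _, f := range []string{"docs/guide.md", "docs/README.md", "main.go"} {
		excluded, err := pm.IsMatch(f)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s excluded=%v\n", f, excluded) // true, false, false
	}
}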
-type Pattern struct { - cleanedPattern string - regexp *regexp.Regexp - exclusion bool -} - -func (p *Pattern) String() string { - return p.cleanedPattern -} - -// Exclusion returns true if this pattern defines exclusion -func (p *Pattern) Exclusion() bool { - return p.exclusion -} - -func (p *Pattern) match(path string) (bool, error) { - if p.regexp == nil { - if err := p.compile(); err != nil { - return false, filepath.ErrBadPattern - } - } - - b := p.regexp.MatchString(path) - - return b, nil -} - -func (p *Pattern) compile() error { - regStr := "^" - pattern := p.cleanedPattern - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - sl := string(os.PathSeparator) - escSL := sl - const bs = `\` - if sl == bs { - escSL += bs - } - - for scan.Peek() != scanner.EOF { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - regStr += ".*" - } else { - // is "**" - // Note that this allows for any # of /'s (even 0) because - // the .* will eat everything, even /'s - regStr += "(.*" + escSL + ")?" - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - } else if ch == '.' || ch == '$' { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += bs + string(ch) - } else if ch == '\\' { - // escape next char. - if sl == bs { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += bs + string(scan.Next()) - } else { - return filepath.ErrBadPattern - } - } else { - regStr += string(ch) - } - } - - regStr += "(" + escSL + ".*)?$" - - re, err := regexp.Compile(regStr) - if err != nil { - return err - } - - p.regexp = re - return nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - pm, err := NewPatternMatcher(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.IsMatch(file) -} - -// CopyFile copies from src to dst until either EOF is reached -// on src or an error occurs. It verifies src exists and removes -// the dst if it exists. -func CopyFile(src, dst string) (int64, error) { - cleanSrc := filepath.Clean(src) - cleanDst := filepath.Clean(dst) - if cleanSrc == cleanDst { - return 0, nil - } - sf, err := os.Open(cleanSrc) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(cleanDst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. 
-func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %w", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %w", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %w", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// ReadSymlinkedPath returns the target directory of a symlink. -// The target of the symbolic link can be a file and a directory. -func ReadSymlinkedPath(path string) (realPath string, err error) { - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) - } - if err := Exists(realPath); err != nil { - return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err) - } - return realPath, nil -} - -// CreateIfNotExists creates a file or a directory only if it does not already exist. -func CreateIfNotExists(path string, isDir bool) error { - if err := Exists(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0o755) - } - if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0o755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/fileutils/fileutils_darwin.go b/vendor/go.podman.io/storage/pkg/fileutils/fileutils_darwin.go deleted file mode 100644 index ccd648fac..000000000 --- a/vendor/go.podman.io/storage/pkg/fileutils/fileutils_darwin.go +++ /dev/null @@ -1,27 +0,0 @@ -package fileutils - -import ( - "os" - "os/exec" - "strconv" - "strings" -) - -// GetTotalUsedFds returns the number of used File Descriptors by -// executing `lsof -p PID` -func GetTotalUsedFds() int { - pid := os.Getpid() - - cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) - - output, err := cmd.CombinedOutput() - if err != nil { - return -1 - } - - outputStr := strings.TrimSpace(string(output)) - - fds := strings.Split(outputStr, "\n") - - return len(fds) - 1 -} diff --git a/vendor/go.podman.io/storage/pkg/fileutils/fileutils_solaris.go b/vendor/go.podman.io/storage/pkg/fileutils/fileutils_solaris.go deleted file mode 100644 index 0f2cb7ab9..000000000 --- a/vendor/go.podman.io/storage/pkg/fileutils/fileutils_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. -// On Solaris these limits are per process and not systemwide -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/go.podman.io/storage/pkg/fileutils/fileutils_unix.go b/vendor/go.podman.io/storage/pkg/fileutils/fileutils_unix.go deleted file mode 100644 index 3cb250c5a..000000000 --- a/vendor/go.podman.io/storage/pkg/fileutils/fileutils_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build linux || freebsd - -package fileutils - -import ( - "fmt" - "os" - - "github.com/sirupsen/logrus" -) - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. 
-func GetTotalUsedFds() int { - if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("%v", err) - } else { - return len(fds) - } - return -1 -} diff --git a/vendor/go.podman.io/storage/pkg/fileutils/fileutils_windows.go b/vendor/go.podman.io/storage/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 5ec21cace..000000000 --- a/vendor/go.podman.io/storage/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/go.podman.io/storage/pkg/fileutils/reflink_linux.go b/vendor/go.podman.io/storage/pkg/fileutils/reflink_linux.go deleted file mode 100644 index 9f5c6c90b..000000000 --- a/vendor/go.podman.io/storage/pkg/fileutils/reflink_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -package fileutils - -import ( - "io" - "os" - - "golang.org/x/sys/unix" -) - -// ReflinkOrCopy attempts to reflink the source to the destination fd. -// If reflinking fails or is unsupported, it falls back to io.Copy(). -func ReflinkOrCopy(src, dst *os.File) error { - err := unix.IoctlFileClone(int(dst.Fd()), int(src.Fd())) - if err == nil { - return nil - } - - _, err = io.Copy(dst, src) - return err -} diff --git a/vendor/go.podman.io/storage/pkg/fileutils/reflink_unsupported.go b/vendor/go.podman.io/storage/pkg/fileutils/reflink_unsupported.go deleted file mode 100644 index c0a30e670..000000000 --- a/vendor/go.podman.io/storage/pkg/fileutils/reflink_unsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build !linux - -package fileutils - -import ( - "io" - "os" -) - -// ReflinkOrCopy attempts to reflink the source to the destination fd. -// If reflinking fails or is unsupported, it falls back to io.Copy(). -func ReflinkOrCopy(src, dst *os.File) error { - _, err := io.Copy(dst, src) - return err -} diff --git a/vendor/go.podman.io/storage/pkg/homedir/homedir.go b/vendor/go.podman.io/storage/pkg/homedir/homedir.go deleted file mode 100644 index 7eb63b67a..000000000 --- a/vendor/go.podman.io/storage/pkg/homedir/homedir.go +++ /dev/null @@ -1,37 +0,0 @@ -package homedir - -import ( - "errors" - "os" - "path/filepath" -) - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetCacheHome returns XDG_CACHE_HOME. -// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. 
-// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetCacheHome() (string, error) { - if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { - return xdgCacheHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CACHE_HOME or HOME") - } - return filepath.Join(home, ".cache"), nil -} diff --git a/vendor/go.podman.io/storage/pkg/homedir/homedir_unix.go b/vendor/go.podman.io/storage/pkg/homedir/homedir_unix.go deleted file mode 100644 index b088c2a85..000000000 --- a/vendor/go.podman.io/storage/pkg/homedir/homedir_unix.go +++ /dev/null @@ -1,182 +0,0 @@ -//go:build !windows - -package homedir - -// Copyright 2013-2018 Docker, Inc. -// NOTE: this package has originally been copied from github.com/docker/docker. - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - - "github.com/sirupsen/logrus" - "go.podman.io/storage/pkg/unshare" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. -func Get() string { - homedir, _ := unshare.HomeDir() - return homedir -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "~" -} - -// StickRuntimeDirContents sets the sticky bit on files that are under -// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. -// -// StickyRuntimeDir returns slice of sticked files. -// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func StickRuntimeDirContents(files []string) ([]string, error) { - runtimeDir, err := GetRuntimeDir() - if err != nil { - // ignore error if runtimeDir is empty - return nil, nil //nolint: nilerr - } - runtimeDir, err = filepath.Abs(runtimeDir) - if err != nil { - return nil, err - } - var sticked []string - for _, f := range files { - f, err = filepath.Abs(f) - if err != nil { - return sticked, err - } - if strings.HasPrefix(f, runtimeDir+"/") { - if err = stick(f); err != nil { - return sticked, err - } - sticked = append(sticked, f) - } - } - return sticked, nil -} - -func stick(f string) error { - st, err := os.Stat(f) - if err != nil { - return err - } - m := st.Mode() - m |= os.ModeSticky - return os.Chmod(f, m) -} - -var ( - rootlessConfigHomeDirError error - rootlessConfigHomeDirOnce sync.Once - rootlessConfigHomeDir string - rootlessRuntimeDirOnce sync.Once - rootlessRuntimeDir string -) - -// isWriteableOnlyByOwner checks that the specified permission mask allows write -// access only to the owner. -func isWriteableOnlyByOwner(perm os.FileMode) bool { - return (perm & 0o722) == 0o700 -} - -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. 
-// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - rootlessConfigHomeDirOnce.Do(func() { - cfgHomeDir := os.Getenv("XDG_CONFIG_HOME") - if cfgHomeDir == "" { - home := Get() - resolvedHome, err := filepath.EvalSymlinks(home) - if err != nil { - rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err) - return - } - tmpDir := filepath.Join(resolvedHome, ".config") - _ = os.MkdirAll(tmpDir, 0o700) - st, err := os.Stat(tmpDir) - if err != nil { - rootlessConfigHomeDirError = err - return - } else if int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() { - cfgHomeDir = tmpDir - } else { - rootlessConfigHomeDirError = fmt.Errorf("path %q exists and it is not owned by the current user", tmpDir) - return - } - } - rootlessConfigHomeDir = cfgHomeDir - }) - - return rootlessConfigHomeDir, rootlessConfigHomeDirError -} - -// GetRuntimeDir returns a directory suitable to store runtime files. -// The function will try to use the XDG_RUNTIME_DIR env variable if it is set. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable -// directory for the current user. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - var rootlessRuntimeDirError error - - rootlessRuntimeDirOnce.Do(func() { - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - - if runtimeDir != "" { - rootlessRuntimeDir, rootlessRuntimeDirError = filepath.EvalSymlinks(runtimeDir) - return - } - - uid := strconv.Itoa(unshare.GetRootlessUID()) - if runtimeDir == "" { - tmpDir := filepath.Join("/run", "user", uid) - if err := os.MkdirAll(tmpDir, 0o700); err != nil { - logrus.Debug(err) - } - st, err := os.Lstat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { - runtimeDir = tmpDir - } - } - if runtimeDir == "" { - tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("storage-run-%s", uid)) - if err := os.MkdirAll(tmpDir, 0o700); err != nil { - logrus.Debug(err) - } - st, err := os.Lstat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { - runtimeDir = tmpDir - } else { - rootlessRuntimeDirError = fmt.Errorf("path %q exists and it is not writeable only by the current user", tmpDir) - return - } - } - rootlessRuntimeDir = runtimeDir - }) - - return rootlessRuntimeDir, rootlessRuntimeDirError -} diff --git a/vendor/go.podman.io/storage/pkg/homedir/homedir_windows.go b/vendor/go.podman.io/storage/pkg/homedir/homedir_windows.go deleted file mode 100644 index a76610f90..000000000 --- a/vendor/go.podman.io/storage/pkg/homedir/homedir_windows.go +++ /dev/null @@ -1,61 +0,0 @@ -package homedir - -// Copyright 2013-2018 Docker, Inc. -// NOTE: this package has originally been copied from github.com/docker/docker. - -import ( - "os" - "path/filepath" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. 
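Both the Unix and Windows variants of this package follow the same resolution pattern: consult the platform's environment variable first, then fall back to a home-relative default. A minimal sketch of that pattern for the config directory; configHome is a hypothetical stand-in for illustration, not this package's API:

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// configHome is a hypothetical stand-in showing the lookup order:
// prefer $XDG_CONFIG_HOME, then fall back to $HOME/.config.
func configHome() (string, error) {
	if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
		return xdg, nil
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
	}
	return filepath.Join(home, ".config"), nil
}

func main() {
	dir, err := configHome()
	if err != nil {
		panic(err)
	}
	fmt.Println(dir)
}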
-func Get() string { - home := os.Getenv(Key()) - if home != "" { - return home - } - home, _ = os.UserHomeDir() - return home -} - -// GetConfigHome returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func GetConfigHome() (string, error) { - return filepath.Join(Get(), ".config"), nil -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} - -// StickRuntimeDirContents is a no-op on Windows -func StickRuntimeDirContents(files []string) ([]string, error) { - return nil, nil -} - -// GetRuntimeDir returns a directory suitable to store runtime files. -// The function will try to use the XDG_RUNTIME_DIR env variable if it is set. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable -// directory for the current user. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - data, err := GetDataHome() - if err != nil { - return "", err - } - runtimeDir := filepath.Join(data, "containers", "storage") - return runtimeDir, nil -} diff --git a/vendor/go.podman.io/storage/pkg/idtools/idtools.go b/vendor/go.podman.io/storage/pkg/idtools/idtools.go deleted file mode 100644 index 6fcba9b33..000000000 --- a/vendor/go.podman.io/storage/pkg/idtools/idtools.go +++ /dev/null @@ -1,620 +0,0 @@ -package idtools - -import ( - "bufio" - "errors" - "fmt" - "io/fs" - "os" - "os/user" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "syscall" - - "github.com/sirupsen/logrus" - "go.podman.io/storage/pkg/system" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" - ContainersOverrideXattr = "user.containers.override_stat" -) - -// MkdirAllAs creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -// Deprecated: Use MkdirAllAndChown -func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) -} - -// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. 
-// If the directory already exists, this function still changes ownership -// Deprecated: Use MkdirAndChown with a IDPair -func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) -} - -// MkdirAllAndChown creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, true, true) -} - -// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, false, true) -} - -// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, true, false) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - var err error - if len(uidMap) == 1 && uidMap[0].Size == 1 { - uid = uidMap[0].HostID - } else { - uid, err = RawToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - } - if len(gidMap) == 1 && gidMap[0].Size == 1 { - gid = gidMap[0].HostID - } else { - gid, err = RawToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - } - return uid, gid, nil -} - -// RawToContainer takes an id mapping, and uses it to translate a host ID to -// the remapped ID. If no map is provided, then the translation assumes a -// 1-to-1 mapping and returns the passed in id. -// -// If you wish to map a (uid,gid) combination you should use the corresponding -// IDMappings methods, which ensure that you are mapping the correct ID against -// the correct mapping. -func RawToContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID) -} - -// RawToHost takes an id mapping and a remapped ID, and translates the ID to -// the mapped host ID. If no map is provided, then the translation assumes a -// 1-to-1 mapping and returns the passed in id. -// -// If you wish to map a (uid,gid) combination you should use the corresponding -// IDMappings methods, which ensure that you are mapping the correct ID against -// the correct mapping. 
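The translation just documented is a range lookup: a container ID inside [ContainerID, ContainerID+Size) maps to HostID + (contID - ContainerID). A self-contained sketch of that arithmetic with an illustrative 65536-wide mapping:

package main

import "fmt"

// IDMap mirrors the type defined above.
type IDMap struct {
	ContainerID int
	HostID      int
	Size        int
}

// rawToHost reimplements the lookup: find the range containing contID and
// offset into the corresponding host range.
func rawToHost(contID int, idMap []IDMap) (int, error) {
	for _, m := range idMap {
		if contID >= m.ContainerID && contID <= m.ContainerID+m.Size-1 {
			return m.HostID + (contID - m.ContainerID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}

func main() {
	maps := []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	fmt.Println(rawToHost(0, maps))    // 100000 <nil>
	fmt.Println(rawToHost(1000, maps)) // 101000 <nil>
}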
-func RawToHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID) -} - -// IDPair is a UID and GID pair -type IDPair struct { - UID int - GID int -} - -// IDMappings contains a mappings of UIDs and GIDs -type IDMappings struct { - uids []IDMap - gids []IDMap -} - -// NewIDMappings takes a requested user and group name and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func NewIDMappings(username, groupname string) (*IDMappings, error) { - subuidRanges, err := readSubuid(username) - if err != nil { - return nil, err - } - subgidRanges, err := readSubgid(groupname) - if err != nil { - return nil, err - } - if len(subuidRanges) == 0 { - return nil, fmt.Errorf("no subuid ranges found for user %q in %s", username, subuidFileName) - } - if len(subgidRanges) == 0 { - return nil, fmt.Errorf("no subgid ranges found for group %q in %s", groupname, subgidFileName) - } - - return &IDMappings{ - uids: createIDMap(subuidRanges), - gids: createIDMap(subgidRanges), - }, nil -} - -// NewIDMappingsFromMaps creates a new mapping from two slices -// Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { - return &IDMappings{uids: uids, gids: gids} -} - -// RootPair returns a uid and gid pair for the root user. The error is ignored -// because a root user always exists, and the defaults are correct when the uid -// and gid maps are empty. -func (i *IDMappings) RootPair() IDPair { - uid, gid, _ := GetRootUIDGID(i.uids, i.gids) - return IDPair{UID: uid, GID: gid} -} - -// ToHost returns the host UID and GID for the container uid, gid. -func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { - var err error - var target IDPair - - target.UID, err = RawToHost(pair.UID, i.uids) - if err != nil { - return target, err - } - - target.GID, err = RawToHost(pair.GID, i.gids) - return target, err -} - -var ( - overflowUIDOnce sync.Once - overflowGIDOnce sync.Once - overflowUID int - overflowGID int -) - -// getOverflowUID returns the UID mapped to the overflow user -func getOverflowUID() int { - overflowUIDOnce.Do(func() { - // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present - overflowUID = 65534 - if content, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil { - if tmp, err := strconv.Atoi(string(content)); err == nil { - overflowUID = tmp - } - } - }) - return overflowUID -} - -// getOverflowGID returns the GID mapped to the overflow user -func getOverflowGID() int { - overflowGIDOnce.Do(func() { - // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present - overflowGID = 65534 - if content, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil { - if tmp, err := strconv.Atoi(string(content)); err == nil { - overflowGID = tmp - } - } - }) - return overflowGID -} - -// ToHost returns the host UID and GID for the container uid, gid. -// Remapping is only performed if the ids aren't already the remapped root ids -// If the mapping is not possible because the target ID is not mapped into -// the namespace, then the overflow ID is used. 
-func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) { - var err error - target := i.RootPair() - - if pair.UID != target.UID { - target.UID, err = RawToHost(pair.UID, i.uids) - if err != nil { - target.UID = getOverflowUID() - logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID) - } - } - - if pair.GID != target.GID { - target.GID, err = RawToHost(pair.GID, i.gids) - if err != nil { - target.GID = getOverflowGID() - logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID) - } - } - return target, nil -} - -// ToContainer returns the container UID and GID for the host uid and gid -func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { - uid, err := RawToContainer(pair.UID, i.uids) - if err != nil { - return -1, -1, err - } - gid, err := RawToContainer(pair.GID, i.gids) - return uid, gid, err -} - -// Empty returns true if there are no id mappings -func (i *IDMappings) Empty() bool { - return len(i.uids) == 0 && len(i.gids) == 0 -} - -// UIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) UIDs() []IDMap { - return i.uids -} - -// GIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) GIDs() []IDMap { - return i.gids -} - -func createIDMap(subidRanges ranges) []IDMap { - idMap := []IDMap{} - - // sort the ranges by lowest ID first - sort.Sort(subidRanges) - containerID := 0 - for _, idrange := range subidRanges { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: idrange.Start, - Size: idrange.Length, - }) - containerID = containerID + idrange.Length - } - return idMap -} - -// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) -// and return all found ranges for a specified username. 
If the special value -// "ALL" is supplied for username, then all ranges in the file will be returned -func parseSubidFile(path, username string) (ranges, error) { - var ( - rangeList ranges - uidstr string - ) - if u, err := user.Lookup(username); err == nil { - uidstr = u.Uid - } - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - if err := s.Err(); err != nil { - return rangeList, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" || strings.HasPrefix(text, "#") { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") { - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - return rangeList, nil -} - -func checkChownErr(err error, name string, uid, gid int) error { - var e *os.PathError - if errors.As(err, &e) && e.Err == syscall.EINVAL { - return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err) - } - return err -} - -// Stat contains file states that can be overridden with ContainersOverrideXattr. -type Stat struct { - IDs IDPair - Mode os.FileMode - Major int - Minor int -} - -// FormatContainersOverrideXattr will format the given uid, gid, and mode into a string -// that can be used as the value for the ContainersOverrideXattr xattr. -func FormatContainersOverrideXattr(uid, gid, mode int) string { - return FormatContainersOverrideXattrDevice(uid, gid, fs.FileMode(mode), 0, 0) -} - -// FormatContainersOverrideXattrDevice will format the given uid, gid, and mode into a string -// that can be used as the value for the ContainersOverrideXattr xattr. For devices, it also -// needs the major and minor numbers. -func FormatContainersOverrideXattrDevice(uid, gid int, mode fs.FileMode, major, minor int) string { - typ := "" - switch mode & os.ModeType { - case os.ModeDir: - typ = "dir" - case os.ModeSymlink: - typ = "symlink" - case os.ModeNamedPipe: - typ = "pipe" - case os.ModeSocket: - typ = "socket" - case os.ModeDevice: - typ = fmt.Sprintf("block-%d-%d", major, minor) - case os.ModeDevice | os.ModeCharDevice: - typ = fmt.Sprintf("char-%d-%d", major, minor) - default: - typ = "file" - } - unixMode := mode & os.ModePerm - if mode&os.ModeSetuid != 0 { - unixMode |= 0o4000 - } - if mode&os.ModeSetgid != 0 { - unixMode |= 0o2000 - } - if mode&os.ModeSticky != 0 { - unixMode |= 0o1000 - } - return fmt.Sprintf("%d:%d:%04o:%s", uid, gid, unixMode, typ) -} - -// GetContainersOverrideXattr will get and decode ContainersOverrideXattr. -func GetContainersOverrideXattr(path string) (Stat, error) { - xstat, err := system.Lgetxattr(path, ContainersOverrideXattr) - if err != nil { - return Stat{}, err - } - return parseOverrideXattr(xstat) // This will fail if (xstat, err) == (nil, nil), i.e. the xattr does not exist. 
-} - -func parseOverrideXattr(xstat []byte) (Stat, error) { - var stat Stat - attrs := strings.Split(string(xstat), ":") - if len(attrs) < 3 { - return stat, fmt.Errorf("the number of parts in %s is less than 3", - ContainersOverrideXattr) - } - - value, err := strconv.ParseUint(attrs[0], 10, 32) - if err != nil { - return stat, fmt.Errorf("failed to parse UID: %w", err) - } - stat.IDs.UID = int(value) - - value, err = strconv.ParseUint(attrs[1], 10, 32) - if err != nil { - return stat, fmt.Errorf("failed to parse GID: %w", err) - } - stat.IDs.GID = int(value) - - value, err = strconv.ParseUint(attrs[2], 8, 32) - if err != nil { - return stat, fmt.Errorf("failed to parse mode: %w", err) - } - stat.Mode = os.FileMode(value) & os.ModePerm - if value&0o1000 != 0 { - stat.Mode |= os.ModeSticky - } - if value&0o2000 != 0 { - stat.Mode |= os.ModeSetgid - } - if value&0o4000 != 0 { - stat.Mode |= os.ModeSetuid - } - - if len(attrs) > 3 { - typ := attrs[3] - if strings.HasPrefix(typ, "file") { - } else if strings.HasPrefix(typ, "dir") { - stat.Mode |= os.ModeDir - } else if strings.HasPrefix(typ, "symlink") { - stat.Mode |= os.ModeSymlink - } else if strings.HasPrefix(typ, "pipe") { - stat.Mode |= os.ModeNamedPipe - } else if strings.HasPrefix(typ, "socket") { - stat.Mode |= os.ModeSocket - } else if strings.HasPrefix(typ, "block") { - stat.Mode |= os.ModeDevice - stat.Major, stat.Minor, err = parseDevice(typ) - if err != nil { - return stat, err - } - } else if strings.HasPrefix(typ, "char") { - stat.Mode |= os.ModeDevice | os.ModeCharDevice - stat.Major, stat.Minor, err = parseDevice(typ) - if err != nil { - return stat, err - } - } else { - return stat, fmt.Errorf("invalid file type %s", typ) - } - } - return stat, nil -} - -func parseDevice(typ string) (int, int, error) { - parts := strings.Split(typ, "-") - // If there are more than 3 parts, just ignore them to be forward compatible - if len(parts) < 3 { - return 0, 0, fmt.Errorf("invalid device type %s", typ) - } - if parts[0] != "block" && parts[0] != "char" { - return 0, 0, fmt.Errorf("invalid device type %s", typ) - } - major, err := strconv.Atoi(parts[1]) - if err != nil { - return 0, 0, fmt.Errorf("failed to parse major number: %w", err) - } - minor, err := strconv.Atoi(parts[2]) - if err != nil { - return 0, 0, fmt.Errorf("failed to parse minor number: %w", err) - } - return major, minor, nil -} - -// SetContainersOverrideXattr will encode and set ContainersOverrideXattr. -func SetContainersOverrideXattr(path string, stat Stat) error { - value := FormatContainersOverrideXattrDevice(stat.IDs.UID, stat.IDs.GID, stat.Mode, stat.Major, stat.Minor) - return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0) -} - -func SafeChown(name string, uid, gid int) error { - if runtime.GOOS == "darwin" { - stat := Stat{ - Mode: os.FileMode(0o0700), - } - xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) - if err == nil && xstat != nil { - stat, err = parseOverrideXattr(xstat) - if err != nil { - return err - } - } else { - st, err := os.Stat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode. 
- if err != nil { - return err - } - stat.Mode = st.Mode() - } - stat.IDs = IDPair{UID: uid, GID: gid} - if err = SetContainersOverrideXattr(name, stat); err != nil { - return err - } - uid = os.Getuid() - gid = os.Getgid() - } - if stat, statErr := system.Stat(name); statErr == nil { - if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { - return nil - } - } - return checkChownErr(os.Chown(name, uid, gid), name, uid, gid) -} - -func SafeLchown(name string, uid, gid int) error { - if runtime.GOOS == "darwin" { - stat := Stat{ - Mode: os.FileMode(0o0700), - } - xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) - if err == nil && xstat != nil { - stat, err = parseOverrideXattr(xstat) - if err != nil { - return err - } - } else { - st, err := os.Lstat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode. - if err != nil { - return err - } - stat.Mode = st.Mode() - } - stat.IDs = IDPair{UID: uid, GID: gid} - if err = SetContainersOverrideXattr(name, stat); err != nil { - return err - } - uid = os.Getuid() - gid = os.Getgid() - } - if stat, statErr := system.Lstat(name); statErr == nil { - if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { - return nil - } - } - return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid) -} - -type sortByHostID []IDMap - -func (e sortByHostID) Len() int { return len(e) } -func (e sortByHostID) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e sortByHostID) Less(i, j int) bool { return e[i].HostID < e[j].HostID } - -type sortByContainerID []IDMap - -func (e sortByContainerID) Len() int { return len(e) } -func (e sortByContainerID) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e sortByContainerID) Less(i, j int) bool { return e[i].ContainerID < e[j].ContainerID } - -// IsContiguous checks if the specified mapping is contiguous and doesn't -// have any hole. 
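Concretely, a mapping set is contiguous when, after sorting, each range starts exactly where the previous one ends on both the container and host sides. A usage sketch (a fragment, assuming this package's IDMap and IsContiguous are in scope; the values are illustrative):

// The second range starts exactly where the first ends on both sides,
// so the first set is contiguous and the second is not.
contiguous := []IDMap{
	{ContainerID: 0, HostID: 100000, Size: 1000},
	{ContainerID: 1000, HostID: 101000, Size: 1000},
}
gap := []IDMap{
	{ContainerID: 0, HostID: 100000, Size: 1000},
	{ContainerID: 1000, HostID: 102000, Size: 1000}, // host side skips 1000 IDs
}
fmt.Println(IsContiguous(contiguous)) // true
fmt.Println(IsContiguous(gap))        // false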
-func IsContiguous(mappings []IDMap) bool { - if len(mappings) < 2 { - return true - } - - var mh sortByHostID = mappings[:] - sort.Sort(mh) - for i := 1; i < len(mh); i++ { - if mh[i].HostID != mh[i-1].HostID+mh[i-1].Size { - return false - } - } - - var mc sortByContainerID = mappings[:] - sort.Sort(mc) - for i := 1; i < len(mc); i++ { - if mc[i].ContainerID != mc[i-1].ContainerID+mc[i-1].Size { - return false - } - } - return true -} diff --git a/vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go b/vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go deleted file mode 100644 index 8a3076a0f..000000000 --- a/vendor/go.podman.io/storage/pkg/idtools/idtools_supported.go +++ /dev/null @@ -1,97 +0,0 @@ -//go:build linux && cgo && libsubid - -package idtools - -import ( - "errors" - "os/user" - "sync" - "unsafe" -) - -/* -#cgo LDFLAGS: -l subid -#include -#include -#include - -struct subid_range get_range(struct subid_range *ranges, int i) -{ - return ranges[i]; -} - -// helper for stderr to avoid referencing C.stderr from Go code, -// which breaks cgo on musl due to stderr being declared as FILE *const -static FILE *subid_stderr(void) { - return stderr; -} - -#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) -# define subid_init libsubid_init -# define subid_get_uid_ranges get_subuid_ranges -# define subid_get_gid_ranges get_subgid_ranges -#endif - -*/ -import "C" - -var onceInit sync.Once - -func readSubid(username string, isUser bool) (ranges, error) { - var ret ranges - uidstr := "" - - if username == "ALL" { - return nil, errors.New("username ALL not supported") - } - - if u, err := user.Lookup(username); err == nil { - uidstr = u.Uid - } - - onceInit.Do(func() { - C.subid_init(C.CString("storage"), C.subid_stderr()) - }) - - cUsername := C.CString(username) - defer C.free(unsafe.Pointer(cUsername)) - - cuidstr := C.CString(uidstr) - defer C.free(unsafe.Pointer(cuidstr)) - - var nRanges C.int - var cRanges *C.struct_subid_range - if isUser { - nRanges = C.subid_get_uid_ranges(cUsername, &cRanges) - if nRanges <= 0 { - nRanges = C.subid_get_uid_ranges(cuidstr, &cRanges) - } - } else { - nRanges = C.subid_get_gid_ranges(cUsername, &cRanges) - if nRanges <= 0 { - nRanges = C.subid_get_gid_ranges(cuidstr, &cRanges) - } - } - if nRanges < 0 { - return nil, errors.New("cannot read subids") - } - defer C.free(unsafe.Pointer(cRanges)) - - for i := 0; i < int(nRanges); i++ { - r := C.get_range(cRanges, C.int(i)) - newRange := subIDRange{ - Start: int(r.start), - Length: int(r.count), - } - ret = append(ret, newRange) - } - return ret, nil -} - -func readSubuid(username string) (ranges, error) { - return readSubid(username, true) -} - -func readSubgid(username string) (ranges, error) { - return readSubid(username, false) -} diff --git a/vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go b/vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go deleted file mode 100644 index e7c264367..000000000 --- a/vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,212 +0,0 @@ -//go:build !windows - -package idtools - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sync" - "syscall" - - "github.com/moby/sys/user" - "go.podman.io/storage/pkg/fileutils" - "go.podman.io/storage/pkg/system" -) - -var ( - entOnce sync.Once - getentCmd string -) - -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path 
components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - st, err := os.Stat(path) - if err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil { - if !st.IsDir() { - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - if chownExisting { - // short-circuit--we were called with an existing directory and chown was requested - return SafeChown(path, ownerUID, ownerGID) - } - // nothing to do; directory exists and chown was NOT requested - return nil - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - if !filepath.IsAbs(dirPath) { - return fmt.Errorf("path: %s should be absolute", dirPath) - } - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if err := fileutils.Exists(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := os.MkdirAll(path, mode); err != nil { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil { - return err - } - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, pair IDPair) bool { - statInfo, err := system.Stat(path) - if err != nil { - return false - } - fileMode := os.FileMode(statInfo.Mode()) - permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(pair.UID), - statInfo.GID() == uint32(pair.GID), permBits) -} - -func accessible(isOwner, isGroup bool, perms os.FileMode) bool { - if isOwner && (perms&0o100 == 0o100) { - return true - } - if isGroup && (perms&0o010 == 0o010) { - return true - } - if perms&0o001 == 0o001 { - return true - } - return false -} - -// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUser(username string) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUser(username) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(username) - if err != nil { - return user.User{}, err - } - return usr, nil -} - -// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUID(uid int) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUid(uid) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(fmt.Sprintf("%d", uid)) -} - -func getentUser(key string) (user.User, error) { - reader, err := callGetent("passwd", key) - if err != nil { - return user.User{}, err - } - users, err := 
user.ParsePasswd(reader)
-	if err != nil {
-		return user.User{}, err
-	}
-	if len(users) == 0 {
-		return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", key)
-	}
-	return users[0], nil
-}
-
-// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
-// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
-func LookupGroup(groupname string) (user.Group, error) {
-	// first try a local system files lookup using existing capabilities
-	group, err := user.LookupGroup(groupname)
-	if err == nil {
-		return group, nil
-	}
-	// local files lookup failed; attempt to call `getent` to query configured group dbs
-	return getentGroup(groupname)
-}
-
-// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
-// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
-func LookupGID(gid int) (user.Group, error) {
-	// first try a local system files lookup using existing capabilities
-	group, err := user.LookupGid(gid)
-	if err == nil {
-		return group, nil
-	}
-	// local files lookup failed; attempt to call `getent` to query configured group dbs
-	return getentGroup(fmt.Sprintf("%d", gid))
-}
-
-func getentGroup(key string) (user.Group, error) {
-	reader, err := callGetent("group", key)
-	if err != nil {
-		return user.Group{}, err
-	}
-	groups, err := user.ParseGroup(reader)
-	if err != nil {
-		return user.Group{}, err
-	}
-	if len(groups) == 0 {
-		return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", key)
-	}
-	return groups[0], nil
-}
-
-func callGetent(db, key string) (io.Reader, error) {
-	entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
-	// if no `getent` command on host, can't do anything else
-	if getentCmd == "" {
-		return nil, fmt.Errorf("unable to find getent command")
-	}
-	out, err := execCmd(getentCmd, db, key)
-	if err != nil {
-		exitCode, errC := system.GetExitCode(err)
-		if errC != nil {
-			return nil, err
-		}
-		switch exitCode {
-		case 1:
-			return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
-		case 2:
-			return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, db)
-		case 3:
-			return nil, fmt.Errorf("getent database doesn't support enumeration")
-		default:
-			return nil, err
-		}
-
-	}
-	return bytes.NewReader(out), nil
-}
diff --git a/vendor/go.podman.io/storage/pkg/idtools/idtools_unsupported.go b/vendor/go.podman.io/storage/pkg/idtools/idtools_unsupported.go
deleted file mode 100644
index e6f5c1ba6..000000000
--- a/vendor/go.podman.io/storage/pkg/idtools/idtools_unsupported.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !linux || !libsubid || !cgo
-
-package idtools
-
-func readSubuid(username string) (ranges, error) {
-	return parseSubidFile(subuidFileName, username)
-}
-
-func readSubgid(username string) (ranges, error) {
-	return parseSubidFile(subgidFileName, username)
-}
diff --git a/vendor/go.podman.io/storage/pkg/idtools/idtools_windows.go b/vendor/go.podman.io/storage/pkg/idtools/idtools_windows.go
deleted file mode 100644
index ec6a3a046..000000000
--- a/vendor/go.podman.io/storage/pkg/idtools/idtools_windows.go
+++ /dev/null
@@ -1,23 +0,0 @@
-//go:build windows
-
-package idtools
-
-import (
-	"os"
-)
-
-// Platforms such as Windows do not support the UID/GID concept. So make this
-// just a wrapper around system.MkdirAll. 
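For context on the lookup helpers removed in this hunk: LookupUser/LookupGID try the local passwd/group files first and only then shell out to `getent`, so NSS-backed databases (LDAP, SSSD, and the like) still resolve. A minimal usage sketch, assuming the vendored package is still importable and using a placeholder account name:

```go
package main

import (
	"fmt"

	"go.podman.io/storage/pkg/idtools"
)

func main() {
	// Resolves via /etc/passwd first, then falls back to `getent passwd`
	// when the entry lives in a non-files NSS database.
	usr, err := idtools.LookupUser("nobody") // placeholder account name
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Printf("uid=%d gid=%d home=%s\n", usr.Uid, usr.Gid, usr.Home)
}
```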
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := os.MkdirAll(path, mode); err != nil { - return err - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -// Windows does not require/support this function, so always return true -func CanAccess(path string, pair IDPair) bool { - return true -} diff --git a/vendor/go.podman.io/storage/pkg/idtools/parser.go b/vendor/go.podman.io/storage/pkg/idtools/parser.go deleted file mode 100644 index 042d0ea95..000000000 --- a/vendor/go.podman.io/storage/pkg/idtools/parser.go +++ /dev/null @@ -1,59 +0,0 @@ -package idtools - -import ( - "fmt" - "math" - "math/bits" - "strconv" - "strings" -) - -func parseTriple(spec []string) (container, host, size uint32, err error) { - cid, err := strconv.ParseUint(spec[0], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[0], err) - } - hid, err := strconv.ParseUint(spec[1], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[1], err) - } - sz, err := strconv.ParseUint(spec[2], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[2], err) - } - return uint32(cid), uint32(hid), uint32(sz), nil -} - -// ParseIDMap parses idmap triples from string. -func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) { - stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec) - for _, idMapSpec := range mapSpec { - if idMapSpec == "" { - continue - } - idSpec := strings.Split(idMapSpec, ":") - if len(idSpec)%3 != 0 { - return nil, stdErr - } - for i := range idSpec { - if i%3 != 0 { - continue - } - cid, hid, size, err := parseTriple(idSpec[i : i+3]) - if err != nil { - return nil, stdErr - } - // Avoid possible integer overflow on 32bit builds - if bits.UintSize == 32 && (cid > math.MaxInt32 || hid > math.MaxInt32 || size > math.MaxInt32) { - return nil, stdErr - } - mapping := IDMap{ - ContainerID: int(cid), - HostID: int(hid), - Size: int(size), - } - idmap = append(idmap, mapping) - } - } - return idmap, nil -} diff --git a/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go b/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index ee80ce6a7..000000000 --- a/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,158 +0,0 @@ -package idtools - -import ( - "fmt" - "slices" - "sort" - "strconv" - "strings" - "sync" - - "go.podman.io/storage/pkg/regexp" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group -// useradd -r -s /bin/false - -var ( - once sync.Once - userCommand []string // command, args…, to be finished by adding an user name - - idOutRegexp = regexp.Delayed(`uid=([0-9]+).*gid=([0-9]+)`) - // default length for a UID/GID subordinate range - defaultRangeLen = 65536 - defaultRangeStart = 100000 -) - -// AddNamespaceRangesUser takes a username and uses the standard system -// utility to create a system user/group pair used to hold the -// /etc/sub{uid,gid} ranges which will be used for user namespace -// mapping ranges in containers. 
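The parser.go hunk above removes ParseIDMap, which expands colon-separated `container:host:size` triples into IDMap entries and rejects malformed or overflowing values. A minimal sketch of the expected input and output (the triple is illustrative):

```go
package main

import (
	"fmt"

	"go.podman.io/storage/pkg/idtools"
)

func main() {
	// One triple: container IDs 0..65535 map to host IDs 100000..165535.
	maps, err := idtools.ParseIDMap([]string{"0:100000:65536"}, "uidmap")
	if err != nil {
		panic(err)
	}
	for _, m := range maps {
		fmt.Printf("container=%d host=%d size=%d\n", m.ContainerID, m.HostID, m.Size)
	}
}
```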
-func AddNamespaceRangesUser(name string) (int, int, error) { - if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("adding user %q: %w", name, err) - } - - // Query the system for the created uid and gid pair - out, err := execCmd("id", name) - if err != nil { - return -1, -1, fmt.Errorf("trying to find uid/gid for new user %q: %w", name, err) - } - matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) - if len(matches) != 3 { - return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out)) - } - uid, err := strconv.Atoi(matches[1]) - if err != nil { - return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %w", matches[1], err) - } - gid, err := strconv.Atoi(matches[2]) - if err != nil { - return -1, -1, fmt.Errorf("can't convert found gid (%s) to int: %w", matches[2], err) - } - - // Now we need to create the subuid/subgid ranges for our new user/group (system users - // do not get auto-created ranges in subuid/subgid) - - if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %w", err) - } - return uid, gid, nil -} - -func addUser(userName string) error { - once.Do(func() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = []string{"adduser", "--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group"} - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = []string{"useradd", "-r", "-s", "/bin/false"} - } - }) - if userCommand == nil { - return fmt.Errorf("cannot add user; no useradd/adduser binary found") - } - args := append(slices.Clone(userCommand), userName) - out, err := execCmd(args[0], args[1:]...) 
- if err != nil { - return fmt.Errorf("failed to add user with error: %w; output: %q", err, string(out)) - } - return nil -} - -func createSubordinateRanges(name string) error { - // first, we should verify that ranges weren't automatically created - // by the distro tooling - ranges, err := readSubuid(name) - if err != nil { - return fmt.Errorf("while looking for subuid ranges for user %q: %w", name, err) - } - if len(ranges) == 0 { - // no UID ranges; let's create one - startID, err := findNextUIDRange() - if err != nil { - return fmt.Errorf("can't find available subuid range: %w", err) - } - out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) - if err != nil { - return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %w", name, out, err) - } - } - - ranges, err = readSubgid(name) - if err != nil { - return fmt.Errorf("while looking for subgid ranges for user %q: %w", name, err) - } - if len(ranges) == 0 { - // no GID ranges; let's create one - startID, err := findNextGIDRange() - if err != nil { - return fmt.Errorf("can't find available subgid range: %w", err) - } - out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) - if err != nil { - return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %w", name, out, err) - } - } - return nil -} - -func findNextUIDRange() (int, error) { - ranges, err := readSubuid("ALL") - if err != nil { - return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %w", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextGIDRange() (int, error) { - ranges, err := readSubgid("ALL") - if err != nil { - return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %w", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextRangeStart(rangeList ranges) (int, error) { - startID := defaultRangeStart - for _, arange := range rangeList { - if wouldOverlap(arange, startID) { - startID = arange.Start + arange.Length - } - } - return startID, nil -} - -func wouldOverlap(arange subIDRange, ID int) bool { - low := ID - high := ID + defaultRangeLen - if (low >= arange.Start && low <= arange.Start+arange.Length) || - (high <= arange.Start+arange.Length && high >= arange.Start) { - return true - } - return false -} diff --git a/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index e37c4540c..000000000 --- a/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !linux - -package idtools - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. 
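The subordinate-range helpers deleted above pick the next free /etc/sub{uid,gid} window by scanning the existing ranges and skipping past any overlap. A self-contained sketch of that scan, with hypothetical range data and the same 65536-wide default window:

```go
package main

import "fmt"

type subIDRange struct{ Start, Length int }

const defaultRangeLen = 65536

// nextRangeStart mirrors the removed findNextRangeStart/wouldOverlap pair:
// starting at 100000, advance past every existing range that the candidate
// window would overlap. Assumes the input is sorted by Start, as the
// original code guarantees via sort.Sort before calling it.
func nextRangeStart(existing []subIDRange) int {
	start := 100000
	for _, r := range existing {
		low, high := start, start+defaultRangeLen
		if (low >= r.Start && low <= r.Start+r.Length) ||
			(high <= r.Start+r.Length && high >= r.Start) {
			start = r.Start + r.Length
		}
	}
	return start
}

func main() {
	ranges := []subIDRange{{Start: 100000, Length: 65536}} // one existing user
	fmt.Println(nextRangeStart(ranges))                    // prints 165536
}
```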
-func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("no support for adding users or groups on this OS") -} diff --git a/vendor/go.podman.io/storage/pkg/idtools/utils_unix.go b/vendor/go.podman.io/storage/pkg/idtools/utils_unix.go deleted file mode 100644 index 10606fba8..000000000 --- a/vendor/go.podman.io/storage/pkg/idtools/utils_unix.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !windows - -package idtools - -import ( - "fmt" - "os/exec" - "path/filepath" -) - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - // only return no error if the final resolved binary basename - // matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -func execCmd(cmd string, args ...string) ([]byte, error) { - execCmd := exec.Command(cmd, args...) - return execCmd.CombinedOutput() -} diff --git a/vendor/go.podman.io/storage/pkg/ioutils/buffer.go b/vendor/go.podman.io/storage/pkg/ioutils/buffer.go deleted file mode 100644 index 3d737b3e1..000000000 --- a/vendor/go.podman.io/storage/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go b/vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go deleted file mode 100644 index 47ab34507..000000000 --- a/vendor/go.podman.io/storage/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,184 +0,0 @@ -package ioutils - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. 
-type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - bp.mu.Unlock() - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - bp.mu.Unlock() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := min(b.Cap()*2, maxCap) - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - bp.mu.Unlock() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. 
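BytesPipe, whose write path is removed just above (the read path follows), behaves like an in-memory pipe: writers block once roughly 1 MB is buffered, and Close hands readers io.EOF after the remaining data drains. A minimal single-producer/single-consumer sketch, assuming the vendored package:

```go
package main

import (
	"fmt"
	"io"

	"go.podman.io/storage/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	go func() {
		for i := 0; i < 3; i++ {
			fmt.Fprintf(bp, "chunk %d\n", i) // Write side; may block near the 1MB threshold
		}
		bp.Close() // readers see io.EOF once the buffers drain
	}()

	out, err := io.ReadAll(bp) // Read side; data can be read at most once
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```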
-func (bp *BytesPipe) Read(p []byte) (int, error) { - var n int - bp.mu.Lock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - bp.mu.Unlock() - return 0, bp.closeErr - } - bp.wait.Wait() - if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - bp.mu.Unlock() - return n, nil -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() any { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/vendor/go.podman.io/storage/pkg/ioutils/fswriters.go b/vendor/go.podman.io/storage/pkg/ioutils/fswriters.go deleted file mode 100644 index fd6addd73..000000000 --- a/vendor/go.podman.io/storage/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,284 +0,0 @@ -package ioutils - -import ( - "io" - "os" - "path/filepath" - "time" -) - -// AtomicFileWriterOptions specifies options for creating the atomic file writer. -type AtomicFileWriterOptions struct { - // NoSync specifies whether the sync call must be skipped for the file. - // If NoSync is not specified, the file is synced to the - // storage after it has been written and before it is moved to - // the specified path. - NoSync bool - // On successful return from Close() this is set to the mtime of the - // newly written file. - ModTime time.Time - // Specifies whether Commit() must be explicitly called to write state - // to the destination. This allows an application to preserve the original - // file when an error occurs during processing (and not just during write) - // The default is false, which will auto-commit on Close - ExplicitCommit bool -} - -type CommittableWriter interface { - io.WriteCloser - - // Commit closes the temporary file associated with this writer, and - // provided no errors (during commit or previously during write operations), - // will publish the completed file under the intended destination. - Commit() error -} - -var defaultWriterOptions = AtomicFileWriterOptions{} - -// SetDefaultOptions overrides the default options used when creating an -// atomic file writer. -func SetDefaultOptions(opts AtomicFileWriterOptions) { - defaultWriterOptions = opts -} - -// NewAtomicFileWriterWithOpts returns a CommittableWriter so that writing to it -// writes to a temporary file, which can later be committed to a destination path, -// either by Closing in the case of auto-commit, or manually calling commit if the -// ExplicitCommit option is enabled. Writing and closing concurrently is not -// allowed. 
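The atomic-writer API whose removal continues below follows the temp-file-then-rename pattern, so a crash mid-write never leaves a torn destination file. A minimal sketch of the simplest entry point, with a placeholder path:

```go
package main

import (
	"go.podman.io/storage/pkg/ioutils"
)

func main() {
	// Data lands in a ".tmp-config.json" sibling first, is synced, then
	// renamed over the destination: readers see old or new, never half.
	if err := ioutils.AtomicWriteFile("/tmp/config.json", []byte(`{}`), 0o600); err != nil {
		panic(err)
	}
}
```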
-func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (CommittableWriter, error) {
-	return newAtomicFileWriter(filename, perm, opts)
-}
-
-// newAtomicFileWriter returns a CommittableWriter so that writing to it writes to
-// a temporary file, which can later be committed to a destination path, either by
-// Closing in the case of auto-commit, or manually calling commit if the
-// ExplicitCommit option is enabled. Writing and closing concurrently is not allowed.
-func newAtomicFileWriter(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (*atomicFileWriter, error) {
-	f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
-	if err != nil {
-		return nil, err
-	}
-	if opts == nil {
-		opts = &defaultWriterOptions
-	}
-	abspath, err := filepath.Abs(filename)
-	if err != nil {
-		return nil, err
-	}
-	return &atomicFileWriter{
-		f:              f,
-		fn:             abspath,
-		perm:           perm,
-		noSync:         opts.NoSync,
-		explicitCommit: opts.ExplicitCommit,
-	}, nil
-}
-
-// NewAtomicFileWriter returns a CommittableWriter, with auto-commit enabled.
-// Writing to it writes to a temporary file and closing it atomically changes the
-// temporary file to destination path. Writing and closing concurrently is not allowed.
-func NewAtomicFileWriter(filename string, perm os.FileMode) (CommittableWriter, error) {
-	return NewAtomicFileWriterWithOpts(filename, perm, nil)
-}
-
-// AtomicWriteFileWithOpts atomically writes data to a file named by filename.
-func AtomicWriteFileWithOpts(filename string, data []byte, perm os.FileMode, opts *AtomicFileWriterOptions) error {
-	f, err := newAtomicFileWriter(filename, perm, opts)
-	if err != nil {
-		return err
-	}
-	n, err := f.Write(data)
-	if err == nil && n < len(data) {
-		err = io.ErrShortWrite
-		f.writeErr = err
-	}
-	if err1 := f.Close(); err == nil {
-		err = err1
-	}
-
-	if opts != nil {
-		opts.ModTime = f.modTime
-	}
-
-	return err
-}
-
-func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
-	return AtomicWriteFileWithOpts(filename, data, perm, nil)
-}
-
-type atomicFileWriter struct {
-	f              *os.File
-	fn             string
-	writeErr       error
-	perm           os.FileMode
-	noSync         bool
-	modTime        time.Time
-	closed         bool
-	explicitCommit bool
-}
-
-func (w *atomicFileWriter) Write(dt []byte) (int, error) {
-	n, err := w.f.Write(dt)
-	if err != nil {
-		w.writeErr = err
-	}
-	return n, err
-}
-
-func (w *atomicFileWriter) closeTempFile() error {
-	if w.closed {
-		return nil
-	}
-
-	w.closed = true
-	return w.f.Close()
-}
-
-func (w *atomicFileWriter) Close() error {
-	return w.complete(!w.explicitCommit)
-}
-
-func (w *atomicFileWriter) Commit() error {
-	return w.complete(true)
-}
-
-func (w *atomicFileWriter) complete(commit bool) (retErr error) {
-	if w == nil || w.closed {
-		return nil
-	}
-
-	defer func() {
-		err := w.closeTempFile()
-		if retErr != nil || w.writeErr != nil {
-			os.Remove(w.f.Name())
-		}
-		if retErr == nil {
-			retErr = err
-		}
-	}()
-
-	if commit {
-		return w.commitState()
-	}
-
-	return nil
-}
-
-func (w *atomicFileWriter) commitState() error {
-	// Perform a data only sync (fdatasync()) if supported
-	if err := w.postDataWrittenSync(); err != nil {
-		return err
-	}
-
-	// Capture fstat before closing the fd
-	info, err := w.f.Stat()
-	if err != nil {
-		return err
-	}
-	w.modTime = info.ModTime()
-
-	if err := w.f.Chmod(w.perm); err != nil {
-		return err
-	}
-
-	// Perform full sync on platforms that need it
-	if err := w.preRenameSync(); err != nil {
-		return err
-	}
-
-	// Some 
platforms require closing before rename (Windows)
-	if err := w.closeTempFile(); err != nil {
-		return err
-	}
-
-	if w.writeErr == nil {
-		return os.Rename(w.f.Name(), w.fn)
-	}
-
-	return nil
-}
-
-// AtomicWriteSet is used to atomically write a set
-// of files and ensure they are visible at the same time.
-// Must be committed to a new directory.
-type AtomicWriteSet struct {
-	root string
-}
-
-// NewAtomicWriteSet creates a new atomic write set to
-// atomically create a set of files. The given directory
-// is used as the base directory for storing files before
-// commit. If no temporary directory is given the system
-// default is used.
-func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
-	td, err := os.MkdirTemp(tmpDir, "write-set-")
-	if err != nil {
-		return nil, err
-	}
-
-	return &AtomicWriteSet{
-		root: td,
-	}, nil
-}
-
-// WriteFile writes a file to the set, guaranteeing the file
-// has been synced.
-func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
-	f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
-	if err != nil {
-		return err
-	}
-	n, err := f.Write(data)
-	if err == nil && n < len(data) {
-		err = io.ErrShortWrite
-	}
-	if err1 := f.Close(); err == nil {
-		err = err1
-	}
-	return err
-}
-
-type syncFileCloser struct {
-	*os.File
-}
-
-func (w syncFileCloser) Close() error {
-	if defaultWriterOptions.NoSync {
-		return w.File.Close()
-	}
-	err := dataOrFullSync(w.File)
-	if err1 := w.File.Close(); err == nil {
-		err = err1
-	}
-	return err
-}
-
-// FileWriter opens a file writer inside the set. The file
-// should be synced and closed before calling commit.
-func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
-	f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
-	if err != nil {
-		return nil, err
-	}
-	return syncFileCloser{f}, nil
-}
-
-// Cancel cancels the set and removes all temporary data
-// created in the set.
-func (ws *AtomicWriteSet) Cancel() error {
-	return os.RemoveAll(ws.root)
-}
-
-// Commit moves all created files to the target directory. The
-// target directory must not exist and the parent of the target
-// directory must exist.
-func (ws *AtomicWriteSet) Commit(target string) error {
-	return os.Rename(ws.root, target)
-}
-
-// String returns the location the set is writing to. 
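AtomicWriteSet, deleted in this hunk, stages multiple files in a scratch directory and publishes them with a single rename, so consumers never observe a partially written set. A minimal sketch with placeholder paths:

```go
package main

import (
	"go.podman.io/storage/pkg/ioutils"
)

func main() {
	ws, err := ioutils.NewAtomicWriteSet("") // "" means the system temp dir
	if err != nil {
		panic(err)
	}
	if err := ws.WriteFile("config.json", []byte(`{}`), 0o600); err != nil {
		ws.Cancel() // drop all staged files on error
		panic(err)
	}
	// Target must not exist yet; the whole set appears in one rename.
	if err := ws.Commit("/tmp/bundle"); err != nil {
		ws.Cancel()
		panic(err)
	}
}
```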
-func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/vendor/go.podman.io/storage/pkg/ioutils/fswriters_linux.go b/vendor/go.podman.io/storage/pkg/ioutils/fswriters_linux.go deleted file mode 100644 index 10ed48cfd..000000000 --- a/vendor/go.podman.io/storage/pkg/ioutils/fswriters_linux.go +++ /dev/null @@ -1,23 +0,0 @@ -package ioutils - -import ( - "os" - - "golang.org/x/sys/unix" -) - -func dataOrFullSync(f *os.File) error { - return unix.Fdatasync(int(f.Fd())) -} - -func (w *atomicFileWriter) postDataWrittenSync() error { - if w.noSync { - return nil - } - return unix.Fdatasync(int(w.f.Fd())) -} - -func (w *atomicFileWriter) preRenameSync() error { - // On Linux data can be reliably flushed to media without metadata, so defer - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/ioutils/fswriters_other.go b/vendor/go.podman.io/storage/pkg/ioutils/fswriters_other.go deleted file mode 100644 index 2ccdc3108..000000000 --- a/vendor/go.podman.io/storage/pkg/ioutils/fswriters_other.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !linux - -package ioutils - -import ( - "os" -) - -func dataOrFullSync(f *os.File) error { - return f.Sync() -} - -func (w *atomicFileWriter) postDataWrittenSync() error { - // many platforms (Mac, Windows) require a full sync to reliably flush to media - return nil -} - -func (w *atomicFileWriter) preRenameSync() error { - if w.noSync { - return nil - } - - // fsync() on Non-linux Unix, FlushFileBuffers (Windows), F_FULLFSYNC (Mac) - return w.f.Sync() -} diff --git a/vendor/go.podman.io/storage/pkg/ioutils/readers.go b/vendor/go.podman.io/storage/pkg/ioutils/readers.go deleted file mode 100644 index aed1cb033..000000000 --- a/vendor/go.podman.io/storage/pkg/ioutils/readers.go +++ /dev/null @@ -1,170 +0,0 @@ -package ioutils - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "io" -) - -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return r.closer() -} - -type readWriteToCloserWrapper struct { - io.Reader - io.WriterTo - closer func() error -} - -func (r *readWriteToCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - if wt, ok := r.(io.WriterTo); ok { - return &readWriteToCloserWrapper{ - Reader: r, - WriterTo: wt, - closer: closer, - } - } - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// HashData returns the sha256 sum of src. -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return n, err -} - -// Close closes the file and run the function. 
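Among the reader helpers being dropped here, HashData streams any io.Reader through SHA-256 and formats the digest in the `sha256:<hex>` style used elsewhere in the storage code. A quick sketch:

```go
package main

import (
	"fmt"
	"strings"

	"go.podman.io/storage/pkg/ioutils"
)

func main() {
	sum, err := ioutils.HashData(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	// Prints sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
	fmt.Println(sum)
}
```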
-func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. -func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/ioutils/temp_unix.go b/vendor/go.podman.io/storage/pkg/ioutils/temp_unix.go deleted file mode 100644 index 257b064c5..000000000 --- a/vendor/go.podman.io/storage/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !windows - -package ioutils - -import "os" - -// TempDir on Unix systems is equivalent to os.MkdirTemp. -func TempDir(dir, prefix string) (string, error) { - return os.MkdirTemp(dir, prefix) -} diff --git a/vendor/go.podman.io/storage/pkg/ioutils/temp_windows.go b/vendor/go.podman.io/storage/pkg/ioutils/temp_windows.go deleted file mode 100644 index b60ad8f49..000000000 --- a/vendor/go.podman.io/storage/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build windows - -package ioutils - -import ( - "os" - - "go.podman.io/storage/pkg/longpath" -) - -// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format. -func TempDir(dir, prefix string) (string, error) { - tempDir, err := os.MkdirTemp(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/vendor/go.podman.io/storage/pkg/ioutils/writeflusher.go b/vendor/go.podman.io/storage/pkg/ioutils/writeflusher.go deleted file mode 100644 index 52a4901ad..000000000 --- a/vendor/go.podman.io/storage/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. 
In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. -func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/vendor/go.podman.io/storage/pkg/ioutils/writers.go b/vendor/go.podman.io/storage/pkg/ioutils/writers.go deleted file mode 100644 index 2a8007e44..000000000 --- a/vendor/go.podman.io/storage/pkg/ioutils/writers.go +++ /dev/null @@ -1,66 +0,0 @@ -package ioutils - -import "io" - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(w io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: w, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. 
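WriteCounter, removed in this hunk, solves exactly the problem its comment names: APIs such as json.Encoder.Encode discard the byte count, so wrapping the writer is the way to recover it. A minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"

	"go.podman.io/storage/pkg/ioutils"
)

func main() {
	wc := ioutils.NewWriteCounter(io.Discard)
	if err := json.NewEncoder(wc).Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}
	// Bytes written by Encode, including the trailing newline it emits.
	fmt.Println(wc.Count)
}
```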
-func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (int, error) { - count, err := wc.Writer.Write(p) - wc.Count += int64(count) - return count, err -} diff --git a/vendor/go.podman.io/storage/pkg/lockfile/lastwrite.go b/vendor/go.podman.io/storage/pkg/lockfile/lastwrite.go deleted file mode 100644 index 93fb1fea8..000000000 --- a/vendor/go.podman.io/storage/pkg/lockfile/lastwrite.go +++ /dev/null @@ -1,82 +0,0 @@ -package lockfile - -import ( - "bytes" - cryptorand "crypto/rand" - "encoding/binary" - "os" - "sync/atomic" - "time" -) - -// LastWrite is an opaque identifier of the last write to some *LockFile. -// It can be used by users of a *LockFile to determine if the lock indicates changes -// since the last check. -// -// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back. -type LastWrite struct { - // Never modify fields of a LastWrite object; it has value semantics. - state []byte // Contents of the lock file. -} - -var lastWriterIDCounter uint64 // Private state for newLastWriterID - -const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID) -// newLastWrite returns a new "last write" ID. -// The value must be different on every call, and also differ from values -// generated by other processes. -func newLastWrite() LastWrite { - // The ID is (PID, time, per-process counter, random) - // PID + time represents both a unique process across reboots, - // and a specific time within the process; the per-process counter - // is an extra safeguard for in-process concurrency. - // The random part disambiguates across process namespaces - // (where PID values might collide), serves as a general-purpose - // extra safety, _and_ is used to pad the output to lastWriterIDSize, - // because other versions of this code exist and they don't work - // efficiently if the size of the value changes. - pid := os.Getpid() - tm := time.Now().UnixNano() - counter := atomic.AddUint64(&lastWriterIDCounter, 1) - - res := make([]byte, lastWriterIDSize) - binary.LittleEndian.PutUint64(res[0:8], uint64(tm)) - binary.LittleEndian.PutUint64(res[8:16], counter) - binary.LittleEndian.PutUint32(res[16:20], uint32(pid)) - if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 { - panic(err) // This shouldn't happen - } - - return LastWrite{ - state: res, - } -} - -// serialize returns bytes to write to the lock file to represent the specified write. 
-func (lw LastWrite) serialize() []byte { - if lw.state == nil { - panic("LastWrite.serialize on an uninitialized object") - } - return lw.state -} - -// Equals returns true if lw matches other -func (lw LastWrite) equals(other LastWrite) bool { - if lw.state == nil { - panic("LastWrite.equals on an uninitialized object") - } - if other.state == nil { - panic("LastWrite.equals with an uninitialized counterparty") - } - return bytes.Equal(lw.state, other.state) -} - -// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize -func newLastWriteFromData(serialized []byte) LastWrite { - if serialized == nil { - panic("newLastWriteFromData with nil data") - } - return LastWrite{ - state: serialized, - } -} diff --git a/vendor/go.podman.io/storage/pkg/lockfile/lockfile.go b/vendor/go.podman.io/storage/pkg/lockfile/lockfile.go deleted file mode 100644 index 3a8a4bc39..000000000 --- a/vendor/go.podman.io/storage/pkg/lockfile/lockfile.go +++ /dev/null @@ -1,450 +0,0 @@ -package lockfile - -import ( - "fmt" - "os" - "path/filepath" - "sync" - "time" - - "go.podman.io/storage/internal/rawfilelock" -) - -// A Locker represents a file lock where the file is used to cache an -// identifier of the last party that made changes to whatever's being protected -// by the lock. -// -// Deprecated: Refer directly to *LockFile, the provided implementation, instead. -type Locker interface { - // Acquire a writer lock. - // The default unix implementation panics if: - // - opening the lockfile failed - // - tried to lock a read-only lock-file - Lock() - - // Unlock the lock. - // The default unix implementation panics if: - // - unlocking an unlocked lock - // - if the lock counter is corrupted - Unlock() - - // Acquire a reader lock. - RLock() - - // Touch records, for others sharing the lock, that the caller was the - // last writer. It should only be called with the lock held. - // - // Deprecated: Use *LockFile.RecordWrite. - Touch() error - - // Modified() checks if the most recent writer was a party other than the - // last recorded writer. It should only be called with the lock held. - // Deprecated: Use *LockFile.ModifiedSince. - Modified() (bool, error) - - // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time. - TouchedSince(when time.Time) bool - - // IsReadWrite() checks if the lock file is read-write - IsReadWrite() bool - - // AssertLocked() can be used by callers that _know_ that they hold the lock (for reading or writing), for sanity checking. - // It might do nothing at all, or it may panic if the caller is not the owner of this lock. - AssertLocked() - - // AssertLockedForWriting() can be used by callers that _know_ that they hold the lock locked for writing, for sanity checking. - // It might do nothing at all, or it may panic if the caller is not the owner of this lock for writing. - AssertLockedForWriting() -} - -// LockFile represents a file lock where the file is used to cache an -// identifier of the last party that made changes to whatever's being protected -// by the lock. -// -// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead. -type LockFile struct { - // The following fields are only set when constructing *LockFile, and must never be modified afterwards. - // They are safe to access without any other locking. 
- file string - ro bool - - // rwMutex serializes concurrent reader-writer acquisitions in the same process space - rwMutex *sync.RWMutex - // stateMutex is used to synchronize concurrent accesses to the state below - stateMutex *sync.Mutex - counter int64 - lw LastWrite // A global value valid as of the last .Touch() or .Modified() - lockType rawfilelock.LockType - locked bool - // The following fields are only modified on transitions between counter == 0 / counter != 0. - // Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking. - // In other cases, they need to be protected using stateMutex. - fd rawfilelock.FileHandle -} - -var ( - lockFiles map[string]*LockFile - lockFilesLock sync.Mutex -) - -// GetLockFile opens a read-write lock file, creating it if necessary. The -// *LockFile object may already be locked if the path has already been requested -// by the current process. -func GetLockFile(path string) (*LockFile, error) { - return getLockfile(path, false) -} - -// GetLockfile opens a read-write lock file, creating it if necessary. The -// Locker object may already be locked if the path has already been requested -// by the current process. -// -// Deprecated: Use GetLockFile -func GetLockfile(path string) (Locker, error) { - return GetLockFile(path) -} - -// GetROLockFile opens a read-only lock file, creating it if necessary. The -// *LockFile object may already be locked if the path has already been requested -// by the current process. -func GetROLockFile(path string) (*LockFile, error) { - return getLockfile(path, true) -} - -// GetROLockfile opens a read-only lock file, creating it if necessary. The -// Locker object may already be locked if the path has already been requested -// by the current process. -// -// Deprecated: Use GetROLockFile -func GetROLockfile(path string) (Locker, error) { - return GetROLockFile(path) -} - -// Lock locks the lockfile as a writer. Panic if the lock is a read-only one. -func (l *LockFile) Lock() { - if l.ro { - panic("can't take write lock on read-only lock file") - } - l.lock(rawfilelock.WriteLock) -} - -// RLock locks the lockfile as a reader. -func (l *LockFile) RLock() { - l.lock(rawfilelock.ReadLock) -} - -// TryLock attempts to lock the lockfile as a writer. Panic if the lock is a read-only one. -func (l *LockFile) TryLock() error { - if l.ro { - panic("can't take write lock on read-only lock file") - } - return l.tryLock(rawfilelock.WriteLock) -} - -// TryRLock attempts to lock the lockfile as a reader. -func (l *LockFile) TryRLock() error { - return l.tryLock(rawfilelock.ReadLock) -} - -// Unlock unlocks the lockfile. -func (l *LockFile) Unlock() { - l.stateMutex.Lock() - if !l.locked { - // Panic when unlocking an unlocked lock. That's a violation - // of the lock semantics and will reveal such. - panic("calling Unlock on unlocked lock") - } - l.counter-- - if l.counter < 0 { - // Panic when the counter is negative. There is no way we can - // recover from a corrupted lock and we need to protect the - // storage from corruption. - panic(fmt.Sprintf("lock %q has been unlocked too often", l.file)) - } - if l.counter == 0 { - // We should only release the lock when the counter is 0 to - // avoid releasing read-locks too early; a given process may - // acquire a read lock multiple times. - l.locked = false - // Close the file descriptor on the last unlock, releasing the - // file lock. 
- rawfilelock.UnlockAndCloseHandle(l.fd) - } - if l.lockType == rawfilelock.ReadLock { - l.rwMutex.RUnlock() - } else { - l.rwMutex.Unlock() - } - l.stateMutex.Unlock() -} - -func (l *LockFile) AssertLocked() { - // DO NOT provide a variant that returns the value of l.locked. - // - // If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and - // we can’t tell the difference. - // - // Hence, this “AssertLocked” method, which exists only for sanity checks. - - // Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true - // with no possible writers. - // If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data - // without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers. - if !l.locked { - panic("internal error: lock is not held by the expected owner") - } -} - -func (l *LockFile) AssertLockedForWriting() { - // DO NOT provide a variant that returns the current lock state. - // - // The same caveats as for AssertLocked apply equally. - - l.AssertLocked() - // Like AssertLocked, don’t even bother with l.stateMutex. - if l.lockType == rawfilelock.ReadLock { - panic("internal error: lock is not held for writing") - } -} - -// ModifiedSince checks if the lock has been changed since a provided LastWrite value, -// and returns the one to record instead. -// -// If ModifiedSince reports no modification, the previous LastWrite value -// is still valid and can continue to be used. -// -// If this function fails, the LastWriter value of the lock is indeterminate; -// the caller should fail and keep using the previously-recorded LastWrite value, -// so that it continues failing until the situation is resolved. Similarly, -// it should only update the recorded LastWrite value after processing the update: -// -// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite) -// if err != nil { /* fail */ } -// state.lastWrite = lw2 -// if modified { -// if err := reload(); err != nil { /* fail */ } -// state.lastWrite = lw2 -// } -// -// The caller must hold the lock (for reading or writing). -func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) { - l.AssertLocked() - currentLW, err := l.GetLastWrite() - if err != nil { - return LastWrite{}, false, err - } - modified := !previous.equals(currentLW) - return currentLW, modified, nil -} - -// Modified indicates if the lockfile has been updated since the last time it -// was loaded. -// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile. -// Callers cannot, in general, rely on this, because that might have happened for some other -// owner of the same *LockFile who created it previously. -// -// Deprecated: Use *LockFile.ModifiedSince. -func (l *LockFile) Modified() (bool, error) { - l.stateMutex.Lock() - if !l.locked { - panic("attempted to check last-writer in lockfile without locking it first") - } - defer l.stateMutex.Unlock() - oldLW := l.lw - // Note that this is called with stateMutex held; that’s fine because ModifiedSince doesn’t need to lock it. - currentLW, modified, err := l.ModifiedSince(oldLW) - if err != nil { - return true, err - } - l.lw = currentLW - return modified, nil -} - -// Touch updates the lock file with to record that the current lock holder has modified the lock-protected data. -// -// Deprecated: Use *LockFile.RecordWrite. 
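The ModifiedSince/RecordWrite protocol documented above is the core of this lockfile package: snapshot a LastWrite once under the lock, then compare on every later acquisition. A minimal consumer sketch, assuming the vendored package and a placeholder lock path; the reload step is left as a comment:

```go
package main

import (
	"go.podman.io/storage/pkg/lockfile"
)

func main() {
	lock, err := lockfile.GetLockFile("/tmp/example.lock") // placeholder path
	if err != nil {
		panic(err)
	}

	// Initial snapshot, taken while holding the lock.
	lock.RLock()
	last, err := lock.GetLastWrite()
	lock.Unlock()
	if err != nil {
		panic(err)
	}

	// Later, on each access to the protected state:
	lock.RLock()
	defer lock.Unlock()
	lw, modified, err := lock.ModifiedSince(last)
	if err != nil {
		panic(err)
	}
	last = lw
	if modified {
		// ... reload the protected state from disk here ...
	}
}
```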
-func (l *LockFile) Touch() error { - lw, err := l.RecordWrite() - if err != nil { - return err - } - l.stateMutex.Lock() - if !l.locked || (l.lockType == rawfilelock.ReadLock) { - panic("attempted to update last-writer in lockfile without the write lock") - } - defer l.stateMutex.Unlock() - l.lw = lw - return nil -} - -// IsReadWrite indicates if the lock file is a read-write lock. -func (l *LockFile) IsReadWrite() bool { - return !l.ro -} - -// getLockFile returns a *LockFile object, possibly (depending on the platform) -// working inter-process, and associated with the specified path. -// -// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the -// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation. -// -// WARNING: -// - The lock may or MAY NOT be inter-process. -// - There may or MAY NOT be an actual object on the filesystem created for the specified path. -// - Even if ro, the lock MAY be exclusive. -func getLockfile(path string, ro bool) (*LockFile, error) { - lockFilesLock.Lock() - defer lockFilesLock.Unlock() - if lockFiles == nil { - lockFiles = make(map[string]*LockFile) - } - cleanPath, err := filepath.Abs(path) - if err != nil { - return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err) - } - if lockFile, ok := lockFiles[cleanPath]; ok { - if ro && lockFile.IsReadWrite() { - return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath) - } - if !ro && !lockFile.IsReadWrite() { - return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath) - } - return lockFile, nil - } - lockFile, err := createLockFileForPath(cleanPath, ro) // platform-dependent LockFile - if err != nil { - return nil, err - } - lockFiles[cleanPath] = lockFile - return lockFile, nil -} - -// openLock opens a lock file at the specified path, creating the parent directory if it does not exist. -func openLock(path string, readOnly bool) (rawfilelock.FileHandle, error) { - fd, err := rawfilelock.OpenLock(path, readOnly) - if err == nil { - return fd, nil - } - - // the directory of the lockfile seems to be removed, try to create it - if os.IsNotExist(err) { - if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { - return fd, fmt.Errorf("creating lock file directory: %w", err) - } - - return openLock(path, readOnly) - } - return fd, &os.PathError{Op: "open", Path: path, Err: err} -} - -// createLockFileForPath returns new *LockFile object, possibly (depending on the platform) -// working inter-process and associated with the specified path. -// -// This function will be called at most once for each path value within a single process. -// -// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the -// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation. -// -// WARNING: -// - The lock may or MAY NOT be inter-process. -// - There may or MAY NOT be an actual object on the filesystem created for the specified path. -// - Even if ro, the lock MAY be exclusive. -func createLockFileForPath(path string, ro bool) (*LockFile, error) { - // Check if we can open the lock. 
- fd, err := openLock(path, ro) - if err != nil { - return nil, err - } - rawfilelock.UnlockAndCloseHandle(fd) - - lType := rawfilelock.WriteLock - if ro { - lType = rawfilelock.ReadLock - } - - return &LockFile{ - file: path, - ro: ro, - - rwMutex: &sync.RWMutex{}, - stateMutex: &sync.Mutex{}, - lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change. - lockType: lType, - locked: false, - }, nil -} - -// lock locks the lockfile via syscall based on the specified type and -// command. -func (l *LockFile) lock(lType rawfilelock.LockType) { - if lType == rawfilelock.ReadLock { - l.rwMutex.RLock() - } else { - l.rwMutex.Lock() - } - l.stateMutex.Lock() - defer l.stateMutex.Unlock() - if l.counter == 0 { - // If we're the first reference on the lock, we need to open the file again. - fd, err := openLock(l.file, l.ro) - if err != nil { - panic(err) - } - l.fd = fd - - // Optimization: only use the (expensive) syscall when - // the counter is 0. In this case, we're either the first - // reader lock or a writer lock. - if err := rawfilelock.LockFile(l.fd, lType); err != nil { - panic(err) - } - } - l.lockType = lType - l.locked = true - l.counter++ -} - -// lock locks the lockfile via syscall based on the specified type and -// command. -func (l *LockFile) tryLock(lType rawfilelock.LockType) error { - var success bool - var rwMutexUnlocker func() - if lType == rawfilelock.ReadLock { - success = l.rwMutex.TryRLock() - rwMutexUnlocker = l.rwMutex.RUnlock - } else { - success = l.rwMutex.TryLock() - rwMutexUnlocker = l.rwMutex.Unlock - } - if !success { - return fmt.Errorf("resource temporarily unavailable") - } - if !l.stateMutex.TryLock() { - rwMutexUnlocker() - return fmt.Errorf("resource temporarily unavailable") - } - defer l.stateMutex.Unlock() - if l.counter == 0 { - // If we're the first reference on the lock, we need to open the file again. - fd, err := openLock(l.file, l.ro) - if err != nil { - rwMutexUnlocker() - return err - } - l.fd = fd - - // Optimization: only use the (expensive) syscall when - // the counter is 0. In this case, we're either the first - // reader lock or a writer lock. - if err = rawfilelock.TryLockFile(l.fd, lType); err != nil { - rawfilelock.CloseHandle(fd) - rwMutexUnlocker() - return err - } - } - l.lockType = lType - l.locked = true - l.counter++ - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/lockfile/lockfile_unix.go b/vendor/go.podman.io/storage/pkg/lockfile/lockfile_unix.go deleted file mode 100644 index 780ad8aef..000000000 --- a/vendor/go.podman.io/storage/pkg/lockfile/lockfile_unix.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build !windows - -package lockfile - -import ( - "time" - - "go.podman.io/storage/pkg/system" - "golang.org/x/sys/unix" -) - -// GetLastWrite returns a LastWrite value corresponding to current state of the lock. -// This is typically called before (_not after_) loading the state when initializing a consumer -// of the data protected by the lock. -// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. -// -// The caller must hold the lock (for reading or writing). 
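Besides the blocking lock paths above, the public TryLock/TryRLock variants fail fast instead of waiting when the file is held elsewhere, which suits opportunistic maintenance work. A minimal sketch with a placeholder path:

```go
package main

import (
	"fmt"

	"go.podman.io/storage/pkg/lockfile"
)

func main() {
	lock, err := lockfile.GetLockFile("/tmp/example.lock") // placeholder path
	if err != nil {
		panic(err)
	}
	if err := lock.TryLock(); err != nil {
		// Returns "resource temporarily unavailable" rather than blocking.
		fmt.Println("busy, skipping:", err)
		return
	}
	defer lock.Unlock()
	// ... exclusive critical section ...
}
```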
-func (l *LockFile) GetLastWrite() (LastWrite, error) { - l.AssertLocked() - contents := make([]byte, lastWriterIDSize) - n, err := unix.Pread(int(l.fd), contents, 0) - if err != nil { - return LastWrite{}, err - } - // It is important to handle the partial read case, because - // the initial size of the lock file is zero, which is a valid - // state (no writes yet) - contents = contents[:n] - return newLastWriteFromData(contents), nil -} - -// RecordWrite updates the lock with a new LastWrite value, and returns the new value. -// -// If this function fails, the LastWriter value of the lock is indeterminate; -// the caller should keep using the previously-recorded LastWrite value, -// and possibly detecting its own modification as an external one: -// -// lw, err := state.lock.RecordWrite() -// if err != nil { /* fail */ } -// state.lastWrite = lw -// -// The caller must hold the lock for writing. -func (l *LockFile) RecordWrite() (LastWrite, error) { - l.AssertLockedForWriting() - lw := newLastWrite() - lockContents := lw.serialize() - n, err := unix.Pwrite(int(l.fd), lockContents, 0) - if err != nil { - return LastWrite{}, err - } - if n != len(lockContents) { - return LastWrite{}, unix.ENOSPC - } - return lw, nil -} - -// TouchedSince indicates if the lock file has been touched since the specified time -func (l *LockFile) TouchedSince(when time.Time) bool { - st, err := system.Fstat(int(l.fd)) - if err != nil { - return true - } - mtim := st.Mtim() - touched := time.Unix(mtim.Unix()) - return when.Before(touched) -} diff --git a/vendor/go.podman.io/storage/pkg/lockfile/lockfile_windows.go b/vendor/go.podman.io/storage/pkg/lockfile/lockfile_windows.go deleted file mode 100644 index e66f7bfbb..000000000 --- a/vendor/go.podman.io/storage/pkg/lockfile/lockfile_windows.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build windows - -package lockfile - -import ( - "os" - "time" - - "golang.org/x/sys/windows" -) - -const ( - reserved = 0 - allBytes = ^uint32(0) -) - -// GetLastWrite returns a LastWrite value corresponding to current state of the lock. -// This is typically called before (_not after_) loading the state when initializing a consumer -// of the data protected by the lock. -// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. -// -// The caller must hold the lock (for reading or writing) before this function is called. -func (l *LockFile) GetLastWrite() (LastWrite, error) { - l.AssertLocked() - contents := make([]byte, lastWriterIDSize) - ol := new(windows.Overlapped) - var n uint32 - err := windows.ReadFile(windows.Handle(l.fd), contents, &n, ol) - if err != nil && err != windows.ERROR_HANDLE_EOF { - return LastWrite{}, err - } - // It is important to handle the partial read case, because - // the initial size of the lock file is zero, which is a valid - // state (no writes yet) - contents = contents[:n] - return newLastWriteFromData(contents), nil -} - -// RecordWrite updates the lock with a new LastWrite value, and returns the new value. -// -// If this function fails, the LastWriter value of the lock is indeterminate; -// the caller should keep using the previously-recorded LastWrite value, -// and possibly detecting its own modification as an external one: -// -// lw, err := state.lock.RecordWrite() -// if err != nil { /* fail */ } -// state.lastWrite = lw -// -// The caller must hold the lock for writing. 
-func (l *LockFile) RecordWrite() (LastWrite, error) { - l.AssertLockedForWriting() - lw := newLastWrite() - lockContents := lw.serialize() - ol := new(windows.Overlapped) - var n uint32 - err := windows.WriteFile(windows.Handle(l.fd), lockContents, &n, ol) - if err != nil { - return LastWrite{}, err - } - if int(n) != len(lockContents) { - return LastWrite{}, windows.ERROR_DISK_FULL - } - return lw, nil -} - -// TouchedSince indicates if the lock file has been touched since the specified time -func (l *LockFile) TouchedSince(when time.Time) bool { - stat, err := os.Stat(l.file) - if err != nil { - return true - } - return when.Before(stat.ModTime()) -} diff --git a/vendor/go.podman.io/storage/pkg/longpath/longpath.go b/vendor/go.podman.io/storage/pkg/longpath/longpath.go deleted file mode 100644 index 9b15bfff4..000000000 --- a/vendor/go.podman.io/storage/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/vendor/go.podman.io/storage/pkg/mount/flags.go b/vendor/go.podman.io/storage/pkg/mount/flags.go deleted file mode 100644 index 9325e2597..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/flags.go +++ /dev/null @@ -1,149 +0,0 @@ -package mount - -import ( - "fmt" - "strings" -) - -var flags = map[string]struct { - clear bool - flag int -}{ - "defaults": {false, 0}, - "ro": {false, RDONLY}, - "rw": {true, RDONLY}, - "suid": {true, NOSUID}, - "nosuid": {false, NOSUID}, - "dev": {true, NODEV}, - "nodev": {false, NODEV}, - "exec": {true, NOEXEC}, - "noexec": {false, NOEXEC}, - "sync": {false, SYNCHRONOUS}, - "async": {true, SYNCHRONOUS}, - "dirsync": {false, DIRSYNC}, - "remount": {false, REMOUNT}, - "mand": {false, MANDLOCK}, - "nomand": {true, MANDLOCK}, - "atime": {true, NOATIME}, - "noatime": {false, NOATIME}, - "diratime": {true, NODIRATIME}, - "nodiratime": {false, NODIRATIME}, - "bind": {false, BIND}, - "rbind": {false, RBIND}, - "unbindable": {false, UNBINDABLE}, - "runbindable": {false, RUNBINDABLE}, - "private": {false, PRIVATE}, - "rprivate": {false, RPRIVATE}, - "shared": {false, SHARED}, - "rshared": {false, RSHARED}, - "slave": {false, SLAVE}, - "rslave": {false, RSLAVE}, - "relatime": {false, RELATIME}, - "norelatime": {true, RELATIME}, - "strictatime": {false, STRICTATIME}, - "nostrictatime": {true, STRICTATIME}, -} - -var validFlags = map[string]bool{ - "": true, - "size": true, - "mode": true, - "uid": true, - "gid": true, - "nr_inodes": true, - "nr_blocks": true, - "mpol": true, -} - -var propagationFlags = map[string]bool{ - "bind": true, - "rbind": true, - "unbindable": true, - "runbindable": true, - "private": true, - "rprivate": true, - "shared": true, - "rshared": true, - "slave": true, - "rslave": true, -} - -// MergeTmpfsOptions merge mount options to make sure there is no duplicate. 
-func MergeTmpfsOptions(options []string) ([]string, error) { - // We use collisions maps to remove duplicates. - // For flag, the key is the flag value (the key for propagation flag is -1) - // For data=value, the key is the data - flagCollisions := map[int]bool{} - dataCollisions := map[string]bool{} - - var newOptions []string - // We process in reverse order - for i := len(options) - 1; i >= 0; i-- { - option := options[i] - if option == "defaults" { - continue - } - if f, ok := flags[option]; ok && f.flag != 0 { - // There is only one propagation mode - key := f.flag - if propagationFlags[option] { - key = -1 - } - // Check to see if there is collision for flag - if !flagCollisions[key] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) - flagCollisions[key] = true - } - continue - } - opt, _, ok := strings.Cut(option, "=") - if !ok || !validFlags[opt] { - return nil, fmt.Errorf("invalid tmpfs option %q", opt) - } - if !dataCollisions[opt] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) - dataCollisions[opt] = true - } - } - - return newOptions, nil -} - -// ParseOptions parses fstab type mount options into mount() flags -// and device specific data -func ParseOptions(options string) (int, string) { - var ( - flag int - data []string - ) - - for o := range strings.SplitSeq(options, ",") { - // If the option does not exist in the flags table or the flag - // is not supported on the platform, - // then it is a data value for a specific fs type - if f, exists := flags[o]; exists && f.flag != 0 { - if f.clear { - flag &= ^f.flag - } else { - flag |= f.flag - } - } else { - data = append(data, o) - } - } - return flag, strings.Join(data, ",") -} - -// ParseTmpfsOptions parse fstab type mount options into flags and data -func ParseTmpfsOptions(options string) (int, string, error) { - flags, data := ParseOptions(options) - for o := range strings.SplitSeq(data, ",") { - opt, _, _ := strings.Cut(o, "=") - if !validFlags[opt] { - return 0, "", fmt.Errorf("invalid tmpfs option %q", opt) - } - } - return flags, data, nil -} diff --git a/vendor/go.podman.io/storage/pkg/mount/flags_freebsd.go b/vendor/go.podman.io/storage/pkg/mount/flags_freebsd.go deleted file mode 100644 index 3ba99cf93..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/flags_freebsd.go +++ /dev/null @@ -1,48 +0,0 @@ -package mount - -import ( - "golang.org/x/sys/unix" -) - -const ( - // RDONLY will mount the file system read-only. - RDONLY = unix.MNT_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = unix.MNT_NOSUID - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = unix.MNT_NOEXEC - - // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = unix.MNT_SYNCHRONOUS - - // REMOUNT will attempt to remount an already-mounted file system. This is - // commonly used to change the mount flags for a file system, especially to - // make a readonly file system writeable. It does not change device or mount - // point. - REMOUNT = unix.MNT_UPDATE - - // NOATIME will not update the file access time when reading from a file. 
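ParseOptions above maps an fstab-style option string onto mount(2) flag bits and passes unknown keys through as filesystem-specific data; MergeTmpfsOptions deduplicates by walking the list right to left, so later options win. An illustrative run, assuming the deleted implementations (Linux flag values):

    package main

    import (
        "fmt"

        "go.podman.io/storage/pkg/mount"
    )

    func main() {
        flag, data := mount.ParseOptions("ro,nosuid,size=64m")
        fmt.Printf("%#x %q\n", flag, data) // flag == RDONLY|NOSUID, data == "size=64m"

        merged, err := mount.MergeTmpfsOptions([]string{"noexec", "size=64m", "exec", "size=128m"})
        fmt.Println(merged, err) // [exec size=128m] <nil>: later duplicates replace earlier ones
    }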
- NOATIME = unix.MNT_NOATIME - - mntDetach = unix.MNT_FORCE - - NODIRATIME = 0 - NODEV = 0 - DIRSYNC = 0 - MANDLOCK = 0 - BIND = 0 - RBIND = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SLAVE = 0 - RSLAVE = 0 - SHARED = 0 - RSHARED = 0 - RELATIME = 0 - STRICTATIME = 0 -) diff --git a/vendor/go.podman.io/storage/pkg/mount/flags_linux.go b/vendor/go.podman.io/storage/pkg/mount/flags_linux.go deleted file mode 100644 index 0425d0dd6..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/flags_linux.go +++ /dev/null @@ -1,87 +0,0 @@ -package mount - -import ( - "golang.org/x/sys/unix" -) - -const ( - // RDONLY will mount the file system read-only. - RDONLY = unix.MS_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = unix.MS_NOSUID - - // NODEV will not interpret character or block special devices on the file - // system. - NODEV = unix.MS_NODEV - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = unix.MS_NOEXEC - - // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = unix.MS_SYNCHRONOUS - - // DIRSYNC will force all directory updates within the file system to be done - // synchronously. This affects the following system calls: create, link, - // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = unix.MS_DIRSYNC - - // REMOUNT will attempt to remount an already-mounted file system. This is - // commonly used to change the mount flags for a file system, especially to - // make a readonly file system writeable. It does not change device or mount - // point. - REMOUNT = unix.MS_REMOUNT - - // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = unix.MS_MANDLOCK - - // NOATIME will not update the file access time when reading from a file. - NOATIME = unix.MS_NOATIME - - // NODIRATIME will not update the directory access time. - NODIRATIME = unix.MS_NODIRATIME - - // BIND remounts a subtree somewhere else. - BIND = unix.MS_BIND - - // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = unix.MS_BIND | unix.MS_REC - - // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = unix.MS_UNBINDABLE - - // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC - - // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = unix.MS_PRIVATE - - // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = unix.MS_PRIVATE | unix.MS_REC - - // SLAVE creates a mount which receives propagation from its master, but not - // vice versa. - SLAVE = unix.MS_SLAVE - - // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = unix.MS_SLAVE | unix.MS_REC - - // SHARED creates a mount which provides the ability to create mirrors of - // that mount such that mounts and unmounts within any of the mirrors - // propagate to the other mirrors. - SHARED = unix.MS_SHARED - - // RSHARED marks the entire mount tree as SHARED. - RSHARED = unix.MS_SHARED | unix.MS_REC - - // RELATIME updates inode access times relative to modify or change time. - RELATIME = unix.MS_RELATIME - - // STRICTATIME allows to explicitly request full atime updates. This makes - // it possible for the kernel to default to relatime or noatime but still - // allow userspace to override it. 
- STRICTATIME = unix.MS_STRICTATIME - - mntDetach = unix.MNT_DETACH -) diff --git a/vendor/go.podman.io/storage/pkg/mount/flags_unsupported.go b/vendor/go.podman.io/storage/pkg/mount/flags_unsupported.go deleted file mode 100644 index e581d64eb..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/flags_unsupported.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !linux && !freebsd - -package mount - -// These flags are unsupported. -const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NOATIME = 0 - NODEV = 0 - NODIRATIME = 0 - NOEXEC = 0 - NOSUID = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIME = 0 - RELATIVE = 0 - REMOUNT = 0 - STRICTATIME = 0 - SYNCHRONOUS = 0 - RDONLY = 0 - mntDetach = 0 -) diff --git a/vendor/go.podman.io/storage/pkg/mount/mount.go b/vendor/go.podman.io/storage/pkg/mount/mount.go deleted file mode 100644 index 23c5c44ac..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/mount.go +++ /dev/null @@ -1,110 +0,0 @@ -package mount - -import ( - "sort" - "strconv" - "strings" -) - -// mountError holds an error from a mount or unmount operation -type mountError struct { - op string - source, target string - flags uintptr - data string - err error -} - -// Error returns a string representation of mountError -func (e *mountError) Error() string { - out := e.op + " " - - if e.source != "" { - out += e.source + ":" + e.target - } else { - out += e.target - } - - if e.flags != uintptr(0) { - out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16) - } - if e.data != "" { - out += ", data: " + e.data - } - - out += ": " + e.err.Error() - return out -} - -// Cause returns the underlying cause of the error -func (e *mountError) Cause() error { - return e.err -} - -// Unwrap returns the underlying cause of the error -func (e *mountError) Unwrap() error { - return e.err -} - -// Mount will mount filesystem according to the specified configuration, on the -// condition that the target path is *not* already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func Mount(device, target, mType, options string) error { - flag, data := ParseOptions(options) - if flag&REMOUNT != REMOUNT { - if mounted, err := Mounted(target); err != nil || mounted { - return err - } - } - return mount(device, target, mType, uintptr(flag), data) -} - -// ForceMount will mount a filesystem according to the specified configuration, -// *regardless* if the target path is not already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func ForceMount(device, target, mType, options string) error { - flag, data := ParseOptions(options) - return mount(device, target, mType, uintptr(flag), data) -} - -// Unmount lazily unmounts a filesystem on supported platforms, otherwise -// does a normal unmount. -func Unmount(target string) error { - return unmount(target, mntDetach) -} - -// RecursiveUnmount unmounts the target and all mounts underneath, starting with -// the deepest mount first. 
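Mount above returns early with nil when the target is already mounted and no remount was requested, while ForceMount always issues the call. A hedged example of the calling convention (device, target, fstype, option string; the mount point is made up):

    package main

    import (
        "log"

        "go.podman.io/storage/pkg/mount"
    )

    func main() {
        // No "remount" in the options, so an already-mounted target is a no-op.
        if err := mount.Mount("tmpfs", "/mnt/scratch", "tmpfs", "size=64m,noexec"); err != nil {
            log.Fatal(err)
        }
        defer mount.Unmount("/mnt/scratch") // lazy unmount (MNT_DETACH) where supported
    }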
-func RecursiveUnmount(target string) error { - mounts, err := GetMounts() - if err != nil { - return err - } - - // Make the deepest mount be first - sort.Slice(mounts, func(i, j int) bool { - return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) - }) - - for i, m := range mounts { - if !strings.HasPrefix(m.Mountpoint, target) { - continue - } - if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { - return err - // Ignore errors for submounts and continue trying to unmount others - // The final unmount should fail if there are any submounts remaining - } - } - return nil -} - -// ForceUnmount lazily unmounts a filesystem on supported platforms, -// otherwise does a normal unmount. -// -// Deprecated: please use Unmount instead, it is identical. -func ForceUnmount(target string) error { - return unmount(target, mntDetach) -} diff --git a/vendor/go.podman.io/storage/pkg/mount/mounter_freebsd.go b/vendor/go.podman.io/storage/pkg/mount/mounter_freebsd.go deleted file mode 100644 index 1c99ff461..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/mounter_freebsd.go +++ /dev/null @@ -1,62 +0,0 @@ -//go:build freebsd && cgo - -package mount - -/* -#include -#include -#include -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "strings" - "unsafe" -) - -func allocateIOVecs(options []string) []C.struct_iovec { - out := make([]C.struct_iovec, len(options)) - for i, option := range options { - out[i].iov_base = unsafe.Pointer(C.CString(option)) - out[i].iov_len = C.size_t(len(option) + 1) - } - return out -} - -func mount(device, target, mType string, flag uintptr, data string) error { - isNullFS := false - - options := []string{"fspath", target} - - if data != "" { - for x := range strings.SplitSeq(data, ",") { - if x == "bind" { - isNullFS = true - continue - } - name, val, _ := strings.Cut(x, "=") - options = append(options, name) - options = append(options, val) - } - } - - if isNullFS { - options = append(options, "fstype", "nullfs", "target", device) - } else { - options = append(options, "fstype", mType, "from", device) - } - rawOptions := allocateIOVecs(options) - for _, rawOption := range rawOptions { - defer C.free(rawOption.iov_base) - } - - if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - reason := C.GoString(C.strerror(*C.__error())) - return fmt.Errorf("failed to call nmount: %s", reason) - } - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/mount/mounter_linux.go b/vendor/go.podman.io/storage/pkg/mount/mounter_linux.go deleted file mode 100644 index 594cd0881..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/mounter_linux.go +++ /dev/null @@ -1,74 +0,0 @@ -package mount - -import ( - "golang.org/x/sys/unix" -) - -const ( - // ptypes is the set propagation types. - ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE - - // pflags is the full set valid flags for a change propagation call. - pflags = ptypes | unix.MS_REC | unix.MS_SILENT - - // broflags is the combination of bind and read only - broflags = unix.MS_BIND | unix.MS_RDONLY - - none = "none" -) - -// isremount returns true if either device name or flags identify a remount request, false otherwise. -func isremount(device string, flags uintptr) bool { - switch { - // We treat device "" and "none" as a remount request to provide compatibility with - // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. 
- case flags&unix.MS_REMOUNT != 0, device == "", device == none: - return true - default: - return false - } -} - -func mount(device, target, mType string, flags uintptr, data string) error { - oflags := flags &^ ptypes - if !isremount(device, flags) || data != "" { - // Initial call applying all non-propagation flags for mount - // or remount with changed data - if err := unix.Mount(device, target, mType, oflags, data); err != nil { - return &mountError{ - op: "mount", - source: device, - target: target, - flags: oflags, - data: data, - err: err, - } - } - } - - if flags&ptypes != 0 { - // Change the propagation type. - if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { - return &mountError{ - op: "remount", - target: target, - flags: flags & pflags, - err: err, - } - } - } - - if oflags&broflags == broflags { - // Remount the bind to apply read only. - if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { - return &mountError{ - op: "remount-ro", - target: target, - flags: oflags | unix.MS_REMOUNT, - err: err, - } - } - } - - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/mount/mounter_unsupported.go b/vendor/go.podman.io/storage/pkg/mount/mounter_unsupported.go deleted file mode 100644 index b9dc82d3f..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/mounter_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !linux && !(freebsd && cgo) - -package mount - -func mount(device, target, mType string, flag uintptr, data string) error { - panic("Not implemented") -} diff --git a/vendor/go.podman.io/storage/pkg/mount/mountinfo.go b/vendor/go.podman.io/storage/pkg/mount/mountinfo.go deleted file mode 100644 index bb2da474f..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/mountinfo.go +++ /dev/null @@ -1,13 +0,0 @@ -package mount - -import ( - "github.com/moby/sys/mountinfo" -) - -type Info = mountinfo.Info - -var Mounted = mountinfo.Mounted - -func GetMounts() ([]*Info, error) { - return mountinfo.GetMounts(nil) -} diff --git a/vendor/go.podman.io/storage/pkg/mount/mountinfo_linux.go b/vendor/go.podman.io/storage/pkg/mount/mountinfo_linux.go deleted file mode 100644 index 2d9e75ea1..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/mountinfo_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -package mount - -import ( - "fmt" - "os" - - "github.com/moby/sys/mountinfo" -) - -func PidMountInfo(pid int) ([]*Info, error) { - f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - defer f.Close() - - return mountinfo.GetMountsFromReader(f, nil) -} diff --git a/vendor/go.podman.io/storage/pkg/mount/sharedsubtree_linux.go b/vendor/go.podman.io/storage/pkg/mount/sharedsubtree_linux.go deleted file mode 100644 index 80922ad5c..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/sharedsubtree_linux.go +++ /dev/null @@ -1,64 +0,0 @@ -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, SHARED) -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, RSHARED) -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. 
-func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, PRIVATE) -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, RPRIVATE) -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, SLAVE) -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, RSLAVE) -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, UNBINDABLE) -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, RUNBINDABLE) -} - -func ensureMountedAs(mnt string, flags int) error { - mounted, err := Mounted(mnt) - if err != nil { - return err - } - - if !mounted { - if err := mount(mnt, mnt, "none", uintptr(BIND), ""); err != nil { - return err - } - } - - return mount("", mnt, "none", uintptr(flags), "") -} diff --git a/vendor/go.podman.io/storage/pkg/mount/unmount_unix.go b/vendor/go.podman.io/storage/pkg/mount/unmount_unix.go deleted file mode 100644 index 331272e0c..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/unmount_unix.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build !windows - -package mount - -import ( - "time" - - "golang.org/x/sys/unix" -) - -func unmount(target string, flags int) error { - var err error - for range 50 { - err = unix.Unmount(target, flags) - switch err { - case unix.EBUSY: - time.Sleep(50 * time.Millisecond) - continue - case unix.EINVAL, nil: - // Ignore "not mounted" error here. Note the same error - // can be returned if flags are invalid, so this code - // assumes that the flags value is always correct. - return nil - } - break - } - - return &mountError{ - op: "umount", - target: target, - flags: uintptr(flags), - err: err, - } -} diff --git a/vendor/go.podman.io/storage/pkg/mount/unmount_unsupported.go b/vendor/go.podman.io/storage/pkg/mount/unmount_unsupported.go deleted file mode 100644 index 3c942bfb2..000000000 --- a/vendor/go.podman.io/storage/pkg/mount/unmount_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build windows - -package mount - -func unmount(target string, flag int) error { - panic("Not implemented") -} diff --git a/vendor/go.podman.io/storage/pkg/reexec/README.md b/vendor/go.podman.io/storage/pkg/reexec/README.md deleted file mode 100644 index 6658f69b6..000000000 --- a/vendor/go.podman.io/storage/pkg/reexec/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# reexec - -The `reexec` package facilitates the busybox style reexec of the docker binary that we require because -of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of -the exec of the binary will be used to find and execute custom init paths. 
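The README above describes the argv[0] dispatch trick; Register, Init, and Command below implement it. A minimal sketch of the boilerplate a consumer needs (the handler name is made up):

    package main

    import (
        "fmt"
        "os"

        "go.podman.io/storage/pkg/reexec"
    )

    func init() {
        // Runs in every process; the handler fires only when argv[0] matches.
        reexec.Register("sandbox-helper", func() {
            fmt.Println("running in the re-exec'd child")
        })
    }

    func main() {
        if reexec.Init() { // child path: a registered handler matched argv[0] and ran
            return
        }
        cmd := reexec.Command("sandbox-helper") // parent path: spawn ourselves as the helper
        if err := cmd.Run(); err != nil {
            fmt.Fprintln(os.Stderr, "helper failed:", err)
        }
    }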
diff --git a/vendor/go.podman.io/storage/pkg/reexec/command_freebsd.go b/vendor/go.podman.io/storage/pkg/reexec/command_freebsd.go deleted file mode 100644 index 171cd81e7..000000000 --- a/vendor/go.podman.io/storage/pkg/reexec/command_freebsd.go +++ /dev/null @@ -1,37 +0,0 @@ -//go:build freebsd - -package reexec - -import ( - "context" - "os" - "os/exec" - - "golang.org/x/sys/unix" -) - -// Self returns the path to the current process's binary. -// Uses sysctl. -func Self() string { - path, err := unix.SysctlArgs("kern.proc.pathname", -1) - if err == nil { - return path - } - return os.Args[0] -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will -// be set to "/usr/bin/docker". -func Command(args ...string) *exec.Cmd { - cmd := exec.Command(Self()) - cmd.Args = args - return cmd -} - -// CommandContext returns *exec.Cmd which has Path as current binary. -func CommandContext(ctx context.Context, args ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, Self()) - cmd.Args = args - return cmd -} diff --git a/vendor/go.podman.io/storage/pkg/reexec/command_linux.go b/vendor/go.podman.io/storage/pkg/reexec/command_linux.go deleted file mode 100644 index 025aef60a..000000000 --- a/vendor/go.podman.io/storage/pkg/reexec/command_linux.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build linux - -package reexec - -import ( - "context" - "os/exec" -) - -// Self returns the path to the current process's binary. -// Returns "/proc/self/exe". -func Self() string { - return "/proc/self/exe" -} - -// Command returns *exec.Cmd which has Path as current binary. -// This will use the in-memory version (/proc/self/exe) of the current binary, -// it is thus safe to delete or replace the on-disk binary (os.Args[0]). -func Command(args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.Command(Self()) - cmd.Args = args - return cmd -} - -// CommandContext returns *exec.Cmd which has Path as current binary. -// This will use the in-memory version (/proc/self/exe) of the current binary, -// it is thus safe to delete or replace the on-disk binary (os.Args[0]). -func CommandContext(ctx context.Context, args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.CommandContext(ctx, Self()) - cmd.Args = args - return cmd -} diff --git a/vendor/go.podman.io/storage/pkg/reexec/command_unix.go b/vendor/go.podman.io/storage/pkg/reexec/command_unix.go deleted file mode 100644 index eefddea41..000000000 --- a/vendor/go.podman.io/storage/pkg/reexec/command_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build solaris || darwin - -package reexec - -import ( - "context" - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will -// be set to "/usr/bin/docker". -func Command(args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.Command(Self()) - cmd.Args = args - return cmd -} - -// CommandContext returns *exec.Cmd which has Path as current binary. 
-func CommandContext(ctx context.Context, args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.CommandContext(ctx, Self()) - cmd.Args = args - return cmd -} diff --git a/vendor/go.podman.io/storage/pkg/reexec/command_unsupported.go b/vendor/go.podman.io/storage/pkg/reexec/command_unsupported.go deleted file mode 100644 index a78b548a5..000000000 --- a/vendor/go.podman.io/storage/pkg/reexec/command_unsupported.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build !linux && !windows && !freebsd && !solaris && !darwin - -package reexec - -import ( - "context" - "os/exec" -) - -// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. -func Command(args ...string) *exec.Cmd { - panicIfNotInitialized() - return nil -} - -// CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. -func CommandContext(ctx context.Context, args ...string) *exec.Cmd { - panicIfNotInitialized() - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/reexec/command_windows.go b/vendor/go.podman.io/storage/pkg/reexec/command_windows.go deleted file mode 100644 index ba2f0f847..000000000 --- a/vendor/go.podman.io/storage/pkg/reexec/command_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build windows - -package reexec - -import ( - "context" - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". -func Command(args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.Command(Self()) - cmd.Args = args - return cmd -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". -func CommandContext(ctx context.Context, args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.CommandContext(ctx, Self()) - cmd.Args = args - return cmd -} diff --git a/vendor/go.podman.io/storage/pkg/reexec/reexec.go b/vendor/go.podman.io/storage/pkg/reexec/reexec.go deleted file mode 100644 index a1938cd4f..000000000 --- a/vendor/go.podman.io/storage/pkg/reexec/reexec.go +++ /dev/null @@ -1,66 +0,0 @@ -package reexec - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" -) - -var ( - registeredInitializers = make(map[string]func()) - initWasCalled = false -) - -// Register adds an initialization func under the specified name -func Register(name string, initializer func()) { - if _, exists := registeredInitializers[name]; exists { - panic(fmt.Sprintf("reexec func already registered under name %q", name)) - } - - registeredInitializers[name] = initializer -} - -// Init is called as the first part of the exec process and returns true if an -// initialization function was called. -func Init() bool { - initializer, exists := registeredInitializers[os.Args[0]] - initWasCalled = true - if exists { - initializer() - - return true - } - return false -} - -func panicIfNotInitialized() { - if !initWasCalled { - // The reexec package is used to run subroutines in - // subprocesses which would otherwise have unacceptable side - // effects on the main thread. If you found this error, then - // your program uses a package which needs to do this. 
In - // order for that to work, main() should start with this - // boilerplate, or an equivalent: - // if reexec.Init() { - // return - // } - panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()") - } -} - -func naiveSelf() string { - name := os.Args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - return lp - } - } - // handle conversion of relative paths to absolute - if absName, err := filepath.Abs(name); err == nil { - return absName - } - // if we couldn't get absolute name, return original - // (NOTE: Go only errors on Abs() if os.Getwd fails) - return name -} diff --git a/vendor/go.podman.io/storage/pkg/regexp/regexp.go b/vendor/go.podman.io/storage/pkg/regexp/regexp.go deleted file mode 100644 index 1a3333dba..000000000 --- a/vendor/go.podman.io/storage/pkg/regexp/regexp.go +++ /dev/null @@ -1,234 +0,0 @@ -package regexp - -import ( - "io" - "regexp" - "sync" -) - -// Regexp is a wrapper struct used for wrapping MustCompile regex expressions -// used as global variables. Using this structure helps speed the startup time -// of apps that want to use global regex variables. This library initializes them on -// first use as opposed to the start of the executable. -type Regexp struct { - *regexpStruct -} - -type regexpStruct struct { - _ noCopy - once sync.Once - regexp *regexp.Regexp - val string -} - -func Delayed(val string) Regexp { - re := ®expStruct{ - val: val, - } - if precompile { - re.regexp = regexp.MustCompile(re.val) - } - return Regexp{re} -} - -func (re *regexpStruct) compile() { - if precompile { - return - } - re.once.Do(func() { - re.regexp = regexp.MustCompile(re.val) - }) -} - -func (re *regexpStruct) Expand(dst []byte, template []byte, src []byte, match []int) []byte { - re.compile() - return re.regexp.Expand(dst, template, src, match) -} - -func (re *regexpStruct) ExpandString(dst []byte, template string, src string, match []int) []byte { - re.compile() - return re.regexp.ExpandString(dst, template, src, match) -} - -func (re *regexpStruct) Find(b []byte) []byte { - re.compile() - return re.regexp.Find(b) -} - -func (re *regexpStruct) FindAll(b []byte, n int) [][]byte { - re.compile() - return re.regexp.FindAll(b, n) -} - -func (re *regexpStruct) FindAllIndex(b []byte, n int) [][]int { - re.compile() - return re.regexp.FindAllIndex(b, n) -} - -func (re *regexpStruct) FindAllString(s string, n int) []string { - re.compile() - return re.regexp.FindAllString(s, n) -} - -func (re *regexpStruct) FindAllStringIndex(s string, n int) [][]int { - re.compile() - return re.regexp.FindAllStringIndex(s, n) -} - -func (re *regexpStruct) FindAllStringSubmatch(s string, n int) [][]string { - re.compile() - return re.regexp.FindAllStringSubmatch(s, n) -} - -func (re *regexpStruct) FindAllStringSubmatchIndex(s string, n int) [][]int { - re.compile() - return re.regexp.FindAllStringSubmatchIndex(s, n) -} - -func (re *regexpStruct) FindAllSubmatch(b []byte, n int) [][][]byte { - re.compile() - return re.regexp.FindAllSubmatch(b, n) -} - -func (re *regexpStruct) FindAllSubmatchIndex(b []byte, n int) [][]int { - re.compile() - return re.regexp.FindAllSubmatchIndex(b, n) -} - -func (re *regexpStruct) FindIndex(b []byte) (loc []int) { - re.compile() - return re.regexp.FindIndex(b) -} - -func (re *regexpStruct) FindReaderIndex(r io.RuneReader) (loc []int) { - re.compile() - return re.regexp.FindReaderIndex(r) -} - -func (re *regexpStruct) FindReaderSubmatchIndex(r io.RuneReader) []int { - re.compile() 
- return re.regexp.FindReaderSubmatchIndex(r) -} - -func (re *regexpStruct) FindString(s string) string { - re.compile() - return re.regexp.FindString(s) -} - -func (re *regexpStruct) FindStringIndex(s string) (loc []int) { - re.compile() - return re.regexp.FindStringIndex(s) -} - -func (re *regexpStruct) FindStringSubmatch(s string) []string { - re.compile() - return re.regexp.FindStringSubmatch(s) -} - -func (re *regexpStruct) FindStringSubmatchIndex(s string) []int { - re.compile() - return re.regexp.FindStringSubmatchIndex(s) -} - -func (re *regexpStruct) FindSubmatch(b []byte) [][]byte { - re.compile() - return re.regexp.FindSubmatch(b) -} - -func (re *regexpStruct) FindSubmatchIndex(b []byte) []int { - re.compile() - return re.regexp.FindSubmatchIndex(b) -} - -func (re *regexpStruct) LiteralPrefix() (prefix string, complete bool) { - re.compile() - return re.regexp.LiteralPrefix() -} - -func (re *regexpStruct) Longest() { - re.compile() - re.regexp.Longest() -} - -func (re *regexpStruct) Match(b []byte) bool { - re.compile() - return re.regexp.Match(b) -} - -func (re *regexpStruct) MatchReader(r io.RuneReader) bool { - re.compile() - return re.regexp.MatchReader(r) -} - -func (re *regexpStruct) MatchString(s string) bool { - re.compile() - return re.regexp.MatchString(s) -} - -func (re *regexpStruct) NumSubexp() int { - re.compile() - return re.regexp.NumSubexp() -} - -func (re *regexpStruct) ReplaceAll(src, repl []byte) []byte { - re.compile() - return re.regexp.ReplaceAll(src, repl) -} - -func (re *regexpStruct) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { - re.compile() - return re.regexp.ReplaceAllFunc(src, repl) -} - -func (re *regexpStruct) ReplaceAllLiteral(src, repl []byte) []byte { - re.compile() - return re.regexp.ReplaceAllLiteral(src, repl) -} - -func (re *regexpStruct) ReplaceAllLiteralString(src, repl string) string { - re.compile() - return re.regexp.ReplaceAllLiteralString(src, repl) -} - -func (re *regexpStruct) ReplaceAllString(src, repl string) string { - re.compile() - return re.regexp.ReplaceAllString(src, repl) -} - -func (re *regexpStruct) ReplaceAllStringFunc(src string, repl func(string) string) string { - re.compile() - return re.regexp.ReplaceAllStringFunc(src, repl) -} - -func (re *regexpStruct) Split(s string, n int) []string { - re.compile() - return re.regexp.Split(s, n) -} - -func (re *regexpStruct) String() string { - re.compile() - return re.regexp.String() -} - -func (re *regexpStruct) SubexpIndex(name string) int { - re.compile() - return re.regexp.SubexpIndex(name) -} - -func (re *regexpStruct) SubexpNames() []string { - re.compile() - return re.regexp.SubexpNames() -} - -// noCopy may be added to structs which must not be copied -// after the first use. -// -// See https://golang.org/issues/8005#issuecomment-190753527 -// for details. -// -// Note that it must not be embedded, due to the Lock and Unlock methods. -type noCopy struct{} - -// Lock is a no-op used by -copylocks checker from `go vet`. 
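The Delayed wrapper above postpones regexp.MustCompile until first use (unless the regexp_precompile build tag sets precompile, below), trading a sync.Once check per call for faster startup in binaries with many package-level patterns. A usage sketch, assuming the deleted API:

    package main

    import (
        "fmt"

        "go.podman.io/storage/pkg/regexp"
    )

    // Not compiled at program start; the pattern itself is illustrative.
    var semverRe = regexp.Delayed(`^v?\d+\.\d+\.\d+$`)

    func main() {
        fmt.Println(semverRe.MatchString("v1.2.3")) // first use compiles, then prints true
    }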
-func (*noCopy) Lock() {} -func (*noCopy) Unlock() {} diff --git a/vendor/go.podman.io/storage/pkg/regexp/regexp_dontprecompile.go b/vendor/go.podman.io/storage/pkg/regexp/regexp_dontprecompile.go deleted file mode 100644 index ccd9d0fb1..000000000 --- a/vendor/go.podman.io/storage/pkg/regexp/regexp_dontprecompile.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build !regexp_precompile - -package regexp - -const precompile = false diff --git a/vendor/go.podman.io/storage/pkg/regexp/regexp_precompile.go b/vendor/go.podman.io/storage/pkg/regexp/regexp_precompile.go deleted file mode 100644 index fe4421b01..000000000 --- a/vendor/go.podman.io/storage/pkg/regexp/regexp_precompile.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build regexp_precompile - -package regexp - -const precompile = true diff --git a/vendor/go.podman.io/storage/pkg/system/chmod.go b/vendor/go.podman.io/storage/pkg/system/chmod.go deleted file mode 100644 index a01d8abfb..000000000 --- a/vendor/go.podman.io/storage/pkg/system/chmod.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import ( - "errors" - "os" - "syscall" -) - -func Chmod(name string, mode os.FileMode) error { - err := os.Chmod(name, mode) - - for err != nil && errors.Is(err, syscall.EINTR) { - err = os.Chmod(name, mode) - } - - return err -} diff --git a/vendor/go.podman.io/storage/pkg/system/chtimes.go b/vendor/go.podman.io/storage/pkg/system/chtimes.go deleted file mode 100644 index 056d19954..000000000 --- a/vendor/go.podman.io/storage/pkg/system/chtimes.go +++ /dev/null @@ -1,35 +0,0 @@ -package system - -import ( - "os" - "time" -) - -// Chtimes changes the access time and modified time of a file at the given path -func Chtimes(name string, atime time.Time, mtime time.Time) error { - unixMinTime := time.Unix(0, 0) - unixMaxTime := maxTime - - // If the modified time is prior to the Unix Epoch, or after the - // end of Unix Time, os.Chtimes has undefined behavior - // default to Unix Epoch in this case, just in case - - if atime.Before(unixMinTime) || atime.After(unixMaxTime) { - atime = unixMinTime - } - - if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { - mtime = unixMinTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - // Take platform specific action for setting create time. - if err := setCTime(name, mtime); err != nil { - return err - } - - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/chtimes_unix.go b/vendor/go.podman.io/storage/pkg/system/chtimes_unix.go deleted file mode 100644 index 892d56138..000000000 --- a/vendor/go.podman.io/storage/pkg/system/chtimes_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !windows - -package system - -import ( - "time" -) - -// setCTime will set the create time on a file. On Unix, the create -// time is updated as a side effect of setting the modified time, so -// no action is required. -func setCTime(path string, ctime time.Time) error { - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/chtimes_windows.go b/vendor/go.podman.io/storage/pkg/system/chtimes_windows.go deleted file mode 100644 index f0d744eb8..000000000 --- a/vendor/go.podman.io/storage/pkg/system/chtimes_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build windows - -package system - -import ( - "time" - - "golang.org/x/sys/windows" -) - -// setCTime will set the create time on a file. On Windows, this requires -// calling SetFileTime and explicitly including the create time. 
-func setCTime(path string, ctime time.Time) error { - ctimespec := windows.NsecToTimespec(ctime.UnixNano()) - pathp, e := windows.UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := windows.CreateFile(pathp, - windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, - windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer windows.Close(h) - c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) - return windows.SetFileTime(h, &c, nil, nil) -} diff --git a/vendor/go.podman.io/storage/pkg/system/errors.go b/vendor/go.podman.io/storage/pkg/system/errors.go deleted file mode 100644 index b87d419b5..000000000 --- a/vendor/go.podman.io/storage/pkg/system/errors.go +++ /dev/null @@ -1,8 +0,0 @@ -package system - -import ( - "errors" -) - -// ErrNotSupportedPlatform means the platform is not supported. -var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") diff --git a/vendor/go.podman.io/storage/pkg/system/exitcode.go b/vendor/go.podman.io/storage/pkg/system/exitcode.go deleted file mode 100644 index 4d7b5c880..000000000 --- a/vendor/go.podman.io/storage/pkg/system/exitcode.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "fmt" - "os/exec" - "syscall" -) - -// GetExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. -func GetExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} - -// ProcessExitCode process the specified error and returns the exit status code -// if the error was of type exec.ExitError, returns nothing otherwise. -func ProcessExitCode(err error) (exitCode int) { - if err != nil { - var exiterr error - if exitCode, exiterr = GetExitCode(err); exiterr != nil { - // TODO: Fix this so we check the error's text. - // we've failed to retrieve exit code, so we set it to 127 - exitCode = 127 - } - } - return exitCode -} diff --git a/vendor/go.podman.io/storage/pkg/system/extattr_freebsd.go b/vendor/go.podman.io/storage/pkg/system/extattr_freebsd.go deleted file mode 100644 index 1314058f1..000000000 --- a/vendor/go.podman.io/storage/pkg/system/extattr_freebsd.go +++ /dev/null @@ -1,93 +0,0 @@ -//go:build freebsd - -package system - -import ( - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -const ( - EXTATTR_NAMESPACE_EMPTY = unix.EXTATTR_NAMESPACE_EMPTY - EXTATTR_NAMESPACE_USER = unix.EXTATTR_NAMESPACE_USER - EXTATTR_NAMESPACE_SYSTEM = unix.EXTATTR_NAMESPACE_SYSTEM -) - -// ExtattrGetLink retrieves the value of the extended attribute identified by attrname -// in the given namespace and associated with the given path in the file system. -// If the path is a symbolic link, the extended attribute is retrieved from the link itself. -// Returns a []byte slice if the extattr is set and nil otherwise. 
-func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) { - size, errno := unix.ExtattrGetLink(path, attrnamespace, attrname, - uintptr(unsafe.Pointer(nil)), 0) - if errno != nil { - if errno == unix.ENOATTR { - return nil, nil - } - return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno} - } - if size == 0 { - return []byte{}, nil - } - - dest := make([]byte, size) - size, errno = unix.ExtattrGetLink(path, attrnamespace, attrname, - uintptr(unsafe.Pointer(&dest[0])), size) - if errno != nil { - return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno} - } - - return dest[:size], nil -} - -// ExtattrSetLink sets the value of extended attribute identified by attrname -// in the given namespace and associated with the given path in the file system. -// If the path is a symbolic link, the extended attribute is set on the link itself. -func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error { - if len(data) == 0 { - data = []byte{} // ensure non-nil for empty data - } - if _, errno := unix.ExtattrSetLink(path, attrnamespace, attrname, - uintptr(unsafe.Pointer(&data[0])), len(data)); errno != nil { - return &os.PathError{Op: "extattr_set_link", Path: path, Err: errno} - } - - return nil -} - -// ExtattrListLink lists extended attributes associated with the given path -// in the specified namespace. If the path is a symbolic link, the attributes -// are listed from the link itself. -func ExtattrListLink(path string, attrnamespace int) ([]string, error) { - size, errno := unix.ExtattrListLink(path, attrnamespace, - uintptr(unsafe.Pointer(nil)), 0) - if errno != nil { - return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno} - } - if size == 0 { - return []string{}, nil - } - - dest := make([]byte, size) - size, errno = unix.ExtattrListLink(path, attrnamespace, - uintptr(unsafe.Pointer(&dest[0])), size) - if errno != nil { - return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno} - } - - var attrs []string - for i := 0; i < size; { - // Each attribute is preceded by a single byte length - length := int(dest[i]) - i++ - if i+length > size { - break - } - attrs = append(attrs, string(dest[i:i+length])) - i += length - } - - return attrs, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/extattr_unsupported.go b/vendor/go.podman.io/storage/pkg/system/extattr_unsupported.go deleted file mode 100644 index 07b67357f..000000000 --- a/vendor/go.podman.io/storage/pkg/system/extattr_unsupported.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !freebsd - -package system - -const ( - EXTATTR_NAMESPACE_EMPTY = 0 - EXTATTR_NAMESPACE_USER = 0 - EXTATTR_NAMESPACE_SYSTEM = 0 -) - -// ExtattrGetLink is not supported on platforms other than FreeBSD. -func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// ExtattrSetLink is not supported on platforms other than FreeBSD. -func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error { - return ErrNotSupportedPlatform -} - -// ExtattrListLink is not supported on platforms other than FreeBSD. 
-func ExtattrListLink(path string, attrnamespace int) ([]string, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/go.podman.io/storage/pkg/system/init.go b/vendor/go.podman.io/storage/pkg/system/init.go deleted file mode 100644 index 05642f603..000000000 --- a/vendor/go.podman.io/storage/pkg/system/init.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import ( - "syscall" - "time" - "unsafe" -) - -// maxTime is used by chtimes. -var maxTime time.Time - -func init() { - // chtimes initialization - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} diff --git a/vendor/go.podman.io/storage/pkg/system/init_windows.go b/vendor/go.podman.io/storage/pkg/system/init_windows.go deleted file mode 100644 index 5f6fea1d3..000000000 --- a/vendor/go.podman.io/storage/pkg/system/init_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package system - -import "os" - -// LCOWSupported determines if Linux Containers on Windows are supported. -// Note: This feature is in development (06/17) and enabled through an -// environment variable. At a future time, it will be enabled based -// on build number. @jhowardmsft -var lcowSupported = false - -func init() { - // LCOW initialization - if os.Getenv("LCOW_SUPPORTED") != "" { - lcowSupported = true - } -} diff --git a/vendor/go.podman.io/storage/pkg/system/lchflags_bsd.go b/vendor/go.podman.io/storage/pkg/system/lchflags_bsd.go deleted file mode 100644 index f9de938dd..000000000 --- a/vendor/go.podman.io/storage/pkg/system/lchflags_bsd.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build freebsd - -package system - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// Flag values from -const ( - /* - * Definitions of flags stored in file flags word. - * - * Super-user and owner changeable flags. - */ - UF_SETTABLE uint32 = 0x0000ffff /* mask of owner changeable flags */ - UF_NODUMP uint32 = 0x00000001 /* do not dump file */ - UF_IMMUTABLE uint32 = 0x00000002 /* file may not be changed */ - UF_APPEND uint32 = 0x00000004 /* writes to file may only append */ - UF_OPAQUE uint32 = 0x00000008 /* directory is opaque wrt. union */ - UF_NOUNLINK uint32 = 0x00000010 /* file may not be removed or renamed */ - - UF_SYSTEM uint32 = 0x00000080 /* Windows system file bit */ - UF_SPARSE uint32 = 0x00000100 /* sparse file */ - UF_OFFLINE uint32 = 0x00000200 /* file is offline */ - UF_REPARSE uint32 = 0x00000400 /* Windows reparse point file bit */ - UF_ARCHIVE uint32 = 0x00000800 /* file needs to be archived */ - UF_READONLY uint32 = 0x00001000 /* Windows readonly file bit */ - /* This is the same as the MacOS X definition of UF_HIDDEN. */ - UF_HIDDEN uint32 = 0x00008000 /* file is hidden */ - - /* - * Super-user changeable flags. 
- */ - SF_SETTABLE uint32 = 0xffff0000 /* mask of superuser changeable flags */ - SF_ARCHIVED uint32 = 0x00010000 /* file is archived */ - SF_IMMUTABLE uint32 = 0x00020000 /* file may not be changed */ - SF_APPEND uint32 = 0x00040000 /* writes to file may only append */ - SF_NOUNLINK uint32 = 0x00100000 /* file may not be removed or renamed */ - SF_SNAPSHOT uint32 = 0x00200000 /* snapshot inode */ -) - -func Lchflags(path string, flags uint32) error { - p, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - _, _, e1 := unix.Syscall(unix.SYS_LCHFLAGS, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - return e1 - } - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/lchown.go b/vendor/go.podman.io/storage/pkg/system/lchown.go deleted file mode 100644 index eb2d8b464..000000000 --- a/vendor/go.podman.io/storage/pkg/system/lchown.go +++ /dev/null @@ -1,20 +0,0 @@ -package system - -import ( - "os" - "syscall" -) - -func Lchown(name string, uid, gid int) error { - err := syscall.Lchown(name, uid, gid) - - for err == syscall.EINTR { - err = syscall.Lchown(name, uid, gid) - } - - if err != nil { - return &os.PathError{Op: "lchown", Path: name, Err: err} - } - - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/lcow_unix.go b/vendor/go.podman.io/storage/pkg/system/lcow_unix.go deleted file mode 100644 index 037ccf59d..000000000 --- a/vendor/go.podman.io/storage/pkg/system/lcow_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !windows - -package system - -// LCOWSupported returns true if Linux containers on Windows are supported. -func LCOWSupported() bool { - return false -} diff --git a/vendor/go.podman.io/storage/pkg/system/lcow_windows.go b/vendor/go.podman.io/storage/pkg/system/lcow_windows.go deleted file mode 100644 index e54d01e69..000000000 --- a/vendor/go.podman.io/storage/pkg/system/lcow_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package system - -// LCOWSupported returns true if Linux containers on Windows are supported. -func LCOWSupported() bool { - return lcowSupported -} diff --git a/vendor/go.podman.io/storage/pkg/system/lstat_unix.go b/vendor/go.podman.io/storage/pkg/system/lstat_unix.go deleted file mode 100644 index 826c1f9c3..000000000 --- a/vendor/go.podman.io/storage/pkg/system/lstat_unix.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build !windows - -package system - -import ( - "os" - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} - } - return fromStatT(s) -} diff --git a/vendor/go.podman.io/storage/pkg/system/lstat_windows.go b/vendor/go.podman.io/storage/pkg/system/lstat_windows.go deleted file mode 100644 index e51df0daf..000000000 --- a/vendor/go.podman.io/storage/pkg/system/lstat_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -package system - -import "os" - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. 
-func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return fromStatT(&fi) -} diff --git a/vendor/go.podman.io/storage/pkg/system/meminfo.go b/vendor/go.podman.io/storage/pkg/system/meminfo.go deleted file mode 100644 index 3b6e947e6..000000000 --- a/vendor/go.podman.io/storage/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/vendor/go.podman.io/storage/pkg/system/meminfo_freebsd.go b/vendor/go.podman.io/storage/pkg/system/meminfo_freebsd.go deleted file mode 100644 index 589cbeba7..000000000 --- a/vendor/go.podman.io/storage/pkg/system/meminfo_freebsd.go +++ /dev/null @@ -1,85 +0,0 @@ -//go:build freebsd && cgo - -package system - -import ( - "errors" - "fmt" - "unsafe" - - "golang.org/x/sys/unix" -) - -// #include -// #include -// #include -// #include -import "C" - -func getMemInfo() (int64, int64, error) { - data, err := unix.SysctlRaw("vm.vmtotal") - if err != nil { - return -1, -1, fmt.Errorf("can't get kernel info: %w", err) - } - if len(data) != C.sizeof_struct_vmtotal { - return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data)) - } - - total := (*C.struct_vmtotal)(unsafe.Pointer(&data[0])) - - pagesize := int64(C.sysconf(C._SC_PAGESIZE)) - npages := int64(C.sysconf(C._SC_PHYS_PAGES)) - return pagesize * npages, pagesize * int64(total.t_free), nil -} - -func getSwapInfo() (int64, int64, error) { - var ( - total int64 = 0 - used int64 = 0 - ) - swapCount, err := unix.SysctlUint32("vm.nswapdev") - if err != nil { - return -1, -1, fmt.Errorf("reading vm.nswapdev: %w", err) - } - for i := 0; i < int(swapCount); i++ { - data, err := unix.SysctlRaw("vm.swap_info", i) - if err != nil { - return -1, -1, fmt.Errorf("reading vm.swap_info.%d: %w", i, err) - } - if len(data) != C.sizeof_struct_xswdev { - return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data)) - } - xsw := (*C.struct_xswdev)(unsafe.Pointer(&data[0])) - total += int64(xsw.xsw_nblks) - used += int64(xsw.xsw_used) - } - pagesize := int64(C.sysconf(C._SC_PAGESIZE)) - return pagesize * total, pagesize * (total - used), nil -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - MemTotal, MemFree, err := getMemInfo() - if err != nil { - return nil, fmt.Errorf("getting memory totals %w", err) - } - SwapTotal, SwapFree, err := getSwapInfo() - if err != nil { - return nil, fmt.Errorf("getting swap totals %w", err) - } - - if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 { - return nil, errors.New("getting system memory info") - } - - meminfo := &MemInfo{} - // Total memory is total physical memory less than memory locked by kernel - meminfo.MemTotal = MemTotal - meminfo.MemFree = MemFree - meminfo.SwapTotal = SwapTotal - meminfo.SwapFree = SwapFree - - return meminfo, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/meminfo_linux.go b/vendor/go.podman.io/storage/pkg/system/meminfo_linux.go deleted file mode 100644 index 385f1d5e7..000000000 --- a/vendor/go.podman.io/storage/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,65 +0,0 @@ -package system - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" - - "github.com/docker/go-units" -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given an io.Reader to the file. -// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. 
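// Each useful /proc/meminfo entry parsed above has the shape
// "MemTotal: 16337536 kB", so the parser only needs strings.Fields plus a
// kB-to-bytes multiplication (units.KiB == 1024). A tiny worked sketch of
// that conversion (illustrative helper code, not diun API):
//
//	parts := strings.Fields("MemTotal:       16337536 kB")
//	// parts == ["MemTotal:", "16337536", "kB"]
//	kb, _ := strconv.ParseInt(parts[1], 10, 64)
//	bytes := kb * 1024 // 16337536 kB -> 16729636864 bytes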
- if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/meminfo_solaris.go b/vendor/go.podman.io/storage/pkg/system/meminfo_solaris.go deleted file mode 100644 index 17474e114..000000000 --- a/vendor/go.podman.io/storage/pkg/system/meminfo_solaris.go +++ /dev/null @@ -1,129 +0,0 @@ -//go:build solaris && cgo - -package system - -import ( - "fmt" - "unsafe" -) - -// #cgo CFLAGS: -std=c99 -// #cgo LDFLAGS: -lkstat -// #include -// #include -// #include -// #include -// #include -// #include -// struct swaptable *allocSwaptable(int num) { -// struct swaptable *st; -// struct swapent *swapent; -// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); -// swapent = st->swt_ent; -// for (int i = 0; i < num; i++,swapent++) { -// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); -// } -// st->swt_n = num; -// return st; -//} -// void freeSwaptable (struct swaptable *st) { -// struct swapent *swapent = st->swt_ent; -// for (int i = 0; i < st->swt_n; i++,swapent++) { -// free(swapent->ste_path); -// } -// free(st); -// } -// swapent_t getSwapEnt(swapent_t *ent, int i) { -// return ent[i]; -// } -// int64_t getPpKernel() { -// int64_t pp_kernel = 0; -// kstat_ctl_t *ksc; -// kstat_t *ks; -// kstat_named_t *knp; -// kid_t kid; -// -// if ((ksc = kstat_open()) == NULL) { -// return -1; -// } -// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { -// return -1; -// } -// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || -// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { -// return -1; -// } -// switch (knp->data_type) { -// case KSTAT_DATA_UINT64: -// pp_kernel = knp->value.ui64; -// break; -// case KSTAT_DATA_UINT32: -// pp_kernel = knp->value.ui32; -// break; -// } -// pp_kernel *= sysconf(_SC_PAGESIZE); -// return (pp_kernel > 0 ? pp_kernel : -1); -// } -import "C" - -// Get the system memory info using sysconf same as prtconf -func getTotalMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_PHYS_PAGES) - return int64(pagesize * npages) -} - -func getFreeMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_AVPHYS_PAGES) - return int64(pagesize * npages) -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - ppKernel := C.getPpKernel() - MemTotal := getTotalMem() - MemFree := getFreeMem() - SwapTotal, SwapFree, err := getSysSwap() - - if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || - SwapFree < 0 { - return nil, fmt.Errorf("getting system memory info %w", err) - } - - meminfo := &MemInfo{} - // Total memory is total physical memory less than memory locked by kernel - meminfo.MemTotal = MemTotal - int64(ppKernel) - meminfo.MemFree = MemFree - meminfo.SwapTotal = SwapTotal - meminfo.SwapFree = SwapFree - - return meminfo, nil -} - -func getSysSwap() (int64, int64, error) { - var tSwap int64 - var fSwap int64 - var diskblksPerPage int64 - num, err := C.swapctl(C.SC_GETNSWP, nil) - if err != nil { - return -1, -1, err - } - st := C.allocSwaptable(num) - _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) - if err != nil { - C.freeSwaptable(st) - return -1, -1, err - } - - diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) - for i := 0; i < int(num); i++ { - swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) - tSwap += int64(swapent.ste_pages) * diskblksPerPage - fSwap += int64(swapent.ste_free) * diskblksPerPage - } - C.freeSwaptable(st) - return tSwap, fSwap, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/meminfo_unsupported.go b/vendor/go.podman.io/storage/pkg/system/meminfo_unsupported.go deleted file mode 100644 index db0864275..000000000 --- a/vendor/go.podman.io/storage/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !linux && !windows && !solaris && !(freebsd && cgo) - -package system - -// ReadMemInfo is not supported on platforms other than linux and windows. -func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/go.podman.io/storage/pkg/system/meminfo_windows.go b/vendor/go.podman.io/storage/pkg/system/meminfo_windows.go deleted file mode 100644 index c833f30f7..000000000 --- a/vendor/go.podman.io/storage/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,46 +0,0 @@ -package system - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/mknod.go b/vendor/go.podman.io/storage/pkg/system/mknod.go deleted file mode 100644 index ff679c5b1..000000000 --- a/vendor/go.podman.io/storage/pkg/system/mknod.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build !windows && !freebsd - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev uint32) error { - return unix.Mknod(path, mode, int(dev)) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/vendor/go.podman.io/storage/pkg/system/mknod_freebsd.go b/vendor/go.podman.io/storage/pkg/system/mknod_freebsd.go deleted file mode 100644 index d94353600..000000000 --- a/vendor/go.podman.io/storage/pkg/system/mknod_freebsd.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build freebsd - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev uint64) error { - return unix.Mknod(path, mode, dev) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint64 { - return uint64(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/vendor/go.podman.io/storage/pkg/system/mknod_windows.go b/vendor/go.podman.io/storage/pkg/system/mknod_windows.go deleted file mode 100644 index 752f90b14..000000000 --- a/vendor/go.podman.io/storage/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. 
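// The Mkdev helpers deleted above pack a device number into Linux's
// legacy-compatible layout: the low 8 bits of the minor, then 12 bits of
// the major, then the upper 12 bits of the minor. A worked sketch of
// packing and unpacking (illustrative, mirrors the formula above):
//
//	func mkdev(major, minor int64) uint32 {
//		return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
//	}
//
//	d := mkdev(8, 1)                        // e.g. a disk node: major 8, minor 1
//	_ = d                                   // d == 0x801
//	major := (d >> 8) & 0xfff               // 8
//	minor := (d & 0xff) | ((d >> 20) << 8)  // 1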
-func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/go.podman.io/storage/pkg/system/path.go b/vendor/go.podman.io/storage/pkg/system/path.go deleted file mode 100644 index ca076f2bc..000000000 --- a/vendor/go.podman.io/storage/pkg/system/path.go +++ /dev/null @@ -1,20 +0,0 @@ -package system - -import "runtime" - -const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -func DefaultPathEnv(platform string) string { - if runtime.GOOS == "windows" { - if platform != runtime.GOOS && LCOWSupported() { - return defaultUnixPathEnv - } - // Deliberately empty on Windows containers on Windows as the default path will be set by - // the container. Docker has no context of what the default path should be. - return "" - } - return defaultUnixPathEnv -} diff --git a/vendor/go.podman.io/storage/pkg/system/path_unix.go b/vendor/go.podman.io/storage/pkg/system/path_unix.go deleted file mode 100644 index fc8de3e4d..000000000 --- a/vendor/go.podman.io/storage/pkg/system/path_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !windows - -package system - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/path_windows.go b/vendor/go.podman.io/storage/pkg/system/path_windows.go deleted file mode 100644 index 8838d9fd2..000000000 --- a/vendor/go.podman.io/storage/pkg/system/path_windows.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build windows - -package system - -import ( - "fmt" - "path/filepath" - "strings" -) - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be concatenated with -// a Windows long-path which doesn't support drive-letters. Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("relative path not specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/process_unix.go b/vendor/go.podman.io/storage/pkg/system/process_unix.go deleted file mode 100644 index 5090f3042..000000000 --- a/vendor/go.podman.io/storage/pkg/system/process_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build linux || freebsd || solaris || darwin - -package system - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// IsProcessAlive returns true if process with a given pid is running. 
-func IsProcessAlive(pid int) bool { - err := unix.Kill(pid, syscall.Signal(0)) - if err == nil || err == unix.EPERM { - return true - } - - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - _ = unix.Kill(pid, unix.SIGKILL) -} diff --git a/vendor/go.podman.io/storage/pkg/system/rm.go b/vendor/go.podman.io/storage/pkg/system/rm.go deleted file mode 100644 index c151c1449..000000000 --- a/vendor/go.podman.io/storage/pkg/system/rm.go +++ /dev/null @@ -1,99 +0,0 @@ -package system - -import ( - "errors" - "fmt" - "os" - "syscall" - "time" - - "github.com/sirupsen/logrus" - "go.podman.io/storage/pkg/mount" -) - -// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can -// often be remedied. -// Only use `EnsureRemoveAll` if you really want to make every effort to remove -// a directory. -// -// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there -// can be a race between reading directory entries and then actually attempting -// to remove everything in the directory. -// These types of errors do not need to be returned since it's ok for the dir to -// be gone we can just retry the remove operation. -// -// This should not return a `os.ErrNotExist` kind of error under any circumstances -func EnsureRemoveAll(dir string) error { - notExistErr := make(map[string]bool) - - // track retries - exitOnErr := make(map[string]int) - maxRetry := 1000 - - // Attempt a simple remove all first, this avoids the more expensive - // RecursiveUnmount call if not needed. - if err := os.RemoveAll(dir); err == nil { - return nil - } - - // Attempt to unmount anything beneath this dir first - if err := mount.RecursiveUnmount(dir); err != nil { - logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err) - } - - for { - err := os.RemoveAll(dir) - if err == nil { - return nil - } - - // If the RemoveAll fails with a permission error, we - // may have immutable files so try to remove the - // immutable flag and redo the RemoveAll. - if errors.Is(err, syscall.EPERM) { - if err = resetFileFlags(dir); err != nil { - return fmt.Errorf("resetting file flags: %w", err) - } - err = os.RemoveAll(dir) - if err == nil { - return nil - } - } - - pe, ok := err.(*os.PathError) - if !ok { - return err - } - - if os.IsNotExist(err) { - if notExistErr[pe.Path] { - return err - } - notExistErr[pe.Path] = true - - // There is a race where some subdir can be removed but after the parent - // dir entries have been read. - // So the path could be from `os.Remove(subdir)` - // If the reported non-existent path is not the passed in `dir` we - // should just retry, but otherwise return with no error. - if pe.Path == dir { - return nil - } - continue - } - - if !IsEBUSY(pe.Err) { - return err - } - - if e := mount.Unmount(pe.Path); e != nil { - return fmt.Errorf("while removing %s: %w", dir, e) - } - - if exitOnErr[pe.Path] == maxRetry { - return err - } - exitOnErr[pe.Path]++ - time.Sleep(10 * time.Millisecond) - } -} diff --git a/vendor/go.podman.io/storage/pkg/system/rm_common.go b/vendor/go.podman.io/storage/pkg/system/rm_common.go deleted file mode 100644 index db214c4cd..000000000 --- a/vendor/go.podman.io/storage/pkg/system/rm_common.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !freebsd - -package system - -// Reset file flags in a directory tree. This allows EnsureRemoveAll -// to delete trees which have the immutable flag set. 
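// EnsureRemoveAll, deleted above, layers three recovery strategies on top
// of os.RemoveAll: reset immutable file flags on EPERM, unmount paths that
// report EBUSY, and tolerate ENOENT races when a subdirectory disappears
// between the directory read and the remove. Its retry budget
// (maxRetry = 1000 with a 10ms sleep) bounds each stubborn mount point to
// roughly ten seconds. A hedged sketch of the call-site shape, assuming
// the removed API:
//
//	if err := system.EnsureRemoveAll(layerDir); err != nil {
//		return fmt.Errorf("removing layer dir: %w", err)
//	}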
-func resetFileFlags(dir string) error { - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/rm_freebsd.go b/vendor/go.podman.io/storage/pkg/system/rm_freebsd.go deleted file mode 100644 index 39a5de7aa..000000000 --- a/vendor/go.podman.io/storage/pkg/system/rm_freebsd.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import ( - "io/fs" - "path/filepath" -) - -// Reset file flags in a directory tree. This allows EnsureRemoveAll -// to delete trees which have the immutable flag set. -func resetFileFlags(dir string) error { - return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { - if err := Lchflags(path, 0); err != nil { - return err - } - return nil - }) -} diff --git a/vendor/go.podman.io/storage/pkg/system/stat_common.go b/vendor/go.podman.io/storage/pkg/system/stat_common.go deleted file mode 100644 index 1d57b7f40..000000000 --- a/vendor/go.podman.io/storage/pkg/system/stat_common.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !freebsd - -package system - -type platformStatT struct{} - -// Flags return file flags if supported or zero otherwise -func (s StatT) Flags() uint32 { - _ = s.platformStatT // Silence warnings that StatT.platformStatT is unused (on these platforms) - return 0 -} diff --git a/vendor/go.podman.io/storage/pkg/system/stat_darwin.go b/vendor/go.podman.io/storage/pkg/system/stat_darwin.go deleted file mode 100644 index 57850a883..000000000 --- a/vendor/go.podman.io/storage/pkg/system/stat_darwin.go +++ /dev/null @@ -1,15 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec, - }, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/stat_freebsd.go b/vendor/go.podman.io/storage/pkg/system/stat_freebsd.go deleted file mode 100644 index 4b95073a3..000000000 --- a/vendor/go.podman.io/storage/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,28 +0,0 @@ -package system - -import "syscall" - -type platformStatT struct { - flags uint32 -} - -// Flags return file flags if supported or zero otherwise -func (s StatT) Flags() uint32 { - return s.flags -} - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - st := &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec, - dev: s.Dev, - } - st.flags = s.Flags - st.dev = s.Dev - return st, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/stat_linux.go b/vendor/go.podman.io/storage/pkg/system/stat_linux.go deleted file mode 100644 index 0dee88d1b..000000000 --- a/vendor/go.podman.io/storage/pkg/system/stat_linux.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), //nolint:unconvert - mtim: s.Mtim, - dev: uint64(s.Dev), //nolint:unconvert - }, nil -} - -// FromStatT converts a syscall.Stat_t type to a system.Stat_t type -// This is exposed on Linux as pkg/archive/changes uses it. 
-func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} diff --git a/vendor/go.podman.io/storage/pkg/system/stat_netbsd.go b/vendor/go.podman.io/storage/pkg/system/stat_netbsd.go deleted file mode 100644 index 57850a883..000000000 --- a/vendor/go.podman.io/storage/pkg/system/stat_netbsd.go +++ /dev/null @@ -1,15 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec, - }, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/stat_openbsd.go b/vendor/go.podman.io/storage/pkg/system/stat_openbsd.go deleted file mode 100644 index a413e1714..000000000 --- a/vendor/go.podman.io/storage/pkg/system/stat_openbsd.go +++ /dev/null @@ -1,15 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim, - }, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/stat_solaris.go b/vendor/go.podman.io/storage/pkg/system/stat_solaris.go deleted file mode 100644 index a413e1714..000000000 --- a/vendor/go.podman.io/storage/pkg/system/stat_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim, - }, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/stat_unix.go b/vendor/go.podman.io/storage/pkg/system/stat_unix.go deleted file mode 100644 index ffe45f32d..000000000 --- a/vendor/go.podman.io/storage/pkg/system/stat_unix.go +++ /dev/null @@ -1,87 +0,0 @@ -//go:build !windows - -package system - -import ( - "os" - "strconv" - "syscall" - - "golang.org/x/sys/unix" -) - -// StatT type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec - dev uint64 - platformStatT -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// Dev returns a unique identifier for owning filesystem -func (s StatT) Dev() uint64 { - return s.dev -} - -func (s StatT) IsDir() bool { - return (s.mode & unix.S_IFDIR) != 0 -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, &os.PathError{Op: "Stat", Path: path, Err: err} - } - return fromStatT(s) -} - -// Fstat takes an open file descriptor and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file descriptor is invalid -func Fstat(fd int) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Fstat(fd, s); err != nil { - return nil, &os.PathError{Op: "Fstat", Path: strconv.Itoa(fd), Err: err} - } - return fromStatT(s) -} diff --git a/vendor/go.podman.io/storage/pkg/system/stat_windows.go b/vendor/go.podman.io/storage/pkg/system/stat_windows.go deleted file mode 100644 index 828be2088..000000000 --- a/vendor/go.podman.io/storage/pkg/system/stat_windows.go +++ /dev/null @@ -1,74 +0,0 @@ -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like permission, size, etc about a file. -type StatT struct { - mode os.FileMode - size int64 - mtim time.Time - platformStatT -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return os.FileMode(s.mode) -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() time.Time { - return time.Time(s.mtim) -} - -// UID returns file's user id of owner. -// -// on windows this is always 0 because there is no concept of UID -func (s StatT) UID() uint32 { - return 0 -} - -// GID returns file's group id of owner. -// -// on windows this is always 0 because there is no concept of GID -func (s StatT) GID() uint32 { - return 0 -} - -// Dev returns a unique identifier for owning filesystem -func (s StatT) Dev() uint64 { - return 0 -} - -func (s StatT) IsDir() bool { - return s.Mode().IsDir() -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - return fromStatT(&fi) -} - -// fromStatT converts a os.FileInfo type to a system.StatT type -func fromStatT(fi *os.FileInfo) (*StatT, error) { - return &StatT{ - size: (*fi).Size(), - mode: (*fi).Mode(), - mtim: (*fi).ModTime(), - }, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/syscall_unix.go b/vendor/go.podman.io/storage/pkg/system/syscall_unix.go deleted file mode 100644 index d1b41f34d..000000000 --- a/vendor/go.podman.io/storage/pkg/system/syscall_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !windows - -package system - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -// Unmount is a platform-specific helper function to call -// the unmount syscall. -func Unmount(dest string) error { - return unix.Unmount(dest, 0) -} - -// CommandLineToArgv should not be used on Unix. -// It simply returns commandLine in the only element in the returned array. -func CommandLineToArgv(commandLine string) ([]string, error) { - return []string{commandLine}, nil -} - -// IsEBUSY checks if the specified error is EBUSY. 
-func IsEBUSY(err error) bool { - return errors.Is(err, unix.EBUSY) -} diff --git a/vendor/go.podman.io/storage/pkg/system/syscall_windows.go b/vendor/go.podman.io/storage/pkg/system/syscall_windows.go deleted file mode 100644 index f4d8692cd..000000000 --- a/vendor/go.podman.io/storage/pkg/system/syscall_windows.go +++ /dev/null @@ -1,127 +0,0 @@ -package system - -import ( - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - -// GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information. -func GetOSVersion() OSVersion { - var err error - osv := OSVersion{} - osv.Version, err = windows.GetVersion() - if err != nil { - // GetVersion never fails. - panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv -} - -// IsWindowsClient returns true if the SKU is client -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. -func IsWindowsClient() bool { - osviex := &osVersionInfoEx{OSVersionInfoSize: 284} - r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) - if r1 == 0 { - logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) - return false - } - const verNTWorkstation = 0x00000001 - return osviex.ProductType == verNTWorkstation -} - -// IsIoTCore returns true if the currently running image is based off of -// Windows 10 IoT Core. -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. -func IsIoTCore() bool { - var returnedProductType uint32 - r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) - if r1 == 0 { - logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) - return false - } - const productIoTUAP = 0x0000007B - const productIoTUAPCommercial = 0x00000083 - return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial -} - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(dest string) error { - return nil -} - -// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. 
-func CommandLineToArgv(commandLine string) ([]string, error) { - var argc int32 - - argsPtr, err := windows.UTF16PtrFromString(commandLine) - if err != nil { - return nil, err - } - - argv, err := windows.CommandLineToArgv(argsPtr, &argc) - if err != nil { - return nil, err - } - defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) - - newArgs := make([]string, argc) - for i, v := range (*argv)[:argc] { - newArgs[i] = string(windows.UTF16ToString((*v)[:])) - } - - return newArgs, nil -} - -// HasWin32KSupport determines whether containers that depend on win32k can -// run on this machine. Win32k is the driver used to implement windowing. -func HasWin32KSupport() bool { - // For now, check for ntuser API support on the host. In the future, a host - // may support win32k in containers even if the host does not support ntuser - // APIs. - return ntuserApiset.Load() == nil -} - -// IsEBUSY checks if the specified error is EBUSY. -func IsEBUSY(err error) bool { - return false -} diff --git a/vendor/go.podman.io/storage/pkg/system/umask.go b/vendor/go.podman.io/storage/pkg/system/umask.go deleted file mode 100644 index 9b02a1887..000000000 --- a/vendor/go.podman.io/storage/pkg/system/umask.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !windows - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Umask sets current process's file mode creation mask to newmask -// and returns oldmask. -func Umask(newmask int) (oldmask int, err error) { - return unix.Umask(newmask), nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/umask_windows.go b/vendor/go.podman.io/storage/pkg/system/umask_windows.go deleted file mode 100644 index c0b69ab1b..000000000 --- a/vendor/go.podman.io/storage/pkg/system/umask_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build windows - -package system - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/go.podman.io/storage/pkg/system/utimes_freebsd.go b/vendor/go.podman.io/storage/pkg/system/utimes_freebsd.go deleted file mode 100644 index edc588a63..000000000 --- a/vendor/go.podman.io/storage/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,25 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - atFdCwd := unix.AT_FDCWD - - var _path *byte - _path, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/utimes_linux.go b/vendor/go.podman.io/storage/pkg/system/utimes_linux.go deleted file mode 100644 index edc588a63..000000000 --- a/vendor/go.podman.io/storage/pkg/system/utimes_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. 
-func LUtimesNano(path string, ts []syscall.Timespec) error { - atFdCwd := unix.AT_FDCWD - - var _path *byte - _path, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/utimes_unsupported.go b/vendor/go.podman.io/storage/pkg/system/utimes_unsupported.go deleted file mode 100644 index b6c36339d..000000000 --- a/vendor/go.podman.io/storage/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !linux && !freebsd - -package system - -import "syscall" - -// LUtimesNano is only supported on linux and freebsd. -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/go.podman.io/storage/pkg/system/xattrs_darwin.go b/vendor/go.podman.io/storage/pkg/system/xattrs_darwin.go deleted file mode 100644 index d574e9e61..000000000 --- a/vendor/go.podman.io/storage/pkg/system/xattrs_darwin.go +++ /dev/null @@ -1,84 +0,0 @@ -package system - -import ( - "bytes" - "os" - - "golang.org/x/sys/unix" -) - -const ( - // Value is larger than the maximum size allowed - E2BIG unix.Errno = unix.E2BIG - - // Operation not supported - ENOTSUP unix.Errno = unix.ENOTSUP -) - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// Returns a []byte slice if the xattr is set and nil otherwise. -func Lgetxattr(path string, attr string) ([]byte, error) { - // Start with a 128 length byte array - dest := make([]byte, 128) - sz, errno := unix.Lgetxattr(path, attr, dest) - - for errno == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - sz, errno = unix.Lgetxattr(path, attr, []byte{}) - if errno != nil { - return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} - } - dest = make([]byte, sz) - sz, errno = unix.Lgetxattr(path, attr, dest) - } - - switch { - case errno == unix.ENOATTR: - return nil, nil - case errno != nil: - return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} - } - - return dest[:sz], nil -} - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - if err := unix.Lsetxattr(path, attr, data, flags); err != nil { - return &os.PathError{Op: "lsetxattr", Path: path, Err: err} - } - - return nil -} - -// Llistxattr lists extended attributes associated with the given path -// in the file system. 
-func Llistxattr(path string) ([]string, error) { - dest := make([]byte, 128) - sz, errno := unix.Llistxattr(path, dest) - - for errno == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - sz, errno = unix.Llistxattr(path, []byte{}) - if errno != nil { - return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} - } - - dest = make([]byte, sz) - sz, errno = unix.Llistxattr(path, dest) - } - if errno != nil { - return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} - } - - var attrs []string - for token := range bytes.SplitSeq(dest[:sz], []byte{0}) { - if len(token) > 0 { - attrs = append(attrs, string(token)) - } - } - - return attrs, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/xattrs_freebsd.go b/vendor/go.podman.io/storage/pkg/system/xattrs_freebsd.go deleted file mode 100644 index f62f5f745..000000000 --- a/vendor/go.podman.io/storage/pkg/system/xattrs_freebsd.go +++ /dev/null @@ -1,83 +0,0 @@ -package system - -import ( - "strings" - - "golang.org/x/sys/unix" -) - -const ( - // Value is larger than the maximum size allowed - E2BIG unix.Errno = unix.E2BIG - - // Operation not supported - ENOTSUP unix.Errno = unix.ENOTSUP - - // Value is too small or too large for maximum size allowed - EOVERFLOW unix.Errno = unix.EOVERFLOW -) - -var namespaceMap = map[string]int{ - "user": EXTATTR_NAMESPACE_USER, - "system": EXTATTR_NAMESPACE_SYSTEM, -} - -func xattrToExtattr(xattr string) (namespace int, extattr string, err error) { - namespaceName, extattr, found := strings.Cut(xattr, ".") - if !found { - return -1, "", ENOTSUP - } - - namespace, ok := namespaceMap[namespaceName] - if !ok { - return -1, "", ENOTSUP - } - return namespace, extattr, nil -} - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// Returns a []byte slice if the xattr is set and nil otherwise. -func Lgetxattr(path string, attr string) ([]byte, error) { - namespace, extattr, err := xattrToExtattr(attr) - if err != nil { - return nil, err - } - return ExtattrGetLink(path, namespace, extattr) -} - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, value []byte, flags int) error { - if flags != 0 { - // FIXME: Flags are not supported on FreeBSD, but we can implement - // them mimicking the behavior of the Linux implementation. - // See lsetxattr(2) on Linux for more information. - return ENOTSUP - } - - namespace, extattr, err := xattrToExtattr(attr) - if err != nil { - return err - } - return ExtattrSetLink(path, namespace, extattr, value) -} - -// Llistxattr lists extended attributes associated with the given path -// in the file system. 
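// Both Lgetxattr and Llistxattr above use the same grow-on-ERANGE dance:
// try a small buffer, and if the kernel reports ERANGE, probe with an
// empty buffer to learn the required size, then retry at that size. A
// condensed sketch of the pattern (illustrative, using
// golang.org/x/sys/unix as the originals do):
//
//	dest := make([]byte, 128)
//	sz, err := unix.Lgetxattr(path, attr, dest)
//	for err == unix.ERANGE {
//		// empty buffer: the kernel returns the needed size in sz
//		if sz, err = unix.Lgetxattr(path, attr, []byte{}); err != nil {
//			break
//		}
//		dest = make([]byte, sz)
//		sz, err = unix.Lgetxattr(path, attr, dest)
//	}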
-func Llistxattr(path string) ([]string, error) { - attrs := []string{} - - for namespaceName, namespace := range namespaceMap { - namespaceAttrs, err := ExtattrListLink(path, namespace) - if err != nil { - return nil, err - } - - for _, attr := range namespaceAttrs { - attrs = append(attrs, namespaceName+"."+attr) - } - } - - return attrs, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/xattrs_linux.go b/vendor/go.podman.io/storage/pkg/system/xattrs_linux.go deleted file mode 100644 index 3322707a4..000000000 --- a/vendor/go.podman.io/storage/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,87 +0,0 @@ -package system - -import ( - "bytes" - "os" - - "golang.org/x/sys/unix" -) - -const ( - // Value is larger than the maximum size allowed - E2BIG unix.Errno = unix.E2BIG - - // Operation not supported - ENOTSUP unix.Errno = unix.ENOTSUP - - // Value is too small or too large for maximum size allowed - EOVERFLOW unix.Errno = unix.EOVERFLOW -) - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// Returns a []byte slice if the xattr is set and nil otherwise. -func Lgetxattr(path string, attr string) ([]byte, error) { - // Start with a 128 length byte array - dest := make([]byte, 128) - sz, errno := unix.Lgetxattr(path, attr, dest) - - for errno == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - sz, errno = unix.Lgetxattr(path, attr, []byte{}) - if errno != nil { - return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} - } - dest = make([]byte, sz) - sz, errno = unix.Lgetxattr(path, attr, dest) - } - - switch { - case errno == unix.ENODATA: - return nil, nil - case errno != nil: - return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} - } - - return dest[:sz], nil -} - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - if err := unix.Lsetxattr(path, attr, data, flags); err != nil { - return &os.PathError{Op: "lsetxattr", Path: path, Err: err} - } - - return nil -} - -// Llistxattr lists extended attributes associated with the given path -// in the file system. 
-func Llistxattr(path string) ([]string, error) { - dest := make([]byte, 128) - sz, errno := unix.Llistxattr(path, dest) - - for errno == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - sz, errno = unix.Llistxattr(path, []byte{}) - if errno != nil { - return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} - } - - dest = make([]byte, sz) - sz, errno = unix.Llistxattr(path, dest) - } - if errno != nil { - return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} - } - - var attrs []string - for token := range bytes.SplitSeq(dest[:sz], []byte{0}) { - if len(token) > 0 { - attrs = append(attrs, string(token)) - } - } - - return attrs, nil -} diff --git a/vendor/go.podman.io/storage/pkg/system/xattrs_unsupported.go b/vendor/go.podman.io/storage/pkg/system/xattrs_unsupported.go deleted file mode 100644 index 66bf5858f..000000000 --- a/vendor/go.podman.io/storage/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !linux && !darwin && !freebsd - -package system - -import "syscall" - -const ( - // Value is larger than the maximum size allowed - E2BIG syscall.Errno = syscall.Errno(0) - - // Operation not supported - ENOTSUP syscall.Errno = syscall.Errno(0) - - // Value is too small or too large for maximum size allowed - EOVERFLOW syscall.Errno = syscall.Errno(0) -) - -// Lgetxattr is not supported on platforms other than linux. -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// Lsetxattr is not supported on platforms other than linux. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} - -// Llistxattr is not supported on platforms other than linux. -func Llistxattr(path string) ([]string, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/go.podman.io/storage/pkg/unshare/getenv_linux_cgo.go b/vendor/go.podman.io/storage/pkg/unshare/getenv_linux_cgo.go deleted file mode 100644 index 14aaeddcf..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/getenv_linux_cgo.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build linux && cgo - -package unshare - -import ( - "unsafe" -) - -/* -#cgo remoteclient CFLAGS: -Wall -Werror -#include -*/ -import "C" - -func getenv(name string) string { - cName := C.CString(name) - defer C.free(unsafe.Pointer(cName)) - - value := C.GoString(C.getenv(cName)) - - return value -} diff --git a/vendor/go.podman.io/storage/pkg/unshare/getenv_linux_nocgo.go b/vendor/go.podman.io/storage/pkg/unshare/getenv_linux_nocgo.go deleted file mode 100644 index f970935b5..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/getenv_linux_nocgo.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build linux && !cgo - -package unshare - -import ( - "os" -) - -func getenv(name string) string { - return os.Getenv(name) -} diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare.c b/vendor/go.podman.io/storage/pkg/unshare/unshare.c deleted file mode 100644 index a2800654f..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare.c +++ /dev/null @@ -1,379 +0,0 @@ -#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__linux__) - -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Open Source projects like conda-forge, want to package podman and are based - off of centos:6, Conda-force has minimal libc requirements and is lacking - the 
memfd.h file, so we use mmam.h -*/ -#ifndef MFD_ALLOW_SEALING -#define MFD_ALLOW_SEALING 2U -#endif -#ifndef MFD_CLOEXEC -#define MFD_CLOEXEC 1U -#endif - -#ifndef F_LINUX_SPECIFIC_BASE -#define F_LINUX_SPECIFIC_BASE 1024 -#endif -#ifndef F_ADD_SEALS -#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9) -#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10) -#endif -#ifndef F_SEAL_SEAL -#define F_SEAL_SEAL 0x0001LU -#endif -#ifndef F_SEAL_SHRINK -#define F_SEAL_SHRINK 0x0002LU -#endif -#ifndef F_SEAL_GROW -#define F_SEAL_GROW 0x0004LU -#endif -#ifndef F_SEAL_WRITE -#define F_SEAL_WRITE 0x0008LU -#endif - -#define BUFSTEP 1024 - -static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces"; -static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone"; - -static int _containers_unshare_parse_envint(const char *envname) { - char *p, *q; - long l; - - p = getenv(envname); - if (p == NULL) { - return -1; - } - q = NULL; - l = strtol(p, &q, 10); - if ((q == NULL) || (*q != '\0')) { - fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p); - _exit(1); - } - unsetenv(envname); - return l; -} - -static void _check_proc_sys_file(const char *path) -{ - FILE *fp; - char buf[32]; - size_t n_read; - long r; - - fp = fopen(path, "r"); - if (fp == NULL) { - if (errno != ENOENT) - fprintf(stderr, "Error reading %s: %m\n", _max_user_namespaces); - } else { - memset(buf, 0, sizeof(buf)); - n_read = fread(buf, 1, sizeof(buf) - 1, fp); - if (n_read > 0) { - r = atoi(buf); - if (r == 0) { - fprintf(stderr, "User namespaces are not enabled in %s.\n", path); - } - } else { - fprintf(stderr, "Error reading %s: no contents, should contain a number greater than 0.\n", path); - } - fclose(fp); - } -} - -static char **parse_proc_stringlist(const char *list) { - int fd, n, i, n_strings; - char *buf, *new_buf, **ret; - size_t size, new_size, used; - - fd = open(list, O_RDONLY); - if (fd == -1) { - return NULL; - } - buf = NULL; - size = 0; - used = 0; - for (;;) { - new_size = used + BUFSTEP; - new_buf = realloc(buf, new_size); - if (new_buf == NULL) { - free(buf); - fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP)); - return NULL; - } - buf = new_buf; - size = new_size; - memset(buf + used, '\0', size - used); - n = read(fd, buf + used, size - used - 1); - if (n < 0) { - fprintf(stderr, "read(): %m\n"); - return NULL; - } - if (n == 0) { - break; - } - used += n; - } - close(fd); - n_strings = 0; - for (n = 0; n < used; n++) { - if ((n == 0) || (buf[n-1] == '\0')) { - n_strings++; - } - } - ret = calloc(n_strings + 1, sizeof(char *)); - if (ret == NULL) { - fprintf(stderr, "calloc(): out of memory\n"); - return NULL; - } - i = 0; - for (n = 0; n < used; n++) { - if ((n == 0) || (buf[n-1] == '\0')) { - ret[i++] = &buf[n]; - } - } - ret[i] = NULL; - return ret; -} - -/* - * Taken from the runc cloned_binary.c file - * Copyright (C) 2019 Aleksa Sarai - * Copyright (C) 2019 SUSE LLC - * - * This work is dual licensed under the following licenses. You may use, - * redistribute, and/or modify the work under the conditions of either (or - * both) licenses. - * - * === Apache-2.0 === - */ -static int try_bindfd(void) -{ - int fd, ret = -1; - char src[PATH_MAX] = {0}; - char template[64] = {0}; - - strncpy(template, "/tmp/containers.XXXXXX", sizeof(template) - 1); - - /* - * We need somewhere to mount it, mounting anything over /proc/self is a - * BAD idea on the host -- even if we do it temporarily. 
- */ - fd = mkstemp(template); - if (fd < 0) - return ret; - close(fd); - - ret = -EPERM; - - if (readlink("/proc/self/exe", src, sizeof (src) - 1) < 0) - goto out; - - if (mount(src, template, NULL, MS_BIND, NULL) < 0) - goto out; - if (mount(NULL, template, NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL) < 0) - goto out_umount; - - /* Get read-only handle that we're sure can't be made read-write. */ - ret = open(template, O_PATH | O_CLOEXEC); - -out_umount: - /* - * Make sure the MNT_DETACH works, otherwise we could get remounted - * read-write and that would be quite bad (the fd would be made read-write - * too, invalidating the protection). - */ - if (umount2(template, MNT_DETACH) < 0) { - if (ret >= 0) - close(ret); - ret = -ENOTRECOVERABLE; - } - -out: - /* - * We don't care about unlink errors, the worst that happens is that - * there's an empty file left around in STATEDIR. - */ - unlink(template); - return ret; -} - -static int copy_self_proc_exe(char **argv) { - char *exename; - int fd, mmfd, n_read, n_written; - struct stat st; - char buf[2048]; - - fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC); - if (fd == -1) { - fprintf(stderr, "open(\"/proc/self/exe\"): %m\n"); - return -1; - } - if (fstat(fd, &st) == -1) { - fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n"); - close(fd); - return -1; - } - exename = basename(argv[0]); - mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC); - if (mmfd == -1) { - fprintf(stderr, "memfd_create(): %m\n"); - goto close_fd; - } - for (;;) { - n_read = read(fd, buf, sizeof(buf)); - if (n_read < 0) { - fprintf(stderr, "read(\"/proc/self/exe\"): %m\n"); - return -1; - } - if (n_read == 0) { - break; - } - n_written = write(mmfd, buf, n_read); - if (n_written < 0) { - fprintf(stderr, "write(anonfd): %m\n"); - goto close_fd; - } - if (n_written != n_read) { - fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read); - goto close_fd; - } - } - close(fd); - if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) { - fprintf(stderr, "Close_Fd sealing memfd copy: %m\n"); - goto close_mmfd; - } - - return mmfd; - -close_fd: - close(fd); -close_mmfd: - close(mmfd); - return -1; -} -static int containers_reexec(int flags) { - char **argv; - int fd = -1; - - argv = parse_proc_stringlist("/proc/self/cmdline"); - if (argv == NULL) { - return -1; - } - - if (flags & CLONE_NEWNS) - fd = try_bindfd(); - if (fd < 0) - fd = copy_self_proc_exe(argv); - if (fd < 0) - return fd; - - if (fexecve(fd, argv, environ) == -1) { - close(fd); - fprintf(stderr, "Error during reexec(...): %m\n"); - return -1; - } - close(fd); - return 0; -} - -void _containers_unshare(void) -{ - int flags, pidfd, continuefd, n, pgrp, sid, ctty; - char buf[2048]; - - flags = _containers_unshare_parse_envint("_Containers-unshare"); - if (flags == -1) { - return; - } - if ((flags & CLONE_NEWUSER) != 0) { - if (unshare(CLONE_NEWUSER) == -1) { - fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n"); - _check_proc_sys_file (_max_user_namespaces); - _check_proc_sys_file (_unprivileged_user_namespaces); - _exit(1); - } - } - pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); - if (pidfd != -1) { - snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); - size_t size = write(pidfd, buf, strlen(buf)); - if (size != strlen(buf)) { - fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); - _exit(1); - } - close(pidfd); - } - continuefd = 
_containers_unshare_parse_envint("_Containers-continue-pipe"); - if (continuefd != -1) { - n = read(continuefd, buf, sizeof(buf)); - if (n > 0) { - fprintf(stderr, "Error: %.*s\n", n, buf); - _exit(1); - } - close(continuefd); - } - sid = _containers_unshare_parse_envint("_Containers-setsid"); - if (sid == 1) { - if (setsid() == -1) { - fprintf(stderr, "Error during setsid: %m\n"); - _exit(1); - } - } - pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); - if (pgrp == 1) { - if (setpgrp() == -1) { - fprintf(stderr, "Error during setpgrp: %m\n"); - _exit(1); - } - } - ctty = _containers_unshare_parse_envint("_Containers-ctty"); - if (ctty != -1) { - if (ioctl(ctty, TIOCSCTTY, 0) == -1) { - fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); - _exit(1); - } - } - if ((flags & CLONE_NEWUSER) != 0) { - if (setresgid(0, 0, 0) != 0) { - fprintf(stderr, "Error during setresgid(0): %m\n"); - _exit(1); - } - if (setresuid(0, 0, 0) != 0) { - fprintf(stderr, "Error during setresuid(0): %m\n"); - _exit(1); - } - } - if ((flags & ~CLONE_NEWUSER) != 0) { - if (unshare(flags & ~CLONE_NEWUSER) == -1) { - fprintf(stderr, "Error during unshare(...): %m\n"); - _exit(1); - } - } - if (containers_reexec(flags) != 0) { - _exit(1); - } - return; -} - -#endif // !UNSHARE_NO_CODE_AT_ALL diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare.go b/vendor/go.podman.io/storage/pkg/unshare/unshare.go deleted file mode 100644 index 00f397f35..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare.go +++ /dev/null @@ -1,32 +0,0 @@ -package unshare - -import ( - "fmt" - "os" - "os/user" - "sync" -) - -var ( - homeDirOnce sync.Once - homeDirErr error - homeDir string -) - -// HomeDir returns the home directory for the current user. 
-func HomeDir() (string, error) { - homeDirOnce.Do(func() { - home := os.Getenv("HOME") - if home == "" { - usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) - if err != nil { - homeDir, homeDirErr = "", fmt.Errorf("unable to resolve HOME directory: %w", err) - return - } - homeDir, homeDirErr = usr.HomeDir, nil - return - } - homeDir, homeDirErr = home, nil - }) - return homeDir, homeDirErr -} diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare_cgo.go b/vendor/go.podman.io/storage/pkg/unshare/unshare_cgo.go deleted file mode 100644 index f575fba2e..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare_cgo.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build (linux && cgo && !gccgo) || (freebsd && cgo) - -package unshare - -// #cgo CFLAGS: -Wall -// extern void _containers_unshare(void); -// static void __attribute__((constructor)) init(void) { -// _containers_unshare(); -// } -import "C" diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare_darwin.go b/vendor/go.podman.io/storage/pkg/unshare/unshare_darwin.go deleted file mode 100644 index a9daf714c..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare_darwin.go +++ /dev/null @@ -1,58 +0,0 @@ -//go:build darwin - -package unshare - -import ( - "os" - - "github.com/opencontainers/runtime-spec/specs-go" - "go.podman.io/storage/pkg/idtools" -) - -const ( - // UsernsEnvName is the environment variable, if set indicates in rootless mode - UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" -) - -// IsRootless tells us if we are running in rootless mode -func IsRootless() bool { - return true -} - -// GetRootlessUID returns the UID of the user in the parent userNS -func GetRootlessUID() int { - return os.Getuid() -} - -// GetRootlessGID returns the GID of the user in the parent userNS -func GetRootlessGID() int { - return os.Getgid() -} - -// RootlessEnv returns the environment settings for the rootless containers -func RootlessEnv() []string { - return append(os.Environ(), UsernsEnvName+"=") -} - -// MaybeReexecUsingUserNamespace re-exec the process in a new namespace -func MaybeReexecUsingUserNamespace(evenForRoot bool) { -} - -// GetHostIDMappings reads mappings for the specified process (or the current -// process if pid is "self" or an empty string) from the kernel. -func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { - return nil, nil, nil -} - -// ParseIDMappings parses mapping triples. 
-func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { - uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map") - if err != nil { - return nil, nil, err - } - gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map") - if err != nil { - return nil, nil, err - } - return uid, gid, nil -} diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.c b/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.c deleted file mode 100644 index 0b2f17886..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.c +++ /dev/null @@ -1,76 +0,0 @@ -#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__FreeBSD__) - - -#include -#include -#include -#include -#include -#include - -static int _containers_unshare_parse_envint(const char *envname) { - char *p, *q; - long l; - - p = getenv(envname); - if (p == NULL) { - return -1; - } - q = NULL; - l = strtol(p, &q, 10); - if ((q == NULL) || (*q != '\0')) { - fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p); - _exit(1); - } - unsetenv(envname); - return l; -} - -void _containers_unshare(void) -{ - int pidfd, continuefd, n, pgrp, sid, ctty; - char buf[2048]; - - pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); - if (pidfd != -1) { - snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); - size_t size = write(pidfd, buf, strlen(buf)); - if (size != strlen(buf)) { - fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); - _exit(1); - } - close(pidfd); - } - continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe"); - if (continuefd != -1) { - n = read(continuefd, buf, sizeof(buf)); - if (n > 0) { - fprintf(stderr, "Error: %.*s\n", n, buf); - _exit(1); - } - close(continuefd); - } - sid = _containers_unshare_parse_envint("_Containers-setsid"); - if (sid == 1) { - if (setsid() == -1) { - fprintf(stderr, "Error during setsid: %m\n"); - _exit(1); - } - } - pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); - if (pgrp == 1) { - if (setpgrp(0, 0) == -1) { - fprintf(stderr, "Error during setpgrp: %m\n"); - _exit(1); - } - } - ctty = _containers_unshare_parse_envint("_Containers-ctty"); - if (ctty != -1) { - if (ioctl(ctty, TIOCSCTTY, 0) == -1) { - fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); - _exit(1); - } - } -} - -#endif diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.go b/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.go deleted file mode 100644 index 2b81f896b..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare_freebsd.go +++ /dev/null @@ -1,178 +0,0 @@ -//go:build freebsd - -package unshare - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "os/exec" - "runtime" - "strconv" - "syscall" - - "github.com/sirupsen/logrus" - "go.podman.io/storage/pkg/reexec" -) - -// Cmd wraps an exec.Cmd created by the reexec package in unshare(), -// and one day might handle setting ID maps and other related setting*s -// by triggering initialization code in the child. -type Cmd struct { - *exec.Cmd - Setsid bool - Setpgrp bool - Ctty *os.File - Hook func(pid int) error -} - -// Command creates a new Cmd which can be customized. -func Command(args ...string) *Cmd { - cmd := reexec.Command(args...) - return &Cmd{ - Cmd: cmd, - } -} - -func (c *Cmd) Start() error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // Set environment variables to tell the child to synchronize its startup. 
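// The fd arithmetic in the Start method below relies on os/exec
// semantics: entries in cmd.ExtraFiles become descriptors 3, 4, 5, ... in
// the child (after stdin/stdout/stderr), so "len(c.ExtraFiles)+3" is
// exactly the fd the next appended *os.File will receive. A worked sketch
// of that wiring (illustrative):
//
//	// before: ExtraFiles == []      -> pidWrite will be fd 3 in the child
//	c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) // "=3"
//	c.ExtraFiles = append(c.ExtraFiles, pidWrite)
//	// now: ExtraFiles == [pidWrite] -> continueRead will be fd 4
//	c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) // "=4"
//	c.ExtraFiles = append(c.ExtraFiles, continueRead)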
- if c.Env == nil { - c.Env = os.Environ() - } - - // Create the pipe for reading the child's PID. - pidRead, pidWrite, err := os.Pipe() - if err != nil { - return fmt.Errorf("creating pid pipe: %w", err) - } - c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, pidWrite) - - // Create the pipe for letting the child know to proceed. - continueRead, continueWrite, err := os.Pipe() - if err != nil { - pidRead.Close() - pidWrite.Close() - return fmt.Errorf("creating continue read/write pipe: %w", err) - } - c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, continueRead) - - // Pass along other instructions. - if c.Setsid { - c.Env = append(c.Env, "_Containers-setsid=1") - } - if c.Setpgrp { - c.Env = append(c.Env, "_Containers-setpgrp=1") - } - if c.Ctty != nil { - c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, c.Ctty) - } - - // Make sure we clean up our pipes. - defer func() { - if pidRead != nil { - pidRead.Close() - } - if pidWrite != nil { - pidWrite.Close() - } - if continueRead != nil { - continueRead.Close() - } - if continueWrite != nil { - continueWrite.Close() - } - }() - - // Start the new process. - err = c.Cmd.Start() - if err != nil { - return err - } - - // Close the ends of the pipes that the parent doesn't need. - continueRead.Close() - continueRead = nil - pidWrite.Close() - pidWrite = nil - - // Read the child's PID from the pipe. - pidString := "" - b := new(bytes.Buffer) - if _, err := io.Copy(b, pidRead); err != nil { - return fmt.Errorf("reading child PID: %w", err) - } - pidString = b.String() - pid, err := strconv.Atoi(pidString) - if err != nil { - fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) - return fmt.Errorf("parsing PID %q: %w", pidString, err) - } - - // Run any additional setup that we want to do before the child starts running proper. - if c.Hook != nil { - if err = c.Hook(pid); err != nil { - fmt.Fprintf(continueWrite, "hook error: %v", err) - return err - } - } - - return nil -} - -func (c *Cmd) Run() error { - if err := c.Start(); err != nil { - return err - } - return c.Wait() -} - -func (c *Cmd) CombinedOutput() ([]byte, error) { - return nil, errors.New("unshare: CombinedOutput() not implemented") -} - -func (c *Cmd) Output() ([]byte, error) { - return nil, errors.New("unshare: Output() not implemented") -} - -type Runnable interface { - Run() error -} - -// ExecRunnable runs the specified unshare command, captures its exit status, -// and exits with the same status. 
-func ExecRunnable(cmd Runnable, cleanup func()) { - exit := func(status int) { - if cleanup != nil { - cleanup() - } - os.Exit(status) - } - if err := cmd.Run(); err != nil { - if exitError, ok := err.(*exec.ExitError); ok { - if exitError.ProcessState.Exited() { - if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { - if waitStatus.Exited() { - logrus.Debugf("%v", exitError) - exit(waitStatus.ExitStatus()) - } - if waitStatus.Signaled() { - logrus.Debugf("%v", exitError) - exit(int(waitStatus.Signal()) + 128) - } - } - } - } - logrus.Errorf("%v", err) - logrus.Errorf("(Unable to determine exit status)") - exit(1) - } - exit(0) -} diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare_gccgo.go b/vendor/go.podman.io/storage/pkg/unshare/unshare_gccgo.go deleted file mode 100644 index 818983474..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare_gccgo.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build linux && cgo && gccgo - -package unshare - -// #cgo CFLAGS: -Wall -Wextra -// extern void _containers_unshare(void); -// static void __attribute__((constructor)) init(void) { -// _containers_unshare(); -// } -import "C" - -// This next bit is straight out of libcontainer. - -// AlwaysFalse is here to stay false -// (and be exported so the compiler doesn't optimize out its reference) -var AlwaysFalse bool - -func init() { - if AlwaysFalse { - // by referencing this C init() in a noop test, it will ensure the compiler - // links in the C function. - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134 - C.init() - } -} diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare_linux.go b/vendor/go.podman.io/storage/pkg/unshare/unshare_linux.go deleted file mode 100644 index 7cb069c78..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare_linux.go +++ /dev/null @@ -1,755 +0,0 @@ -//go:build linux - -package unshare - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "os" - "os/exec" - "os/signal" - "os/user" - "runtime" - "strconv" - "strings" - "sync" - "syscall" - - "github.com/moby/sys/capability" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" - "go.podman.io/storage/pkg/idtools" - "go.podman.io/storage/pkg/reexec" -) - -// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and -// handles setting ID maps and other related settings by triggering -// initialization code in the child. -type Cmd struct { - *exec.Cmd - UnshareFlags int - UseNewuidmap bool - UidMappings []specs.LinuxIDMapping //nolint: revive - UseNewgidmap bool - GidMappings []specs.LinuxIDMapping //nolint: revive - GidMappingsEnableSetgroups bool - Setsid bool - Setpgrp bool - Ctty *os.File - OOMScoreAdj *int - Hook func(pid int) error -} - -// Command creates a new Cmd which can be customized. -func Command(args ...string) *Cmd { - cmd := reexec.Command(args...) - return &Cmd{ - Cmd: cmd, - } -} - -func getRootlessUID() int { - uidEnv := getenv("_CONTAINERS_ROOTLESS_UID") - if uidEnv != "" { - u, _ := strconv.Atoi(uidEnv) - return u - } - return os.Geteuid() -} - -func getRootlessGID() int { - gidEnv := getenv("_CONTAINERS_ROOTLESS_GID") - if gidEnv != "" { - u, _ := strconv.Atoi(gidEnv) - return u - } - - /* If the _CONTAINERS_ROOTLESS_UID is set, assume the gid==uid. 
*/ - uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") - if uidEnv != "" { - u, _ := strconv.Atoi(uidEnv) - return u - } - return os.Getegid() -} - -// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the -// matching file capability -func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) { - info, err := os.Stat(path) - if err != nil { - return false, err - } - - mode := info.Mode() - if mode&modeid == modeid { - return true, nil - } - cap, err := capability.NewFile2(path) - if err != nil { - return false, err - } - if err := cap.Load(); err != nil { - return false, err - } - return cap.Get(capability.EFFECTIVE, capid), nil -} - -func (c *Cmd) Start() (retErr error) { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // Set an environment variable to tell the child to synchronize its startup. - if c.Env == nil { - c.Env = os.Environ() - } - c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags)) - - // Please the libpod "rootless" package to find the expected env variables. - if IsRootless() { - c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done") - c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", getRootlessUID())) - c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_GID=%d", getRootlessGID())) - } - - // Create the pipe for reading the child's PID. - pidRead, pidWrite, err := os.Pipe() - if err != nil { - return fmt.Errorf("creating pid pipe: %w", err) - } - c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, pidWrite) - - // Create the pipe for letting the child know to proceed. - continueRead, continueWrite, err := os.Pipe() - if err != nil { - pidRead.Close() - pidWrite.Close() - return fmt.Errorf("creating continue read/write pipe: %w", err) - } - c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, continueRead) - - // Pass along other instructions. - if c.Setsid { - c.Env = append(c.Env, "_Containers-setsid=1") - } - if c.Setpgrp { - c.Env = append(c.Env, "_Containers-setpgrp=1") - } - if c.Ctty != nil { - c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, c.Ctty) - } - - // Make sure we clean up our pipes. - defer func() { - if pidRead != nil { - pidRead.Close() - } - if pidWrite != nil { - pidWrite.Close() - } - if continueRead != nil { - continueRead.Close() - } - if continueWrite != nil { - continueWrite.Close() - } - }() - - // Start the new process. - err = c.Cmd.Start() - if err != nil { - return err - } - - // If the function fails from here, we need to make sure the - // child process is killed and properly cleaned up. - defer func() { - if retErr != nil { - _ = c.Cmd.Process.Kill() - _ = c.Cmd.Wait() - } - }() - - // Close the ends of the pipes that the parent doesn't need. - continueRead.Close() - continueRead = nil - pidWrite.Close() - pidWrite = nil - - // Read the child's PID from the pipe. - b := new(bytes.Buffer) - if _, err := io.Copy(b, pidRead); err != nil { - return fmt.Errorf("reading child PID: %w", err) - } - pidString := b.String() - pid, err := strconv.Atoi(pidString) - if err != nil { - fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) - return fmt.Errorf("parsing PID %q: %w", pidString, err) - } - pidString = fmt.Sprintf("%d", pid) - - // If we created a new user namespace, set any specified mappings. 
- if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 { - // Always set "setgroups". - setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) - if err != nil { - fmt.Fprintf(continueWrite, "error opening setgroups: %v", err) - return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err) - } - defer setgroups.Close() - if c.GidMappingsEnableSetgroups { - if _, err := fmt.Fprintf(setgroups, "allow"); err != nil { - fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err) - return fmt.Errorf("opening \"allow\" to /proc/%s/setgroups: %w", pidString, err) - } - } else { - if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { - fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err) - return fmt.Errorf("writing \"deny\" to /proc/%s/setgroups: %w", pidString, err) - } - } - - if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 { - uidmap, gidmap, err := GetHostIDMappings("") - if err != nil { - fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err) - return fmt.Errorf("reading ID mappings in parent: %w", err) - } - if len(c.UidMappings) == 0 { - c.UidMappings = uidmap - for i := range c.UidMappings { - c.UidMappings[i].HostID = c.UidMappings[i].ContainerID - } - } - if len(c.GidMappings) == 0 { - c.GidMappings = gidmap - for i := range c.GidMappings { - c.GidMappings[i].HostID = c.GidMappings[i].ContainerID - } - } - } - - if len(c.GidMappings) > 0 { - // Build the GID map, since writing to the proc file has to be done all at once. - g := new(bytes.Buffer) - for _, m := range c.GidMappings { - fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) - } - gidmapSet := false - // Set the GID map. - if c.UseNewgidmap { - path, err := exec.LookPath("newgidmap") - if err != nil { - return fmt.Errorf("finding newgidmap: %w", err) - } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(g.String())...)...) 
- g.Reset() - cmd.Stdout = g - cmd.Stderr = g - if err := cmd.Run(); err == nil { - gidmapSet = true - } else { - logrus.Warnf("running newgidmap: %v: %s", err, g.String()) - isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID) - if err != nil { - logrus.Warnf("Failed to check for setgid on %s: %v", path, err) - } else { - if !isSetgid { - logrus.Warnf("%s should be setgid or have filecaps setgid", path) - } - } - logrus.Warnf("Falling back to single mapping") - g.Reset() - fmt.Fprintf(g, "0 %d 1\n", os.Getegid()) - } - } - if !gidmapSet { - if c.UseNewgidmap { - setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) - if err != nil { - fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err) - return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err) - } - defer setgroups.Close() - if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { - fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err) - return fmt.Errorf("writing 'deny' to /proc/%s/setgroups: %w", pidString, err) - } - } - gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) - if err != nil { - fmt.Fprintf(continueWrite, "opening /proc/%s/gid_map: %v", pidString, err) - return fmt.Errorf("opening /proc/%s/gid_map: %w", pidString, err) - } - defer gidmap.Close() - if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil { - fmt.Fprintf(continueWrite, "writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err) - return fmt.Errorf("writing %q to /proc/%s/gid_map: %w", g.String(), pidString, err) - } - } - } - - if len(c.UidMappings) > 0 { - // Build the UID map, since writing to the proc file has to be done all at once. - u := new(bytes.Buffer) - for _, m := range c.UidMappings { - fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) - } - uidmapSet := false - // Set the UID map. - if c.UseNewuidmap { - path, err := exec.LookPath("newuidmap") - if err != nil { - return fmt.Errorf("finding newuidmap: %w", err) - } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(u.String())...)...) 
- u.Reset() - cmd.Stdout = u - cmd.Stderr = u - if err := cmd.Run(); err == nil { - uidmapSet = true - } else { - logrus.Warnf("Error running newuidmap: %v: %s", err, u.String()) - isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID) - if err != nil { - logrus.Warnf("Failed to check for setuid on %s: %v", path, err) - } else { - if !isSetuid { - logrus.Warnf("%s should be setuid or have filecaps setuid", path) - } - } - - logrus.Warnf("Falling back to single mapping") - u.Reset() - fmt.Fprintf(u, "0 %d 1\n", os.Geteuid()) - } - } - if !uidmapSet { - uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) - if err != nil { - fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err) - return fmt.Errorf("opening /proc/%s/uid_map: %w", pidString, err) - } - defer uidmap.Close() - if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil { - fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err) - return fmt.Errorf("writing %q to /proc/%s/uid_map: %w", u.String(), pidString, err) - } - } - } - } - - if c.OOMScoreAdj != nil { - oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0) - if err != nil { - fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err) - return fmt.Errorf("opening /proc/%s/oom_score_adj: %w", pidString, err) - } - defer oomScoreAdj.Close() - if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil { - fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err) - return fmt.Errorf("writing \"%d\" to /proc/%s/oom_score_adj: %w", c.OOMScoreAdj, pidString, err) - } - } - // Run any additional setup that we want to do before the child starts running proper. - if c.Hook != nil { - if err = c.Hook(pid); err != nil { - fmt.Fprintf(continueWrite, "hook error: %v", err) - return err - } - } - - return nil -} - -func (c *Cmd) Run() error { - if err := c.Start(); err != nil { - return err - } - return c.Wait() -} - -func (c *Cmd) CombinedOutput() ([]byte, error) { - return nil, errors.New("unshare: CombinedOutput() not implemented") -} - -func (c *Cmd) Output() ([]byte, error) { - return nil, errors.New("unshare: Output() not implemented") -} - -var ( - isRootlessOnce sync.Once - isRootless bool -) - -const ( - // UsernsEnvName is the environment variable, if set indicates in rootless mode - UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" -) - -// hasFullUsersMappings checks whether the current user namespace has all the IDs mapped. -func hasFullUsersMappings() (bool, error) { - content, err := os.ReadFile("/proc/self/uid_map") - if err != nil { - return false, err - } - // The kernel rejects attempts to create mappings where either starting - // point is (u32)-1: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c?id=af3e9579ecfb#n1006 . - // So, if the uid_map contains 4294967295, the entire IDs space is available in the - // user namespace, so it is likely the initial user namespace. 
- return bytes.Contains(content, []byte("4294967295")), nil -} - -var ( - hasCapSysAdminOnce sync.Once - hasCapSysAdminRet bool - hasCapSysAdminErr error -) - -// IsRootless tells us if we are running in rootless mode -func IsRootless() bool { - isRootlessOnce.Do(func() { - isRootless = getRootlessUID() != 0 || getenv(UsernsEnvName) != "" - if !isRootless { - hasCapSysAdmin, err := HasCapSysAdmin() - if err != nil { - logrus.Warnf("Failed to read CAP_SYS_ADMIN presence for the current process") - } - if err == nil && !hasCapSysAdmin { - isRootless = true - } - } - if !isRootless { - hasMappings, err := hasFullUsersMappings() - if err != nil { - logrus.Warnf("Failed to read current user namespace mappings") - } - if err == nil && !hasMappings { - isRootless = true - } - } - }) - return isRootless -} - -// GetRootlessUID returns the UID of the user in the parent userNS -func GetRootlessUID() int { - uidEnv := getenv("_CONTAINERS_ROOTLESS_UID") - if uidEnv != "" { - u, _ := strconv.Atoi(uidEnv) - return u - } - return os.Getuid() -} - -// GetRootlessGID returns the GID of the user in the parent userNS -func GetRootlessGID() int { - gidEnv := getenv("_CONTAINERS_ROOTLESS_GID") - if gidEnv != "" { - u, _ := strconv.Atoi(gidEnv) - return u - } - return os.Getgid() -} - -// RootlessEnv returns the environment settings for the rootless containers -func RootlessEnv() []string { - return append(os.Environ(), UsernsEnvName+"=done") -} - -type Runnable interface { - Run() error -} - -func bailOnError(err error, format string, a ...any) { //nolint:revive,goprintffuncname - if err != nil { - if format != "" { - logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) - } else { - logrus.Errorf("%v", err) - } - os.Exit(1) - } -} - -// MaybeReexecUsingUserNamespace re-exec the process in a new namespace -func MaybeReexecUsingUserNamespace(evenForRoot bool) { - // If we've already been through this once, no need to try again. - if os.Geteuid() == 0 && GetRootlessUID() > 0 { - return - } - - var uidNum, gidNum uint64 - // Figure out who we are. - me, err := user.Current() - if !os.IsNotExist(err) { - bailOnError(err, "error determining current user") - uidNum, err = strconv.ParseUint(me.Uid, 10, 32) - bailOnError(err, "error parsing current UID %s", me.Uid) - gidNum, err = strconv.ParseUint(me.Gid, 10, 32) - bailOnError(err, "error parsing current GID %s", me.Gid) - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // ID mappings to use to reexec ourselves. - var uidmap, gidmap []specs.LinuxIDMapping - if uidNum != 0 || evenForRoot { - // Read the set of ID mappings that we're allowed to use. Each - // range in /etc/subuid and /etc/subgid file is a starting host - // ID and a range size. - uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username) - if err != nil { - logrus.Warnf("Reading allowed ID mappings: %v", err) - } - if len(uidmap) == 0 { - logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username) - } - if len(gidmap) == 0 { - logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username) - } - // Map our UID and GID, then the subuid and subgid ranges, - // consecutively, starting at 0, to get the mappings to use for - // a copy of ourselves. - uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...) - gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...) 
- var rangeStart uint32 - for i := range uidmap { - uidmap[i].ContainerID = rangeStart - rangeStart += uidmap[i].Size - } - rangeStart = 0 - for i := range gidmap { - gidmap[i].ContainerID = rangeStart - rangeStart += gidmap[i].Size - } - } else { - // If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able - // to use unshare(), so don't bother creating a new user namespace at this point. - capabilities, err := capability.NewPid2(0) - bailOnError(err, "Initializing a new Capabilities object of pid 0") - err = capabilities.Load() - bailOnError(err, "Reading the current capabilities sets") - - if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) { - return - } - // Read the set of ID mappings that we're currently using. - uidmap, gidmap, err = GetHostIDMappings("") - bailOnError(err, "Reading current ID mappings") - // Just reuse them. - for i := range uidmap { - uidmap[i].HostID = uidmap[i].ContainerID - } - for i := range gidmap { - gidmap[i].HostID = gidmap[i].ContainerID - } - } - - // Unlike most uses of reexec or unshare, we're using a name that - // _won't_ be recognized as a registered reexec handler, since we - // _want_ to fall through reexec.Init() to the normal main(). - cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...) - - // If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again. - err = os.Setenv(UsernsEnvName, "1") - bailOnError(err, "error setting %s=1 in environment", UsernsEnvName) - - // Set the default isolation type to use the "rootless" method. - if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present { - if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil { - if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil { - logrus.Errorf("Setting BUILDAH_ISOLATION=rootless in environment: %v", err) - os.Exit(1) - } - } - } - - // Reuse our stdio. - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - // Set up a new user namespace with the ID mapping. - cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS - cmd.UseNewuidmap = uidNum != 0 - cmd.UidMappings = uidmap - cmd.UseNewgidmap = uidNum != 0 - cmd.GidMappings = gidmap - cmd.GidMappingsEnableSetgroups = true - - // Finish up. - logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings) - - // Forward SIGHUP, SIGINT, and SIGTERM to our child process. - interrupted := make(chan os.Signal, 100) - defer func() { - signal.Stop(interrupted) - close(interrupted) - }() - cmd.Hook = func(int) error { - go func() { - for receivedSignal := range interrupted { - if err := cmd.Cmd.Process.Signal(receivedSignal); err != nil { - logrus.Warnf( - "Failed to send a signal '%d' to the Process (PID: %d): %v", - receivedSignal, cmd.Cmd.Process.Pid, err, - ) - } - } - }() - return nil - } - signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) - - // Make sure our child process gets SIGKILLed if we exit, for whatever - // reason, before it does. - if cmd.Cmd.SysProcAttr == nil { - cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{} - } - cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL - - ExecRunnable(cmd, nil) -} - -// ExecRunnable runs the specified unshare command, captures its exit status, -// and exits with the same status. 
-func ExecRunnable(cmd Runnable, cleanup func()) { - exit := func(status int) { - if cleanup != nil { - cleanup() - } - os.Exit(status) - } - if err := cmd.Run(); err != nil { - if exitError, ok := err.(*exec.ExitError); ok { - if exitError.ProcessState.Exited() { - if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { - if waitStatus.Exited() { - logrus.Debugf("%v", exitError) - exit(waitStatus.ExitStatus()) - } - if waitStatus.Signaled() { - logrus.Debugf("%v", exitError) - exit(int(waitStatus.Signal()) + 128) - } - } - } - } - logrus.Errorf("%v", err) - logrus.Errorf("(Unable to determine exit status)") - exit(1) - } - exit(0) -} - -// getHostIDMappings reads mappings from the named node under /proc. -func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) { - var mappings []specs.LinuxIDMapping - f, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("reading ID mappings from %q: %w", path, err) - } - defer f.Close() - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - fields := strings.Fields(line) - if len(fields) != 3 { - return nil, fmt.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) - } - cid, err := strconv.ParseUint(fields[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("parsing container ID value %q from line %q in %q: %w", fields[0], line, path, err) - } - hid, err := strconv.ParseUint(fields[1], 10, 32) - if err != nil { - return nil, fmt.Errorf("parsing host ID value %q from line %q in %q: %w", fields[1], line, path, err) - } - size, err := strconv.ParseUint(fields[2], 10, 32) - if err != nil { - return nil, fmt.Errorf("parsing size value %q from line %q in %q: %w", fields[2], line, path, err) - } - mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)}) - } - return mappings, nil -} - -// GetHostIDMappings reads mappings for the specified process (or the current -// process if pid is "self" or an empty string) from the kernel. -func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { - if pid == "" { - pid = "self" - } - uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid)) - if err != nil { - return nil, nil, err - } - gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid)) - if err != nil { - return nil, nil, err - } - return uidmap, gidmap, nil -} - -// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid. -func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { - mappings, err := idtools.NewIDMappings(user, group) - if err != nil { - return nil, nil, fmt.Errorf("reading subuid mappings for user %q and subgid mappings for group %q: %w", user, group, err) - } - var uidmap, gidmap []specs.LinuxIDMapping - for _, m := range mappings.UIDs() { - uidmap = append(uidmap, specs.LinuxIDMapping{ - ContainerID: uint32(m.ContainerID), - HostID: uint32(m.HostID), - Size: uint32(m.Size), - }) - } - for _, m := range mappings.GIDs() { - gidmap = append(gidmap, specs.LinuxIDMapping{ - ContainerID: uint32(m.ContainerID), - HostID: uint32(m.HostID), - Size: uint32(m.Size), - }) - } - return uidmap, gidmap, nil -} - -// ParseIDMappings parses mapping triples. 
-func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { - uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map") - if err != nil { - return nil, nil, err - } - gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map") - if err != nil { - return nil, nil, err - } - return uid, gid, nil -} - -// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. -func HasCapSysAdmin() (bool, error) { - hasCapSysAdminOnce.Do(func() { - currentCaps, err := capability.NewPid2(0) - if err != nil { - hasCapSysAdminErr = err - return - } - if err = currentCaps.Load(); err != nil { - hasCapSysAdminErr = err - return - } - hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) - }) - return hasCapSysAdminRet, hasCapSysAdminErr -} diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported.go b/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported.go deleted file mode 100644 index 3b463627c..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build !linux && !darwin - -package unshare - -import ( - "os" - - "github.com/opencontainers/runtime-spec/specs-go" - "go.podman.io/storage/pkg/idtools" -) - -const ( - // UsernsEnvName is the environment variable, if set indicates in rootless mode - UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" -) - -// IsRootless tells us if we are running in rootless mode -func IsRootless() bool { - return os.Getuid() != 0 -} - -// GetRootlessUID returns the UID of the user in the parent userNS -func GetRootlessUID() int { - return os.Getuid() -} - -// GetRootlessGID returns the GID of the user in the parent userNS -func GetRootlessGID() int { - return os.Getgid() -} - -// RootlessEnv returns the environment settings for the rootless containers -func RootlessEnv() []string { - return append(os.Environ(), UsernsEnvName+"=") -} - -// MaybeReexecUsingUserNamespace re-exec the process in a new namespace -func MaybeReexecUsingUserNamespace(evenForRoot bool) { -} - -// GetHostIDMappings reads mappings for the specified process (or the current -// process if pid is "self" or an empty string) from the kernel. -func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { - return nil, nil, nil -} - -// ParseIDMappings parses mapping triples. -func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { - return nil, nil, nil -} - -// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. -func HasCapSysAdmin() (bool, error) { - return os.Geteuid() == 0, nil -} diff --git a/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported_cgo.go deleted file mode 100644 index ae2869d74..000000000 --- a/vendor/go.podman.io/storage/pkg/unshare/unshare_unsupported_cgo.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build cgo && !(linux || freebsd) - -package unshare - -// Go refuses to compile a subpackage with CGO_ENABLED=1 if there is a *.c file but no 'import "C"'. -// OTOH if we did have an 'import "C"', the Linux-only code would fail to compile. -// So, satisfy the Go compiler by using import "C" but #ifdef-ing out all of the code. 
- -// #cgo CPPFLAGS: -DUNSHARE_NO_CODE_AT_ALL -import "C" diff --git a/vendor/modules.txt b/vendor/modules.txt index a05794326..637d0af5b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -10,8 +10,6 @@ filippo.io/edwards25519/field github.com/AlecAivazis/survey/v2 github.com/AlecAivazis/survey/v2/core github.com/AlecAivazis/survey/v2/terminal -# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 -## explicit; go 1.16 # github.com/BurntSushi/toml v1.5.0 ## explicit; go 1.18 github.com/BurntSushi/toml @@ -70,12 +68,6 @@ github.com/containerd/platforms # github.com/containerd/typeurl/v2 v2.2.3 ## explicit; go 1.21 github.com/containerd/typeurl/v2 -# github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 -## explicit -github.com/containers/libtrust -# github.com/containers/ocicrypt v1.2.1 -## explicit; go 1.22 -github.com/containers/ocicrypt/spec # github.com/crazy-max/cron/v3 v3.1.1 ## explicit; go 1.12 github.com/crazy-max/cron/v3 @@ -90,23 +82,14 @@ github.com/crazy-max/gonfig/file github.com/crazy-max/gonfig/flag github.com/crazy-max/gonfig/parser github.com/crazy-max/gonfig/types +# github.com/creack/pty v1.1.18 +## explicit; go 1.13 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew # github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference -# github.com/docker/cli v29.1.4+incompatible -## explicit -github.com/docker/cli/cli/config -github.com/docker/cli/cli/config/configfile -github.com/docker/cli/cli/config/credentials -github.com/docker/cli/cli/config/memorystore -github.com/docker/cli/cli/config/types -# github.com/docker/distribution v2.8.3+incompatible -## explicit -github.com/docker/distribution/registry/api/errcode -github.com/docker/distribution/registry/api/v2 # github.com/docker/docker v28.5.2+incompatible ## explicit github.com/docker/docker/api @@ -131,10 +114,6 @@ github.com/docker/docker/api/types/time github.com/docker/docker/api/types/versions github.com/docker/docker/api/types/volume github.com/docker/docker/client -# github.com/docker/docker-credential-helpers v0.9.5 -## explicit; go 1.21 -github.com/docker/docker-credential-helpers/client -github.com/docker/docker-credential-helpers/credentials # github.com/docker/go-connections v0.6.0 ## explicit; go 1.18 github.com/docker/go-connections/nat @@ -143,6 +122,9 @@ github.com/docker/go-connections/tlsconfig # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units +# github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 +## explicit +github.com/docker/libtrust # github.com/dromara/carbon/v2 v2.6.16 ## explicit; go 1.18 github.com/dromara/carbon/v2 @@ -262,9 +244,6 @@ github.com/google/uuid # github.com/gorilla/css v1.0.1 ## explicit; go 1.20 github.com/gorilla/css/scanner -# github.com/gorilla/mux v1.8.1 -## explicit; go 1.20 -github.com/gorilla/mux # github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 ## explicit; go 1.20 github.com/gorilla/websocket @@ -306,6 +285,16 @@ github.com/json-iterator/go # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 ## explicit github.com/kballard/go-shellquote +# github.com/klauspost/compress v1.18.4 +## explicit; go 1.23 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/le +github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/zstd 
+github.com/klauspost/compress/zstd/internal/xxhash # github.com/leodido/go-urn v1.4.0 ## explicit; go 1.18 github.com/leodido/go-urn @@ -349,15 +338,8 @@ github.com/moby/buildkit/util/suggest github.com/moby/docker-image-spec/specs-go/v1 # github.com/moby/sys/atomicwriter v0.1.0 ## explicit; go 1.18 -# github.com/moby/sys/capability v0.4.0 -## explicit; go 1.21 -github.com/moby/sys/capability -# github.com/moby/sys/mountinfo v0.7.2 -## explicit; go 1.17 -github.com/moby/sys/mountinfo -# github.com/moby/sys/user v0.4.0 -## explicit; go 1.17 -github.com/moby/sys/user +# github.com/moby/term v0.5.2 +## explicit; go 1.18 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -383,9 +365,6 @@ github.com/opencontainers/go-digest ## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runtime-spec v1.3.0 -## explicit -github.com/opencontainers/runtime-spec/specs-go # github.com/panjf2000/ants/v2 v2.11.5 ## explicit; go 1.18 github.com/panjf2000/ants/v2 @@ -405,6 +384,45 @@ github.com/pmezard/go-difflib/difflib # github.com/rabbitmq/amqp091-go v1.10.0 ## explicit; go 1.20 github.com/rabbitmq/amqp091-go +# github.com/regclient/regclient v0.11.2 +## explicit; go 1.25.0 +github.com/regclient/regclient +github.com/regclient/regclient/config +github.com/regclient/regclient/internal/auth +github.com/regclient/regclient/internal/cache +github.com/regclient/regclient/internal/conffile +github.com/regclient/regclient/internal/httplink +github.com/regclient/regclient/internal/limitread +github.com/regclient/regclient/internal/pqueue +github.com/regclient/regclient/internal/reghttp +github.com/regclient/regclient/internal/reqmeta +github.com/regclient/regclient/internal/sloghandle +github.com/regclient/regclient/internal/strparse +github.com/regclient/regclient/internal/timejson +github.com/regclient/regclient/internal/units +github.com/regclient/regclient/internal/version +github.com/regclient/regclient/pkg/archive +github.com/regclient/regclient/scheme +github.com/regclient/regclient/scheme/ocidir +github.com/regclient/regclient/scheme/reg +github.com/regclient/regclient/types +github.com/regclient/regclient/types/blob +github.com/regclient/regclient/types/descriptor +github.com/regclient/regclient/types/docker +github.com/regclient/regclient/types/docker/schema1 +github.com/regclient/regclient/types/docker/schema2 +github.com/regclient/regclient/types/errs +github.com/regclient/regclient/types/manifest +github.com/regclient/regclient/types/mediatype +github.com/regclient/regclient/types/oci +github.com/regclient/regclient/types/oci/v1 +github.com/regclient/regclient/types/ping +github.com/regclient/regclient/types/platform +github.com/regclient/regclient/types/ref +github.com/regclient/regclient/types/referrer +github.com/regclient/regclient/types/repo +github.com/regclient/regclient/types/tag +github.com/regclient/regclient/types/warning # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg @@ -420,7 +438,7 @@ github.com/russross/blackfriday/v2 # github.com/sirupsen/logrus v1.9.4 ## explicit; go 1.17 github.com/sirupsen/logrus -# github.com/spf13/pflag v1.0.9 +# github.com/spf13/pflag v1.0.10 ## explicit; go 1.12 github.com/spf13/pflag # github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf @@ -446,6 +464,12 @@ github.com/tidwall/sjson # github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 ## explicit; go 1.16 
github.com/tonistiigi/go-csvvalue +# github.com/ulikunitz/xz v0.5.15 +## explicit; go 1.12 +github.com/ulikunitz/xz +github.com/ulikunitz/xz/internal/hash +github.com/ulikunitz/xz/internal/xlog +github.com/ulikunitz/xz/lzma # github.com/vanng822/css v0.0.0-20190504095207-a21e860bcd04 ## explicit github.com/vanng822/css @@ -507,56 +531,6 @@ go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.podman.io/image/v5 v5.39.1 -## explicit; go 1.24.0 -go.podman.io/image/v5/docker -go.podman.io/image/v5/docker/policyconfiguration -go.podman.io/image/v5/docker/reference -go.podman.io/image/v5/internal/blobinfocache -go.podman.io/image/v5/internal/image -go.podman.io/image/v5/internal/imagedestination/impl -go.podman.io/image/v5/internal/imagedestination/stubs -go.podman.io/image/v5/internal/imagesource -go.podman.io/image/v5/internal/imagesource/impl -go.podman.io/image/v5/internal/imagesource/stubs -go.podman.io/image/v5/internal/iolimits -go.podman.io/image/v5/internal/manifest -go.podman.io/image/v5/internal/multierr -go.podman.io/image/v5/internal/pkg/platform -go.podman.io/image/v5/internal/private -go.podman.io/image/v5/internal/putblobdigest -go.podman.io/image/v5/internal/rootless -go.podman.io/image/v5/internal/set -go.podman.io/image/v5/internal/signature -go.podman.io/image/v5/internal/streamdigest -go.podman.io/image/v5/internal/tmpdir -go.podman.io/image/v5/internal/uploadreader -go.podman.io/image/v5/internal/useragent -go.podman.io/image/v5/manifest -go.podman.io/image/v5/pkg/blobinfocache/none -go.podman.io/image/v5/pkg/compression/internal -go.podman.io/image/v5/pkg/compression/types -go.podman.io/image/v5/pkg/docker/config -go.podman.io/image/v5/pkg/strslice -go.podman.io/image/v5/pkg/sysregistriesv2 -go.podman.io/image/v5/pkg/tlsclientconfig -go.podman.io/image/v5/transports -go.podman.io/image/v5/types -go.podman.io/image/v5/version -# go.podman.io/storage v1.62.0 -## explicit; go 1.24.0 -go.podman.io/storage/internal/rawfilelock -go.podman.io/storage/pkg/fileutils -go.podman.io/storage/pkg/homedir -go.podman.io/storage/pkg/idtools -go.podman.io/storage/pkg/ioutils -go.podman.io/storage/pkg/lockfile -go.podman.io/storage/pkg/longpath -go.podman.io/storage/pkg/mount -go.podman.io/storage/pkg/reexec -go.podman.io/storage/pkg/regexp -go.podman.io/storage/pkg/system -go.podman.io/storage/pkg/unshare # go.yaml.in/yaml/v2 v2.4.3 ## explicit; go 1.15 go.yaml.in/yaml/v2 @@ -756,6 +730,8 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 +# gotest.tools/v3 v3.5.2 +## explicit; go 1.17 # k8s.io/api v0.35.2 ## explicit; go 1.25.0 k8s.io/api/admissionregistration/v1 @@ -1073,4 +1049,3 @@ sigs.k8s.io/structured-merge-diff/v6/value # sigs.k8s.io/yaml v1.6.0 ## explicit; go 1.22 sigs.k8s.io/yaml -# google.golang.org/genproto => google.golang.org/genproto v0.0.0-20250324211829-b45e905df463 From 437c5a745b1adbddb54f4794c5f6aa82009e1d27 Mon Sep 17 00:00:00 2001 From: CrazyMax <1951866+crazy-max@users.noreply.github.com> Date: Sat, 28 Mar 2026 23:49:29 +0100 Subject: [PATCH 4/4] wip: keep 429 backoff state request local in reghttp --- .../regclient/internal/reghttp/http.go | 93 ++++++++----------- 1 file changed, 41 insertions(+), 52 deletions(-) diff --git a/vendor/github.com/regclient/regclient/internal/reghttp/http.go b/vendor/github.com/regclient/regclient/internal/reghttp/http.go index 7f857881c..e1e2107ec 100644 --- 
a/vendor/github.com/regclient/regclient/internal/reghttp/http.go +++ b/vendor/github.com/regclient/regclient/internal/reghttp/http.go @@ -44,7 +44,6 @@ var ( const ( DefaultRetryLimit = 5 // number of times a request will be retried - backoffResetCount = 5 // number of successful requests needed to reduce the backoff ) // Client is an HTTP client wrapper. @@ -64,18 +63,16 @@ type Client struct { } type clientHost struct { - config *config.Host // config entry - httpClient *http.Client // modified http client for registry specific settings - userAgent string // user agent to specify in http request headers - slog *slog.Logger // logging for tracing and failures - auth map[string]*auth.Auth // map of auth handlers by repository - backoffCur int // current count of backoffs for this host - backoffLast time.Time // time the last request was released, this may be in the future if there is a queue, or zero if no delay is needed - backoffReset int // count of successful requests when a backoff is experienced, once [backoffResetCount] is reached, [backoffCur] is reduced by one and this is reset to 0 - reqFreq time.Duration // how long between submitting requests for this host - reqNext time.Time // time to release the next request - throttle *pqueue.Queue[reqmeta.Data] // limit concurrent requests to the host - mu sync.Mutex // mutex to prevent data races + config *config.Host // config entry + httpClient *http.Client // modified http client for registry specific settings + userAgent string // user agent to specify in http request headers + slog *slog.Logger // logging for tracing and failures + auth map[string]*auth.Auth // map of auth handlers by repository + backoffLast time.Time // time a backoff was last seen, used to deprioritize mirrors for later requests + reqFreq time.Duration // how long between submitting requests for this host + reqNext time.Time // time to release the next request + throttle *pqueue.Queue[reqmeta.Data] // limit concurrent requests to the host + mu sync.Mutex // mutex to prevent data races } // Req is a request to send to a registry. 
@@ -109,6 +106,8 @@ type Resp struct {
 	reader            io.Reader
 	readCur, readMax  int64
 	retryCount        int
+	backoffCur        int       // request-local count of backoff retries
+	backoffLast       time.Time // time the last backoff for this request was released
 	throttleDone      func()
 }
@@ -645,75 +644,65 @@ func (resp *Resp) Seek(offset int64, whence int) (int64, error) {
 }
 
 func (resp *Resp) backoffGet() time.Time {
-	c := resp.client
-	ch := c.getHost(resp.mirror)
-	ch.mu.Lock()
-	defer ch.mu.Unlock()
-	if ch.backoffCur > 0 {
-		delay := c.delayInit << ch.backoffCur
-		delay = min(delay, c.delayMax)
-		next := ch.backoffLast.Add(delay)
+	if resp.backoffCur > 0 {
+		delay := resp.client.delayInit << resp.backoffCur
+		delay = min(delay, resp.client.delayMax)
+		next := resp.backoffLast.Add(delay)
 		now := time.Now()
 		if now.After(next) {
 			next = now
 		}
-		ch.backoffLast = next
+		resp.backoffLast = next
 		return next
 	}
 	// reset a stale "retry-after" time
-	if !ch.backoffLast.IsZero() && ch.backoffLast.Before(time.Now()) {
-		ch.backoffLast = time.Time{}
+	if !resp.backoffLast.IsZero() && resp.backoffLast.Before(time.Now()) {
+		resp.backoffLast = time.Time{}
 	}
-	return ch.backoffLast
+	return resp.backoffLast
 }
 
 func (resp *Resp) backoffSet() error {
 	c := resp.client
-	ch := c.getHost(resp.mirror)
-	ch.mu.Lock()
-	defer ch.mu.Unlock()
+	now := time.Now()
 	// check rate limit header and use that directly if possible
 	if resp.resp != nil && resp.resp.Header.Get("Retry-After") != "" {
 		ras := resp.resp.Header.Get("Retry-After")
 		ra, _ := time.ParseDuration(ras + "s")
 		if ra > 0 {
-			next := time.Now().Add(ra)
-			if ch.backoffLast.Before(next) {
-				ch.backoffLast = next
+			next := now.Add(ra)
+			if resp.backoffLast.Before(next) {
+				resp.backoffLast = next
 			}
+			resp.backoffHostSet(next)
 			return nil
 		}
 	}
-	// Else track the number of backoffs and fail when the limit is exceeded.
-	// New requests always get at least one try, but fail fast if the server has been throwing errors.
-	ch.backoffCur++
-	if ch.backoffLast.IsZero() {
-		ch.backoffLast = time.Now()
+	// Track backoffs for this request only. Shared host backoff state caused new
+	// requests to fail fast after an earlier request had exhausted the shared retry budget.
+	resp.backoffCur++
+	if resp.backoffLast.IsZero() {
+		resp.backoffLast = now
 	}
-	if ch.backoffCur >= c.retryLimit {
-		return fmt.Errorf("%w: backoffs %d", errs.ErrBackoffLimit, ch.backoffCur)
+	resp.backoffHostSet(resp.backoffLast)
+	if resp.backoffCur >= c.retryLimit {
+		return fmt.Errorf("%w: backoffs %d", errs.ErrBackoffLimit, resp.backoffCur)
 	}
 	return nil
 }
 
 func (resp *Resp) backoffReset() {
-	c := resp.client
-	ch := c.getHost(resp.mirror)
+	resp.backoffCur = 0
+	resp.backoffLast = time.Time{}
+}
+
+func (resp *Resp) backoffHostSet(next time.Time) {
+	ch := resp.client.getHost(resp.mirror)
 	ch.mu.Lock()
 	defer ch.mu.Unlock()
-	if ch.backoffCur > 0 {
-		ch.backoffReset++
-		// If enough successful requests are seen, lower the backoffCur count.
-		// This requires multiple successful requests of a flaky server, but quickly drops when above the retry limit.
-		if ch.backoffReset > backoffResetCount || ch.backoffCur > c.retryLimit {
-			ch.backoffReset = 0
-			ch.backoffCur--
-			if ch.backoffCur == 0 {
-				// reset the last time to the zero value
-				ch.backoffLast = time.Time{}
-			}
-		}
+	if ch.backoffLast.Before(next) {
+		ch.backoffLast = next
 	}
}
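
The hunks above make the retry budget request-local: each Resp carries its own backoffCur/backoffLast, backoffGet computes the wait as delayInit << backoffCur capped at delayMax, backoffSet fails the request once backoffCur reaches retryLimit, and only the release time is mirrored into the shared clientHost so later requests can deprioritize a throttled mirror. What follows is a minimal standalone sketch of that policy, not regclient code: the names delayInit, delayMax, and retryLimit mirror the reghttp client settings, while the backoff type and the driver in main are illustrative assumptions.

// backoff_sketch.go: standalone model of the request-local backoff policy.
package main

import (
	"errors"
	"fmt"
	"time"
)

var errBackoffLimit = errors.New("backoff limit reached")

// backoff holds retry state for a single request, mirroring the
// backoffCur/backoffLast fields added to Resp above.
type backoff struct {
	delayInit  time.Duration // base delay before the first retry
	delayMax   time.Duration // cap on the exponential delay
	retryLimit int           // backoffs allowed before giving up
	cur        int           // request-local backoff count
	last       time.Time     // when the current delay window was released
}

// set records one failed attempt; it errors once this request's own
// budget is spent, without consulting any shared host state.
func (b *backoff) set() error {
	b.cur++
	if b.last.IsZero() {
		b.last = time.Now()
	}
	if b.cur >= b.retryLimit {
		return fmt.Errorf("%w: backoffs %d", errBackoffLimit, b.cur)
	}
	return nil
}

// get returns the earliest time the next attempt may run:
// delayInit << cur, capped at delayMax, measured from the last release.
func (b *backoff) get() time.Time {
	if b.cur == 0 {
		return time.Time{}
	}
	delay := min(b.delayInit<<b.cur, b.delayMax)
	next := b.last.Add(delay)
	if now := time.Now(); now.After(next) {
		next = now
	}
	b.last = next
	return next
}

func main() {
	b := backoff{delayInit: time.Second, delayMax: 30 * time.Second, retryLimit: 5}
	for {
		if err := b.set(); err != nil {
			fmt.Println(err) // fails on the fifth backoff of this request
			return
		}
		fmt.Println("next attempt no earlier than", b.get())
	}
}

Run against a simulated stream of 429s, the sketch exhausts one request's budget on its fifth backoff while leaving every other request's counter at zero, which is the behavioral change the commit message describes; the real code additionally publishes each release time to the host via backoffHostSet so mirror ordering still reflects recent throttling.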