From 3855e86897c021a4fdb56e9a194ce4493d045b9f Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Thu, 18 Apr 2024 15:24:10 -0700 Subject: [PATCH 01/12] docs: include instructions for usage with AWS S3 One Zone --- .gitignore | 43 ++++++++++++++- deployments/s3_express/.terraform.lock.hcl | 25 +++++++++ deployments/s3_express/.tool-versions | 1 + deployments/s3_express/README.md | 45 ++++++++++++++++ deployments/s3_express/main.tf | 51 ++++++++++++++++++ deployments/s3_express/plan.tfplan | Bin 0 -> 5958 bytes .../s3_express/settings.s3express.example | 21 ++++++++ deployments/s3_express/test_data/test.txt | 2 + deployments/s3_express/variables.tf | 20 +++++++ deployments/s3_express/versions.tf | 8 +++ docs/getting_started.md | 31 +++++++++++ 11 files changed, 246 insertions(+), 1 deletion(-) create mode 100644 deployments/s3_express/.terraform.lock.hcl create mode 100644 deployments/s3_express/.tool-versions create mode 100644 deployments/s3_express/README.md create mode 100644 deployments/s3_express/main.tf create mode 100644 deployments/s3_express/plan.tfplan create mode 100644 deployments/s3_express/settings.s3express.example create mode 100644 deployments/s3_express/test_data/test.txt create mode 100644 deployments/s3_express/variables.tf create mode 100644 deployments/s3_express/versions.tf diff --git a/.gitignore b/.gitignore index d2a86aba..f5e56d93 100644 --- a/.gitignore +++ b/.gitignore @@ -346,4 +346,45 @@ test-settings.* s3-requests.http httpRequests/ -.bin/ \ No newline at end of file +.bin/ + +# Created by https://www.toptal.com/developers/gitignore/api/terraform +# Edit at https://www.toptal.com/developers/gitignore?templates=terraform + +### Terraform ### +# Local .terraform directories +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* + +# Crash log files +crash.log +crash.*.log + +# Exclude all .tfvars files, which are likely to contain sensitive data, such as +# password, private keys, and other secrets. These should not be part of version +# control as they are data points which are potentially sensitive and subject +# to change depending on the environment. +*.tfvars +*.tfvars.json + +# Ignore override files as they are usually used to override resources locally and so +# are not checked in +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# Include override files you do wish to add to version control using negated pattern +# !example_override.tf + +# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan +# example: *tfplan* + +# Ignore CLI configuration files +.terraformrc +terraform.rc + +# End of https://www.toptal.com/developers/gitignore/api/terraform diff --git a/deployments/s3_express/.terraform.lock.hcl b/deployments/s3_express/.terraform.lock.hcl new file mode 100644 index 00000000..d6034f09 --- /dev/null +++ b/deployments/s3_express/.terraform.lock.hcl @@ -0,0 +1,25 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+
+provider "registry.terraform.io/hashicorp/aws" {
+  version     = "5.45.0"
+  constraints = "5.45.0"
+  hashes = [
+    "h1:8m3+C1VNevzU/8FsABoKp2rTOx3Ue7674INfhfk0TZY=",
+    "zh:1379bcf45aef3d486ee18b4f767bfecd40a0056510d26107f388be3d7994c368",
+    "zh:1615a6f5495acfb3a0cb72324587261dd4d72711a3cc51aff13167b14531501e",
+    "zh:18b69a0f33f8b1862fbd3f200756b7e83e087b73687085f2cf9c7da4c318e3e6",
+    "zh:2c5e7aecd197bc3d3b19290bad8cf4c390c2c6a77bb165da4e11f53f2dfe2e54",
+    "zh:3794da9bef97596e3bc60e12cdd915bda5ec2ed62cd1cd93723d58b4981905fe",
+    "zh:40a5e45ed91801f83db76dffd467dcf425ea2ca8642327cf01119601cb86021c",
+    "zh:4abfc3f53d0256a7d5d1fa5e931e4601b02db3d1da28f452341d3823d0518f1a",
+    "zh:4eb0e98078f79aeb06b5ff6115286dc2135d12a80287885698d04036425494a2",
+    "zh:75470efbadea4a8d783642497acaeec5077fc4a7f3df3340defeaa1c7de29bf7",
+    "zh:8861a0b4891d5fa2fa7142f236ae613cea966c45b5472e3915a4ac3abcbaf487",
+    "zh:8bf6f21cd9390b742ca0b4393fde92616ca9e6553fb75003a0999006ad233d35",
+    "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
+    "zh:ad73008a044e75d337acda910fb54d8b81a366873c8a413fec1291034899a814",
+    "zh:bf261713b0b8bebfe8c199291365b87d9043849f28a2dc764bafdde73ae43693",
+    "zh:da3bafa1fd830be418dfcc730e85085fe67c0d415c066716f2ac350a2306f40a",
+  ]
+}
diff --git a/deployments/s3_express/.tool-versions b/deployments/s3_express/.tool-versions
new file mode 100644
index 00000000..fad6fadc
--- /dev/null
+++ b/deployments/s3_express/.tool-versions
@@ -0,0 +1 @@
+terraform 1.8.1
diff --git a/deployments/s3_express/README.md b/deployments/s3_express/README.md
new file mode 100644
index 00000000..75242564
--- /dev/null
+++ b/deployments/s3_express/README.md
@@ -0,0 +1,45 @@
+# Purpose
+This Terraform script sets up an AWS S3 Express One Zone bucket for testing.
+
+## Usage
+Use environment variables to authenticate:
+
+```bash
+export AWS_ACCESS_KEY_ID="anaccesskey"
+export AWS_SECRET_ACCESS_KEY="asecretkey"
+export AWS_REGION="us-west-2"
+```
+
+Generate a plan:
+```bash
+terraform plan -out=plan.tfplan \
+> -var="bucket_name=my-bucket-name--usw2-az1--x-s3" \
+> -var="region=us-west-2" \
+> -var="availability_zone_id=usw2-az1" \
+> -var="owner_email=my_email@foo.com"
+```
+> [!NOTE]
+> AWS S3 Express One Zone is only available in [certain regions and availability zones](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints). If you get an error such as `api error InvalidBucketName` even though you have met the [naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html), you have likely chosen an unsupported region/availability zone combination.
+
+
+If you are comfortable with the plan, apply it:
+```
+terraform apply "plan.tfplan"
+```
+
+Then build the image (you can also use the latest release):
+```bash
+docker build --file Dockerfile.oss --tag nginx-s3-gateway:oss --tag nginx-s3-gateway .
+```
+
+Configure and run the image:
+
+```bash
+docker run --rm --env-file ./settings.s3express.example --publish 80:80 --name nginx-s3-gateway \
+  nginx-s3-gateway:oss
+```
+
+Confirm that it is working.
The terraform script will prepopulate the bucket with a single test object +```bash +curl http://localhost:80/test.txt +``` diff --git a/deployments/s3_express/main.tf b/deployments/s3_express/main.tf new file mode 100644 index 00000000..55edfd62 --- /dev/null +++ b/deployments/s3_express/main.tf @@ -0,0 +1,51 @@ +provider "aws" { + region = var.region +} + +resource "aws_s3_directory_bucket" "example" { + bucket = var.bucket_name + location { + name = var.availability_zone_id + } + + force_destroy = true +} + +data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} + +data "aws_iam_policy_document" "example" { + statement { + effect = "Allow" + + actions = [ + "s3express:*", + ] + + resources = [ + aws_s3_directory_bucket.example.arn, + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } +} + +resource "aws_s3_bucket_policy" "example" { + bucket = aws_s3_directory_bucket.example.bucket + policy = data.aws_iam_policy_document.example.json +} + +# The filemd5() function is available in Terraform 0.11.12 and later +# For Terraform 0.11.11 and earlier, use the md5() function and the file() function: +# etag = "${md5(file("path/to/file"))}" +# etag = filemd5("path/to/file") +resource "aws_s3_object" "example" { + bucket = aws_s3_directory_bucket.example.bucket + key = "test.txt" + source = "${path.root}/test_data/test.txt" +} + + diff --git a/deployments/s3_express/plan.tfplan b/deployments/s3_express/plan.tfplan new file mode 100644 index 0000000000000000000000000000000000000000..294916db59483c83c855c858bc6eb4b395c85c4b GIT binary patch literal 5958 zcmaKwWmuHk7RLt(fuV$#J+ zrN#&o^YR4aO4NK?oQ?OCdQ&Jc$p?ihs-VvKQp75%O|!&`I`J0_x-i)GzZnTpLr}et z%nW%My_F2&(1^9UtDRtfVITWOWoj{k)cgjVvW3W(qQuXjMr7_W6HBp-&V`{1C z3 zaV?>5Qy9*brG>%|PBkg3H$&Fe0a;-NZo#exV=kwI%=v`3L333Ywjab;Mz-JU?tF_3 zXX#MOY4_XR7)KOSdPfams85KN$NMNj9*BK&O|E=gn_k$!*xIG2n{LOVI2?`Z z9?g;D`GnIvmC;-dQ-CgdQ+$jGE>6I>z{pNpofx9xNf|CC-2S#fE7hJeNhkXw57Bp8 zE*aSAv&82-cj6cspGIx)Ji#uU8+pH%%@Oi(NlbIcJe@^6J!+CBU^VHrl5h_+_Z2UixSb<} zlnR7w`p^BV!TvSiYdin|iPTWOjTxALc+!Xp05IGJ05tx!;&1=A;$7WL+${bs`6WFk zr@}!Z|6Fyb*JZME46j;{+th@Xda05_P1I~rId%RQK94gzk9S9=LsnmwD~ztmcrK^; zHQm!lc@M23DR5A!ud5fzCcjYcm`p1MX{0a)uOBvzw!aAZ_cteV<<@%*@y^{&`})3EmlZ(=N*FL56HOF!84G zSCmPf2^`8X(e~TDE!dP-d(j1VZ3ZG)G^@^aXv?TeN-;}y>Z4FAj7{yLHhASLvMA@)0`KzeeKN5or#uJP!L(HE!*XB1k0(C8TX6gt3oR*SZL(2c-93Z{)W?c!&+t_@AL;r??fCh%xN2Yz zM&Wgn5;w=i)A4m$IZqINaq~rz>(b&UJ)u+qexH(J9g^^{??Rfi^2<{RoSe>(GEmP+ z5FKp;_^oJU`gg}ku$@C)YFE$n^0Z)_$~6hq zURnbOL`{wU`w}|0zcp;UD)4ow%+JGaCX{2kI>^K!(Jf2k?+)?vRTJLF2^h@u#GEil4 zRB+({^;y6HzL6aZn2LVR6XQ%^l_t|7t~3q4?!`wFCr{KoC&Oj{(C7`PzAPF!-}j=8m?D!^AHkfk5Edu5PY5XnX=+@lz;A{%DRw5Hh-`;eeiA0lQsak zWhsKNlR!9}ed!Bb#nw0(r(mTA^$V<;PAa_LXu4%Fpe>>0g#=zhB7z@2%03L%fblzO z6mJr&$Ywvhh;=S2n>H7rF;wH?;{KrXP33YyoFf#@vAi$VKXsZ@EJ}eRzXDmqUFBSU z>X>UcX?<))0KC5~6#jiEU9xg+*zHI>n!c5t%5^_x*Vd5&?pKg7jLe6&vVD_^E5|g- z*FN@8_&_Oe*{QTZI>E9v|1#Js-@e4SM%JU`iDvTzCS` zu#Vk*5~TU~0z>(vOLl*`iA&pd4`AS0_3bDU(c~#Yn@qnc!h$RS2Bvy9Es(9WQWM;D z*D%nVa z>EabKrKDcK&lpyh-b?u%x0`y#92%+Yf=b_GASXoGP>N58ATIa)vi6ukAdR-CC%)|i zWUrM_M1Az_yW>8DnQ65hVvmjT@YE58%5gBm{fR#v+ED}t;)X3-6gwxJbbjnxjXoL{ zv`D?kc3pb>iByP0xWT6+{{}Ic|3i#cGAFJxz1txoT#QPN;;J20yR;$a_ z%>!WOu245X{(lie^k2l7IXPO|SaCaW zayyvVIC8mJ{!N;g*p#I9@v4qg988qFDb`VsWBzqVj)(&F)?D)-cGv;#$y3u{f6op+R0kE-VyGY7#Bd+|U2PUVX7maX<(w7)%k&*jfXKlZL1%rk zO(FGFPzMbU7WNjtJIHT0Cau_cTclcy;jQ zE&{r!`eKSMTu%VQqo)nUuN($2r@-9fuDCkYp5u}wCX3` 
zQidb82_#@1NVKY0GXnSr?>^qB*KA`uu=nS96)U3~*Wr4n+{URIC>-w3fWF0_4m^+t zT%0@px6+!?a!zKNS0!?!1;bzp5UjmKpIfABN$GXbOMPjUosRE~QDlKMq53)9@xx9L z?sqX4L^oW?h@eY^RHq{`V65Ahu_7ph9V5|V%Ugozlc=MIN*iqQ-wy2Gb1$6`$iFp! z!%wLf;*mqh!wYfM)0vPY{$s|=72Em(QFfbS-nSJo=^dG~Kr!~>YmC2AsH9{l_7VjE z$h=9Te}5vA{gFZ*CN4H6ruG)DzvWTFnC-q4&ZBdG?Da~E;7>;~B*hJWfm&^tJ@?3& z?&Re1O!Y@9uXeVCZ5+WsRO6nL*e|v5%pnTo9u~+RjKo`gmJ;7~=jk1-LCta<1=kf^ zDWM)O^PfryJl6$yh~5WuQ_R5n&Cu5_qy{J}^S9YU>#h0=WW)JA>T9cCmxuD>Cb~{Q z`?-s{1|{vc3yZuaJT$5FaockniDy{q>#v;*(#}3i=Ha4KXxpE$HsF7%QI9v;AJMP! z(;nxRqiU98qH(6tDFQ~8=6Kan#<9X4umJt#7^0?q5?dLsA`b?ZNyU*iwm;ho^M^b= zhd0te#ZCPFJ$9sj#?HdU)yB#3x6pBn@Z$CI5IlUhQcAW3fCi-q1D`NO!_e*1nLMD+ zJ&UGePstnM!8T717x$Ha4vA3|#gO6RpF%;FqJ&@+V{Jtp6- z*`D^w8r;VcXq^2D{QF;%D)XD(oY(DU{QG%{e><;}x%-dbZ0qXuQxA#S*3u}u88>#U zRt1J>X=qJBAx3=BQikG8T;OU;I&E`88pM50|#Nz9ev#G-~ zcJte<4j;ok1rO1uD+-G~g@;IVE=a7{YIA2DIaWE$3%t>+H8b=8P7jCiG+z zJ=5ne$n%#~9kMLWai6D3y+gtGg1Q{^ZoZW%)K))FQ<(d6H9@z`u_?jo^@SB?67 z^Hua(!$~$OZTY>!RXFS^-N~AT?+Jru{1I-#=F&W8=w_I4woPTh0zsNM#=fV0N6pKh zZiw{1@;l3Pr4YE0K>X2vlpo415a6fm`KvtgL-zdL{t!X`T>NW$^Fu-TX~#F)xmo_M zr~JA4*O$wW7U-urVg9}P_lD?C7r)+IKk)I>rf`0^`1c3TpPqi@_>Y_JrxD}-?&%-z zy+7TR{DRpZ@c(I=1b?{uhpXUEZ@=>M2bzBx%AH@m{S&DF^z~~p{2nro8(;sAzZ%Nu TH%|t@e-(4lTL8c*=*R3oxFuew literal 0 HcmV?d00001 diff --git a/deployments/s3_express/settings.s3express.example b/deployments/s3_express/settings.s3express.example new file mode 100644 index 00000000..00630be2 --- /dev/null +++ b/deployments/s3_express/settings.s3express.example @@ -0,0 +1,21 @@ +S3_BUCKET_NAME=bucket_name=my-bucket-name--usw2-az1--x-s3 +AWS_ACCESS_KEY_ID= +AWS_SECRET_ACCESS_KEY= +AWS_SESSION_TOKEN= +S3_SERVER=bucket_name=my-bucket-name--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com +S3_SERVER_PORT=443 +S3_SERVER_PROTO=https +S3_REGION=us-west-2 +S3_STYLE=virtual +DEBUG=true +AWS_SIGS_VERSION=4 +ALLOW_DIRECTORY_LIST=false +PROVIDE_INDEX_PAGE=false +APPEND_SLASH_FOR_POSSIBLE_DIRECTORY=false +DIRECTORY_LISTING_PATH_PREFIX="" +PROXY_CACHE_MAX_SIZE=10g +PROXY_CACHE_SLICE_SIZE="1m" +PROXY_CACHE_INACTIVE=60m +PROXY_CACHE_VALID_OK=1h +PROXY_CACHE_VALID_NOTFOUND=1m +PROXY_CACHE_VALID_FORBIDDEN=30s diff --git a/deployments/s3_express/test_data/test.txt b/deployments/s3_express/test_data/test.txt new file mode 100644 index 00000000..b0a9adc7 --- /dev/null +++ b/deployments/s3_express/test_data/test.txt @@ -0,0 +1,2 @@ +Congratulations, friend. You are using Amazon S3 Express One Zone. 
+🚂🚂🚂 Choo-choo~ 🚂🚂🚂 \ No newline at end of file diff --git a/deployments/s3_express/variables.tf b/deployments/s3_express/variables.tf new file mode 100644 index 00000000..689eef56 --- /dev/null +++ b/deployments/s3_express/variables.tf @@ -0,0 +1,20 @@ +# Format for bucket name [bucket_name]--[azid]--x-s3 +variable "bucket_name" { + type = string + default = "example--usw2-az2--x-s3" +} + +variable "owner_email" { + type = string +} + +variable "region" { + type = string + default = "us-west-2" +} + +# "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#az-ids" +variable "availability_zone_id" { + type = string + default = "usw2-az2" +} diff --git a/deployments/s3_express/versions.tf b/deployments/s3_express/versions.tf new file mode 100644 index 00000000..a1aaa0de --- /dev/null +++ b/deployments/s3_express/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.45.0" + } + } +} diff --git a/docs/getting_started.md b/docs/getting_started.md index d3380817..036b8da6 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -124,6 +124,37 @@ Setting your slice size too small can have performance impacts since NGINX perfo You may make byte-range requests and normal requests for the same file and NGINX will automatically handle them differently. The caches for file chunks and normal file requests are separate on disk. +## Usage with AWS S3 Express One Zone +The gateway may be used to proxy files in the AWS S3 Express One Zone product (also called Directory Buckets). + +To do so, be sure that `S3_STYLE` is set to `virtual`. Additionally, the `S3_SERVER` configuration must be set a combination of the bucket name and the [Zonal Endpoint](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints). + +### Directory Bucket Names +See the [official documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) for the most up to date rules on Directory Bucket naming. + +Directory Buckets must have names matching this format: +``` +bucket-base-name--azid--x-s3 +``` +For example: +``` +bucket-base-name--usw2-az1--x-s3 +``` +### Final Configuration +The bucket name must be prepended to the zonal endpoint like this +``` +bucket-base-name--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com +``` +The above is the value that must be provided to the `S3_SERVER` variable. +Additionally, the `S3_BUCKET_NAME` must be set to the full bucket name with the suffix: +``` +bucket-base-name--usw2-az1--x-s3 +``` +Buckets created in the AWS UI don't require manual specification of a suffix but it must be included in the gateway configuration. + +### Trying it Out +A sample Terraform script to provision a bucket is provided in `/deployments/s3_express`. 
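+
+As a quick smoke test (a sketch only: it assumes the gateway container from the deployment README is listening on local port 80 and that the bucket was seeded by the Terraform script), you can fetch the test object and check its contents:
+
+```bash
+# Expect the greeting text seeded from test_data/test.txt
+curl -s http://localhost:80/test.txt | grep -q "Amazon S3 Express One Zone" \
+  && echo "gateway is serving objects from the directory bucket" \
+  || echo "unexpected response; check the gateway logs and settings file"
+```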
+ ## Running as a Systemd Service An [install script](/standalone_ubuntu_oss_install.sh) for the gateway shows From 41fe06a98bcf4208a986df9f1b772657b8accb6e Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Thu, 18 Apr 2024 16:10:48 -0700 Subject: [PATCH 02/12] fix typo --- deployments/s3_express/settings.s3express.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployments/s3_express/settings.s3express.example b/deployments/s3_express/settings.s3express.example index 00630be2..a7412f14 100644 --- a/deployments/s3_express/settings.s3express.example +++ b/deployments/s3_express/settings.s3express.example @@ -2,7 +2,7 @@ S3_BUCKET_NAME=bucket_name=my-bucket-name--usw2-az1--x-s3 AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= AWS_SESSION_TOKEN= -S3_SERVER=bucket_name=my-bucket-name--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com +S3_SERVER=my-bucket-name--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com S3_SERVER_PORT=443 S3_SERVER_PROTO=https S3_REGION=us-west-2 From 04244251d3be23fa641a3a2d1cb0f18ec5dd8146 Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Fri, 19 Apr 2024 11:37:04 -0700 Subject: [PATCH 03/12] fix typo --- deployments/s3_express/settings.s3express.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployments/s3_express/settings.s3express.example b/deployments/s3_express/settings.s3express.example index a7412f14..88d3dae5 100644 --- a/deployments/s3_express/settings.s3express.example +++ b/deployments/s3_express/settings.s3express.example @@ -1,4 +1,4 @@ -S3_BUCKET_NAME=bucket_name=my-bucket-name--usw2-az1--x-s3 +S3_BUCKET_NAME=my-bucket-name--usw2-az1--x-s3 AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= AWS_SESSION_TOKEN= From 16a2feaaa867f871049a89a579def1e062d427d5 Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Mon, 22 Apr 2024 08:54:27 -0700 Subject: [PATCH 04/12] move back to not including bucket in s3 address for express zone --- .gitignore | 2 +- common/etc/nginx/include/s3gateway.js | 2 +- deployments/s3_express/plan.tfplan | Bin 5958 -> 0 bytes .../s3_express/settings.s3express.example | 8 ++++---- oss/etc/nginx/templates/upstreams.conf.template | 2 +- plus/etc/nginx/templates/upstreams.conf.template | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) delete mode 100644 deployments/s3_express/plan.tfplan diff --git a/.gitignore b/.gitignore index f5e56d93..bfd15afa 100644 --- a/.gitignore +++ b/.gitignore @@ -386,5 +386,5 @@ override.tf.json # Ignore CLI configuration files .terraformrc terraform.rc - +.tfplan # End of https://www.toptal.com/developers/gitignore/api/terraform diff --git a/common/etc/nginx/include/s3gateway.js b/common/etc/nginx/include/s3gateway.js index 7a497cf8..5cf52802 100644 --- a/common/etc/nginx/include/s3gateway.js +++ b/common/etc/nginx/include/s3gateway.js @@ -86,7 +86,7 @@ const INDEX_PAGE = "index.html"; * Constant defining the service requests are being signed for. 
* @type {string} */ -const SERVICE = 's3'; +const SERVICE = 's3express'; /** * Transform the headers returned from S3 such that there isn't information diff --git a/deployments/s3_express/plan.tfplan b/deployments/s3_express/plan.tfplan deleted file mode 100644 index 294916db59483c83c855c858bc6eb4b395c85c4b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5958 zcmaKwWmuHk7RLt(fuV$#J+ zrN#&o^YR4aO4NK?oQ?OCdQ&Jc$p?ihs-VvKQp75%O|!&`I`J0_x-i)GzZnTpLr}et z%nW%My_F2&(1^9UtDRtfVITWOWoj{k)cgjVvW3W(qQuXjMr7_W6HBp-&V`{1C z3 zaV?>5Qy9*brG>%|PBkg3H$&Fe0a;-NZo#exV=kwI%=v`3L333Ywjab;Mz-JU?tF_3 zXX#MOY4_XR7)KOSdPfams85KN$NMNj9*BK&O|E=gn_k$!*xIG2n{LOVI2?`Z z9?g;D`GnIvmC;-dQ-CgdQ+$jGE>6I>z{pNpofx9xNf|CC-2S#fE7hJeNhkXw57Bp8 zE*aSAv&82-cj6cspGIx)Ji#uU8+pH%%@Oi(NlbIcJe@^6J!+CBU^VHrl5h_+_Z2UixSb<} zlnR7w`p^BV!TvSiYdin|iPTWOjTxALc+!Xp05IGJ05tx!;&1=A;$7WL+${bs`6WFk zr@}!Z|6Fyb*JZME46j;{+th@Xda05_P1I~rId%RQK94gzk9S9=LsnmwD~ztmcrK^; zHQm!lc@M23DR5A!ud5fzCcjYcm`p1MX{0a)uOBvzw!aAZ_cteV<<@%*@y^{&`})3EmlZ(=N*FL56HOF!84G zSCmPf2^`8X(e~TDE!dP-d(j1VZ3ZG)G^@^aXv?TeN-;}y>Z4FAj7{yLHhASLvMA@)0`KzeeKN5or#uJP!L(HE!*XB1k0(C8TX6gt3oR*SZL(2c-93Z{)W?c!&+t_@AL;r??fCh%xN2Yz zM&Wgn5;w=i)A4m$IZqINaq~rz>(b&UJ)u+qexH(J9g^^{??Rfi^2<{RoSe>(GEmP+ z5FKp;_^oJU`gg}ku$@C)YFE$n^0Z)_$~6hq zURnbOL`{wU`w}|0zcp;UD)4ow%+JGaCX{2kI>^K!(Jf2k?+)?vRTJLF2^h@u#GEil4 zRB+({^;y6HzL6aZn2LVR6XQ%^l_t|7t~3q4?!`wFCr{KoC&Oj{(C7`PzAPF!-}j=8m?D!^AHkfk5Edu5PY5XnX=+@lz;A{%DRw5Hh-`;eeiA0lQsak zWhsKNlR!9}ed!Bb#nw0(r(mTA^$V<;PAa_LXu4%Fpe>>0g#=zhB7z@2%03L%fblzO z6mJr&$Ywvhh;=S2n>H7rF;wH?;{KrXP33YyoFf#@vAi$VKXsZ@EJ}eRzXDmqUFBSU z>X>UcX?<))0KC5~6#jiEU9xg+*zHI>n!c5t%5^_x*Vd5&?pKg7jLe6&vVD_^E5|g- z*FN@8_&_Oe*{QTZI>E9v|1#Js-@e4SM%JU`iDvTzCS` zu#Vk*5~TU~0z>(vOLl*`iA&pd4`AS0_3bDU(c~#Yn@qnc!h$RS2Bvy9Es(9WQWM;D z*D%nVa z>EabKrKDcK&lpyh-b?u%x0`y#92%+Yf=b_GASXoGP>N58ATIa)vi6ukAdR-CC%)|i zWUrM_M1Az_yW>8DnQ65hVvmjT@YE58%5gBm{fR#v+ED}t;)X3-6gwxJbbjnxjXoL{ zv`D?kc3pb>iByP0xWT6+{{}Ic|3i#cGAFJxz1txoT#QPN;;J20yR;$a_ z%>!WOu245X{(lie^k2l7IXPO|SaCaW zayyvVIC8mJ{!N;g*p#I9@v4qg988qFDb`VsWBzqVj)(&F)?D)-cGv;#$y3u{f6op+R0kE-VyGY7#Bd+|U2PUVX7maX<(w7)%k&*jfXKlZL1%rk zO(FGFPzMbU7WNjtJIHT0Cau_cTclcy;jQ zE&{r!`eKSMTu%VQqo)nUuN($2r@-9fuDCkYp5u}wCX3` zQidb82_#@1NVKY0GXnSr?>^qB*KA`uu=nS96)U3~*Wr4n+{URIC>-w3fWF0_4m^+t zT%0@px6+!?a!zKNS0!?!1;bzp5UjmKpIfABN$GXbOMPjUosRE~QDlKMq53)9@xx9L z?sqX4L^oW?h@eY^RHq{`V65Ahu_7ph9V5|V%Ugozlc=MIN*iqQ-wy2Gb1$6`$iFp! z!%wLf;*mqh!wYfM)0vPY{$s|=72Em(QFfbS-nSJo=^dG~Kr!~>YmC2AsH9{l_7VjE z$h=9Te}5vA{gFZ*CN4H6ruG)DzvWTFnC-q4&ZBdG?Da~E;7>;~B*hJWfm&^tJ@?3& z?&Re1O!Y@9uXeVCZ5+WsRO6nL*e|v5%pnTo9u~+RjKo`gmJ;7~=jk1-LCta<1=kf^ zDWM)O^PfryJl6$yh~5WuQ_R5n&Cu5_qy{J}^S9YU>#h0=WW)JA>T9cCmxuD>Cb~{Q z`?-s{1|{vc3yZuaJT$5FaockniDy{q>#v;*(#}3i=Ha4KXxpE$HsF7%QI9v;AJMP! 
z(;nxRqiU98qH(6tDFQ~8=6Kan#<9X4umJt#7^0?q5?dLsA`b?ZNyU*iwm;ho^M^b= zhd0te#ZCPFJ$9sj#?HdU)yB#3x6pBn@Z$CI5IlUhQcAW3fCi-q1D`NO!_e*1nLMD+ zJ&UGePstnM!8T717x$Ha4vA3|#gO6RpF%;FqJ&@+V{Jtp6- z*`D^w8r;VcXq^2D{QF;%D)XD(oY(DU{QG%{e><;}x%-dbZ0qXuQxA#S*3u}u88>#U zRt1J>X=qJBAx3=BQikG8T;OU;I&E`88pM50|#Nz9ev#G-~ zcJte<4j;ok1rO1uD+-G~g@;IVE=a7{YIA2DIaWE$3%t>+H8b=8P7jCiG+z zJ=5ne$n%#~9kMLWai6D3y+gtGg1Q{^ZoZW%)K))FQ<(d6H9@z`u_?jo^@SB?67 z^Hua(!$~$OZTY>!RXFS^-N~AT?+Jru{1I-#=F&W8=w_I4woPTh0zsNM#=fV0N6pKh zZiw{1@;l3Pr4YE0K>X2vlpo415a6fm`KvtgL-zdL{t!X`T>NW$^Fu-TX~#F)xmo_M zr~JA4*O$wW7U-urVg9}P_lD?C7r)+IKk)I>rf`0^`1c3TpPqi@_>Y_JrxD}-?&%-z zy+7TR{DRpZ@c(I=1b?{uhpXUEZ@=>M2bzBx%AH@m{S&DF^z~~p{2nro8(;sAzZ%Nu TH%|t@e-(4lTL8c*=*R3oxFuew diff --git a/deployments/s3_express/settings.s3express.example b/deployments/s3_express/settings.s3express.example index 88d3dae5..669532d8 100644 --- a/deployments/s3_express/settings.s3express.example +++ b/deployments/s3_express/settings.s3express.example @@ -1,8 +1,8 @@ S3_BUCKET_NAME=my-bucket-name--usw2-az1--x-s3 -AWS_ACCESS_KEY_ID= -AWS_SECRET_ACCESS_KEY= -AWS_SESSION_TOKEN= -S3_SERVER=my-bucket-name--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com +AWS_ACCESS_KEY_ID=ZZZZZZZZZZZZZZZZZZZZ +AWS_SECRET_ACCESS_KEY=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +AWS_SESSION_TOKEN=bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +S3_SERVER=s3express-usw2-az1.us-west-2.amazonaws.com S3_SERVER_PORT=443 S3_SERVER_PROTO=https S3_REGION=us-west-2 diff --git a/oss/etc/nginx/templates/upstreams.conf.template b/oss/etc/nginx/templates/upstreams.conf.template index 9e93a857..402ea858 100644 --- a/oss/etc/nginx/templates/upstreams.conf.template +++ b/oss/etc/nginx/templates/upstreams.conf.template @@ -7,5 +7,5 @@ upstream storage_urls { # Be sure to specify the port in the S3_SERVER and be sure that port # corresponds to the https/http in the proxy_pass directive. - server ${S3_SERVER}:${S3_SERVER_PORT}; + server ${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}; } diff --git a/plus/etc/nginx/templates/upstreams.conf.template b/plus/etc/nginx/templates/upstreams.conf.template index 5074cd14..67191ab8 100644 --- a/plus/etc/nginx/templates/upstreams.conf.template +++ b/plus/etc/nginx/templates/upstreams.conf.template @@ -9,5 +9,5 @@ upstream storage_urls { # Be sure to specify the port in the S3_SERVER and be sure that port # corresponds to the https/http in the proxy_pass directive. 
- server ${S3_SERVER}:${S3_SERVER_PORT} resolve; + server ${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT} resolve; } From cdd150de2648f118d7ebb3d96a86ce1ebb14eaf2 Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Tue, 23 Apr 2024 08:47:03 -0700 Subject: [PATCH 05/12] experimental refactor to normalize bucket naming needs --- common/docker-entrypoint.sh | 6 ++++++ common/etc/nginx/include/s3gateway.js | 17 ++++------------- .../etc/nginx/templates/default.conf.template | 15 +++++++-------- oss/etc/nginx/templates/upstreams.conf.template | 2 +- .../etc/nginx/templates/upstreams.conf.template | 2 +- standalone_ubuntu_oss_install.sh | 7 +++++++ 6 files changed, 26 insertions(+), 23 deletions(-) diff --git a/common/docker-entrypoint.sh b/common/docker-entrypoint.sh index d607c804..5b8a6000 100644 --- a/common/docker-entrypoint.sh +++ b/common/docker-entrypoint.sh @@ -68,6 +68,12 @@ if [ -z "${CORS_ALLOWED_ORIGIN+x}" ]; then export CORS_ALLOWED_ORIGIN="*" fi +if [ "${S3_STYLE}" == "path" ]; then + export FINAL_S3_SERVER="${S3_SERVER}:${S3_SERVER_PORT}" +else + export FINAL_S3_SERVER="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" +fi + # Nothing is modified under this line if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then diff --git a/common/etc/nginx/include/s3gateway.js b/common/etc/nginx/include/s3gateway.js index 5cf52802..b0a6a8a7 100644 --- a/common/etc/nginx/include/s3gateway.js +++ b/common/etc/nginx/include/s3gateway.js @@ -165,12 +165,7 @@ function s3date(r) { function s3auth(r) { const bucket = process.env['S3_BUCKET_NAME']; const region = process.env['S3_REGION']; - let server; - if (S3_STYLE === 'path') { - server = process.env['S3_SERVER'] + ':' + process.env['S3_SERVER_PORT']; - } else { - server = process.env['S3_SERVER']; - } + const host = r.variables.s3_host; const sigver = process.env['AWS_SIGS_VERSION']; let signature; @@ -180,7 +175,7 @@ function s3auth(r) { let req = _s3ReqParamsForSigV2(r, bucket); signature = awssig2.signatureV2(r, req.uri, req.httpDate, credentials); } else { - let req = _s3ReqParamsForSigV4(r, bucket, server); + let req = _s3ReqParamsForSigV4(r, bucket, host); signature = awssig4.signatureV4(r, awscred.Now(), region, SERVICE, req.uri, req.queryParams, req.host, credentials); } @@ -221,15 +216,11 @@ function _s3ReqParamsForSigV2(r, bucket) { * @see {@link https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html | AWS V4 Signing Process} * @param r {NginxHTTPRequest} HTTP request object * @param bucket {string} S3 bucket associated with request - * @param server {string} S3 host associated with request + * @param host {string} S3 host associated with request * @returns {S3ReqParams} s3ReqParams object (host, uri, queryParams) * @private */ -function _s3ReqParamsForSigV4(r, bucket, server) { - let host = server; - if (S3_STYLE === 'virtual' || S3_STYLE === 'default' || S3_STYLE === undefined) { - host = bucket + '.' + host; - } +function _s3ReqParamsForSigV4(r, bucket, host) { const baseUri = s3BaseUri(r); const computed_url = !utils.parseBoolean(r.variables.forIndexPage) ? 
r.variables.uri_path diff --git a/common/etc/nginx/templates/default.conf.template b/common/etc/nginx/templates/default.conf.template index faa1e492..795a9e1b 100644 --- a/common/etc/nginx/templates/default.conf.template +++ b/common/etc/nginx/templates/default.conf.template @@ -19,11 +19,10 @@ map $uri_full_path $uri_path { default $PREFIX_LEADING_DIRECTORY_PATH$uri_full_path; } -map $S3_STYLE $s3_host_hdr { - virtual "${S3_BUCKET_NAME}.${S3_SERVER}"; - path "${S3_SERVER}:${S3_SERVER_PORT}"; - default "${S3_BUCKET_NAME}.${S3_SERVER}"; -} +# FINAL_S3_SERVER is set in the startup script +# (either ./common/docker-entrypoint.sh or ./standalone_ubuntu_oss_install.sh) +# based on the S3_STYLE configuration option. +js_var $s3_host ${FINAL_S3_SERVER}; js_var $indexIsEmpty true; js_var $forIndexPage true; @@ -141,7 +140,7 @@ server { proxy_set_header X-Amz-Security-Token $awsSessionToken; # We set the host as the bucket name to inform the S3 API of the bucket - proxy_set_header Host $s3_host_hdr; + proxy_set_header Host $s3_host; # Use keep alive connections in order to improve performance proxy_http_version 1.1; @@ -202,7 +201,7 @@ server { proxy_set_header X-Amz-Security-Token $awsSessionToken; # We set the host as the bucket name to inform the S3 API of the bucket - proxy_set_header Host $s3_host_hdr; + proxy_set_header Host $s3_host; # Use keep alive connections in order to improve performance proxy_http_version 1.1; @@ -265,7 +264,7 @@ server { proxy_set_header X-Amz-Security-Token $awsSessionToken; # We set the host as the bucket name to inform the S3 API of the bucket - proxy_set_header Host $s3_host_hdr; + proxy_set_header Host $s3_host; # Use keep alive connections in order to improve performance proxy_http_version 1.1; diff --git a/oss/etc/nginx/templates/upstreams.conf.template b/oss/etc/nginx/templates/upstreams.conf.template index 402ea858..a7e1e166 100644 --- a/oss/etc/nginx/templates/upstreams.conf.template +++ b/oss/etc/nginx/templates/upstreams.conf.template @@ -7,5 +7,5 @@ upstream storage_urls { # Be sure to specify the port in the S3_SERVER and be sure that port # corresponds to the https/http in the proxy_pass directive. - server ${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}; + server ${FINAL_S3_SERVER}; } diff --git a/plus/etc/nginx/templates/upstreams.conf.template b/plus/etc/nginx/templates/upstreams.conf.template index 67191ab8..c85f004a 100644 --- a/plus/etc/nginx/templates/upstreams.conf.template +++ b/plus/etc/nginx/templates/upstreams.conf.template @@ -9,5 +9,5 @@ upstream storage_urls { # Be sure to specify the port in the S3_SERVER and be sure that port # corresponds to the https/http in the proxy_pass directive. 
- server ${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT} resolve; + server ${FINAL_S3_SERVER} resolve; } diff --git a/standalone_ubuntu_oss_install.sh b/standalone_ubuntu_oss_install.sh index ee173499..8f62af84 100644 --- a/standalone_ubuntu_oss_install.sh +++ b/standalone_ubuntu_oss_install.sh @@ -199,6 +199,13 @@ LIMIT_METHODS_TO="GET HEAD" LIMIT_METHODS_TO_CSV="GET, HEAD" EOF fi + +if [ "${S3_STYLE}" == "path" ]; then + FINAL_S3_SERVER="${S3_SERVER}:${S3_SERVER_PORT}" +else + FINAL_S3_SERVER="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" +fi + set -o nounset # abort on unbound variable if [ -z "${CORS_ALLOWED_ORIGIN+x}" ]; then From 05ba58b429fe0edd4f9a9f10d6c8afb987d02a2d Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Tue, 23 Apr 2024 09:26:43 -0700 Subject: [PATCH 06/12] docs, add s3_style config --- common/docker-entrypoint.d/00-check-for-required-env.sh | 3 ++- common/etc/nginx/include/s3gateway.js | 3 ++- common/etc/nginx/nginx.conf | 1 + .../nginx/templates/gateway/s3_location_common.conf.template | 2 +- deployments/s3_express/settings.s3express.example | 1 + docs/getting_started.md | 3 +++ settings.example | 1 + standalone_ubuntu_oss_install.sh | 5 ++++- test.sh | 4 ++++ test/docker-compose.yaml | 1 + 10 files changed, 20 insertions(+), 4 deletions(-) diff --git a/common/docker-entrypoint.d/00-check-for-required-env.sh b/common/docker-entrypoint.d/00-check-for-required-env.sh index 604214d2..a09a76a1 100755 --- a/common/docker-entrypoint.d/00-check-for-required-env.sh +++ b/common/docker-entrypoint.d/00-check-for-required-env.sh @@ -22,7 +22,7 @@ set -e failed=0 -required=("S3_BUCKET_NAME" "S3_SERVER" "S3_SERVER_PORT" "S3_SERVER_PROTO" +required=("S3_SERVICE" "S3_BUCKET_NAME" "S3_SERVER" "S3_SERVER_PORT" "S3_SERVER_PROTO" "S3_REGION" "S3_STYLE" "ALLOW_DIRECTORY_LIST" "AWS_SIGS_VERSION" "CORS_ENABLED") @@ -122,6 +122,7 @@ if [ $failed -gt 0 ]; then fi echo "S3 Backend Environment" +echo "Service: ${S3_SERVICE}" echo "Access Key ID: ${AWS_ACCESS_KEY_ID}" echo "Origin: ${S3_SERVER_PROTO}://${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" echo "Region: ${S3_REGION}" diff --git a/common/etc/nginx/include/s3gateway.js b/common/etc/nginx/include/s3gateway.js index b0a6a8a7..67687744 100644 --- a/common/etc/nginx/include/s3gateway.js +++ b/common/etc/nginx/include/s3gateway.js @@ -39,6 +39,7 @@ _requireEnvVars('S3_SERVER_PORT'); _requireEnvVars('S3_REGION'); _requireEnvVars('AWS_SIGS_VERSION'); _requireEnvVars('S3_STYLE'); +_requireEnvVars('S3_SERVICE'); /** @@ -86,7 +87,7 @@ const INDEX_PAGE = "index.html"; * Constant defining the service requests are being signed for. 
* @type {string} */ -const SERVICE = 's3express'; +const SERVICE = process.env['S3_SERVICE']; /** * Transform the headers returned from S3 such that there isn't information diff --git a/common/etc/nginx/nginx.conf b/common/etc/nginx/nginx.conf index cd938089..7b9d9c60 100644 --- a/common/etc/nginx/nginx.conf +++ b/common/etc/nginx/nginx.conf @@ -20,6 +20,7 @@ env S3_REGION; env AWS_SIGS_VERSION; env DEBUG; env S3_STYLE; +env S3_SERVICE; env ALLOW_DIRECTORY_LIST; env PROVIDE_INDEX_PAGE; env APPEND_SLASH_FOR_POSSIBLE_DIRECTORY; diff --git a/common/etc/nginx/templates/gateway/s3_location_common.conf.template b/common/etc/nginx/templates/gateway/s3_location_common.conf.template index f65f9987..30501642 100644 --- a/common/etc/nginx/templates/gateway/s3_location_common.conf.template +++ b/common/etc/nginx/templates/gateway/s3_location_common.conf.template @@ -19,7 +19,7 @@ proxy_set_header Authorization $s3auth; proxy_set_header X-Amz-Security-Token $awsSessionToken; # We set the host as the bucket name to inform the S3 API of the bucket -proxy_set_header Host $s3_host_hdr; +proxy_set_header Host $s3_host; # Use keep alive connections in order to improve performance proxy_http_version 1.1; diff --git a/deployments/s3_express/settings.s3express.example b/deployments/s3_express/settings.s3express.example index 669532d8..f271a127 100644 --- a/deployments/s3_express/settings.s3express.example +++ b/deployments/s3_express/settings.s3express.example @@ -7,6 +7,7 @@ S3_SERVER_PORT=443 S3_SERVER_PROTO=https S3_REGION=us-west-2 S3_STYLE=virtual +S3_SERVICE=s3express DEBUG=true AWS_SIGS_VERSION=4 ALLOW_DIRECTORY_LIST=false diff --git a/docs/getting_started.md b/docs/getting_started.md index 036b8da6..06d668b0 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -26,6 +26,9 @@ running as a Container or as a Systemd service. | `S3_SERVER_PROTO` | Yes | `http`, `https` | | Protocol to used connect to S3 server | | `S3_SERVER` | Yes | | | S3 host to connect to | | `S3_STYLE` | Yes | `virtual`, `path`, `default` | `default` | The S3 host/path method.
  • `virtual` is the method that that uses DNS-style bucket+hostname:port. This is the `default` value.
  • `path` is a method that appends the bucket name as the first directory in the URI's path. This method is used by many S3 compatible services.

    See this [AWS blog article](https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/) for further information. | + +| `S3_SERVICE` | Yes |`s3`, `s3express` | `s3` | Configures the gateway to interface with either normal S3 buckets or S3 Express One Zone | + | `DEBUG` | No | `true`, `false` | `false` | Flag enabling AWS signatures debug output | | `APPEND_SLASH_FOR_POSSIBLE_DIRECTORY` | No | `true`, `false` | `false` | Flag enabling the return a 302 with a `/` appended to the path. This is independent of the behavior selected in `ALLOW_DIRECTORY_LIST` or `PROVIDE_INDEX_PAGE`. | | `DIRECTORY_LISTING_PATH_PREFIX` | No | | | In `ALLOW_DIRECTORY_LIST=true` mode [adds defined prefix to links](#configuring-directory-listing) | diff --git a/settings.example b/settings.example index b2884a63..95cdd80c 100644 --- a/settings.example +++ b/settings.example @@ -7,6 +7,7 @@ S3_SERVER_PORT=443 S3_SERVER_PROTO=https S3_REGION=us-east-1 S3_STYLE=virtual +S3_SERVICE=s3 DEBUG=false AWS_SIGS_VERSION=4 ALLOW_DIRECTORY_LIST=false diff --git a/standalone_ubuntu_oss_install.sh b/standalone_ubuntu_oss_install.sh index 8f62af84..bc637e3b 100644 --- a/standalone_ubuntu_oss_install.sh +++ b/standalone_ubuntu_oss_install.sh @@ -30,7 +30,7 @@ fi failed=0 -required=("S3_BUCKET_NAME" "S3_SERVER" "S3_SERVER_PORT" "S3_SERVER_PROTO" +required=("S3_SERVICE" "S3_BUCKET_NAME" "S3_SERVER" "S3_SERVER_PORT" "S3_SERVER_PROTO" "S3_REGION" "S3_STYLE" "ALLOW_DIRECTORY_LIST" "AWS_SIGS_VERSION") if [ ! -z ${AWS_CONTAINER_CREDENTIALS_RELATIVE_URI+x} ]; then @@ -162,6 +162,8 @@ S3_SERVER_PROTO=${S3_SERVER_PROTO} S3_SERVER=${S3_SERVER} # The S3 host/path method - 'virtual', 'path' or 'default' S3_STYLE=${S3_STYLE:-'default'} +# Name of S3 service - 's3' or 's3express' +S3_SERVICE=${S3_SERVICE:-'s3'} # Flag (true/false) enabling AWS signatures debug output (default: false) DEBUG=${DEBUG:-'false'} # Cache size limit @@ -346,6 +348,7 @@ env S3_REGION; env AWS_SIGS_VERSION; env DEBUG; env S3_STYLE; +env S3_SERVICE; env ALLOW_DIRECTORY_LIST; events { diff --git a/test.sh b/test.sh index a12fd28e..44451d56 100755 --- a/test.sh +++ b/test.sh @@ -351,6 +351,7 @@ runUnitTestWithOutSessionToken() { --workdir /var/tmp \ -e "DEBUG=true" \ -e "S3_STYLE=virtual" \ + -e "S3_SERVICE=s3" \ -e "AWS_ACCESS_KEY_ID=unit_test" \ -e "AWS_SECRET_ACCESS_KEY=unit_test" \ -e "S3_BUCKET_NAME=unit_test" \ @@ -370,6 +371,7 @@ runUnitTestWithOutSessionToken() { --workdir /var/tmp \ -e "DEBUG=true" \ -e "S3_STYLE=virtual" \ + -e "S3_SERVICE=s3" \ -e "AWS_ACCESS_KEY_ID=unit_test" \ -e "AWS_SECRET_ACCESS_KEY=unit_test" \ -e "S3_BUCKET_NAME=unit_test" \ @@ -396,6 +398,7 @@ runUnitTestWithSessionToken() { --workdir /var/tmp \ -e "DEBUG=true" \ -e "S3_STYLE=virtual" \ + -e "S3_SERVICE=s3" \ -e "AWS_ACCESS_KEY_ID=unit_test" \ -e "AWS_SECRET_ACCESS_KEY=unit_test" \ -e "AWS_SESSION_TOKEN=unit_test" \ @@ -416,6 +419,7 @@ runUnitTestWithSessionToken() { --workdir /var/tmp \ -e "DEBUG=true" \ -e "S3_STYLE=virtual" \ + -e "S3_SERVICE=s3" \ -e "AWS_ACCESS_KEY_ID=unit_test" \ -e "AWS_SECRET_ACCESS_KEY=unit_test" \ -e "AWS_SESSION_TOKEN=unit_test" \ diff --git a/test/docker-compose.yaml b/test/docker-compose.yaml index 44c58763..0d75ca55 100644 --- a/test/docker-compose.yaml +++ b/test/docker-compose.yaml @@ -23,6 +23,7 @@ services: S3_REGION: "us-east-1" DEBUG: "true" S3_STYLE: "virtual" + S3_SERVICE: "s3" ALLOW_DIRECTORY_LIST: PROVIDE_INDEX_PAGE: APPEND_SLASH_FOR_POSSIBLE_DIRECTORY: From fee3ef03a16809390ab957364dc7626e3aaa5f75 Mon Sep 17 00:00:00 
2001 From: Javier Evans Date: Tue, 23 Apr 2024 10:30:04 -0700 Subject: [PATCH 07/12] fix tests for bucket prepended hostname --- test/docker-compose.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/test/docker-compose.yaml b/test/docker-compose.yaml index 0d75ca55..4bcbbd9b 100644 --- a/test/docker-compose.yaml +++ b/test/docker-compose.yaml @@ -40,6 +40,7 @@ services: minio: image: quay.io/minio/minio:RELEASE.2023-06-09T07-32-12Z + hostname: bucket-1.minio ports: - "9090:9000/tcp" restart: "no" From ce773b6b06ba8ff8e7f748f465190e8ef84df376 Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Wed, 24 Apr 2024 11:18:10 -0700 Subject: [PATCH 08/12] move to using as the path style --- common/docker-entrypoint.sh | 21 ++++++++++++--- .../etc/nginx/templates/default.conf.template | 2 +- docs/getting_started.md | 22 +++++++++++++++- .../nginx/templates/upstreams.conf.template | 5 +--- .../nginx/templates/upstreams.conf.template | 4 +-- standalone_ubuntu_oss_install.sh | 26 ++++++++++++++++--- 6 files changed, 65 insertions(+), 15 deletions(-) diff --git a/common/docker-entrypoint.sh b/common/docker-entrypoint.sh index 5b8a6000..86886027 100644 --- a/common/docker-entrypoint.sh +++ b/common/docker-entrypoint.sh @@ -68,12 +68,27 @@ if [ -z "${CORS_ALLOWED_ORIGIN+x}" ]; then export CORS_ALLOWED_ORIGIN="*" fi -if [ "${S3_STYLE}" == "path" ]; then - export FINAL_S3_SERVER="${S3_SERVER}:${S3_SERVER_PORT}" +# This is the primary logic to determine the s3 host used for the +# upstream (the actual proxying action) as well as the `Host` header +# +# It is currently slightly more complex than necessary because we are transitioning +# to a new logic which is defined by "virtual-v2". "virtual-v2" is the recommended setting +# for all deployments. + +# S3_UPSTREAM needs the port specified. The port must +# correspond to https/http in the proxy_pass directive. +if [ "${S3_STYLE}" == "virtual-v2" ]; then + export S3_UPSTREAM="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" + export S3_HOST_HEADER="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" +elif [ "${S3_STYLE}" == "path" ]; then + export S3_UPSTREAM="${S3_SERVER}:${S3_SERVER_PORT}" + export S3_HOST_HEADER="${S3_SERVER}:${S3_SERVER_PORT}" else - export FINAL_S3_SERVER="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" + export S3_UPSTREAM="${S3_SERVER}:${S3_SERVER_PORT}" + export S3_HOST_HEADER="${S3_BUCKET_NAME}.${S3_SERVER}" fi + # Nothing is modified under this line if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then diff --git a/common/etc/nginx/templates/default.conf.template b/common/etc/nginx/templates/default.conf.template index 795a9e1b..692bf5ce 100644 --- a/common/etc/nginx/templates/default.conf.template +++ b/common/etc/nginx/templates/default.conf.template @@ -22,7 +22,7 @@ map $uri_full_path $uri_path { # FINAL_S3_SERVER is set in the startup script # (either ./common/docker-entrypoint.sh or ./standalone_ubuntu_oss_install.sh) # based on the S3_STYLE configuration option. -js_var $s3_host ${FINAL_S3_SERVER}; +js_var $s3_host ${S3_HOST_HEADER}; js_var $indexIsEmpty true; js_var $forIndexPage true; diff --git a/docs/getting_started.md b/docs/getting_started.md index 06d668b0..b3c2e6d8 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -25,7 +25,7 @@ running as a Container or as a Systemd service. 
| `S3_SERVER_PORT` | Yes | | | SSL/TLS port to connect to | | `S3_SERVER_PROTO` | Yes | `http`, `https` | | Protocol to used connect to S3 server | | `S3_SERVER` | Yes | | | S3 host to connect to | -| `S3_STYLE` | Yes | `virtual`, `path`, `default` | `default` | The S3 host/path method.
  • `virtual` is the method that that uses DNS-style bucket+hostname:port. This is the `default` value.
  • `path` is a method that appends the bucket name as the first directory in the URI's path. This method is used by many S3 compatible services.

    See this [AWS blog article](https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/) for further information. | +| `S3_STYLE` | Yes | `virtual-v2` `virtual`, `path`, `default` | `default` | The S3 host/path method.
  • `virtual` and `virtual-v2` represent the method that that uses DNS-style bucket+hostname:port. `virtual` is the `default` value. **`virtual-v2` is the recommended value and will replace `virtual` completely in later version.** `virtual-v2` brings the implementation up to the latest recommendations from the AWS documentation and is required to support S3 Express One Zone. See below for details
  • `path` is a method that appends the bucket name as the first directory in the URI's path. This method is used by many S3 compatible services.

    See this [AWS blog article](https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/) for further information. | | `S3_SERVICE` | Yes |`s3`, `s3express` | `s3` | Configures the gateway to interface with either normal S3 buckets or S3 Express One Zone | @@ -66,6 +66,26 @@ There are few optional environment variables that can be used. be coded to the current AWS region. This environment variable will be ignored if `STS_ENDPOINT` is set. Valid options are: `global` (default) or `regional`. +### Choosing a `S3_STYLE` Setting +If you are using AWS S3 or S3 Express One Zone, use `virtual-v2`. We are maintaining `virtual` temporarily until we hear from the community that `virtual-v2` does not cause issues - or we introduce a versioning system that allows us to safely flag breaking changes. **`virtual-v2` is not expected to be a breaking change** but we are being cautious. + +A full reference for S3 addressing styles may be found [here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html) + +Here is the difference between `virtual` and `virtual-v2`: +#### virtual +* Proxied endpoint: `S3_SERVER:S3_SERVER_PORT` +* `Host` header: `S3_BUCKET_NAME}.S3_SERVER` +* `host` field in the [S3 V4 `CanonicalHeaders`](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html): `S3_BUCKET_NAME}.S3_SERVER` + +#### virtual-v2 +All items are set to the same value: +* Proxied endpoint: `S3_BUCKET_NAME.S3_SERVER:S3_SERVER_PORT` +* `Host` header: `S3_BUCKET_NAME.S3_SERVER:S3_SERVER_PORT` +* `host` field in the [S3 V4 `CanonicalHeaders`](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html): `S3_BUCKET_NAME.S3_SERVER:S3_SERVER_PORT` + +#### path +`path` style routing does not prepend the bucket name to the host, and includes it as the first segment in the request path. AWS is actively trying to move away from this method. Some S3 compatible object stores may require that you use this setting - but try to avoid it if your object store works with `virtual-v2`. + ### Configuring Directory Listing diff --git a/oss/etc/nginx/templates/upstreams.conf.template b/oss/etc/nginx/templates/upstreams.conf.template index a7e1e166..383c1667 100644 --- a/oss/etc/nginx/templates/upstreams.conf.template +++ b/oss/etc/nginx/templates/upstreams.conf.template @@ -4,8 +4,5 @@ resolver ${DNS_RESOLVERS}; upstream storage_urls { # Upstreams are not refreshed until NGINX configuration is reloaded. # NGINX Plus will dynamically reload upstreams when DNS records are changed. - - # Be sure to specify the port in the S3_SERVER and be sure that port - # corresponds to the https/http in the proxy_pass directive. - server ${FINAL_S3_SERVER}; + server ${S3_UPSTREAM}; } diff --git a/plus/etc/nginx/templates/upstreams.conf.template b/plus/etc/nginx/templates/upstreams.conf.template index c85f004a..6c87e6bd 100644 --- a/plus/etc/nginx/templates/upstreams.conf.template +++ b/plus/etc/nginx/templates/upstreams.conf.template @@ -7,7 +7,5 @@ resolver ${DNS_RESOLVERS}; upstream storage_urls { zone s3_backends 64k; - # Be sure to specify the port in the S3_SERVER and be sure that port - # corresponds to the https/http in the proxy_pass directive. 
- server ${FINAL_S3_SERVER} resolve; + server ${S3_UPSTREAM} resolve; } diff --git a/standalone_ubuntu_oss_install.sh b/standalone_ubuntu_oss_install.sh index bc637e3b..4b68cae8 100644 --- a/standalone_ubuntu_oss_install.sh +++ b/standalone_ubuntu_oss_install.sh @@ -202,10 +202,30 @@ LIMIT_METHODS_TO_CSV="GET, HEAD" EOF fi -if [ "${S3_STYLE}" == "path" ]; then - FINAL_S3_SERVER="${S3_SERVER}:${S3_SERVER_PORT}" +# This is the primary logic to determine the s3 host used for the +# upstream (the actual proxying action) as well as the `Host` header +# +# It is currently slightly more complex than necessary because we are transitioning +# to a new logic which is defined by "virtual-v2". "virtual-v2" is the recommended setting +# for all deployments. + +# S3_UPSTREAM needs the port specified. The port must +# correspond to https/http in the proxy_pass directive. +if [ "${S3_STYLE}" == "virtual-v2" ]; then + cat >> "/etc/nginx/environment" << EOF +S3_UPSTREAM="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" +S3_HOST_HEADER="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" +EOF +elif [ "${S3_STYLE}" == "path" ]; then + cat >> "/etc/nginx/environment" << EOF +S3_UPSTREAM="${S3_SERVER}:${S3_SERVER_PORT}" +S3_HOST_HEADER="${S3_SERVER}:${S3_SERVER_PORT}" +EOF else - FINAL_S3_SERVER="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}" + cat >> "/etc/nginx/environment" << EOF +S3_UPSTREAM="${S3_SERVER}:${S3_SERVER_PORT}" +S3_HOST_HEADER="${S3_BUCKET_NAME}.${S3_SERVER}" +EOF fi set -o nounset # abort on unbound variable From 1d109896f9b2325a83f40b7c5e4fe314abb10bb3 Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Wed, 24 Apr 2024 11:59:19 -0700 Subject: [PATCH 09/12] add matrix to maintain coverage for both path styles --- .github/workflows/main.yml | 5 ++++- test.sh | 8 ++++---- test/docker-compose.yaml | 2 +- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f7bed9dc..52675aa0 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -54,6 +54,9 @@ jobs: test-oss: runs-on: ubuntu-22.04 needs: build-oss-for-test + strategy: + matrix: + path_style: [virtual, virtual-v2] steps: - uses: actions/checkout@v4 - name: Install dependencies @@ -82,7 +85,7 @@ jobs: run: | docker load --input ${{ runner.temp }}/oss.tar - name: Run tests - stable njs version - run: ./test.sh --type oss + run: S3_STYLE=${{ matrix.path_style }} ./test.sh --type oss build-latest-njs-for-test: runs-on: ubuntu-22.04 diff --git a/test.sh b/test.sh index 44451d56..772b9cff 100755 --- a/test.sh +++ b/test.sh @@ -350,7 +350,7 @@ runUnitTestWithOutSessionToken() { -v "$(pwd)/test/unit:/var/tmp" \ --workdir /var/tmp \ -e "DEBUG=true" \ - -e "S3_STYLE=virtual" \ + -e "S3_STYLE=virtual-v2" \ -e "S3_SERVICE=s3" \ -e "AWS_ACCESS_KEY_ID=unit_test" \ -e "AWS_SECRET_ACCESS_KEY=unit_test" \ @@ -370,7 +370,7 @@ runUnitTestWithOutSessionToken() { -v "$(pwd)/test/unit:/var/tmp" \ --workdir /var/tmp \ -e "DEBUG=true" \ - -e "S3_STYLE=virtual" \ + -e "S3_STYLE=virtual-v2" \ -e "S3_SERVICE=s3" \ -e "AWS_ACCESS_KEY_ID=unit_test" \ -e "AWS_SECRET_ACCESS_KEY=unit_test" \ @@ -397,7 +397,7 @@ runUnitTestWithSessionToken() { -v "$(pwd)/test/unit:/var/tmp" \ --workdir /var/tmp \ -e "DEBUG=true" \ - -e "S3_STYLE=virtual" \ + -e "S3_STYLE=virtual-v2" \ -e "S3_SERVICE=s3" \ -e "AWS_ACCESS_KEY_ID=unit_test" \ -e "AWS_SECRET_ACCESS_KEY=unit_test" \ @@ -418,7 +418,7 @@ runUnitTestWithSessionToken() { -v "$(pwd)/test/unit:/var/tmp" \ --workdir /var/tmp \ -e "DEBUG=true" \ - -e 
"S3_STYLE=virtual" \ + -e "S3_STYLE=virtual-v2" \ -e "S3_SERVICE=s3" \ -e "AWS_ACCESS_KEY_ID=unit_test" \ -e "AWS_SECRET_ACCESS_KEY=unit_test" \ diff --git a/test/docker-compose.yaml b/test/docker-compose.yaml index 4bcbbd9b..2faa9c8a 100644 --- a/test/docker-compose.yaml +++ b/test/docker-compose.yaml @@ -22,7 +22,7 @@ services: S3_SERVER_PROTO: "http" S3_REGION: "us-east-1" DEBUG: "true" - S3_STYLE: "virtual" + S3_STYLE: "${S3_STYLE:-virtual-v2}" S3_SERVICE: "s3" ALLOW_DIRECTORY_LIST: PROVIDE_INDEX_PAGE: From de7cb16cc9a015d7acb14b442beddd4ab5f67102 Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Wed, 24 Apr 2024 12:15:16 -0700 Subject: [PATCH 10/12] fix some omissions --- common/etc/nginx/templates/default.conf.template | 2 +- deployments/s3_express/settings.s3express.example | 2 +- settings.example | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/common/etc/nginx/templates/default.conf.template b/common/etc/nginx/templates/default.conf.template index 692bf5ce..52d1e55f 100644 --- a/common/etc/nginx/templates/default.conf.template +++ b/common/etc/nginx/templates/default.conf.template @@ -19,7 +19,7 @@ map $uri_full_path $uri_path { default $PREFIX_LEADING_DIRECTORY_PATH$uri_full_path; } -# FINAL_S3_SERVER is set in the startup script +# S3_HOST_HEADER is set in the startup script # (either ./common/docker-entrypoint.sh or ./standalone_ubuntu_oss_install.sh) # based on the S3_STYLE configuration option. js_var $s3_host ${S3_HOST_HEADER}; diff --git a/deployments/s3_express/settings.s3express.example b/deployments/s3_express/settings.s3express.example index f271a127..3dcc2c6f 100644 --- a/deployments/s3_express/settings.s3express.example +++ b/deployments/s3_express/settings.s3express.example @@ -6,7 +6,7 @@ S3_SERVER=s3express-usw2-az1.us-west-2.amazonaws.com S3_SERVER_PORT=443 S3_SERVER_PROTO=https S3_REGION=us-west-2 -S3_STYLE=virtual +S3_STYLE=virtual-v2 S3_SERVICE=s3express DEBUG=true AWS_SIGS_VERSION=4 diff --git a/settings.example b/settings.example index 95cdd80c..ca71efc9 100644 --- a/settings.example +++ b/settings.example @@ -6,7 +6,7 @@ S3_SERVER=s3.us-east-1.amazonaws.com S3_SERVER_PORT=443 S3_SERVER_PROTO=https S3_REGION=us-east-1 -S3_STYLE=virtual +S3_STYLE=virtual-v2 S3_SERVICE=s3 DEBUG=false AWS_SIGS_VERSION=4 From 2d8b6a0577ffae9214dfd9a08697deb884c2f237 Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Wed, 24 Apr 2024 14:16:56 -0700 Subject: [PATCH 11/12] default to s3 as a service type if variable not provided --- common/etc/nginx/include/s3gateway.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/etc/nginx/include/s3gateway.js b/common/etc/nginx/include/s3gateway.js index 67687744..d9e016a8 100644 --- a/common/etc/nginx/include/s3gateway.js +++ b/common/etc/nginx/include/s3gateway.js @@ -87,7 +87,7 @@ const INDEX_PAGE = "index.html"; * Constant defining the service requests are being signed for. 
* @type {string} */ -const SERVICE = process.env['S3_SERVICE']; +const SERVICE = process.env['S3_SERVICE'] || "s3"; /** * Transform the headers returned from S3 such that there isn't information From 437cc07d04b9f937ecebfa028a3774694f6aa3af Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Wed, 24 Apr 2024 14:56:50 -0700 Subject: [PATCH 12/12] fix readme --- docs/getting_started.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/docs/getting_started.md b/docs/getting_started.md index b3c2e6d8..e1e6d072 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -25,17 +25,15 @@ running as a Container or as a Systemd service. | `S3_SERVER_PORT` | Yes | | | SSL/TLS port to connect to | | `S3_SERVER_PROTO` | Yes | `http`, `https` | | Protocol to used connect to S3 server | | `S3_SERVER` | Yes | | | S3 host to connect to | -| `S3_STYLE` | Yes | `virtual-v2` `virtual`, `path`, `default` | `default` | The S3 host/path method.
  • `virtual` and `virtual-v2` represent the method that that uses DNS-style bucket+hostname:port. `virtual` is the `default` value. **`virtual-v2` is the recommended value and will replace `virtual` completely in later version.** `virtual-v2` brings the implementation up to the latest recommendations from the AWS documentation and is required to support S3 Express One Zone. See below for details
  • `path` is a method that appends the bucket name as the first directory in the URI's path. This method is used by many S3 compatible services.

    See this [AWS blog article](https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/) for further information. | - -| `S3_SERVICE` | Yes |`s3`, `s3express` | `s3` | Configures the gateway to interface with either normal S3 buckets or S3 Express One Zone | - +| `S3_STYLE` | Yes | `virtual-v2`, `virtual`, `path`, `default` | `default` | The S3 host/path method.
  • `virtual` and `virtual-v2` represent the method that uses DNS-style bucket+hostname:port. The `default` is the same as `virtual`. In the future, the `default` value will become `virtual-v2`. See [Choosing a `S3_STYLE` Setting](#user-content-choosing-a-s3_style-setting) below for details.
  • `path` is a method that appends the bucket name as the first directory in the URI's path. This method is used by many S3 compatible services.

    See this [AWS blog article](https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/) for further information. |
+| `S3_SERVICE` | Yes | `s3`, `s3express` | `s3` | Configures the gateway to interface with either normal S3 buckets or S3 Express One Zone |
 | `DEBUG` | No | `true`, `false` | `false` | Flag enabling AWS signatures debug output |
 | `APPEND_SLASH_FOR_POSSIBLE_DIRECTORY` | No | `true`, `false` | `false` | Flag enabling the return a 302 with a `/` appended to the path. This is independent of the behavior selected in `ALLOW_DIRECTORY_LIST` or `PROVIDE_INDEX_PAGE`. |
 | `DIRECTORY_LISTING_PATH_PREFIX` | No | | | In `ALLOW_DIRECTORY_LIST=true` mode [adds defined prefix to links](#configuring-directory-listing) |
 | `DNS_RESOLVERS` | No | | | DNS resolvers (separated by single spaces) to configure NGINX with |
 | `PROXY_CACHE_MAX_SIZE` | No | | `10g` | Limits cache size |
-| `PROXY_CACHE_INACTIVE` | No | | `60m` | Cached data that are not accessed during the time specified by the parameter get removed from the cache regardless of their freshness
-| `PROXY_CACHE_SLICE_SIZE` | No | | `1m` | For requests with a `Range` header included, determines the size of the chunks in which the file is fetched. Values much smaller than the requests can lead to inefficiencies due to reading and writing many files. See [below for more details](#byte-range-requests-and-caching) | |
+| `PROXY_CACHE_INACTIVE` | No | | `60m` | Cached data that are not accessed during the time specified by the parameter get removed from the cache regardless of their freshness |
+| `PROXY_CACHE_SLICE_SIZE` | No | | `1m` | For requests with a `Range` header included, determines the size of the chunks in which the file is fetched. Values much smaller than the requests can lead to inefficiencies due to reading and writing many files. See [below for more details](#byte-range-requests-and-caching) |
 | `PROXY_CACHE_VALID_OK` | No | | `1h` | Sets caching time for response code 200 and 302 |
 | `PROXY_CACHE_VALID_NOTFOUND` | No | | `1m` | Sets caching time for response code 404 |
 | `PROXY_CACHE_VALID_FORBIDDEN` | No | | `30s` | Sets caching time for response code 403 |
@@ -66,7 +65,10 @@ There are few optional environment variables that can be used.
 `STS_ENDPOINT` is set. Valid options are: `global` (default) or `regional`.
 
 ### Choosing a `S3_STYLE` Setting
-If you are using AWS S3 or S3 Express One Zone, use `virtual-v2`. We are maintaining `virtual` temporarily until we hear from the community that `virtual-v2` does not cause issues - or we introduce a versioning system that allows us to safely flag breaking changes. **`virtual-v2` is not expected to be a breaking change** but we are being cautious.
+**If you are using AWS S3 or S3 Express One Zone, use `virtual-v2`.** We are maintaining `virtual` temporarily until we hear from the community that `virtual-v2` does not cause issues - or we introduce a versioning system that allows us to safely flag breaking changes.
+Until then, `virtual` works as before, and `default` still causes the `virtual` behavior to be used.
+
+**`virtual-v2` is not expected to be a breaking change** but we are being cautious.
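+
+The sketch below is illustrative only (the bucket name and endpoint are hypothetical); it mirrors how `common/docker-entrypoint.sh` derives the proxied endpoint and the `Host` header from `S3_STYLE`:
+
+```bash
+S3_BUCKET_NAME="my-bucket"
+S3_SERVER="s3.us-east-1.amazonaws.com"
+S3_SERVER_PORT="443"
+
+case "${S3_STYLE:-default}" in
+  virtual-v2)  # bucket-qualified host for both the upstream and the Host header
+    S3_UPSTREAM="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}"
+    S3_HOST_HEADER="${S3_BUCKET_NAME}.${S3_SERVER}:${S3_SERVER_PORT}"
+    ;;
+  path)        # bare server host; the bucket becomes the first path segment
+    S3_UPSTREAM="${S3_SERVER}:${S3_SERVER_PORT}"
+    S3_HOST_HEADER="${S3_SERVER}:${S3_SERVER_PORT}"
+    ;;
+  *)           # virtual / default: bucket-qualified Host header, bare upstream
+    S3_UPSTREAM="${S3_SERVER}:${S3_SERVER_PORT}"
+    S3_HOST_HEADER="${S3_BUCKET_NAME}.${S3_SERVER}"
+    ;;
+esac
+
+echo "upstream=${S3_UPSTREAM}  host_header=${S3_HOST_HEADER}"
+```
+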
 A full reference for S3 addressing styles may be found [here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html)
@@ -150,7 +151,7 @@ You may make byte-range requests and normal requests for the same file and NGINX
 ## Usage with AWS S3 Express One Zone
 The gateway may be used to proxy files in the AWS S3 Express One Zone product (also called Directory Buckets).
 
-To do so, be sure that `S3_STYLE` is set to `virtual`. Additionally, the `S3_SERVER` configuration must be set a combination of the bucket name and the [Zonal Endpoint](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints).
+To do so, be sure that `S3_STYLE` is set to `virtual-v2`. Additionally, the `S3_SERVER` configuration must be set to a combination of the bucket name and the [Zonal Endpoint](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints).
 
 ### Directory Bucket Names
 See the [official documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) for the most up to date rules on Directory Bucket naming.
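+
+As a rough pre-flight check (a sketch only; the AWS naming rules linked above are authoritative), you can confirm that a candidate name at least carries the `--azid--x-s3` suffix described earlier in these docs:
+
+```bash
+bucket="my-bucket-name--usw2-az1--x-s3"   # hypothetical name
+if [[ "$bucket" =~ --[a-z0-9]+-az[0-9]+--x-s3$ ]]; then
+  echo "name ends with the expected bucket-base-name--azid--x-s3 suffix"
+else
+  echo "missing the --azid--x-s3 suffix required for directory buckets"
+fi
+```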