Author: Jason White [github@jasonwhite.io]
Hash: ae238af5a37632ed7c3e19834c65bcf1f15b54bd
Timestamp: Sun, 16 Feb 2020 20:57:03 +0000

4 files changed, 40 insertions(+), 32 deletions(-)
Add retry on initial S3 connection

This helps alleviate race conditions when multiple services start up at once (e.g. via docker-compose, where the S3 endpoint may not yet be accepting connections when the app first checks that the bucket exists).
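
The mechanism is small: the one-shot HeadBucket startup check is wrapped in futures-backoff's `retry`, which re-issues the request with backoff when it fails. A minimal sketch of the pattern, assuming the futures 0.1 / futures-backoff 0.1 / rusoto APIs this commit builds against (`check_bucket` is a hypothetical helper, not part of the commit; the real version lives in `Backend::with_client` in the diff below):

    use futures::Future;
    use futures_backoff::retry;
    use rusoto_core::RusotoError;
    use rusoto_s3::{HeadBucketError, HeadBucketRequest, S3};

    /// Hypothetical helper: keep re-issuing the HeadBucket check with
    /// the crate's default backoff strategy until it succeeds or the
    /// retry limit is exhausted.
    fn check_bucket<C>(
        client: C,
        bucket: String,
    ) -> impl Future<Item = (), Error = RusotoError<HeadBucketError>>
    where
        C: S3,
    {
        retry(move || {
            // The closure runs once per attempt, so the request is
            // rebuilt (cloned) each time.
            client.head_bucket(HeadBucketRequest {
                bucket: bucket.clone(),
            })
        })
    }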
diff --git a/Dockerfile b/Dockerfile
index 97490eb..056d6c7 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM rust:1.39 as build
+FROM rust:1.41 as build
 
 ENV CARGO_BUILD_TARGET=x86_64-unknown-linux-musl
 
diff --git a/docker-compose.minio.yml b/docker-compose.minio.yml
index 61f7ac8..5300485 100644
--- a/docker-compose.minio.yml
+++ b/docker-compose.minio.yml
@@ -14,21 +14,21 @@ services:
     command: ["server", "/data"]
   app:
     image: jasonwhite0/rudolfs:latest
-    # build:
-    #   context: .
-    #   dockerfile: Dockerfile
+    #build:
+    #  context: .
+    #  dockerfile: Dockerfile
     ports:
       - "8081:8080"
     volumes:
       - data:/data
     restart: always
     environment:
-      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
-      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
-      - AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}
-      - LFS_ENCRYPTION_KEY=${LFS_ENCRYPTION_KEY}
-      - LFS_S3_BUCKET=${LFS_S3_BUCKET}
-      - LFS_MAX_CACHE_SIZE=${LFS_MAX_CACHE_SIZE}
+      - AWS_ACCESS_KEY_ID
+      - AWS_SECRET_ACCESS_KEY
+      - AWS_DEFAULT_REGION
+      - LFS_ENCRYPTION_KEY
+      - LFS_S3_BUCKET
+      - LFS_MAX_CACHE_SIZE
       - AWS_S3_ENDPOINT=http://minio:9000
     entrypoint:
       - /tini
diff --git a/docker-compose.yml b/docker-compose.yml
index 7e8ede8..35c3ae2 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -12,12 +12,12 @@ services:
       - data:/data
     restart: always
     environment:
-      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
-      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
-      - AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}
-      - LFS_ENCRYPTION_KEY=${LFS_ENCRYPTION_KEY}
-      - LFS_S3_BUCKET=${LFS_S3_BUCKET}
-      - LFS_MAX_CACHE_SIZE=${LFS_MAX_CACHE_SIZE}
+      - AWS_ACCESS_KEY_ID
+      - AWS_SECRET_ACCESS_KEY
+      - AWS_DEFAULT_REGION
+      - LFS_ENCRYPTION_KEY
+      - LFS_S3_BUCKET
+      - LFS_MAX_CACHE_SIZE
     entrypoint:
       - /tini
       - --
diff --git a/src/storage/s3.rs b/src/storage/s3.rs
index 79f718d..16e2569 100644
--- a/src/storage/s3.rs
+++ b/src/storage/s3.rs
@@ -20,7 +20,9 @@
 use bytes::Bytes;
 use derive_more::{Display, From};
 use futures::{future, stream, Future, Stream};
+use futures_backoff::retry;
 use http::StatusCode;
+use log;
 use rusoto_core::{Region, RusotoError};
 use rusoto_s3::{
     GetObjectError, GetObjectRequest, HeadBucketError, HeadBucketRequest,
@@ -116,31 +118,37 @@ impl Backend {
     }
 }
 
-impl<C> Backend<C>
-where
-    C: S3,
-{
+impl<C> Backend<C> {
     pub fn with_client(
         client: C,
         bucket: String,
         prefix: String,
-    ) -> impl Future<Item = Self, Error = Error> {
-        // Peform a HEAD operation to check that the bucket exists and that our
-        // credentials work. This helps catch very common errors early on
-        // application startup.
+    ) -> impl Future<Item = Self, Error = Error>
+    where
+        C: S3 + Clone,
+    {
+        // Perform a HEAD operation to check that the bucket exists and that
+        // our credentials work. This helps catch very common
+        // errors early on application startup.
         let req = HeadBucketRequest {
             bucket: bucket.clone(),
         };
 
-        client
-            .head_bucket(req)
-            .map_err(InitError::from)
-            .from_err()
-            .map(move |()| Backend {
-                client,
-                bucket,
-                prefix,
+        let c = client.clone();
+
+        retry(move || {
+            c.head_bucket(req.clone()).map_err(|e| {
+                log::error!("Failed to query S3 bucket ('{}'). Retrying...", e);
+                e
             })
+        })
+        .map_err(InitError::from)
+        .from_err()
+        .map(move |()| Backend {
+            client,
+            bucket,
+            prefix,
+        })
     }
 
     fn key_to_path(&self, key: &StorageKey) -> String {
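
One knob the commit leaves alone: the bare `retry()` call uses futures-backoff's default strategy. If startup ordering ever needs more headroom, the crate also exposes a `Strategy` builder with an explicit retry limit and jitter (jitter helps when several retrying services restart at the same moment). A sketch under the same API assumptions as above; `startup_check` and the specific numbers are illustrative only, not part of this commit:

    use futures::Future;
    use futures_backoff::Strategy;
    use rusoto_s3::{HeadBucketRequest, S3};

    /// Hypothetical variant of the startup check with an explicit
    /// retry policy instead of the crate-default one.
    fn startup_check<C: S3>(client: &C, bucket: &str) {
        let strategy = Strategy::default()
            .with_max_retries(8) // give slow-starting peers more time
            .with_jitter(true); // avoid synchronized retry storms

        let req = HeadBucketRequest {
            bucket: bucket.to_owned(),
        };

        // Each attempt clones the request; backoff runs between attempts.
        let fut = strategy.retry(move || client.head_bucket(req.clone()));

        // Block until the bucket is reachable or retries run out.
        fut.wait().expect("S3 bucket unreachable after retries");
    }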