index.js
import { S3Client, ListObjectsV2Command, DeleteObjectsCommand } from "@aws-sdk/client-s3";
import { Upload } from "@aws-sdk/lib-storage";
import { CloudFrontClient, ListDistributionsCommand, GetDistributionCommand, CreateInvalidationCommand } from "@aws-sdk/client-cloudfront";
import { readdir, stat } from "fs/promises";
import { join } from "path";
import mime from "mime";
import * as fs from "fs";
// TODO:
// - Wrap the code in a class (e.g. s3WebsiteDeploy)
// - Check whether all methods really need to be async
// - Make some methods private?
// Cloudfront Client
const cfClient = new CloudFrontClient({ region: "us-east-1" }); // CloudFront is global, but you can still set a default region
// Get CloudFront distribution IDs for the given domain aliases
async function getDistributionsForDomains(cfDomains) {
  const uniqueCfIds = new Set();
  for (const domain of cfDomains) {
    console.log(`Looking for domain ${domain} in CloudFront distributions...`);
    let dist;
    let marker; // reset the pagination marker for each domain
    do {
      // Fetch distributions with the current marker, as results may be paginated
      const command = new ListDistributionsCommand({ Marker: marker });
      const distributions = await cfClient.send(command);
      dist = distributions.DistributionList.Items?.find((item) => item.Aliases?.Items?.includes(domain));
      marker = distributions.DistributionList.NextMarker;
    } while (!dist && marker);
    if (!dist) {
      console.error(`No CloudFront distribution found for domain ${domain}`);
      continue;
    }
    console.log(`Found CloudFront distribution with ID: ${dist.Id}`);
    uniqueCfIds.add(dist.Id);
  }
  if (uniqueCfIds.size === 0) {
    console.log(`Found no CloudFront distribution for any of the given domains: ${cfDomains}`);
    process.exit(1);
  }
  return uniqueCfIds;
}
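// Illustrative call (hypothetical domain and distribution ID, assuming one
// matching distribution exists):
//   const ids = await getDistributionsForDomains(["www.example.com"]);
//   // -> Set { "E1ABCDEF123456" }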
// Get distribution details for a specific CloudFront ID
async function getDistribution(distributionId) {
  try {
    const command = new GetDistributionCommand({ Id: distributionId });
    const response = await cfClient.send(command);
    // console.log("Distribution Details:", response.Distribution);
    return response.Distribution;
  } catch (error) {
    console.error("Error getting distribution details:", error);
    process.exit(1);
  }
}
// Create a CloudFront invalidation for the given distribution ID
async function createInvalidation(distributionId) {
  const paths = ["/*"]; // simply invalidate all files
  const timestamp = Date.now().toString(); // unique caller reference for this invalidation
  const command = new CreateInvalidationCommand({
    DistributionId: distributionId,
    InvalidationBatch: {
      CallerReference: timestamp,
      Paths: {
        Quantity: paths.length,
        Items: paths,
      },
    },
  });
  try {
    const response = await cfClient.send(command);
    console.log("CloudFront invalidation created:", response.Invalidation.Id);
  } catch (error) {
    console.error("Error creating invalidation:", error);
  }
}
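// Note: the "/*" wildcard counts as a single path against CloudFront's
// invalidation quota, even though it invalidates every cached object.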
// AWS S3 Configuration
const region = "us-east-1"; // autodetect?
const s3Client = new S3Client({ region });
// Clean up an S3 bucket by listing all objects and deleting them
async function cleanupS3Bucket(bucketName) {
  let continuationToken;
  let hasMore = true;
  console.log(`Listing and deleting contents of bucket: ${bucketName}`);
  while (hasMore) {
    try {
      // List objects in the bucket
      const listCommand = new ListObjectsV2Command({
        Bucket: bucketName,
        ContinuationToken: continuationToken,
      });
      const listResponse = await s3Client.send(listCommand);
      if (listResponse.Contents && listResponse.Contents.length > 0) {
        // Prepare objects for deletion
        const objectsToDelete = listResponse.Contents.map((item) => ({ Key: item.Key }));
        // Delete objects
        const deleteCommand = new DeleteObjectsCommand({
          Bucket: bucketName,
          Delete: {
            Objects: objectsToDelete,
          },
        });
        const deleteResponse = await s3Client.send(deleteCommand);
        console.log("Deleted objects:", deleteResponse.Deleted.map((obj) => obj.Key));
      }
      // Check if there are more objects to process
      if (listResponse.IsTruncated) {
        continuationToken = listResponse.NextContinuationToken;
      } else {
        hasMore = false;
      }
    } catch (error) {
      console.error("Error processing bucket contents:", error);
      break;
    }
  }
  console.log("Finished cleaning bucket.");
}
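// Note: ListObjectsV2 returns at most 1000 keys per page and DeleteObjects
// accepts up to 1000 keys per call, so each listed page fits into exactly
// one delete request.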
/**
 * Recursively upload files from a local directory to S3.
 * @param {string} dir - The local directory path.
 * @param {string} bucketName - The target S3 bucket.
 * @param {string} s3Prefix - S3 key prefix (folder path in the bucket).
 */
async function uploadDirectoryToS3(dir, bucketName, s3Prefix = "") {
  const files = await readdir(dir);
  for (const file of files) {
    const filePath = join(dir, file);
    const fileStat = await stat(filePath);
    if (fileStat.isDirectory()) {
      // Recursively upload subdirectory
      await uploadDirectoryToS3(filePath, bucketName, join(s3Prefix, file));
    } else {
      // Upload file
      const fileStream = fs.createReadStream(filePath);
      const mimeType = mime.getType(filePath) || "application/octet-stream";
      const s3Key = join(s3Prefix, file);
      console.log(`Uploading ${filePath} to s3://${bucketName}/${s3Key}`);
      const uploadParams = {
        Bucket: bucketName,
        Key: s3Key.replace(/\\/g, "/"), // ensure the S3 key uses forward slashes
        Body: fileStream,
        ContentType: mimeType, // set the MIME type
      };
      const upload = new Upload({ client: s3Client, params: uploadParams });
      try {
        await upload.done();
        // console.log(`Uploaded: ${s3Key}`);
      } catch (error) {
        console.error(`Failed to upload ${s3Key}:`, error);
      }
    }
  }
}
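// Note: Upload from @aws-sdk/lib-storage switches to multipart uploads for
// large bodies automatically, so this also handles files above the 5 GB
// single-PUT limit.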
// Wrapper function that performs all deployment steps
// Example call:
//   deploy(['test.com'], 'dist/')
async function deploy(domains, localDirectory) {
  // 1. Find CloudFront distributions for the given list of domains
  const cfIds = await getDistributionsForDomains(domains);
  // 2. Extract bucket names from the CloudFront distributions' origins
  const bucketNames = new Set();
  for (const id of cfIds) {
    const distribution = await getDistribution(id);
    const s3Domain = distribution.DistributionConfig.Origins.Items[0].DomainName;
    const bucketName = s3Domain.split(".")[0]; // assumes the bucket name contains no dots
    bucketNames.add(bucketName);
  }
  console.log(`Found the following CF-attached bucket names: ${[...bucketNames].join(" ")}`);
  if (bucketNames.size > 1) {
    console.warn("Are you sure you want to push your artefacts to more than one bucket? Hit Ctrl+C to abort now!");
    await new Promise((r) => setTimeout(r, 5000));
  }
  console.log("");
  console.log(`Will upload to the buckets from local dir: ${localDirectory}`);
  console.log("");
  for (const bucketName of bucketNames) {
    // 3. Clean up the S3 bucket (i.e. delete all present files)
    await cleanupS3Bucket(bucketName);
    console.log("");
    // 4. Upload new files to S3
    try {
      await uploadDirectoryToS3(localDirectory, bucketName);
      console.log(`Upload completed to ${bucketName} bucket`);
    } catch (err) {
      console.error("Error during upload:", err);
    }
  }
  console.log("");
  // 5. Invalidate CloudFront caches to ensure the new content is served
  for (const id of cfIds) {
    await createInvalidation(id);
  }
  console.log("");
  console.log("All done");
}
export { deploy };
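// Minimal usage sketch (hypothetical entry point; assumes this file is the
// package's index.js, the package is ESM, and AWS credentials are available
// in the environment):
//
//   import { deploy } from "./index.js";
//   await deploy(["www.example.com"], "dist/");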