-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Added S3 bucket support and updated docker compose and tests
- Loading branch information
1 parent
2dec181
commit 8ae3d85
Showing
10 changed files
with
2,863 additions
and
937 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,149 @@ | ||
// src/model/data/index.js

// Pick the data-access backend once at load time: when an AWS Region is
// configured in the environment we use the AWS-backed implementation
// (S3, DynamoDB); otherwise we fall back to the in-memory db.
const useAws = Boolean(process.env.AWS_REGION);
module.exports = useAws ? require('./aws') : require('./memory');
|
||
const logger = require('../../logger'); | ||
const s3Client = require('./s3Client'); | ||
const { PutObjectCommand, GetObjectCommand, DeleteObjectCommand } = require('@aws-sdk/client-s3'); | ||
|
||
///////// | ||
// XXX: temporary use of memory-db until we add DynamoDB | ||
const MemoryDB = require('../memory/memory-db'); | ||
|
||
|
||
// In-memory database for fragment metadata only. (Raw fragment data is
// stored in S3 — see writeFragmentData/readFragmentData below — so no
// second in-memory db is needed here.)
const metadata = new MemoryDB();
|
||
// Persist a fragment's metadata into the in-memory metadata db,
// keyed by owner id + fragment id. Returns a Promise.
function writeFragment(fragment) {
  const { ownerId, id } = fragment;
  return metadata.put(ownerId, id, fragment);
}
|
||
// Look up a fragment's metadata in the in-memory metadata db.
// Returns a Promise for the stored metadata object.
function readFragment(ownerId, id) {
  const lookup = metadata.get(ownerId, id);
  return lookup;
}
|
||
// Writes a fragment's data to an S3 Object in a Bucket
// https://github.com/awsdocs/aws-sdk-for-javascript-v3/blob/main/doc_source/s3-example-creating-buckets.md#upload-an-existing-object-to-an-amazon-s3-bucket
async function writeFragmentData(ownerId, id, data) {
  const Bucket = process.env.AWS_S3_BUCKET_NAME;
  // The object key is the owner id and fragment id, joined as a path
  const Key = `${ownerId}/${id}`;

  try {
    // PUT the raw data under the computed key
    await s3Client.send(new PutObjectCommand({ Bucket, Key, Body: data }));
  } catch (err) {
    // If anything goes wrong, log enough info that we can debug
    logger.error({ err, Bucket, Key }, 'Error uploading fragment data to S3');
    throw new Error('unable to upload fragment data');
  }
}
|
||
|
||
// List fragments owned by the given user from the metadata db.
// Returns a Promise: full metadata objects when `expand` is true,
// otherwise just the fragment ids.
async function listFragments(ownerId, expand = false) {
  const found = await metadata.query(ownerId);

  // Nothing stored, or caller asked for expanded objects: return as-is
  if (!found || expand) {
    return found;
  }

  // Collapse each metadata record down to its id
  return found.map(({ id }) => id);
}
|
||
// Delete a fragment's metadata (memory db) and data (S3 object).
// Returns a Promise that rejects with 'unable to delete fragment data'
// if the S3 delete fails.
async function deleteFragment(ownerId, id) {
  // Delete metadata (awaited so callers see a consistent state)
  await metadata.del(ownerId, id);

  // Delete data from S3
  const params = {
    Bucket: process.env.AWS_S3_BUCKET_NAME,
    // Our key will be a mix of the ownerID and fragment id, written as a path
    Key: `${ownerId}/${id}`,
  };

  const command = new DeleteObjectCommand(params);

  try {
    // BUG FIX: send() was previously not awaited, so a failed delete never
    // reached the catch below and the error path was dead code.
    await s3Client.send(command);
    // Use the structured logger rather than console.log
    logger.debug({ Bucket: params.Bucket, Key: params.Key }, 'Successfully deleted fragment data from S3');
  } catch (err) {
    // If anything goes wrong, log enough info that we can debug
    const { Bucket, Key } = params;
    logger.error({ err, Bucket, Key }, 'Error deleting fragment data from S3');
    throw new Error('unable to delete fragment data');
  }
}
|
||
// Convert a readable stream into a single Buffer by accumulating its
// chunks until the stream ends. Wrapped in a Promise so callers can
// simply await the result; rejects if the stream emits an error.
const streamToBuffer = (stream) =>
  new Promise((resolve, reject) => {
    const collected = [];
    stream
      // each incoming chunk is appended to the accumulator
      .on('data', (chunk) => collected.push(chunk))
      // a stream error rejects the Promise
      .on('error', reject)
      // once the stream finishes, join all chunks into one Buffer
      .on('end', () => resolve(Buffer.concat(collected)));
  });
|
||
// Reads a fragment's data from S3 and returns (Promise<Buffer>)
// https://github.com/awsdocs/aws-sdk-for-javascript-v3/blob/main/doc_source/s3-example-creating-buckets.md#getting-a-file-from-an-amazon-s3-bucket
async function readFragmentData(ownerId, id) {
  // Create the GET API params from our details (comment previously said PUT)
  const params = {
    Bucket: process.env.AWS_S3_BUCKET_NAME,
    // Our key will be a mix of the ownerID and fragment id, written as a path
    Key: `${ownerId}/${id}`,
  };

  // Create a GET Object command to send to S3
  const command = new GetObjectCommand(params);

  try {
    // Get the object from the Amazon S3 bucket. It is returned as a ReadableStream.
    const data = await s3Client.send(command);
    // BUG FIX: await the stream-to-Buffer conversion inside the try block so a
    // stream error is logged and wrapped just like a failed send(); previously
    // the un-awaited Promise escaped this catch and surfaced unwrapped.
    return await streamToBuffer(data.Body);
  } catch (err) {
    // If anything goes wrong, log enough info that we can debug
    const { Bucket, Key } = params;
    logger.error({ err, Bucket, Key }, 'Error streaming fragment data from S3');
    throw new Error('unable to read fragment data');
  }
}
|
||
// Public data-access API for the AWS backend.
// Object.assign keeps the same add-properties semantics as the original
// per-property assignments (nothing previously exported is clobbered).
Object.assign(module.exports, {
  listFragments,
  writeFragment,
  readFragment,
  writeFragmentData,
  readFragmentData,
  deleteFragment,
});
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,58 @@ | ||
// src/model/data/aws/s3Client.js | ||
|
||
/**
 * If AWS credentials are configured in the environment, use them. Normally when we connect to S3
 * from a deployment in AWS, we won't bother with this. But if you're testing locally, you'll need
 * these, or if you're connecting to LocalStack or MinIO
 * @returns Object | undefined
 */
const getCredentials = () => {
  const { AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN } = process.env;

  // Without both a key id and a secret there is nothing to configure
  if (!(AWS_ACCESS_KEY_ID && AWS_SECRET_ACCESS_KEY)) {
    return undefined;
  }

  // See https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-s3/modules/credentials.html
  logger.debug('Using extra S3 Credentials AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY');
  return {
    accessKeyId: AWS_ACCESS_KEY_ID,
    secretAccessKey: AWS_SECRET_ACCESS_KEY,
    // The session token is optional (e.g., when connecting to AWS from a
    // laptop); pass it through if present, the SDK ignores undefined.
    sessionToken: AWS_SESSION_TOKEN,
  };
};
|
||
/**
 * If an AWS S3 Endpoint is configured in the environment, use it.
 * @returns string | undefined
 */
const getS3Endpoint = () => {
  const endpoint = process.env.AWS_S3_ENDPOINT_URL;
  if (!endpoint) {
    return undefined;
  }
  logger.debug({ endpoint }, 'Using alternate S3 endpoint');
  return endpoint;
};
|
||
/**
 * Configure and export a single shared s3Client used for all API calls.
 * NOTE: the same client must work against real AWS S3 as well as MinIO
 * and LocalStack in development/testing, so optional settings simply
 * resolve to `undefined` and the SDK ignores them.
 */
const clientConfig = {
  // The region is always required
  region: process.env.AWS_REGION,
  // Credentials are optional (only MinIO needs them, or if you connect to AWS remotely from your laptop)
  credentials: getCredentials(),
  // The endpoint URL is optional
  endpoint: getS3Endpoint(),
  // We always want to use path style key names
  forcePathStyle: true,
};

module.exports = new S3Client(clientConfig);
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,23 @@ | ||
// src/routes/api/delete.js | ||
const { createSuccessResponse } = require('../../response'); | ||
const { createErrorResponse } = require('../../response'); | ||
const { Fragment } = require('../../model/fragment'); | ||
const logger = require('../../logger'); | ||
|
||
module.exports = (req, res) => { | ||
const ownerId = req.user; | ||
const id = req.params.id; | ||
|
||
logger.info(`inside delete route`); | ||
|
||
try{ | ||
Fragment.delete(ownerId, id); | ||
logger.info(`Fragment ${id} deleted for user ${ownerId}`); | ||
res.status(200).json(createSuccessResponse()); | ||
} | ||
catch (err) { | ||
logger.error(err); | ||
res.status(500).json(createErrorResponse(500, err)); | ||
} | ||
|
||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,44 @@ | ||
# Checking if using s3 bucket instead of memoryDB works.

# 1. POST a new fragment as an authorized user
POST http://localhost:8080/v1/fragments
# We're sending a plain text fragment
Content-Type: text/plain
# Include HTTP Basic Auth credentials
[BasicAuth]
user1@email.com:password1
# Text Data (9 bytes — must match the Content-Length assertion below)
`Hello S3!`

# Expect 201
HTTP/1.1 201

# Capture the Location URL into a variable named `url`
[Captures]
url: header "Location"

# 2. Try to GET the fragment we just posted by its URL
GET {{url}}
[BasicAuth]
user1@email.com:password1

HTTP/1.1 200
Content-Type: text/plain; charset=utf-8
Content-Length: 9
[Asserts]
body == "Hello S3!"

# 3. Deleting the fragment created
DELETE {{url}}
[BasicAuth]
user1@email.com:password1

HTTP/1.1 200

# 4. Trying to get the fragment again after deleting — it must be gone
GET {{url}}
[BasicAuth]
user1@email.com:password1

HTTP/1.1 404
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters