Added S3 bucket support and updated docker compose and tests
brokoli777 committed Jul 26, 2024
1 parent 2dec181 commit 8ae3d85
Showing 10 changed files with 2,863 additions and 937 deletions.
4 changes: 0 additions & 4 deletions Dockerfile
@@ -31,10 +31,6 @@ WORKDIR /app
# Copy built artifacts from the build stage
COPY --from=build /app ./

-# Set user to node
-USER node
-COPY --chown=node:node . /usr/src/app

# Start the server
CMD ["node", "src/index.js"]

3,508 changes: 2,580 additions & 928 deletions package-lock.json

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions package.json
@@ -31,6 +31,7 @@
"supertest": "^7.0.0"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.620.0",
"aws-jwt-verify": "^4.0.1",
"compression": "^1.7.4",
"content-type": "^1.0.5",
149 changes: 149 additions & 0 deletions src/model/data/aws/index.js
@@ -0,0 +1,149 @@
// src/model/data/aws/index.js

// An AWS implementation of the fragments data layer: raw fragment data is
// stored in S3, while fragment metadata stays in an in-memory db until we
// add DynamoDB.

const logger = require('../../logger');
const s3Client = require('./s3Client');
const { PutObjectCommand, GetObjectCommand, DeleteObjectCommand } = require('@aws-sdk/client-s3');

/////////
// XXX: temporary use of memory-db until we add DynamoDB
const MemoryDB = require('../memory/memory-db');


// Create an in-memory database for fragment metadata
// (raw fragment data now lives in S3).
const metadata = new MemoryDB();

// Write a fragment's metadata to memory db. Returns a Promise
function writeFragment(fragment) {
return metadata.put(fragment.ownerId, fragment.id, fragment);
}

// Read a fragment's metadata from memory db. Returns a Promise
function readFragment(ownerId, id) {
return metadata.get(ownerId, id);
}

// Writes a fragment's data to an S3 Object in a Bucket
// https://github.com/awsdocs/aws-sdk-for-javascript-v3/blob/main/doc_source/s3-example-creating-buckets.md#upload-an-existing-object-to-an-amazon-s3-bucket
async function writeFragmentData(ownerId, id, data) {
// Create the PUT API params from our details
const params = {
Bucket: process.env.AWS_S3_BUCKET_NAME,
// Our key will be a mix of the ownerID and fragment id, written as a path
Key: `${ownerId}/${id}`,
Body: data,
};

// Create a PUT Object command to send to S3
const command = new PutObjectCommand(params);

try {
// Use our client to send the command
await s3Client.send(command);
} catch (err) {
// If anything goes wrong, log enough info that we can debug
const { Bucket, Key } = params;
logger.error({ err, Bucket, Key }, 'Error uploading fragment data to S3');
throw new Error('unable to upload fragment data');
}
}


// Get a list of fragment ids/objects for the given user from memory db. Returns a Promise
async function listFragments(ownerId, expand = false) {
const fragments = await metadata.query(ownerId);

// If we don't get anything back, or we're supposed to return expanded fragments, return as-is
if (expand || !fragments) {
return fragments;
}

// Otherwise, map to only send back the ids
return fragments.map((fragment) => fragment.id);
}

// Delete a fragment's metadata from the memory db and its data from S3. Returns a Promise
async function deleteFragment(ownerId, id) {
// Delete metadata
await metadata.del(ownerId, id);

// Delete data from S3
const params = {
Bucket: process.env.AWS_S3_BUCKET_NAME,
// Our key will be a mix of the ownerID and fragment id, written as a path
Key: `${ownerId}/${id}`,
};

const command = new DeleteObjectCommand(params);

try {
// Use our client to send the command
await s3Client.send(command);
logger.debug({ Key: params.Key }, 'Deleted fragment data from S3');
} catch (err) {
// If anything goes wrong, log enough info that we can debug
const { Bucket, Key } = params;
logger.error({ err, Bucket, Key }, 'Error deleting fragment data from S3');
throw new Error('unable to delete fragment data');
}
}

// Convert a stream of data into a Buffer, by collecting
// chunks of data until finished, then assembling them together.
// We wrap the whole thing in a Promise so it's easier to consume.
const streamToBuffer = (stream) =>
new Promise((resolve, reject) => {
// As the data streams in, we'll collect it into an array.
const chunks = [];

// Streams have events that we can listen for and run
// code. We need to know when new `data` is available,
// if there's an `error`, and when we're at the `end`
// of the stream.

// When there's data, add the chunk to our chunks list
stream.on('data', (chunk) => chunks.push(chunk));
// When there's an error, reject the Promise
stream.on('error', reject);
// When the stream is done, resolve with a new Buffer of our chunks
stream.on('end', () => resolve(Buffer.concat(chunks)));
});
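// Note: newer releases of @aws-sdk/client-s3 wrap the GetObject response
// Body with a stream mixin that exposes transformToByteArray(), so this
// helper could likely be replaced with
// `Buffer.from(await data.Body.transformToByteArray())`.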

// Reads a fragment's data from S3. Returns a Promise<Buffer>
// https://github.com/awsdocs/aws-sdk-for-javascript-v3/blob/main/doc_source/s3-example-creating-buckets.md#getting-a-file-from-an-amazon-s3-bucket
async function readFragmentData(ownerId, id) {
// Create the GET API params from our details
const params = {
Bucket: process.env.AWS_S3_BUCKET_NAME,
// Our key will be a mix of the ownerID and fragment id, written as a path
Key: `${ownerId}/${id}`,
};

// Create a GET Object command to send to S3
const command = new GetObjectCommand(params);

try {
// Get the object from the Amazon S3 bucket. It is returned as a ReadableStream.
const data = await s3Client.send(command);
// Convert the ReadableStream to a Buffer
return streamToBuffer(data.Body);
} catch (err) {
const { Bucket, Key } = params;
logger.error({ err, Bucket, Key }, 'Error streaming fragment data from S3');
throw new Error('unable to read fragment data');
}
}

module.exports.listFragments = listFragments;
module.exports.writeFragment = writeFragment;
module.exports.readFragment = readFragment;
module.exports.writeFragmentData = writeFragmentData;
module.exports.readFragmentData = readFragmentData;
module.exports.deleteFragment = deleteFragment;
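
For context: the choice between this AWS backend and the in-memory one is made one level up, in src/model/data/index.js. A minimal sketch of that selector, assuming the existing memory backend sits alongside this aws/ directory:

  // src/model/data/index.js
  // If the environment sets an AWS Region, use the AWS backend services
  // (S3, and later DynamoDB); otherwise, fall back to the in-memory db.
  module.exports = process.env.AWS_REGION ? require('./aws') : require('./memory');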
58 changes: 58 additions & 0 deletions src/model/data/aws/s3Client.js
@@ -0,0 +1,58 @@
// src/model/data/aws/s3Client.js

/**
* S3 specific config and objects. See:
* https://www.npmjs.com/package/@aws-sdk/client-s3
*/
const { S3Client } = require('@aws-sdk/client-s3');
const logger = require('../../../logger');

/**
 * If AWS credentials are configured in the environment, use them. When we connect
 * to S3 from a deployment in AWS we normally won't need these, but you will if
 * you're testing locally, or connecting to LocalStack or MinIO.
* @returns Object | undefined
*/
const getCredentials = () => {
if (process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY) {
// See https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-s3/modules/credentials.html
const credentials = {
accessKeyId: process.env.AWS_ACCESS_KEY_ID,
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
// Optionally include the AWS Session Token, too (e.g., if you're connecting to AWS from your laptop).
// Not all situations require this, so we won't check for it above, just use it if it is present.
sessionToken: process.env.AWS_SESSION_TOKEN,
};
logger.debug('Using S3 credentials from AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY');
return credentials;
}
};

/**
* If an AWS S3 Endpoint is configured in the environment, use it.
* @returns string | undefined
*/
const getS3Endpoint = () => {
if (process.env.AWS_S3_ENDPOINT_URL) {
logger.debug({ endpoint: process.env.AWS_S3_ENDPOINT_URL }, 'Using alternate S3 endpoint');
return process.env.AWS_S3_ENDPOINT_URL;
}
};

/**
* Configure and export a new s3Client to use for all API calls.
 * NOTE: we want to use this client not only with AWS S3, but also with
 * MinIO and LocalStack in development and testing. We may or may not have
 * various configuration settings, and will pass `undefined` when we don't
 * (i.e., we'll ignore them).
*/
module.exports = new S3Client({
// The region is always required
region: process.env.AWS_REGION,
// Credentials are optional (MinIO needs them, as does connecting to AWS remotely from your laptop)
credentials: getCredentials(),
// The endpoint URL is optional
endpoint: getS3Endpoint(),
// We always want to use path style key names
forcePathStyle: true,
});
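
To sanity-check this client against LocalStack or MinIO before running the full server, a small sketch could help; the script path and the HeadBucketCommand check are assumptions, not part of this commit:

  // scripts/check-s3.js — hypothetical smoke test, not part of this commit.
  // Assumes AWS_REGION and AWS_S3_BUCKET_NAME are set (plus credentials and
  // AWS_S3_ENDPOINT_URL when pointing at LocalStack or MinIO).
  const { HeadBucketCommand } = require('@aws-sdk/client-s3');
  const s3Client = require('../src/model/data/aws/s3Client');

  (async () => {
    try {
      // HeadBucket succeeds only if the bucket exists and is reachable with our credentials
      await s3Client.send(new HeadBucketCommand({ Bucket: process.env.AWS_S3_BUCKET_NAME }));
      console.log('S3 bucket is reachable');
    } catch (err) {
      console.error('S3 check failed:', err);
      process.exit(1);
    }
  })();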
23 changes: 23 additions & 0 deletions src/routes/api/delete.js
@@ -0,0 +1,23 @@
// src/routes/api/delete.js
const { createSuccessResponse, createErrorResponse } = require('../../response');
const { Fragment } = require('../../model/fragment');
const logger = require('../../logger');

module.exports = async (req, res) => {
const ownerId = req.user;
const id = req.params.id;

logger.debug({ ownerId, id }, 'DELETE /v1/fragments/:id');

try {
await Fragment.delete(ownerId, id);
logger.info(`Fragment ${id} deleted for user ${ownerId}`);
res.status(200).json(createSuccessResponse());
} catch (err) {
logger.error(err);
res.status(500).json(createErrorResponse(500, err.message));
}
};
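
Since supertest is already listed in devDependencies, the new route could also get a unit test. A sketch, assuming the POST success response exposes the new fragment's id as fragment.id (the filename is hypothetical):

  // tests/unit/delete.test.js — a sketch, not part of this commit.
  const request = require('supertest');
  const app = require('../../src/app');

  describe('DELETE /v1/fragments/:id', () => {
    test('deletes a fragment the user owns', async () => {
      // Create a fragment first so there is something to delete
      const post = await request(app)
        .post('/v1/fragments')
        .auth('user1@email.com', 'password1')
        .set('Content-Type', 'text/plain')
        .send('to be deleted');
      // Assumption: the success response includes fragment.id
      const id = post.body.fragment.id;

      const res = await request(app)
        .delete(`/v1/fragments/${id}`)
        .auth('user1@email.com', 'password1');
      expect(res.statusCode).toBe(200);
      expect(res.body.status).toBe('ok');
    });
  });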
5 changes: 3 additions & 2 deletions src/routes/api/get.js
@@ -31,7 +31,7 @@ const extensionToMimeType = {
/**
* Get a list of fragments for the current user
*/
-module.exports = (req, res, next) => {
+module.exports = (req, res) => {
const expand = req.query.expand;
const ownerId = req.user;
const idExtension = req.params.id;
@@ -88,7 +88,8 @@ module.exports = (req, res, next) => {
})
.catch((err) => {
logger.error(err);
-next(err);
+// next(err);
+res.status(404).json(createErrorResponse(404, "Fragment not found"));
});
} else {
// Get a list of fragments for the current user
2 changes: 2 additions & 0 deletions src/routes/api/index.js
@@ -34,6 +34,8 @@ router.get('/fragments/:id/info', require('./get-info'));
// You can use Buffer.isBuffer(req.body) to test if it was parsed by the raw body parser.
router.post('/fragments', rawBody(), require('./post'));

+router.delete('/fragments/:id', require('./delete'));

module.exports = router;
44 changes: 44 additions & 0 deletions tests/integration/lab-9-s3.hurl
@@ -0,0 +1,44 @@
# Check that fragment data is stored in and served from the S3 bucket instead of the in-memory DB.

POST http://localhost:8080/v1/fragments

# We're sending a plain text fragment
Content-Type: text/plain
# Include HTTP Basic Auth credentials
[BasicAuth]
user1@email.com:password1
# Text Data
`Hello S3!`

# Expect 201
HTTP/1.1 201

# Capture the Location URL into a variable named `url`
[Captures]
url: header "Location"

# Try to GET the fragment we just posted by its URL
GET {{url}}
[BasicAuth]
user1@email.com:password1

HTTP/1.1 200
Content-Type: text/plain; charset=utf-8
Content-Length: 9
[Asserts]
body == "Hello S3!"

# Delete the fragment we just created
DELETE {{url}}
[BasicAuth]
user1@email.com:password1

HTTP/1.1 200

# Try to GET the fragment again after deleting; it should be gone
GET {{url}}
[BasicAuth]
user1@email.com:password1

HTTP/1.1 404
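
With the server and an S3-compatible store (e.g., LocalStack or MinIO) running on localhost:8080, this file can be run with `hurl --test tests/integration/lab-9-s3.hurl`.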
6 changes: 3 additions & 3 deletions tests/unit/app.test.js
@@ -4,7 +4,7 @@ const request = require('supertest');
const app = require('../../src/app');


-describe('test 404 middleware', () => {
+describe('test app error handling', () => {
test('should return HTTP 404 response', async () => {
const res = await request(app).get('/unknownroute273048923');
expect(res.statusCode).toBe(404);
@@ -16,10 +16,10 @@ describe('test 404 middleware', () => {

test('should return server error message', async () => {
const res = await request(app).get('/v1/fragments/12345').auth('user1@email.com', 'password1');
-expect(res.statusCode).toBe(500);
+expect(res.statusCode).toBe(404);
expect(res.body.status).toBe('error');
expect(res.body.error.message).toBe('Fragment not found');
-expect(res.body.error.code).toBe(500);
+expect(res.body.error.code).toBe(404);

});

