replace-image #1

Merged
trentlarson merged 2 commits from replace-image into master 2 months ago
  1. .env.sample (3)
  2. CHANGELOG.md (21)
  3. Dockerfile (4)
  4. Makefile.test (103)
  5. README.md (16)
  6. package.json (15)
  7. pnpm-lock.yaml (1927)
  8. sql/migrations/V2__add_is_replacement.sql (2)
  9. src/server.js (161)
  10. src/vc/did-eth-local-resolver.js (47)
  11. src/vc/index.js (7)
  12. test/Makefile (6)
  13. test/test.sh (127)
  14. test/test0.png (BIN)
  15. test/test1.png (BIN)

3
.env.sample

@@ -1,3 +1,4 @@
# shellcheck disable=SC2034
# These settings work for American Cloud.
#S3_ACCESS_KEY=???
@@ -19,8 +20,6 @@ S3_SET_ACL=false
#ENDORSER_API_URL=https://test-api.endorser.ch
#ENDORSER_API_URL=https://api.endorser.ch
INFURA_PROJECT_ID=???
# host where the final image can be accessed by the public
# default is https://test-image.timesafari.app
#DOWNLOAD_IMAGE_SERVER=test-image.timesafari.app

21
CHANGELOG.md

@@ -0,0 +1,21 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- Replacement of an existing file
- Local resolver for did:ethr
- Testing for file deletion
### Fixed
- Incorrect check for others who recorded the same image
### Changed in DB or environment
- Nothing
## [1.0.0]
### Added
- All endpoints: image POST & DELETE, image-limits, ping

4
Dockerfile

@@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1
FROM node:21-alpine
FROM node:22-alpine
ARG IMAGE_API_VERSION
RUN npm install -g pnpm
RUN apk add git
@@ -10,4 +10,4 @@ WORKDIR image-api
RUN git checkout $IMAGE_API_VERSION
RUN pnpm install --prod
CMD node server.js
CMD node src/server.js

103
Makefile.test

@@ -0,0 +1,103 @@
# from https://github.com/box/Makefile.test
# `make -C test -j`
# Makefile that has a convenient check target.
# It can be included from another Makefile that only has a TESTS variable
# defined like this
#
# TESTS ?=
#
# Runs the specified test executables. Prepends the test's name to each test's output
# and gives a nice summary at the end of test execution about passed and failed
# tests.
# Only bash is supported
SHELL := /bin/bash
THIS_FILE := $(realpath $(lastword $(MAKEFILE_LIST)))
# The directory where Makefile.test (this file) resides
THIS_FILE_DIR := $(shell dirname $(THIS_FILE))
# FIRST_MAKEFILE may be passed from parent make to child make. If it is
# already set, do not overwrite it.
FIRST_MAKEFILE ?= $(realpath $(firstword $(MAKEFILE_LIST)))
export FIRST_MAKEFILE
# The directory where the Makefile, that is invoked from the command line,
# resides. That makefile would define the TESTS variable. We assume that the
# binaries defined in the TESTS variable also reside in the same directory as
# the Makefile. The generated intermediate files will also go to this directory.
FIRST_MAKEFILE_DIR ?= $(shell dirname $(FIRST_MAKEFILE))
export FIRST_MAKEFILE_DIR
# So that the child makefiles can see the same TESTS variable.
export TESTS
failedTestsName := .makefile_test_failed_tests
executedTestsName := .makefile_test_executed_tests
TEST_TARGETS := $(TESTS:%=TARGET_FOR_%)
export TEST_TARGETS
# If the tests need a different environment one can append to this variable.
TEST_ENVIRONMENT = PYTHONPATH=$(THIS_FILE_DIR):$$PYTHONPATH PATH=$(THIS_FILE_DIR):$$PATH
# TODO: Only write to intermediate files, if they exist already.
# https://unix.stackexchange.com/q/405497/212862
# There is still a race condition here. Maybe we should use sed for appending.
define RUN_ONE_TEST
TARGET_FOR_$(1): $$(FIRST_MAKEFILE_DIR)/$(1)
+@export PATH=$$$$(pwd):$$$$PATH; \
if [ -e $$(FIRST_MAKEFILE_DIR)/$$(executedTestsName) ]; then \
echo $$< >> $$(FIRST_MAKEFILE_DIR)/$$(executedTestsName); \
fi; \
$$(TEST_ENVIRONMENT) $$< 2>&1 | sed "s/^/ [$$$$(basename $$<)] /"; test $$$${PIPESTATUS[0]} -eq 0; \
if [ $$$$? -eq 0 ]; then \
echo " PASSED: $$$$(basename $$<)"; \
else \
echo " FAILED: $$$$(basename $$<)"; \
if [ -e $$(FIRST_MAKEFILE_DIR)/$$(failedTestsName) ]; then \
echo $$< >> $$(FIRST_MAKEFILE_DIR)/$$(failedTestsName); \
fi; \
fi;
endef
# Build the above rule to run one test, for all tests.
$(foreach currtest,$(TESTS),$(eval $(call RUN_ONE_TEST,$(currtest))))
# execute the tests and look at the generated temp files afterwards.
actualCheck: $(TEST_TARGETS)
+@failed_tests=$$(cat $(FIRST_MAKEFILE_DIR)/$(failedTestsName) 2> /dev/null | wc -l;); \
executed_tests=$$(cat $(FIRST_MAKEFILE_DIR)/$(executedTestsName) 2> /dev/null | wc -l;); \
if [ $$failed_tests -ne 0 -a $$executed_tests -ne 0 ]; then \
echo ---------------------------------; \
echo "Failed $$failed_tests out of $$executed_tests tests"; \
echo ---------------------------------; \
elif [ $$failed_tests -eq 0 ]; then \
echo ---------------------------------; \
echo "All $$executed_tests tests passed"; \
echo ---------------------------------; \
fi; \
exit $$failed_tests;
# A commonly used bash command to clean intermediate files. Instead of writing
# it every time re-use this variable.
RM_INTERMEDIATE_FILES := rm -f $(FIRST_MAKEFILE_DIR)/$(failedTestsName) $(FIRST_MAKEFILE_DIR)/$(executedTestsName)
# At the start of the make, we want to start with empty intermediate files.
TRUNCATE_INTERMEDIATE_FILES := cat /dev/null > $(FIRST_MAKEFILE_DIR)/$(failedTestsName) && cat /dev/null > $(FIRST_MAKEFILE_DIR)/$(executedTestsName)
# With trap make sure the clean step is always executed before and after the
# tests run time. Do not leave residual files in the repo.
check:
+@trap "code=\$$?; \
$(RM_INTERMEDIATE_FILES); \
exit \$${code};" EXIT; \
$(TRUNCATE_INTERMEDIATE_FILES); \
$(MAKE) -f $(THIS_FILE) actualCheck;
all: check
.PHONY: all check preCheck actualCheck $(TEST_TARGETS)
.DEFAULT_GOAL := all
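
The header comment at the top of this file describes the intended usage: a consuming Makefile defines only a TESTS variable and then includes Makefile.test. A minimal sketch of such a consumer, mirroring the test/Makefile added later in this PR:

```makefile
# minimal consumer Makefile (same shape as test/Makefile below):
# list the executable test scripts, then include the shared Makefile.test
TESTS ?= \
	test.sh

include ../Makefile.test
```

Running `make -C test -j` then executes each listed test, prefixes its output with the test's name, and prints the pass/fail summary.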

16
README.md

@@ -13,23 +13,29 @@ mkdir uploads
pnpm run migrate
```
Now set up an S3 bucket & Infura project, and create a .env file from .env.sample with these important settings:
Now set up an S3 bucket, and create a .env file from .env.sample with these important settings:
```
AWS_ACCESS_KEY=
AWS_SECRET_KEY=
AWS_REGION=
INFURA_PROJECT_ID=
```
## dev
```
node server.js
node src/server.js
```
## test
#### automated
```shell
make -C test -j
```
#### manual
```shell
# run this first command in a directory where `npm install did-jwt` has been run
CODE='OWNER_DID="did:ethr:0x0000694B58C2cC69658993A90D3840C560f2F51F"; OWNER_PRIVATE_KEY_HEX="2b6472c026ec2aa2c4235c994a63868fc9212d18b58f6cbfe861b52e71330f5b"; didJwt = require("did-jwt"); didJwt.createJWT({ exp: Math.floor(Date.now() / 1000) + 60, iat: Math.floor(Date.now() / 1000), iss: OWNER_DID }, { issuer: OWNER_DID, signer: didJwt.SimpleSigner(OWNER_PRIVATE_KEY_HEX) }).then(console.log)'
@@ -51,4 +57,4 @@ JWT=`node -e "$CODE"`; curl -X DELETE -H "Authorization: Bearer $JWT" http://loc
## deploy to prod subsequent times
* Update version in server.js file. Add CHANGELOG.md entry.
* Update version in server.js 'ping' endpoint. Add CHANGELOG.md entry.
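
As a companion to the manual commands in the README, here is a hedged sketch of the replacement flow this PR adds, assuming a local server on port 3002, a JWT generated with the `$CODE` snippet above, and `FILE_NAME` as a placeholder for the file name returned by the first upload:

```shell
# sketch only: upload an image, then replace it by passing the returned file name
JWT=`node -e "$CODE"`; curl -X POST -H "Authorization: Bearer $JWT" -F "image=@test/test0.png" http://localhost:3002/image
# take FILE_NAME from the "url" in the response, then send a replacement image
JWT=`node -e "$CODE"`; curl -X POST -H "Authorization: Bearer $JWT" -F "image=@test/test1.png" -F "fileName=$FILE_NAME" http://localhost:3002/image
```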

15
package.json

@@ -1,18 +1,16 @@
{
"name": "Images for Trade",
"version": "0.0.1",
"version": "1.2.0-beta",
"description": "",
"license": "UNLICENSED",
"dependencies": {
"@aws-sdk/client-s3": "^3.521.0",
"@aws-sdk/lib-storage": "^3.521.0",
"@aws-sdk/client-s3": "^3.614.0",
"@aws-sdk/lib-storage": "^3.614.0",
"cors": "^2.8.5",
"did-jwt": "^8.0.1",
"did-jwt": "^8.0.4",
"did-resolver": "^4.1.0",
"dotenv": "^16.4.5",
"ethr-did-resolver": "^10.1.5",
"express": "^4.18.2",
"express": "^4.19.2",
"luxon": "^3.4.4",
"multer": "1.4.5-lts.1",
"sqlite3": "^5.1.7"
@@ -21,6 +19,7 @@
"flywaydb-cli": "^0.9.0"
},
"scripts": {
"migrate": "flyway -configFiles=sql/flyway.conf migrate"
"migrate": "flyway -configFiles=sql/flyway.conf migrate",
"start": "node src/server.js"
}
}

1927
pnpm-lock.yaml

File diff suppressed because it is too large

2
sql/migrations/V2__add_is_replacement.sql

@@ -0,0 +1,2 @@
ALTER TABLE image ADD COLUMN is_replacement BOOLEAN NOT NULL DEFAULT FALSE;

161
server.js → src/server.js

@@ -4,13 +4,14 @@ const crypto = require('crypto');
const didJwt = require('did-jwt');
const { Resolver } = require('did-resolver');
const express = require('express');
const { getResolver } = require('ethr-did-resolver');
const fs = require('fs');
const { DateTime } = require('luxon');
const multer = require('multer');
const path = require('path');
const sqlite3 = require('sqlite3').verbose();
const { didEthLocalResolver } = require("./vc/did-eth-local-resolver");
require('dotenv').config()
const app = express();
@@ -18,17 +19,11 @@ app.use(cors());
const port = process.env.PORT || 3002;
// file name also referenced in flyway.conf and potentially in .env files or in environment variables
const dbFile = process.env.SQLITE_FILE || './image.sqlite';
const dbFile = process.env.SQLITE_FILE || './image-db.sqlite';
const bucketName = process.env.S3_BUCKET_NAME || 'gifts-image-test';
const imageServer = process.env.DOWNLOAD_IMAGE_SERVER || 'test-image.timesafari.app';
const ethrDidResolver = getResolver;
const resolver =
new Resolver({
...ethrDidResolver({
infuraProjectId: process.env.INFURA_PROJECT_ID || 'fake-infura-project-id'
})
})
const resolver = new Resolver({ 'ethr': didEthLocalResolver });
// Open a connection to the SQLite database
const db = new sqlite3.Database(dbFile, (err) => {
@@ -54,10 +49,11 @@ const uploadDir = 'uploads';
const uploadMulter = multer({ dest: uploadDir + '/' });
app.get('/ping', async (req, res) => {
res.send('pong v1.0.0');
res.send('pong - v 0.0.1'); // version
});
app.get('/image-limits', async (req, res) => {
try {
limitsResult = await retrievelimits(req, res);
if (!limitsResult.success) {
return limitsResult.result;
@@ -68,15 +64,28 @@ app.get('/image-limits', async (req, res) => {
maxImagesPerWeek: limitsResult.maxImagesPerWeek,
nextWeekBeginDateTime: limitsResult.nextWeekBeginDateTime
}));
} catch (e) {
console.error('Error getting image limits:', e, ' ... with this string: ' + e);
return res.status(500).send(JSON.stringify({ success: false, message: 'Got this error retrieving limits: ' + e }));
}
});
// POST endpoint to upload an image
/**
* POST endpoint to upload an image
*
* Send as FormData, with:
* - "image" file Blob
* - "claimType" (optional, eg. "GiveAction", "PlanAction", "profile")
* - "handleId" (optional)
* - "fileName" (optional, if you want to replace an previous image)
*/
app.post('/image', uploadMulter.single('image'), async (req, res) => {
const reqFile = req.file;
if (reqFile == null) {
return res.status(400).send(JSON.stringify({ success: false, message: 'No file uploaded.' }));
}
if (reqFile.size > 10000000) {
try {
if (reqFile.size > 10485760) { // 10MB
fs.rm(reqFile.path, (err) => {
if (err) {
console.error("Error deleting too-large temp file", reqFile.path, "with error (but continuing):", err);
@@ -85,7 +94,6 @@ app.post('/image', uploadMulter.single('image'), async (req, res) => {
return res.status(400).send(JSON.stringify({success: false, message: 'File size is too large. Maximum file size is 10MB.'}));
}
try {
limitsResult = await retrievelimits(req, res);
if (!limitsResult.success) {
return limitsResult.result;
@@ -102,19 +110,100 @@ app.post('/image', uploadMulter.single('image'), async (req, res) => {
fs.readFile(reqFile.path, async (err, data) => {
if (err) throw err; // Handle error
try {
let finalFileName;
if (req.body.fileName) {
// replacement file name given
finalFileName = req.body.fileName;
// check if the file to replace was sent by this user earlier
const didForOriginal = await new Promise((resolve, reject) => {
// For some reason, this prepared-statement SQL gives seg fault: "SELECT did FROM image WHERE did = ? and final_file = ?"
if (issuerDid.indexOf("'") >= 0 || finalFileName.indexOf("'") >= 0) {
console.error("Error: SQL injection attempt with", issuerDid, finalFileName);
return res.status(400).send(JSON.stringify({ success: false, message: 'SQL injection attempt detected.' }));
}
const sql = "SELECT did FROM image WHERE did = '" + issuerDid + "' and final_file = '" + finalFileName + "'";
db.get(
sql,
[],
(dbErr, row) => {
if (dbErr) {
console.error(currentDate, 'Error getting image for user from database:', dbErr)
reject(dbErr);
}
resolve(row?.did);
}
);
});
if (!didForOriginal) {
return res.status(404).send(JSON.stringify({ success: false, message: 'No image entry found for user ' + issuerDid + ' for file ' + finalFileName }));
}
// check if any other user recorded this image
const othersWhoSentImage = await new Promise((resolve, reject) => {
db.get(
'SELECT did FROM image WHERE final_file = ? and did != ?',
[ finalFileName, issuerDid ],
(dbErr, row) => {
if (dbErr) {
console.error(currentDate, 'Error getting image for other users from database:', dbErr)
reject(dbErr);
}
resolve(row?.did);
}
);
});
if (othersWhoSentImage) {
return res.status(400).send(JSON.stringify({ success: false, message: 'Other users have also saved this image so it cannot be modified. You will have to replace your own references.' }));
}
// remove from S3
const params = {
Bucket: bucketName, // S3 Bucket name
Key: finalFileName, // File name to use in S3
};
const command = new DeleteObjectCommand(params);
const response = await s3Client.send(command);
if (response.$metadata.httpStatusCode !== 200
&& response.$metadata.httpStatusCode !== 202
&& response.$metadata.httpStatusCode !== 204) {
const errorTime = new Date().toISOString();
console.error(errorTime, "Error deleting from S3 with bad HTTP status, with metadata:", response.$metadata);
return res.status(500).send(JSON.stringify({
success: false,
message: "Got bad status of " + response.$metadata.httpStatusCode + " from S3. See server logs at " + errorTime
}));
}
// might as well remove from DB and add it all back again later
await new Promise((resolve, reject) => {
db.run(
'DELETE FROM image where did = ? and final_file = ?',
[ issuerDid, finalFileName ],
(dbErr) => {
if (dbErr) {
const currentDate = new Date().toISOString();
console.error(currentDate, "Error deleting record by", issuerDid, "named", finalFileName, "from database:", dbErr);
// don't continue because then we'll have storage we cannot track (and potentially limit)
reject(dbErr);
}
resolve();
}
);
});
} else {
// no replacement file name given so it's a new file
const hashSum = crypto.createHash('sha256');
hashSum.update(data);
const hashHex = hashSum.digest('hex');
finalFileName = hashHex + path.extname(reqFile.originalname);
const fileName = hashHex + path.extname(reqFile.originalname);
try {
// look to see if this image already exists
// look to see if this image already exists for this user
const imageUrl = await new Promise((resolve, reject) => {
db.get(
'SELECT url FROM image WHERE final_file = ? and did = ?',
[ fileName, issuerDid ],
[ finalFileName, issuerDid ],
(dbErr, row) => {
if (dbErr) {
console.error(currentDate, 'Error getting image for user from database:', dbErr)
@@ -127,16 +216,17 @@ app.post('/image', uploadMulter.single('image'), async (req, res) => {
if (imageUrl) {
return res.status(201).send(JSON.stringify({ success: true, url: imageUrl, message: 'This image already existed.' }));
}
}
// record the upload in the database
const currentDate = new Date().toISOString();
const localFile = reqFile.path.startsWith(uploadDir + '/') ? reqFile.path.substring(uploadDir.length + 1) : reqFile.path;
const finalUrl = `https://${imageServer}/${fileName}`;
const finalUrl = `https://${imageServer}/${finalFileName}`;
const claimType = req.body.claimType;
const handleId = req.body.handleId;
await new Promise((resolve, reject) => {
db.run(
'INSERT INTO image (time, did, claim_type, handle_id, local_file, size, final_file, mime_type, url) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
'INSERT INTO image (time, did, claim_type, handle_id, local_file, size, final_file, mime_type, url, is_replacement) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
[
currentDate,
issuerDid,
@@ -144,9 +234,10 @@ app.post('/image', uploadMulter.single('image'), async (req, res) => {
handleId,
localFile,
reqFile.size,
fileName,
finalFileName,
reqFile.mimetype,
finalUrl
finalUrl,
!!req.body.fileName,
],
(dbErr) => {
if (dbErr) {
@@ -164,7 +255,7 @@ app.post('/image', uploadMulter.single('image'), async (req, res) => {
Body: data,
Bucket: bucketName, // S3 Bucket name
ContentType: reqFile.mimetype, // File content type
Key: fileName, // File name to use in S3
Key: finalFileName, // File name to use in S3
};
if (process.env.S3_SET_ACL === 'true') {
params.ACL = 'public-read';
@@ -186,7 +277,7 @@ app.post('/image', uploadMulter.single('image'), async (req, res) => {
});
// AWS URL: https://gifts-image-test.s3.amazonaws.com/gifts-image-test/FILE
// American Cloud URL: https://a2-west.americancloud.com/TENANT:giftsimagetest/FILE
return res.status(200).send(JSON.stringify({success: true, url: finalUrl}));
return res.status(201).send({success: true, url: finalUrl});
}
} catch (uploadError) {
const errorTime = new Date().toISOString();
@@ -240,11 +331,11 @@ app.delete('/image/:url', async (req, res) => {
});
if (!thisUserImageFile) {
console.error('No image entry found for user', issuerDid, '& URL', url, 'so returning 404.');
return res.status(404).send(JSON.stringify({ success: false, message: 'No image entry found for user ' + issuerDid + ' & URL ' + url }));
return res.status(404).send({ success: false, message: 'No image entry found for user ' + issuerDid + ' & URL ' + url });
}
// check if any other user recorded this image
const otherUserImage = await new Promise((resolve, reject) => {
const othersWhoSentImage = await new Promise((resolve, reject) => {
db.get(
'SELECT did FROM image WHERE url = ? and did != ?',
[ url, issuerDid ],
@@ -258,7 +349,7 @@ app.delete('/image/:url', async (req, res) => {
);
});
if (!otherUserImage) {
if (!othersWhoSentImage) {
// remove from S3 since nobody else recorded it
const params = {
Bucket: bucketName, // S3 Bucket name
@@ -271,10 +362,10 @@ app.delete('/image/:url', async (req, res) => {
&& response.$metadata.httpStatusCode !== 204) {
const errorTime = new Date().toISOString();
console.error(errorTime, "Error deleting from S3 with bad HTTP status, with metadata:", response.$metadata);
return res.status(500).send(JSON.stringify({
return res.status(500).send({
success: false,
message: "Got bad status of " + response.$metadata.httpStatusCode + " from S3. See server logs at " + errorTime
}));
});
}
}
@@ -286,22 +377,22 @@ app.delete('/image/:url', async (req, res) => {
(dbErr) => {
if (dbErr) {
const currentDate = new Date().toISOString();
console.error(currentDate, "Error deleting record from", issuerDid, "into database:", dbErr);
// don't continue because then we'll have storage we cannot track (and potentially limit)
console.error(currentDate, "Error deleting record by", issuerDid, "with URL", url, "from database:", dbErr);
// we'll let them know that it's not all cleaned up so they can try again
reject(dbErr);
}
resolve();
}
);
});
return res.status(204).send(JSON.stringify({ success: true }));
return res.status(204).send({ success: true });
} catch (error) {
const errorTime = new Date().toISOString();
console.error(errorTime, "Error processing image delete:", error);
return res.status(500).send(JSON.stringify({
return res.status(500).send({
success: false,
message: "Got error processing image delete. See server logs at " + errorTime + " Error Details: " + error
}));
});
}
});
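
The doc comment added above the POST /image handler lists the accepted FormData fields. A hedged client-side sketch of that request shape (SERVER_URL, jwt, and imageBlob are placeholders, not part of this PR):

```js
// sketch of a client call matching the documented FormData fields
const form = new FormData();
form.append('image', imageBlob);                      // the image file Blob
form.append('claimType', 'GiveAction');               // optional, e.g. "GiveAction", "PlanAction", "profile"
form.append('handleId', 'some-handle-id');            // optional, placeholder value
form.append('fileName', 'file-returned-earlier.png'); // optional: replace a previous upload by this user

const response = await fetch(`${SERVER_URL}/image`, {
  method: 'POST',
  headers: { Authorization: `Bearer ${jwt}` },        // did-jwt token, as in the README examples
  body: form,
});
const { success, url } = await response.json();       // server responds with { success, url } on success
```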

47
src/vc/did-eth-local-resolver.js

@@ -0,0 +1,47 @@
const { DIDResolutionResult } = require('did-resolver');
/**
* This did:ethr resolver instructs the did-jwt machinery to use the
* EcdsaSecp256k1RecoveryMethod2020 verification method, which uses the recovery bit in the
* signature to recover the DID's public key from the signature.
*
* This effectively hard codes the did:ethr DID resolver to use the address as the public key.
* @param did : string
* @returns {Promise<DIDResolutionResult>}
*
* Similar code resides in endorser-ch
*/
const didEthLocalResolver = async(did) => {
const didRegex = /^did:ethr:(0x[0-9a-fA-F]{40})$/;
const match = did.match(didRegex);
if (match) {
const address = match[1]; // Extract eth address: 0x...
const publicKeyHex = address; // Use the address directly as a public key placeholder
return {
didDocumentMetadata: {},
didResolutionMetadata: {
contentType: "application/did+ld+json"
},
didDocument: {
'@context': [
'https://www.w3.org/ns/did/v1',
"https://w3id.org/security/suites/secp256k1recovery-2020/v2"
],
id: did,
verificationMethod: [{
id: `${did}#controller`,
type: 'EcdsaSecp256k1RecoveryMethod2020',
controller: did,
blockchainAccountId: "eip155:1:" + publicKeyHex,
}],
authentication: [`${did}#controller`],
assertionMethod: [`${did}#controller`],
},
};
}
throw new Error(`Unsupported DID format: ${did}`);
};
module.exports = { didEthLocalResolver };
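
As a hedged illustration of how this resolver gets used: the Resolver registration below matches the change in src/server.js, while the verifyJWT call is an assumption about how an Authorization JWT would be checked against it (require path adjusted to run from the repo root):

```js
// minimal sketch, assuming the published did-resolver and did-jwt APIs
const didJwt = require('did-jwt');
const { Resolver } = require('did-resolver');
const { didEthLocalResolver } = require('./src/vc/did-eth-local-resolver');

// same registration as src/server.js: resolve the "ethr" DID method locally,
// so no Infura project or network lookup is needed
const resolver = new Resolver({ ethr: didEthLocalResolver });

// verify a JWT whose issuer is a did:ethr:0x... DID
async function verifiedIssuer(jwt) {
  const { issuer } = await didJwt.verifyJWT(jwt, { resolver });
  return issuer;
}
```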

7
src/vc/index.js

@@ -0,0 +1,7 @@
/**
* Verifiable Credential & DID functions, specifically for EndorserSearch.org tools
*
* The goal is to make this folder similar across projects, then move it to a library.
* Other projects: endorser-ch, crowd-funder-for-time-pwa
*
*/

6
test/Makefile

@@ -0,0 +1,6 @@
# see ../Makefile.test
TESTS ?= \
test.sh
include ../Makefile.test

127
test/test.sh

@@ -0,0 +1,127 @@
#!/usr/bin/env bash
# Execute from the "test" directory so that the test files are available.
#
# We recommend you have the pkgx.dev tools installed.
# If you want to use your installed curl & jq & node, you can comment out the two "pkgx" commands.
HOST=http://localhost:3002
if ! [[ "$PWD" == */test ]]; then
echo "Error: Run this script in the 'test' directory."
exit 1
fi
# load the tools: curl, jq, node
eval "$(pkgx --shellcode)"
env +curl +jq +node
JWT_CODE_USER_0='OWNER_DID="did:ethr:0x0000694B58C2cC69658993A90D3840C560f2F51F"; OWNER_PRIVATE_KEY_HEX="2b6472c026ec2aa2c4235c994a63868fc9212d18b58f6cbfe861b52e71330f5b"; didJwt = require("did-jwt"); didJwt.createJWT({ exp: Math.floor(Date.now() / 1000) + 60, iat: Math.floor(Date.now() / 1000), iss: OWNER_DID }, { issuer: OWNER_DID, signer: didJwt.SimpleSigner(OWNER_PRIVATE_KEY_HEX) }).then(console.log)'
JWT_CODE_USER_1='OWNER_DID="did:ethr:0x111d15564f824D56C7a07b913aA7aDd03382aA39"; OWNER_PRIVATE_KEY_HEX="be64d297e1c6f3545971cd0bc24c3bf32656f8639a2ae32cb84a1e3c75ad69cd"; didJwt = require("did-jwt"); didJwt.createJWT({ exp: Math.floor(Date.now() / 1000) + 60, iat: Math.floor(Date.now() / 1000), iss: OWNER_DID }, { issuer: OWNER_DID, signer: didJwt.SimpleSigner(OWNER_PRIVATE_KEY_HEX) }).then(console.log)'
# exit as soon as anything fails
set -e
echo "Upload test0.png by user #0"
JWT=$(node -e "$JWT_CODE_USER_0")
echo JWT: $JWT
RESULT=$(curl -X POST -H "Authorization: Bearer $JWT" -F "image=@test0.png" "$HOST/image")
echo curl result: $RESULT
echo "Download from the URL supplied"
URL0=$(echo $RESULT | jq -r '.url')
# -L to follow redirect because the returned URL is a timesafari.app URL
STATUS_CODE=$(curl -o test0-back.png -w "%{http_code}" -L $URL0);
if [ $STATUS_CODE -ne 200 ]; then
echo "File is not accessible, received status code: $STATUS_CODE";
fi
echo "Check that downloaded file is the same as the original"
if diff "test0.png" "test0-back.png" >/dev/null; then
echo "Got the same file."
else
echo "Did not get the same file."
exit 1
fi
echo "Upload test1.png by user #1"
JWT=$(node -e "$JWT_CODE_USER_1")
echo JWT: $JWT
RESULT=$(curl -X POST -H "Authorization: Bearer $JWT" -F "image=@test1.png" "$HOST/image")
echo curl result: $RESULT
URL2=$(echo $RESULT | jq -r '.url')
if [ "$URL0" != "$URL2" ]; then
echo "URLs 0 & 1 are different."
else
echo "URLs 0 & 1 are not different."
exit 1
fi
echo "Now unsuccessfully upload a change to the image by user 1"
FILENAME0=$(basename $URL0)
JWT=$(node -e "$JWT_CODE_USER_1")
echo JWT: $JWT
RESULT=$(curl -X POST -H "Authorization: Bearer $JWT" -F "image=@test1.png" -F "fileName=$FILENAME0" "$HOST/image")
echo curl result: $RESULT
SUCCESS=$(echo $RESULT | jq -r '.success')
if [ $SUCCESS = "false" ]; then
echo "User #1 could not replace existing file."
else
echo "File may have been replaced wrongly.";
exit 1
fi
echo "Now successfully upload a change to the image by user 0"
JWT=$(node -e "$JWT_CODE_USER_0")
echo JWT: $JWT
RESULT=$(curl -X POST -H "Authorization: Bearer $JWT" -F "image=@test1.png" -F "fileName=$FILENAME0" "$HOST/image")
echo curl result: $RESULT
SUCCESS=$(echo $RESULT | jq -r '.success')
if [ $SUCCESS = "true" ]; then
echo "User #0 did replace file.";
else
echo "User #0 couldn't replace file.";
exit 1
fi
echo "Fail to remove test file 0 from the service"
TEST_URL="https%3A%2F%2Ftest-image.timesafari.app%2F4599145c3a8792a678f458747f2d8512c680e8680bf5563c35b06cd770051ed2.png"
JWT=$(node -e "$JWT_CODE_USER_1")
echo JWT: $JWT
RESULT=$(curl -X DELETE -H "Authorization: Bearer $JWT" "$HOST/image/$TEST_URL")
echo curl result: $RESULT
SUCCESS=$(echo $RESULT | jq -r '.success')
if [ "$SUCCESS" = "false" ]; then
echo "Test file 0 was not cleaned off server."
else
echo "Test file 0 was cleaned off server.";
exit 1
fi
echo "Remove test file 0 from the service"
TEST_URL="https%3A%2F%2Ftest-image.timesafari.app%2F4599145c3a8792a678f458747f2d8512c680e8680bf5563c35b06cd770051ed2.png"
JWT=$(node -e "$JWT_CODE_USER_0")
echo JWT: $JWT
RESULT=$(curl -X DELETE -H "Authorization: Bearer $JWT" "$HOST/image/$TEST_URL")
echo curl result: $RESULT
SUCCESS=$(echo $RESULT | jq -r '.success')
if [[ -z "$RESULT" ]] || [[ "$SUCCESS" = "true" ]]; then
echo "Test file 0 was cleaned off server."
else
echo "Test file 0 was not cleaned off server.";
exit 1
fi
echo "Remove test file 1 from the service"
TEST_URL="https%3A%2F%2Ftest-image.timesafari.app%2F83801e59789f962ddd19dbf99abd65b416e4c6560c28bdb3e663cea045561b07.png"
JWT=$(node -e "$JWT_CODE_USER_1")
echo JWT: $JWT
RESULT=$(curl -X DELETE -H "Authorization: Bearer $JWT" "$HOST/image/$TEST_URL")
echo curl result: $RESULT
SUCCESS=$(echo $RESULT | jq -r '.success')
if [[ -z "$RESULT" ]] || [[ "$SUCCESS" = "true" ]]; then
echo "Test file 1 was cleaned off server."
else
echo "Test file 1 was not cleaned off server.";
exit 1
fi

BIN
test/test0.png

Binary file not shown (added; 3.4 KiB).

BIN
test/test1.png

Binary file not shown (added; 9.7 KiB).
