feat: add support for S3 as a storage provider (#659)

* add s3

* instance the s3 client dynamically

* refactor code

* fix format

* add docs

* add docs

* fix issue with s3 upload if you use the base path,
fix issue with archiving -> disable archiving for s3

* split file service in local and s3 file service and fix s3 upload chunking

* add working download/view

* add new features to local service (from main branch)

* revert s3 service and add working delete/remove functionality

* refactor s3 service

* Update backend/src/file/s3.service.ts

Co-authored-by: Elias Schneider <login@eliasschneider.com>

* Update frontend/src/components/admin/configuration/ConfigurationNavBar.tsx

Co-authored-by: Elias Schneider <login@eliasschneider.com>

* Update docs/docs/setup/s3.md

Co-authored-by: Elias Schneider <login@eliasschneider.com>

* Update backend/prisma/seed/config.seed.ts

Co-authored-by: Elias Schneider <login@eliasschneider.com>

* add note for ZIP archive in docs

* create logger instance

* make s3 instance dynamic

* add icon import

* remove console.logs

* add correct pdf viewing format

* add storage provider to share

* refactor: run formatter

* chore: add prisma migration

* fix: don't expose `storageProvider`

* chore: improve config variables description

---------

Co-authored-by: Elias Schneider <login@eliasschneider.com>
This commit is contained in:
Mattia Müggler
2024-12-19 12:06:49 +01:00
committed by GitHub
parent 0b406f0464
commit 5a54fe4cb7
16 changed files with 2222 additions and 139 deletions

1559
backend/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -13,6 +13,7 @@
"seed": "ts-node prisma/seed/config.seed.ts"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.679.0",
"@nestjs/cache-manager": "^2.2.2",
"@nestjs/common": "^10.4.3",
"@nestjs/config": "^3.2.3",

View File

@@ -0,0 +1,24 @@
-- RedefineTables
-- Adds the "storageProvider" column (default 'LOCAL') to the Share table by
-- rebuilding it: create "new_Share" with the extra column, copy all existing
-- rows across, drop the old table, then rename the new one into place.
-- Foreign keys are disabled for the duration of the rebuild so the DROP/RENAME
-- doesn't trip the constraints referencing "Share".
PRAGMA defer_foreign_keys=ON;
PRAGMA foreign_keys=OFF;
CREATE TABLE "new_Share" (
"id" TEXT NOT NULL PRIMARY KEY,
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"name" TEXT,
"uploadLocked" BOOLEAN NOT NULL DEFAULT false,
"isZipReady" BOOLEAN NOT NULL DEFAULT false,
"views" INTEGER NOT NULL DEFAULT 0,
"expiration" DATETIME NOT NULL,
"description" TEXT,
"removedReason" TEXT,
"creatorId" TEXT,
"reverseShareId" TEXT,
"storageProvider" TEXT NOT NULL DEFAULT 'LOCAL',
CONSTRAINT "Share_creatorId_fkey" FOREIGN KEY ("creatorId") REFERENCES "User" ("id") ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT "Share_reverseShareId_fkey" FOREIGN KEY ("reverseShareId") REFERENCES "ReverseShare" ("id") ON DELETE CASCADE ON UPDATE CASCADE
);
-- Copy every existing row; "storageProvider" is not selected, so each copied
-- row receives the column DEFAULT of 'LOCAL'.
INSERT INTO "new_Share" ("createdAt", "creatorId", "description", "expiration", "id", "isZipReady", "name", "removedReason", "reverseShareId", "uploadLocked", "views") SELECT "createdAt", "creatorId", "description", "expiration", "id", "isZipReady", "name", "removedReason", "reverseShareId", "uploadLocked", "views" FROM "Share";
DROP TABLE "Share";
ALTER TABLE "new_Share" RENAME TO "Share";
-- Restore foreign-key enforcement.
PRAGMA foreign_keys=ON;
PRAGMA defer_foreign_keys=OFF;

View File

@@ -95,6 +95,7 @@ model Share {
security ShareSecurity?
recipients ShareRecipient[]
files File[]
storageProvider String @default("LOCAL")
}
model ReverseShare {

View File

@@ -318,6 +318,38 @@ const configVariables: ConfigVariables = {
obscured: true,
},
},
s3: {
enabled: {
type: "boolean",
defaultValue: "false",
},
endpoint: {
type: "string",
defaultValue: "",
},
region: {
type: "string",
defaultValue: "",
},
bucketName: {
type: "string",
defaultValue: "",
},
bucketPath: {
type: "string",
defaultValue: "",
},
key: {
type: "string",
defaultValue: "",
secret: true,
},
secret: {
type: "string",
defaultValue: "",
obscured: true,
},
}
};
type ConfigVariables = {

View File

@@ -17,6 +17,7 @@ import { CreateShareGuard } from "src/share/guard/createShare.guard";
import { ShareOwnerGuard } from "src/share/guard/shareOwner.guard";
import { FileService } from "./file.service";
import { FileSecurityGuard } from "./guard/fileSecurity.guard";
import * as mime from "mime-types";
@Controller("shares/:shareId/files")
export class FileController {
@@ -53,7 +54,7 @@ export class FileController {
@Res({ passthrough: true }) res: Response,
@Param("shareId") shareId: string,
) {
const zip = this.fileService.getZip(shareId);
const zip = await this.fileService.getZip(shareId);
res.set({
"Content-Type": "application/zip",
"Content-Disposition": contentDisposition(`${shareId}.zip`),
@@ -73,13 +74,18 @@ export class FileController {
const file = await this.fileService.get(shareId, fileId);
const headers = {
"Content-Type": file.metaData.mimeType,
"Content-Type":
mime?.lookup?.(file.metaData.name) || "application/octet-stream",
"Content-Length": file.metaData.size,
"Content-Security-Policy": "script-src 'none'",
};
if (download === "true") {
headers["Content-Disposition"] = contentDisposition(file.metaData.name);
} else {
headers["Content-Disposition"] = contentDisposition(file.metaData.name, {
type: "inline",
});
}
res.set(headers);

View File

@@ -4,11 +4,13 @@ import { ReverseShareModule } from "src/reverseShare/reverseShare.module";
import { ShareModule } from "src/share/share.module";
import { FileController } from "./file.controller";
import { FileService } from "./file.service";
import { LocalFileService } from "./local.service";
import { S3FileService } from "./s3.service";
@Module({
imports: [JwtModule.register({}), ReverseShareModule, ShareModule],
controllers: [FileController],
providers: [FileService],
providers: [FileService, LocalFileService, S3FileService],
exports: [FileService],
})
export class FileModule {}

View File

@@ -1,162 +1,88 @@
import {
BadRequestException,
HttpException,
HttpStatus,
Injectable,
InternalServerErrorException,
NotFoundException,
} from "@nestjs/common";
import { JwtService } from "@nestjs/jwt";
import * as crypto from "crypto";
import { createReadStream } from "fs";
import * as fs from "fs/promises";
import * as mime from "mime-types";
import { Injectable } from "@nestjs/common";
import { LocalFileService } from "./local.service";
import { S3FileService } from "./s3.service";
import { ConfigService } from "src/config/config.service";
import { PrismaService } from "src/prisma/prisma.service";
import { validate as isValidUUID } from "uuid";
import { SHARE_DIRECTORY } from "../constants";
import { Readable } from "stream";
import { PrismaService } from "../prisma/prisma.service";
@Injectable()
export class FileService {
constructor(
private prisma: PrismaService,
private config: ConfigService,
private localFileService: LocalFileService,
private s3FileService: S3FileService,
private configService: ConfigService,
) {}
// Determine which service to use based on the current config value
// shareId is optional -> can be used to overwrite a storage provider
private getStorageService(
storageProvider?: string,
): S3FileService | LocalFileService {
if (storageProvider != undefined)
return storageProvider == "S3"
? this.s3FileService
: this.localFileService;
return this.configService.get("s3.enabled")
? this.s3FileService
: this.localFileService;
}
async create(
data: string,
chunk: { index: number; total: number },
file: { id?: string; name: string },
file: {
id?: string;
name: string;
},
shareId: string,
) {
if (!file.id) {
file.id = crypto.randomUUID();
} else if (!isValidUUID(file.id)) {
throw new BadRequestException("Invalid file ID format");
}
const share = await this.prisma.share.findUnique({
where: { id: shareId },
include: { files: true, reverseShare: true },
});
if (share.uploadLocked)
throw new BadRequestException("Share is already completed");
let diskFileSize: number;
try {
diskFileSize = (
await fs.stat(`${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`)
).size;
} catch {
diskFileSize = 0;
}
// If the sent chunk index and the expected chunk index doesn't match throw an error
const chunkSize = this.config.get("share.chunkSize");
const expectedChunkIndex = Math.ceil(diskFileSize / chunkSize);
if (expectedChunkIndex != chunk.index)
throw new BadRequestException({
message: "Unexpected chunk received",
error: "unexpected_chunk_index",
expectedChunkIndex,
});
const buffer = Buffer.from(data, "base64");
// Check if there is enough space on the server
const space = await fs.statfs(SHARE_DIRECTORY);
const availableSpace = space.bavail * space.bsize;
if (availableSpace < buffer.byteLength) {
throw new InternalServerErrorException("Not enough space on the server");
}
// Check if share size limit is exceeded
const fileSizeSum = share.files.reduce(
(n, { size }) => n + parseInt(size),
0,
);
const shareSizeSum = fileSizeSum + diskFileSize + buffer.byteLength;
if (
shareSizeSum > this.config.get("share.maxSize") ||
(share.reverseShare?.maxShareSize &&
shareSizeSum > parseInt(share.reverseShare.maxShareSize))
) {
throw new HttpException(
"Max share size exceeded",
HttpStatus.PAYLOAD_TOO_LARGE,
);
}
await fs.appendFile(
`${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`,
buffer,
);
const isLastChunk = chunk.index == chunk.total - 1;
if (isLastChunk) {
await fs.rename(
`${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`,
`${SHARE_DIRECTORY}/${shareId}/${file.id}`,
);
const fileSize = (
await fs.stat(`${SHARE_DIRECTORY}/${shareId}/${file.id}`)
).size;
await this.prisma.file.create({
data: {
id: file.id,
name: file.name,
size: fileSize.toString(),
share: { connect: { id: shareId } },
},
});
}
return file;
const storageService = this.getStorageService();
return storageService.create(data, chunk, file, shareId);
}
async get(shareId: string, fileId: string) {
const fileMetaData = await this.prisma.file.findUnique({
where: { id: fileId },
async get(shareId: string, fileId: string): Promise<File> {
const share = await this.prisma.share.findFirst({
where: { id: shareId },
});
if (!fileMetaData) throw new NotFoundException("File not found");
const file = createReadStream(`${SHARE_DIRECTORY}/${shareId}/${fileId}`);
return {
metaData: {
mimeType: mime.contentType(fileMetaData.name.split(".").pop()),
...fileMetaData,
size: fileMetaData.size,
},
file,
};
const storageService = this.getStorageService(share.storageProvider);
return storageService.get(shareId, fileId);
}
async remove(shareId: string, fileId: string) {
const fileMetaData = await this.prisma.file.findUnique({
where: { id: fileId },
});
if (!fileMetaData) throw new NotFoundException("File not found");
await fs.unlink(`${SHARE_DIRECTORY}/${shareId}/${fileId}`);
await this.prisma.file.delete({ where: { id: fileId } });
const storageService = this.getStorageService();
return storageService.remove(shareId, fileId);
}
async deleteAllFiles(shareId: string) {
await fs.rm(`${SHARE_DIRECTORY}/${shareId}`, {
recursive: true,
force: true,
});
const storageService = this.getStorageService();
return storageService.deleteAllFiles(shareId);
}
getZip(shareId: string) {
return createReadStream(`${SHARE_DIRECTORY}/${shareId}/archive.zip`);
const storageService = this.getStorageService();
return this.streamToUint8Array(storageService.getZip(shareId) as Readable);
}
private async streamToUint8Array(stream: Readable): Promise<Uint8Array> {
const chunks: Buffer[] = [];
return new Promise((resolve, reject) => {
stream.on("data", (chunk) => chunks.push(Buffer.from(chunk)));
stream.on("end", () => resolve(new Uint8Array(Buffer.concat(chunks))));
stream.on("error", reject);
});
}
}
export interface File {
metaData: {
id: string;
size: string;
createdAt: Date;
mimeType: string | false;
name: string;
shareId: string;
};
file: Readable;
}

View File

@@ -0,0 +1,161 @@
import {
BadRequestException,
HttpException,
HttpStatus,
Injectable,
InternalServerErrorException,
NotFoundException,
} from "@nestjs/common";
import * as crypto from "crypto";
import { createReadStream } from "fs";
import * as fs from "fs/promises";
import * as mime from "mime-types";
import { ConfigService } from "src/config/config.service";
import { PrismaService } from "src/prisma/prisma.service";
import { validate as isValidUUID } from "uuid";
import { SHARE_DIRECTORY } from "../constants";
@Injectable()
export class LocalFileService {
  constructor(
    private prisma: PrismaService,
    private config: ConfigService,
  ) {}

  /**
   * Appends one base64-encoded chunk to the file's `.tmp-chunk` on disk.
   * On the last chunk the temp file is renamed into place and the file's
   * metadata is persisted.
   *
   * @param data base64-encoded chunk payload
   * @param chunk zero-based chunk index and total chunk count
   * @param file optional client-supplied UUID and the original file name
   * @param shareId id of the share the file belongs to
   * @returns the file descriptor, with `id` filled in if it was generated
   * @throws BadRequestException on a malformed id, a locked share, or an
   *         out-of-order chunk
   * @throws NotFoundException if the share does not exist
   * @throws HttpException(413) if the share size limit would be exceeded
   */
  async create(
    data: string,
    chunk: { index: number; total: number },
    file: { id?: string; name: string },
    shareId: string,
  ) {
    if (!file.id) {
      file.id = crypto.randomUUID();
    } else if (!isValidUUID(file.id)) {
      throw new BadRequestException("Invalid file ID format");
    }

    const share = await this.prisma.share.findUnique({
      where: { id: shareId },
      include: { files: true, reverseShare: true },
    });

    // fix: previously a missing share caused a TypeError on the next line
    // instead of a proper 404
    if (!share) throw new NotFoundException("Share not found");

    if (share.uploadLocked)
      throw new BadRequestException("Share is already completed");

    // Size of what has been written for this file so far; 0 if the temp
    // chunk file doesn't exist yet (first chunk).
    let diskFileSize: number;
    try {
      diskFileSize = (
        await fs.stat(`${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`)
      ).size;
    } catch {
      diskFileSize = 0;
    }

    // If the sent chunk index and the expected chunk index doesn't match throw an error
    const chunkSize = this.config.get("share.chunkSize");
    const expectedChunkIndex = Math.ceil(diskFileSize / chunkSize);
    if (expectedChunkIndex != chunk.index)
      throw new BadRequestException({
        message: "Unexpected chunk received",
        error: "unexpected_chunk_index",
        expectedChunkIndex,
      });

    const buffer = Buffer.from(data, "base64");

    // Check if there is enough space on the server
    const space = await fs.statfs(SHARE_DIRECTORY);
    const availableSpace = space.bavail * space.bsize;
    if (availableSpace < buffer.byteLength) {
      throw new InternalServerErrorException("Not enough space on the server");
    }

    // Check if share size limit is exceeded
    const fileSizeSum = share.files.reduce(
      (n, { size }) => n + parseInt(size),
      0,
    );
    const shareSizeSum = fileSizeSum + diskFileSize + buffer.byteLength;
    if (
      shareSizeSum > this.config.get("share.maxSize") ||
      (share.reverseShare?.maxShareSize &&
        shareSizeSum > parseInt(share.reverseShare.maxShareSize))
    ) {
      throw new HttpException(
        "Max share size exceeded",
        HttpStatus.PAYLOAD_TOO_LARGE,
      );
    }

    await fs.appendFile(
      `${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`,
      buffer,
    );

    const isLastChunk = chunk.index == chunk.total - 1;
    if (isLastChunk) {
      // Promote the temp chunk file to its final name and record metadata.
      await fs.rename(
        `${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`,
        `${SHARE_DIRECTORY}/${shareId}/${file.id}`,
      );
      const fileSize = (
        await fs.stat(`${SHARE_DIRECTORY}/${shareId}/${file.id}`)
      ).size;
      await this.prisma.file.create({
        data: {
          id: file.id,
          name: file.name,
          size: fileSize.toString(),
          share: { connect: { id: shareId } },
        },
      });
    }

    return file;
  }

  /**
   * Opens a read stream for a stored file and returns it with its metadata.
   * @throws NotFoundException if the file record does not exist
   */
  async get(shareId: string, fileId: string) {
    const fileMetaData = await this.prisma.file.findUnique({
      where: { id: fileId },
    });

    if (!fileMetaData) throw new NotFoundException("File not found");

    const file = createReadStream(`${SHARE_DIRECTORY}/${shareId}/${fileId}`);

    return {
      metaData: {
        // mime type is derived from the file-name extension
        mimeType: mime.contentType(fileMetaData.name.split(".").pop()),
        ...fileMetaData,
        size: fileMetaData.size,
      },
      file,
    };
  }

  /**
   * Deletes a single file from disk and removes its database record.
   * @throws NotFoundException if the file record does not exist
   */
  async remove(shareId: string, fileId: string) {
    const fileMetaData = await this.prisma.file.findUnique({
      where: { id: fileId },
    });
    if (!fileMetaData) throw new NotFoundException("File not found");

    await fs.unlink(`${SHARE_DIRECTORY}/${shareId}/${fileId}`);
    await this.prisma.file.delete({ where: { id: fileId } });
  }

  /**
   * Removes the whole share directory; `force` makes this a no-op when the
   * directory doesn't exist.
   */
  async deleteAllFiles(shareId: string) {
    await fs.rm(`${SHARE_DIRECTORY}/${shareId}`, {
      recursive: true,
      force: true,
    });
  }

  /** Returns a read stream of the pre-built ZIP archive for the share. */
  getZip(shareId: string) {
    return createReadStream(`${SHARE_DIRECTORY}/${shareId}/archive.zip`);
  }
}

View File

@@ -0,0 +1,299 @@
import {
BadRequestException,
Injectable,
InternalServerErrorException,
NotFoundException,
Logger,
} from "@nestjs/common";
import {
AbortMultipartUploadCommand,
CompleteMultipartUploadCommand,
CreateMultipartUploadCommand,
DeleteObjectCommand,
DeleteObjectsCommand,
GetObjectCommand,
HeadObjectCommand,
ListObjectsV2Command,
S3Client,
UploadPartCommand,
UploadPartCommandOutput,
} from "@aws-sdk/client-s3";
import { PrismaService } from "src/prisma/prisma.service";
import { ConfigService } from "src/config/config.service";
import * as crypto from "crypto";
import * as mime from "mime-types";
import { File } from "./file.service";
import { Readable } from "stream";
import { validate as isValidUUID } from "uuid";
@Injectable()
export class S3FileService {
  private readonly logger = new Logger(S3FileService.name);

  // In-memory state for ongoing multipart uploads, keyed by file id.
  // NOTE(review): this assumes all chunks of a file reach the same process —
  // confirm before running multiple backend instances.
  private multipartUploads: Record<
    string,
    {
      uploadId: string;
      parts: Array<{ ETag: string | undefined; PartNumber: number }>;
    }
  > = {};

  constructor(
    private prisma: PrismaService,
    private config: ConfigService,
  ) {}

  /**
   * Uploads one base64-encoded chunk via the S3 multipart upload API.
   * The first chunk initializes the multipart upload, the last one
   * completes it and persists the file metadata; any failure aborts the
   * multipart upload so no orphaned parts are left on the bucket.
   */
  async create(
    data: string,
    chunk: { index: number; total: number },
    file: { id?: string; name: string },
    shareId: string,
  ) {
    if (!file.id) {
      file.id = crypto.randomUUID();
    } else if (!isValidUUID(file.id)) {
      throw new BadRequestException("Invalid file ID format");
    }

    const buffer = Buffer.from(data, "base64");
    const key = `${this.getS3Path()}${shareId}/${file.name}`;
    const bucketName = this.config.get("s3.bucketName");
    const s3Instance = this.getS3Instance();

    try {
      // Initialize multipart upload if it's the first chunk
      if (chunk.index === 0) {
        const multipartInitResponse = await s3Instance.send(
          new CreateMultipartUploadCommand({
            Bucket: bucketName,
            Key: key,
          }),
        );

        const uploadId = multipartInitResponse.UploadId;
        if (!uploadId) {
          throw new Error("Failed to initialize multipart upload.");
        }

        // Store the uploadId and parts list in memory
        this.multipartUploads[file.id] = {
          uploadId,
          parts: [],
        };
      }

      // Get the ongoing multipart upload
      const multipartUpload = this.multipartUploads[file.id];
      if (!multipartUpload) {
        throw new InternalServerErrorException(
          "Multipart upload session not found.",
        );
      }

      const uploadId = multipartUpload.uploadId;

      // Upload the current chunk; S3 part numbers start from 1
      const partNumber = chunk.index + 1;
      const uploadPartResponse: UploadPartCommandOutput = await s3Instance.send(
        new UploadPartCommand({
          Bucket: bucketName,
          Key: key,
          PartNumber: partNumber,
          UploadId: uploadId,
          Body: buffer,
        }),
      );

      // Store the ETag and PartNumber for later completion
      multipartUpload.parts.push({
        ETag: uploadPartResponse.ETag,
        PartNumber: partNumber,
      });

      // Complete the multipart upload if it's the last chunk
      if (chunk.index === chunk.total - 1) {
        await s3Instance.send(
          new CompleteMultipartUploadCommand({
            Bucket: bucketName,
            Key: key,
            UploadId: uploadId,
            MultipartUpload: {
              Parts: multipartUpload.parts,
            },
          }),
        );

        // Remove the completed upload from memory
        delete this.multipartUploads[file.id];
      }
    } catch (error) {
      // Abort the multipart upload if it fails
      const multipartUpload = this.multipartUploads[file.id];
      if (multipartUpload) {
        try {
          await s3Instance.send(
            new AbortMultipartUploadCommand({
              Bucket: bucketName,
              Key: key,
              UploadId: multipartUpload.uploadId,
            }),
          );
        } catch (abortError) {
          // fix: use the class logger instead of a leftover console.error
          this.logger.error("Error aborting multipart upload:", abortError);
        }
        delete this.multipartUploads[file.id];
      }
      this.logger.error(error);
      throw new Error("Multipart upload failed. The upload has been aborted.");
    }

    const isLastChunk = chunk.index == chunk.total - 1;
    if (isLastChunk) {
      const fileSize: number = await this.getFileSize(shareId, file.name);
      await this.prisma.file.create({
        data: {
          id: file.id,
          name: file.name,
          size: fileSize.toString(),
          share: { connect: { id: shareId } },
        },
      });
    }

    return file;
  }

  /**
   * Streams a file from S3 and returns it with its metadata.
   * @throws NotFoundException if the file record does not exist
   */
  async get(shareId: string, fileId: string): Promise<File> {
    const fileMetaData = await this.prisma.file.findUnique({
      where: { id: fileId },
    });
    // fix: previously a missing record caused a TypeError on `.name`;
    // throw the same 404 the local service does
    if (!fileMetaData) throw new NotFoundException("File not found");
    const fileName = fileMetaData.name;

    const s3Instance = this.getS3Instance();
    const key = `${this.getS3Path()}${shareId}/${fileName}`;

    const response = await s3Instance.send(
      new GetObjectCommand({
        Bucket: this.config.get("s3.bucketName"),
        Key: key,
      }),
    );

    return {
      metaData: {
        id: fileId,
        size: response.ContentLength?.toString() || "0",
        name: fileName,
        shareId: shareId,
        createdAt: response.LastModified || new Date(),
        mimeType:
          // fix: derive the mime type from the file *name*; the file id is a
          // UUID without an extension, so the old lookup always fell through
          mime.contentType(fileName.split(".").pop()) ||
          "application/octet-stream",
      },
      file: response.Body as Readable,
    } as File;
  }

  /**
   * Deletes one object from the bucket and removes its database record.
   * @throws NotFoundException if the file record does not exist
   */
  async remove(shareId: string, fileId: string) {
    const fileMetaData = await this.prisma.file.findUnique({
      where: { id: fileId },
    });
    if (!fileMetaData) throw new NotFoundException("File not found");

    const key = `${this.getS3Path()}${shareId}/${fileMetaData.name}`;
    const s3Instance = this.getS3Instance();

    try {
      await s3Instance.send(
        new DeleteObjectCommand({
          Bucket: this.config.get("s3.bucketName"),
          Key: key,
        }),
      );
    } catch (error) {
      // fix: log the underlying S3 error before rethrowing a generic one
      this.logger.error(error);
      throw new Error("Could not delete file from S3");
    }

    await this.prisma.file.delete({ where: { id: fileId } });
  }

  /**
   * Deletes every object stored under the share's prefix. A share with no
   * objects is treated as already deleted (no error), matching the local
   * file service's `force: true` semantics.
   */
  async deleteAllFiles(shareId: string) {
    const prefix = `${this.getS3Path()}${shareId}/`;
    const s3Instance = this.getS3Instance();

    try {
      // List all objects under the given prefix
      const listResponse = await s3Instance.send(
        new ListObjectsV2Command({
          Bucket: this.config.get("s3.bucketName"),
          Prefix: prefix,
        }),
      );

      // fix: an empty share used to throw here; deleting nothing is fine
      if (!listResponse.Contents || listResponse.Contents.length === 0) {
        return;
      }

      // Extract the keys of the files to be deleted
      const objectsToDelete = listResponse.Contents.map((file) => ({
        Key: file.Key!,
      }));

      // Delete all files in a single request (up to 1000 objects at once)
      // NOTE(review): shares with >1000 objects need ContinuationToken
      // paging — confirm expected share sizes
      await s3Instance.send(
        new DeleteObjectsCommand({
          Bucket: this.config.get("s3.bucketName"),
          Delete: {
            Objects: objectsToDelete,
          },
        }),
      );
    } catch (error) {
      this.logger.error(error);
      throw new Error("Could not delete all files from S3");
    }
  }

  /**
   * Returns the object's size in bytes via a HEAD request.
   */
  async getFileSize(shareId: string, fileName: string): Promise<number> {
    const key = `${this.getS3Path()}${shareId}/${fileName}`;
    const s3Instance = this.getS3Instance();

    try {
      // Get metadata of the file using HeadObjectCommand
      const headObjectResponse = await s3Instance.send(
        new HeadObjectCommand({
          Bucket: this.config.get("s3.bucketName"),
          Key: key,
        }),
      );

      // Return ContentLength which is the file size in bytes
      return headObjectResponse.ContentLength ?? 0;
    } catch (error) {
      this.logger.error(error);
      throw new Error("Could not retrieve file size");
    }
  }

  /**
   * Builds a fresh client from the current config so admin-panel changes
   * take effect without a restart.
   */
  getS3Instance(): S3Client {
    return new S3Client({
      endpoint: this.config.get("s3.endpoint"),
      region: this.config.get("s3.region"),
      credentials: {
        accessKeyId: this.config.get("s3.key"),
        secretAccessKey: this.config.get("s3.secret"),
      },
      forcePathStyle: true,
    });
  }

  /** ZIP archives are intentionally unsupported for S3-backed shares. */
  getZip() {
    throw new BadRequestException(
      "ZIP download is not supported with S3 storage",
    );
  }

  /** Returns the configured bucket path with a trailing slash, or "". */
  getS3Path(): string {
    const configS3Path = this.config.get("s3.bucketPath");
    return configS3Path ? `${configS3Path}/` : "";
  }
}

View File

@@ -24,6 +24,7 @@ import { CreateShareDTO } from "./dto/createShare.dto";
export class ShareService {
constructor(
private prisma: PrismaService,
private configService: ConfigService,
private fileService: FileService,
private emailService: EmailService,
private config: ConfigService,
@@ -86,6 +87,7 @@ export class ShareService {
? share.recipients.map((email) => ({ email }))
: [],
},
storageProvider: this.configService.get("s3.enabled") ? "S3" : "LOCAL",
},
});
@@ -105,6 +107,8 @@ export class ShareService {
}
async createZip(shareId: string) {
if (this.config.get("s3.enabled")) return;
const path = `${SHARE_DIRECTORY}/${shareId}`;
const files = await this.prisma.file.findMany({ where: { shareId } });

32
docs/docs/setup/s3.md Normal file
View File

@@ -0,0 +1,32 @@
---
id: s3
---
# S3
You can use your preferred S3 provider, such as AWS, DigitalOcean, Exoscale or Infomaniak. However, if you don't
want to store your files in an S3 bucket, you don't have to. Note that this feature is `disabled` by default.
## Configuration
You can configure your S3 provider and bucket by going to the configuration page in your admin dashboard `/admin/config/s3`.
| Key | Description | Value |
|:-----------|:-------------------------------------------------------------------------------------------------------------------------------------|:----------------------------------------------|
| enabled | This property enables the storage location on your configured S3 bucket. | `true` |
| endpoint   | This property is the endpoint URL of your S3 provider.                                                                                 | `https://sos-ch-dk-2.exo.io`                   |
| region     | This property is the region where the bucket is located.                                                                               | `ch-dk-2`                                      |
| bucketName | This property is the name of your S3 bucket. | `my-bucket` |
| bucketPath | This property defines the folder where you want to store your files which are uploaded. Hint: Don't put a slash in the start or end. | `my/custom/path` (or leave it empty for root) |
| key        | This is the access key needed to access your bucket.                                                                                   | `key-asdf`                                     |
| secret     | This is the secret needed to access your bucket.                                                                                       | `secret-asdf`                                  |
Don't forget to save the configuration. :)
## ClamAV
Note that ClamAV scans are currently not available if you store your files in an S3 bucket.
## ZIP
Creating ZIP archives is not currently supported if you store your files in an S3 bucket.

View File

@@ -36,6 +36,10 @@ const sidebars: SidebarsConfig = {
type: "doc",
id: "setup/oauth2login",
},
{
type: "doc",
id: "setup/s3",
},
{
type: "doc",
id: "setup/upgrading",

View File

@@ -16,7 +16,7 @@ import {
TbMail,
TbShare,
TbSocial,
TbSquare,
TbBucket,
TbBinaryTree,
TbSettings,
} from "react-icons/tb";
@@ -29,6 +29,7 @@ const categories = [
{ name: "SMTP", icon: <TbAt /> },
{ name: "OAuth", icon: <TbSocial /> },
{ name: "LDAP", icon: <TbBinaryTree /> },
{ name: "S3", icon: <TbBucket /> },
];
const useStyles = createStyles((theme) => ({

View File

@@ -436,6 +436,21 @@ export default {
"admin.config.ldap.field-name-member-of.description": "LDAP-Attributname für die Gruppen, in denen ein Benutzer Mitglied ist. Dies wird bei der Überprüfung der Admin-Gruppe verwendet.",
"admin.config.ldap.field-name-email": "Attributname für die E-Mail-Adresse des Benutzers",
"admin.config.ldap.field-name-email.description": "LDAP-Attributname für die E-Mail-Adresse eines Benutzers.",
"admin.config.category.s3": "S3",
"admin.config.s3.enabled": "Aktiviert",
"admin.config.s3.enabled.description": "Ob S3 verwendet werden soll, um die freigegebenen Dateien anstelle des lokalen Dateisystems zu speichern.",
"admin.config.s3.endpoint": "Endpunkt",
"admin.config.s3.endpoint.description": "Die URL des S3-Buckets.",
"admin.config.s3.region": "Region",
"admin.config.s3.region.description": "Die Region des S3-Buckets.",
"admin.config.s3.bucket-name": "Bucket-Name",
"admin.config.s3.bucket-name.description": "Der Name des S3-Buckets.",
"admin.config.s3.bucket-path": "Pfad",
"admin.config.s3.bucket-path.description": "Der Standardpfad, der zum Speichern der Dateien im S3-Bucket verwendet werden soll.",
"admin.config.s3.key": "Schlüssel",
"admin.config.s3.secret": "Geheimnis",
"admin.config.s3.key.description": "Der Schlüssel, der den Zugriff auf den S3-Bucket ermöglicht.",
"admin.config.s3.secret.description": "Das Geheimnis, das den Zugriff auf den S3-Bucket ermöglicht.",
"admin.config.notify.success": "Configuration updated successfully.",
"admin.config.notify.logo-success": "Logo updated successfully. It may take a few minutes to update on the website.",
"admin.config.notify.no-changes": "No changes to save.",

View File

@@ -626,6 +626,22 @@ export default {
"Logo updated successfully. It may take a few minutes to update on the website.",
"admin.config.notify.no-changes": "No changes to save.",
"admin.config.category.s3": "S3",
"admin.config.s3.enabled": "Enabled",
"admin.config.s3.enabled.description": "Whether S3 should be used to store the shared files instead of the local file system.",
"admin.config.s3.endpoint": "Endpoint",
"admin.config.s3.endpoint.description": "The URL of the S3 bucket.",
"admin.config.s3.region": "Region",
"admin.config.s3.region.description": "The region of the S3 bucket.",
"admin.config.s3.bucket-name": "Bucket name",
"admin.config.s3.bucket-name.description": "The name of the S3 bucket.",
"admin.config.s3.bucket-path": "Path",
"admin.config.s3.bucket-path.description": "The default path which should be used to store the files in the S3 bucket.",
"admin.config.s3.key": "Key",
"admin.config.s3.key.description": "The key which allows you to access the S3 bucket.",
"admin.config.s3.secret": "Secret",
"admin.config.s3.secret.description": "The secret which allows you to access the S3 bucket.",
// 404
"404.description": "Oops this page doesn't exist.",
"404.button.home": "Bring me back home",