Mirror of https://github.com/dutchcoders/transfer.sh.git (synced 2026-02-03 14:13:26 +00:00)

Compare commits: ISSUE-398...accept-ran (54 commits)
| SHA1 |
|---|
| 2a29083960 |
| 158e5487ee |
| 806286ab35 |
| d49aee59ba |
| e08225e5f8 |
| 8597f1d9eb |
| 9e8ce19cd1 |
| 2bda0a1e55 |
| d9369e8b39 |
| 193f944829 |
| ebc4097959 |
| ca798ff6f6 |
| 31520b1afd |
| 3588502c50 |
| 31ad4e01e1 |
| 343427d3b9 |
| 64c7759126 |
| 21812d3efc |
| 35e794220b |
| f06aef1c3e |
| eeff2c88be |
| 92324798d5 |
| b30b296ac8 |
| bb0891cd7d |
| 9c31ceb2c5 |
| 597554a59e |
| 368431fb6b |
| e3bb49993c |
| cff0a88bf3 |
| 492731e31f |
| f062af9fc5 |
| 2fbd19365c |
| 5932a194b2 |
| acd6fb084f |
| 6f49951bc0 |
| d2a0e77814 |
| 014b95ff07 |
| 0eec27586d |
| c7164856d2 |
| 96723b2685 |
| 4a56bad05f |
| fa74be02d2 |
| 2959fc2992 |
| 694b3ce246 |
| 92948fab23 |
| e5455d9713 |
| 6ab75b30e5 |
| 6b8eff8322 |
| 2da62eb235 |
| 156daa5a24 |
| e97fdcb293 |
| 9fe5f9a5c9 |
| 788dfa203f |
| 440ccf3a57 |
33 .github/build/friendly-filenames.json (vendored, new file)

@@ -0,0 +1,33 @@
{
    "android-arm64": { "friendlyName": "android-arm64-v8a" },
    "darwin-amd64": { "friendlyName": "darwin-amd64" },
    "darwin-arm64": { "friendlyName": "darwin-arm64" },
    "dragonfly-amd64": { "friendlyName": "dragonfly-amd64" },
    "freebsd-386": { "friendlyName": "freebsd-386" },
    "freebsd-amd64": { "friendlyName": "freebsd-amd64" },
    "freebsd-arm64": { "friendlyName": "freebsd-arm64-v8a" },
    "freebsd-arm7": { "friendlyName": "freebsd-arm32-v7a" },
    "linux-386": { "friendlyName": "linux-386" },
    "linux-amd64": { "friendlyName": "linux-amd64" },
    "linux-arm5": { "friendlyName": "linux-arm32-v5" },
    "linux-arm64": { "friendlyName": "linux-arm64-v8a" },
    "linux-arm6": { "friendlyName": "linux-arm32-v6" },
    "linux-arm7": { "friendlyName": "linux-armv7" },
    "linux-mips64le": { "friendlyName": "linux-mips64le" },
    "linux-mips64": { "friendlyName": "linux-mips64" },
    "linux-mipslesoftfloat": { "friendlyName": "linux-mips32le-softfloat" },
    "linux-mipsle": { "friendlyName": "linux-mips32le" },
    "linux-mipssoftfloat": { "friendlyName": "linux-mips32-softfloat" },
    "linux-mips": { "friendlyName": "linux-mips32" },
    "linux-ppc64le": { "friendlyName": "linux-ppc64le" },
    "linux-ppc64": { "friendlyName": "linux-ppc64" },
    "linux-riscv64": { "friendlyName": "linux-riscv64" },
    "linux-s390x": { "friendlyName": "linux-s390x" },
    "openbsd-386": { "friendlyName": "openbsd-386" },
    "openbsd-amd64": { "friendlyName": "openbsd-amd64" },
    "openbsd-arm64": { "friendlyName": "openbsd-arm64-v8a" },
    "openbsd-arm7": { "friendlyName": "openbsd-arm32-v7a" },
    "windows-386": { "friendlyName": "windows-386" },
    "windows-amd64": { "friendlyName": "windows-amd64" },
    "windows-arm7": { "friendlyName": "windows-arm32-v7a" }
}
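The release workflow later in this diff resolves these friendly names with `jq`. A minimal local sketch of that lookup, assuming `jq` is installed and the command runs from the repository root; the GOOS/GOARCH/GOARM values are illustrative:

```bash
# Hypothetical standalone run of the lookup performed in release.yml's
# "Show workflow information" step.
GOOS=linux GOARCH=arm GOARM=7
jq -r ".[\"$GOOS-$GOARCH$GOARM\"].friendlyName" < .github/build/friendly-filenames.json
# prints: linux-armv7
```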
15 .github/workflows/build-docker-images.yml (vendored)

@@ -4,9 +4,9 @@ on:
schedule:
- cron: '0 0 * * *' # everyday at midnight UTC
pull_request:
branches: master
branches: main
push:
branches: master
branches: main
tags:
- v*

@@ -34,9 +34,11 @@ jobs:
fi

TAGS="--tag ${DOCKER_IMAGE}:${VERSION}"
TAGS_NOROOT="--tag ${DOCKER_IMAGE}:${VERSION}-noroot"

if [ $VERSION = edge -o $VERSION = nightly ]; then
TAGS="$TAGS --tag ${DOCKER_IMAGE}:latest"
TAGS_NOROOT="$TAGS_NOROOT --tag ${DOCKER_IMAGE}:latest-noroot"
fi

echo ::set-output name=docker_image::${DOCKER_IMAGE}

@@ -46,6 +48,12 @@ jobs:
--build-arg BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \
--build-arg VCS_REF=${GITHUB_SHA::8} \
${TAGS} .
echo ::set-output name=buildx_args_noroot::--platform ${DOCKER_PLATFORMS} \
--build-arg VERSION=${VERSION} \
--build-arg BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \
--build-arg VCS_REF=${GITHUB_SHA::8} \
--build-arg RUNAS=noroot \
${TAGS_NOROOT} .
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1

@@ -64,6 +72,7 @@ jobs:
name: Docker Buildx (build)
run: |
docker buildx build --no-cache --pull --output "type=image,push=false" ${{ steps.prepare.outputs.buildx_args }}
docker buildx build --output "type=image,push=false" ${{ steps.prepare.outputs.buildx_args_noroot }}
-
name: Docker Login
if: success() && github.event_name != 'pull_request'

@@ -77,11 +86,13 @@ jobs:
if: success() && github.event_name != 'pull_request'
run: |
docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }}
docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args_noroot }}
-
name: Docker Check Manifest
if: always() && github.event_name != 'pull_request'
run: |
docker run --rm mplatform/mquery ${{ steps.prepare.outputs.docker_image }}:${{ steps.prepare.outputs.version }}
docker run --rm mplatform/mquery ${{ steps.prepare.outputs.docker_image }}:${{ steps.prepare.outputs.version }}-noroot
-
name: Clear
if: always() && github.event_name != 'pull_request'
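The `prepare` step above assembles the buildx tag arguments from the computed version. A standalone sketch of that tag logic, with `DOCKER_IMAGE` and `VERSION` hard-coded as illustrative assumptions (the workflow derives them from the event context, and uses the POSIX `[ ... -o ... ]` form where this sketch uses `||`):

```bash
DOCKER_IMAGE=dutchcoders/transfer.sh  # assumed image name
VERSION=edge                          # assumed version label

TAGS="--tag ${DOCKER_IMAGE}:${VERSION}"
TAGS_NOROOT="--tag ${DOCKER_IMAGE}:${VERSION}-noroot"

# edge/nightly builds additionally receive the latest / latest-noroot tags
if [ "$VERSION" = edge ] || [ "$VERSION" = nightly ]; then
  TAGS="$TAGS --tag ${DOCKER_IMAGE}:latest"
  TAGS_NOROOT="$TAGS_NOROOT --tag ${DOCKER_IMAGE}:latest-noroot"
fi

echo "$TAGS"
echo "$TAGS_NOROOT"
```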
202 .github/workflows/release.yml (vendored)

@@ -1,59 +1,171 @@
name: release
name: Build and Release

on:
push:
tags:
- v*
workflow_dispatch:
release:
types: [published]
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
GOOS: [ darwin, linux, windows ]
GOARCH: [ amd64 ]
# Include amd64 on all platforms.
goos: [windows, freebsd, openbsd, linux, dragonfly, darwin]
goarch: [amd64, 386]
exclude:
# Exclude i386 on darwin and dragonfly.
- goarch: 386
goos: dragonfly
- goarch: 386
goos: darwin
include:
- GOOS: darwin
GOARCH: amd64
suffix: darwin-amd64
- GOOS: windows
GOARCH: amd64
suffix: windows-amd64.exe
- GOOS: linux
GOARCH: amd64
suffix: linux-amd64
- GOOS: linux
GOARCH: arm
suffix: linux-armv7
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Build ${{ matrix.suffix }} (GOOS=${{ matrix.GOOS }}, GOARCH=${{ matrix.GOARCH }})
env:
GOOS: ${{ matrix.GOOS }}
GOARCH: ${{ matrix.GOARCH }}
CGO_ENABLED: 0
run: |
go version
go build -tags netgo -ldflags "-X github.com/dutchcoders/transfer.sh/cmd.Version=${GITHUB_REF##*/} -a -s -w -extldflags '-static'" -o ./artifacts/transfersh-${GITHUB_REF##*/}-${{ matrix.suffix }}
- uses: actions/upload-artifact@v2
name: Upload artifacts
with:
name: artifacts
path: ./artifacts
# BEIGIN MacOS ARM64
- goos: darwin
goarch: arm64
# END MacOS ARM64
# BEGIN Linux ARM 5 6 7
- goos: linux
goarch: arm
goarm: 7
- goos: linux
goarch: arm
goarm: 6
- goos: linux
goarch: arm
goarm: 5
# END Linux ARM 5 6 7
# BEGIN Android ARM 8
- goos: android
goarch: arm64
# END Android ARM 8
# Windows ARM 7
- goos: windows
goarch: arm
goarm: 7
# BEGIN Other architectures
# BEGIN riscv64 & ARM64
- goos: linux
goarch: arm64
- goos: linux
goarch: riscv64
# END riscv64 & ARM64
# BEGIN MIPS
- goos: linux
goarch: mips64
- goos: linux
goarch: mips64le
- goos: linux
goarch: mipsle
- goos: linux
goarch: mips
# END MIPS
# BEGIN PPC
- goos: linux
goarch: ppc64
- goos: linux
goarch: ppc64le
# END PPC
# BEGIN FreeBSD ARM
- goos: freebsd
goarch: arm64
- goos: freebsd
goarch: arm
goarm: 7
# END FreeBSD ARM
# BEGIN S390X
- goos: linux
goarch: s390x
# END S390X
# END Other architectures
# BEGIN OPENBSD ARM
- goos: openbsd
goarch: arm64
- goos: openbsd
goarch: arm
goarm: 7
# END OPENBSD ARM
fail-fast: false

release:
runs-on: ubuntu-latest
needs: [ build ]
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
GOARM: ${{ matrix.goarm }}
CGO_ENABLED: 0
steps:
- uses: actions/download-artifact@v2
name: Download artifacts
- name: Checkout codebase
uses: actions/checkout@v2

- name: Show workflow information
id: get_filename
run: |
export _NAME=$(jq ".[\"$GOOS-$GOARCH$GOARM$GOMIPS\"].friendlyName" -r < .github/build/friendly-filenames.json)
echo "GOOS: $GOOS, GOARCH: $GOARCH, GOARM: $GOARM, GOMIPS: $GOMIPS, RELEASE_NAME: $_NAME"
echo "::set-output name=ASSET_NAME::$_NAME"
echo "::set-output name=GIT_TAG::${GITHUB_REF##*/}"
echo "ASSET_NAME=$_NAME" >> $GITHUB_ENV

- name: Set up Go
uses: actions/setup-go@v2
with:
name: artifacts
path: ./artifacts
- name: Publish artifacts
go-version: ^1.18

- name: Get project dependencies
run: go mod download

- name: Build Transfersh
run: |
mkdir -p build_assets
go build -tags netgo -ldflags "-X github.com/dutchcoders/transfer.sh/cmd.Version=${GITHUB_REF##*/} -a -s -w -extldflags '-static'" -o build_assets/transfersh-${GITHUB_REF##*/}-${ASSET_NAME}

- name: Build Mips softfloat Transfersh
if: matrix.goarch == 'mips' || matrix.goarch == 'mipsle'
run: |
GOMIPS=softfloat go build -tags netgo -ldflags "-X github.com/dutchcoders/transfer.sh/cmd.Version=${GITHUB_REF##*/} -a -s -w -extldflags '-static'" -o build_assets/transfersh-softfloat-${GITHUB_REF##*/}-${ASSET_NAME}

- name: Rename Windows Transfersh
if: matrix.goos == 'windows'
run: |
cd ./build_assets || exit 1
mv transfersh-${GITHUB_REF##*/}-${ASSET_NAME} transfersh-${GITHUB_REF##*/}-${ASSET_NAME}.exe

- name: Prepare to release
run: |
cp ${GITHUB_WORKSPACE}/README.md ./build_assets/README.md
cp ${GITHUB_WORKSPACE}/LICENSE ./build_assets/LICENSE

- name: Create Gzip archive
shell: bash
run: |
pushd build_assets || exit 1
touch -mt $(date +%Y01010000) *
tar zcvf transfersh-${GITHUB_REF##*/}-${ASSET_NAME}.tar.gz *
mv transfersh-${GITHUB_REF##*/}-${ASSET_NAME}.tar.gz ../
FILE=`find . -name "transfersh-${GITHUB_REF##*/}-${ASSET_NAME}*"`
DGST=$FILE.sha256sum
echo `sha256sum $FILE` > $DGST
popd || exit 1
FILE=./transfersh-${GITHUB_REF##*/}-${ASSET_NAME}.tar.gz
DGST=$FILE.sha256sum
echo `sha256sum $FILE` > $DGST

- name: Change the name
run: |
mv build_assets transfersh-${GITHUB_REF##*/}-${ASSET_NAME}

- name: Upload files to Artifacts
uses: actions/upload-artifact@v2
with:
name: transfersh-${{ steps.get_filename.outputs.GIT_TAG }}-${{ steps.get_filename.outputs.ASSET_NAME }}
path: |
./transfersh-${{ steps.get_filename.outputs.GIT_TAG }}-${{ steps.get_filename.outputs.ASSET_NAME }}/*

- name: Upload binaries to release
uses: softprops/action-gh-release@v1
if: github.event_name == 'release'
with:
files: './artifacts/*'
files: |
./transfersh-${{ steps.get_filename.outputs.GIT_TAG }}-${{ steps.get_filename.outputs.ASSET_NAME }}.tar.gz*
./transfersh-${{ steps.get_filename.outputs.GIT_TAG }}-${{ steps.get_filename.outputs.ASSET_NAME }}/transfersh-${{ steps.get_filename.outputs.GIT_TAG }}-${{ steps.get_filename.outputs.ASSET_NAME }}*
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
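A rough local equivalent of a single matrix cell of the job above (linux/arm with GOARM=7, whose friendly name is linux-armv7 per the JSON table earlier). `GIT_TAG` stands in for `${GITHUB_REF##*/}` and its value is an illustrative assumption, as is the packaging layout:

```bash
GIT_TAG=v0.0.0        # illustrative; CI derives this from the pushed tag
ASSET_NAME=linux-armv7

mkdir -p build_assets
GOOS=linux GOARCH=arm GOARM=7 CGO_ENABLED=0 \
  go build -tags netgo \
  -ldflags "-X github.com/dutchcoders/transfer.sh/cmd.Version=${GIT_TAG} -a -s -w -extldflags '-static'" \
  -o build_assets/transfersh-${GIT_TAG}-${ASSET_NAME}

# package and checksum, mirroring the "Create Gzip archive" step
tar zcvf transfersh-${GIT_TAG}-${ASSET_NAME}.tar.gz -C build_assets .
sha256sum transfersh-${GIT_TAG}-${ASSET_NAME}.tar.gz \
  > transfersh-${GIT_TAG}-${ASSET_NAME}.tar.gz.sha256sum
```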
18 .github/workflows/test.yml (vendored)

@@ -13,10 +13,10 @@ jobs:
fail-fast: false
matrix:
go_version:
- 1.13.x
- 1.14.x
- 1.15.x
- 1.16.x
- 1.17.x
- 1.18.X
name: Test with ${{ matrix.go_version }}
steps:
- uses: actions/checkout@v2

@@ -28,3 +28,17 @@ jobs:
go version
go vet ./...
go test ./...
golangci:
name: Linting
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-go@v1
with:
go-version: 1.18
- name: golangci-lint
uses: golangci/golangci-lint-action@v2
with:
version: latest
skip-go-installation: true
args: "--config .golangci.yml"
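The same checks can be reproduced locally; golangci-lint has to be installed separately (the Makefile added later in this diff wraps the identical invocation):

```bash
go vet ./...
go test ./...
golangci-lint run --config .golangci.yml
```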
2 .gitignore (vendored)

@@ -20,3 +20,5 @@ transfersh-server/run.sh
.elasticbeanstalk/*
!.elasticbeanstalk/*.cfg.yml
!.elasticbeanstalk/*.global.yml

!.github/build/
20 .golangci.yml (new file)

@@ -0,0 +1,20 @@
run:
deadline: 10m
issues-exit-code: 1
tests: true

output:
format: colored-line-number
print-issued-lines: true
print-linter-name: true

linters:
disable:
- deadcode
- unused

issues:
max-issues-per-linter: 0
max-same-issues: 0
new: false
exclude-use-default: false
CODE_OF_CONDUCT.md

@@ -13,6 +13,7 @@ Examples of unacceptable behavior by participants include:
* Public or private harassment
* Publishing other's private information, such as physical or electronic addresses, without explicit permission
* Other unethical or unprofessional conduct
* Use of harsh language

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project. Project maintainers who do not follow or enforce the Code of Conduct may be permanently removed from the project team.
22 Dockerfile

@@ -1,5 +1,5 @@
# Default to Go 1.16
ARG GO_VERSION=1.16
# Default to Go 1.17
ARG GO_VERSION=1.17
FROM golang:${GO_VERSION}-alpine as build

# Necessary to run 'go get' and to compile the linked binary

@@ -14,12 +14,28 @@ ENV GO111MODULE=on
# build & install server
RUN CGO_ENABLED=0 go build -tags netgo -ldflags "-X github.com/dutchcoders/transfer.sh/cmd.Version=$(git describe --tags) -a -s -w -extldflags '-static'" -o /go/bin/transfersh

ARG PUID=5000 \
PGID=5000 \
RUNAS

RUN mkdir -p /tmp/useradd /tmp/empty && \
if [ ! -z "$RUNAS" ]; then \
echo "${RUNAS}:x:${PUID}:${PGID}::/nonexistent:/sbin/nologin" >> /tmp/useradd/passwd && \
echo "${RUNAS}:!:::::::" >> /tmp/useradd/shadow && \
echo "${RUNAS}:x:${PGID}:" >> /tmp/useradd/group && \
echo "${RUNAS}:!::" >> /tmp/useradd/groupshadow; else touch /tmp/useradd/unused; fi

FROM scratch AS final
LABEL maintainer="Andrea Spacca <andrea.spacca@gmail.com>"
ARG RUNAS

COPY --from=build /go/bin/transfersh /go/bin/transfersh
COPY --from=build /tmp/empty /tmp
COPY --from=build /tmp/useradd/* /etc/
COPY --from=build --chown=${RUNAS} /go/bin/transfersh /go/bin/transfersh
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt

USER ${RUNAS}

ENTRYPOINT ["/go/bin/transfersh", "--listener", ":8080"]

EXPOSE 8080
5 Makefile (new file)

@@ -0,0 +1,5 @@
.PHONY: lint

lint:
	golangci-lint run --out-format=github-actions --config .golangci.yml
223 README.md

@@ -1,4 +1,4 @@
# transfer.sh [](https://gitter.im/dutchcoders/transfer.sh?utm_source=badge&utm_medium=badge&utm_campaign=&utm_campaign=pr-badge&utm_content=badge) [](https://goreportcard.com/report/github.com/dutchcoders/transfer.sh) [](https://hub.docker.com/r/dutchcoders/transfer.sh/) [](https://github.com/dutchcoders/transfer.sh/actions/workflows/test.yml?query=branch%3Amaster)
# transfer.sh [](https://goreportcard.com/report/github.com/dutchcoders/transfer.sh) [](https://hub.docker.com/r/dutchcoders/transfer.sh/) [](https://github.com/dutchcoders/transfer.sh/actions/workflows/test.yml?query=branch%3Amain)

Easy and fast file sharing from the command-line. This code contains the server with everything you need to create your own instance.

@@ -12,20 +12,20 @@ The service at transfersh.com is of unknown origin and reported as cloud malware

### Upload:
```bash
$ curl --upload-file ./hello.txt https://transfer.sh/hello.txt
$ curl -v --upload-file ./hello.txt https://transfer.sh/hello.txt
```

### Encrypt & upload:
### Encrypt & Upload:
```bash
$ cat /tmp/hello.txt|gpg -ac -o-|curl -X PUT --upload-file "-" https://transfer.sh/test.txt
````

### Download & decrypt:
### Download & Decrypt:
```bash
$ curl https://transfer.sh/1lDau/test.txt|gpg -o- > /tmp/hello.txt
```

### Upload to virustotal:
### Upload to Virustotal:
```bash
$ curl -X PUT --upload-file nhgbhhj https://transfer.sh/test.txt/virustotal
```

@@ -51,10 +51,11 @@ $ curl --upload-file ./hello.txt https://transfer.sh/hello.txt -H "Max-Days: 1"

### X-Url-Delete

The URL used to request the deletion of a file. Returned as a response header.
The URL used to request the deletion of a file and returned as a response header.
```bash
curl -sD - --upload-file ./hello https://transfer.sh/hello.txt | grep 'X-Url-Delete'
X-Url-Delete: https://transfer.sh/hello.txt/BAYh0/hello.txt/PDw0NHPcqU
curl -sD - --upload-file ./hello.txt https://transfer.sh/hello.txt | grep -i -E 'transfer\.sh|x-url-delete'
x-url-delete: https://transfer.sh/hello.txt/BAYh0/hello.txt/PDw0NHPcqU
https://transfer.sh/hello.txt/BAYh0/hello.txt
```

## Examples

@@ -90,6 +91,7 @@ temp-path | path to temp folder | system temp | TEMP_PATH |
web-path | path to static web files (for development or custom front end) | | WEB_PATH |
proxy-path | path prefix when service is run behind a proxy | | PROXY_PATH |
proxy-port | port of the proxy when the service is run behind a proxy | | PROXY_PORT |
email-contact | email contact for the front end | | EMAIL_CONTACT |
ga-key | google analytics key for the front end | | GA_KEY |
provider | which storage provider to use | (s3, storj, gdrive or local) |
uservoice-key | user voice key for the front end | | USERVOICE_KEY |

@@ -110,6 +112,7 @@ lets-encrypt-hosts | hosts to use for lets encrypt certificates (comma seperated
log | path to log file| | LOG |
cors-domains | comma separated list of domains for CORS, setting it enable CORS | | CORS_DOMAINS |
clamav-host | host for clamav feature | | CLAMAV_HOST |
perform-clamav-prescan | prescan every upload through clamav feature (clamav-host must be a local clamd unix socket) | | PERFORM_CLAMAV_PRESCAN |
rate-limit | request per minute | | RATE_LIMIT |
max-upload-size | max upload size in kilobytes | | MAX_UPLOAD_SIZE |
purge-days | number of days after the uploads are purged automatically | | PURGE_DAYS |

@@ -138,12 +141,31 @@ $ go build -o transfersh main.go

## Docker

For easy deployment, we've created a Docker container.
For easy deployment, we've created an official Docker container. There are two variants, differing only by which user runs the process.

The default one will run as `root`:

```bash
docker run --publish 8080:8080 dutchcoders/transfer.sh:latest --provider local --basedir /tmp/
```

The one tagged with the suffix `-noroot` will use `5000` as both UID and GID:
```bash
docker run --publish 8080:8080 dutchcoders/transfer.sh:latest-noroot --provider local --basedir /tmp/
```

### Building the Container
You can also build the container yourself. This allows you to choose which UID/GID will be used, e.g. when using NFS mounts:
```bash
# Build arguments:
# * RUNAS: If empty, the container will run as root.
# Set this to anything to enable UID/GID selection.
# * PUID: UID of the process. Needs RUNAS != "". Defaults to 5000.
# * PGID: GID of the process. Needs RUNAS != "". Defaults to 5000.

docker build -t transfer.sh-noroot --build-arg RUNAS=doesntmatter --build-arg PUID=1337 --build-arg PGID=1338 .
```

## S3 Usage

For the usage with a AWS S3 Bucket, you just need to specify the following options:

@@ -161,23 +183,23 @@ To use a custom non-AWS S3 provider, you need to specify the endpoint as defined

## Storj Network Provider

To use the Storj Network as storage provider you need to specify the following flags:
To use the Storj Network as a storage provider you need to specify the following flags:
- provider `--provider storj`
- storj-access _(either via flag or environment variable STORJ_ACCESS)_
- storj-bucket _(either via flag or environment variable STORJ_BUCKET)_

### Creating Bucket and Scope

In preparation you need to create an access grant (or copy it from the uplink configuration) and a bucket.
You need to create an access grant (or copy it from the uplink configuration) and a bucket in preparation.

To get started, login to your account and go to the Access Grant Menu and start the Wizard on the upper right.
To get started, log in to your account and go to the Access Grant Menu and start the Wizard on the upper right.

Enter your access grant name of choice, hit *Next* and restrict it as necessary/preferred.
Aftwards continue either in CLI or within the Browser. You'll be asked for a Passphrase used as Encryption Key.
**Make sure to save it in a safe place, without it you will lose the ability to decrypt your files!**
Afterwards continue either in CLI or within the Browser. Next, you'll be asked for a Passphrase used as Encryption Key.
**Make sure to save it in a safe place. Without it, you will lose the ability to decrypt your files!**

Afterwards you can copy the access grant and then start the startup of the transfer.sh endpoint.
For enhanced security its recommended to provide both the access grant and the bucket name as ENV Variables.
Afterwards, you can copy the access grant and then start the startup of the transfer.sh endpoint.
It is recommended to provide both the access grant and the bucket name as ENV Variables for enhanced security.

Example:
```

@@ -196,13 +218,176 @@ For the usage with Google drive, you need to specify the following options:

### Creating Gdrive Client Json

You need to create a Oauth Client id from console.cloud.google.com
download the file and place into a safe directory
You need to create an OAuth Client id from console.cloud.google.com, download the file, and place it into a safe directory.

### Usage example

```go run main.go --provider gdrive --basedir /tmp/ --gdrive-client-json-filepath /[credential_dir] --gdrive-local-config-path [directory_to_save_config] ```

## Shell functions

### Bash and zsh (multiple files uploaded as zip archive)
##### Add this to .bashrc or .zshrc or its equivalent
```bash
transfer(){ if [ $# -eq 0 ];then echo "No arguments specified.\nUsage:\n transfer <file|directory>\n ... | transfer <file_name>">&2;return 1;fi;if tty -s;then file="$1";file_name=$(basename "$file");if [ ! -e "$file" ];then echo "$file: No such file or directory">&2;return 1;fi;if [ -d "$file" ];then file_name="$file_name.zip" ,;(cd "$file"&&zip -r -q - .)|curl --progress-bar --upload-file "-" "https://transfer.sh/$file_name"|tee /dev/null,;else cat "$file"|curl --progress-bar --upload-file "-" "https://transfer.sh/$file_name"|tee /dev/null;fi;else file_name=$1;curl --progress-bar --upload-file "-" "https://transfer.sh/$file_name"|tee /dev/null;fi;}
```

#### Now you can use transfer function
```
$ transfer hello.txt
```


### Bash and zsh (with delete url, delete token output and prompt before uploading)
##### Add this to .bashrc or .zshrc or its equivalent

<details><summary>Expand</summary><p>

```bash
transfer()
{
local file
declare -a file_array
file_array=("${@}")

if [[ "${file_array[@]}" == "" || "${1}" == "--help" || "${1}" == "-h" ]]
then
echo "${0} - Upload arbitrary files to \"transfer.sh\"."
echo ""
echo "Usage: ${0} [options] [<file>]..."
echo ""
echo "OPTIONS:"
echo "  -h, --help"
echo "      show this message"
echo ""
echo "EXAMPLES:"
echo "  Upload a single file from the current working directory:"
echo "      ${0} \"image.img\""
echo ""
echo "  Upload multiple files from the current working directory:"
echo "      ${0} \"image.img\" \"image2.img\""
echo ""
echo "  Upload a file from a different directory:"
echo "      ${0} \"/tmp/some_file\""
echo ""
echo "  Upload all files from the current working directory. Be aware of the webserver's rate limiting!:"
echo "      ${0} *"
echo ""
echo "  Upload a single file from the current working directory and filter out the delete token and download link:"
echo "      ${0} \"image.img\" | awk --field-separator=\": \" '/Delete token:/ { print \$2 } /Download link:/ { print \$2 }'"
echo ""
echo "  Show help text from \"transfer.sh\":"
echo "      curl --request GET \"https://transfer.sh\""
return 0
else
for file in "${file_array[@]}"
do
if [[ ! -f "${file}" ]]
then
echo -e "\e[01;31m'${file}' could not be found or is not a file.\e[0m" >&2
return 1
fi
done
unset file
fi

local upload_files
local curl_output
local awk_output

du -c -k -L "${file_array[@]}" >&2
# be compatible with "bash"
if [[ "${ZSH_NAME}" == "zsh" ]]
then
read $'upload_files?\e[01;31mDo you really want to upload the above files ('"${#file_array[@]}"$') to "transfer.sh"? (Y/n): \e[0m'
elif [[ "${BASH}" == *"bash"* ]]
then
read -p $'\e[01;31mDo you really want to upload the above files ('"${#file_array[@]}"$') to "transfer.sh"? (Y/n): \e[0m' upload_files
fi

case "${upload_files:-y}" in
"y"|"Y")
# for the sake of the progress bar, execute "curl" for each file.
# the parameters "--include" and "--form" will suppress the progress bar.
for file in "${file_array[@]}"
do
# show delete link and filter out the delete token from the response header after upload.
# it is important to save "curl's" "stdout" via a subshell to a variable or redirect it to another command,
# which just redirects to "stdout" in order to have a sane output afterwards.
# the progress bar is redirected to "stderr" and is only displayed,
# if "stdout" is redirected to something; e.g. ">/dev/null", "tee /dev/null" or "| <some_command>".
# the response header is redirected to "stdout", so redirecting "stdout" to "/dev/null" does not make any sense.
# redirecting "curl's" "stderr" to "stdout" ("2>&1") will suppress the progress bar.
curl_output=$(curl --request PUT --progress-bar --dump-header - --upload-file "${file}" "https://transfer.sh/")
awk_output=$(awk \
'gsub("\r", "", $0) && tolower($1) ~ /x-url-delete/ \
{
delete_link=$2;
print "Delete command: curl --request DELETE " "\""delete_link"\"";

gsub(".*/", "", delete_link);
delete_token=delete_link;
print "Delete token: " delete_token;
}

END{
print "Download link: " $0;
}' <<< "${curl_output}")

# return the results via "stdout", "awk" does not do this for some reason.
echo -e "${awk_output}\n"

# avoid rate limiting as much as possible; nginx: too many requests.
if (( ${#file_array[@]} > 4 ))
then
sleep 5
fi
done
;;

"n"|"N")
return 1
;;

*)
echo -e "\e[01;31mWrong input: '${upload_files}'.\e[0m" >&2
return 1
esac
}
```

</p></details>

#### Sample output
```bash
$ ls -lh
total 20M
-rw-r--r-- 1 <some_username> <some_username> 10M Apr 4 21:08 image.img
-rw-r--r-- 1 <some_username> <some_username> 10M Apr 4 21:08 image2.img
$ transfer image*
10240K image2.img
10240K image.img
20480K total
Do you really want to upload the above files (2) to "transfer.sh"? (Y/n):
######################################################################################################################################################################################################################################## 100.0%
Delete command: curl --request DELETE "https://transfer.sh/wJw9pz/image2.img/mSctGx7pYCId"
Delete token: mSctGx7pYCId
Download link: https://transfer.sh/wJw9pz/image2.img

######################################################################################################################################################################################################################################## 100.0%
Delete command: curl --request DELETE "https://transfer.sh/ljJc5I/image.img/nw7qaoiKUwCU"
Delete token: nw7qaoiKUwCU
Download link: https://transfer.sh/ljJc5I/image.img

$ transfer "image.img" | awk --field-separator=": " '/Delete token:/ { print $2 } /Download link:/ { print $2 }'
10240K image.img
10240K total
Do you really want to upload the above files (1) to "transfer.sh"? (Y/n):
######################################################################################################################################################################################################################################## 100.0%
tauN5dE3fWJe
https://transfer.sh/MYkuqn/image.img
```

## Contributions

Contributions are welcome.

@@ -221,7 +406,7 @@ Contributions are welcome.

**Stefan Benten**

## Copyright and license
## Copyright and License

Code and documentation copyright 2011-2018 Remco Verhoef.
Code and documentation copyright 2018-2020 Andrea Spacca.
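The x-url-delete header documented in the README changes above can be captured and replayed in one go. A minimal sketch; the awk filter is illustrative and not part of the repository:

```bash
# Upload, pull the delete URL out of the response headers, then delete.
delete_url=$(curl -sD - --upload-file ./hello.txt https://transfer.sh/hello.txt \
  | tr -d '\r' | awk 'tolower($1) ~ /^x-url-delete:$/ { print $2 }')
curl --request DELETE "$delete_url"
```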
63 cmd/cmd.go

@@ -2,6 +2,7 @@ package cmd

import (
"fmt"
"github.com/dutchcoders/transfer.sh/server/storage"
"log"
"os"
"strings"

@@ -12,6 +13,7 @@ import (
"google.golang.org/api/googleapi"
)

// Version is inject at build time
var Version = "0.0.0"
var helpTemplate = `NAME:
{{.Name}} - {{.Usage}}

@@ -97,6 +99,12 @@ var globalFlags = []cli.Flag{
Value: "",
EnvVar: "PROXY_PORT",
},
cli.StringFlag{
Name: "email-contact",
Usage: "email address to link in Contact Us (front end)",
Value: "",
EnvVar: "EMAIL_CONTACT",
},
cli.StringFlag{
Name: "ga-key",
Usage: "key for google analytics (front end)",

@@ -233,6 +241,11 @@ var globalFlags = []cli.Flag{
Value: "",
EnvVar: "CLAMAV_HOST",
},
cli.BoolFlag{
Name: "perform-clamav-prescan",
Usage: "perform-clamav-prescan",
EnvVar: "PERFORM_CLAMAV_PRESCAN",
},
cli.StringFlag{
Name: "virustotal-key",
Usage: "virustotal-key",

@@ -282,14 +295,16 @@ var globalFlags = []cli.Flag{
},
}

// Cmd wraps cli.app
type Cmd struct {
*cli.App
}

func VersionAction(c *cli.Context) {
fmt.Println(color.YellowString(fmt.Sprintf("transfer.sh %s: Easy file sharing from the command line", Version)))
func versionCommand(_ *cli.Context) {
fmt.Println(color.YellowString("transfer.sh %s: Easy file sharing from the command line", Version))
}

// New is the factory for transfer.sh
func New() *Cmd {
logger := log.New(os.Stdout, "[transfer.sh]", log.LstdFlags)

@@ -304,7 +319,7 @@ func New() *Cmd {
app.Commands = []cli.Command{
{
Name: "version",
Action: VersionAction,
Action: versionCommand,
},
}

@@ -313,7 +328,7 @@ func New() *Cmd {
}

app.Action = func(c *cli.Context) {
options := []server.OptionFn{}
var options []server.OptionFn
if v := c.String("listener"); v != "" {
options = append(options, server.Listener(v))
}

@@ -345,6 +360,10 @@ func New() *Cmd {
options = append(options, server.ProxyPort(v))
}

if v := c.String("email-contact"); v != "" {
options = append(options, server.EmailContact(v))
}

if v := c.String("ga-key"); v != "" {
options = append(options, server.GoogleAnalytics(v))
}

@@ -375,6 +394,14 @@ func New() *Cmd {
options = append(options, server.ClamavHost(v))
}

if v := c.Bool("perform-clamav-prescan"); v {
if c.String("clamav-host") == "" {
panic("clamav-host not set")
}

options = append(options, server.PerformClamavPrescan(v))
}

if v := c.Int64("max-upload-size"); v > 0 {
options = append(options, server.MaxUploadSize(v))
}

@@ -403,13 +430,13 @@ func New() *Cmd {
}

if c.Bool("force-https") {
options = append(options, server.ForceHTTPs())
options = append(options, server.ForceHTTPS())
}

if httpAuthUser := c.String("http-auth-user"); httpAuthUser == "" {
} else if httpAuthPass := c.String("http-auth-pass"); httpAuthPass == "" {
} else {
options = append(options, server.HttpAuthCredentials(httpAuthUser, httpAuthPass))
options = append(options, server.HTTPAuthCredentials(httpAuthUser, httpAuthPass))
}

applyIPFilter := false

@@ -437,42 +464,42 @@ func New() *Cmd {
panic("secret-key not set.")
} else if bucket := c.String("bucket"); bucket == "" {
panic("bucket not set.")
} else if storage, err := server.NewS3Storage(accessKey, secretKey, bucket, purgeDays, c.String("s3-region"), c.String("s3-endpoint"), c.Bool("s3-no-multipart"), c.Bool("s3-path-style"), logger); err != nil {
} else if store, err := storage.NewS3Storage(accessKey, secretKey, bucket, purgeDays, c.String("s3-region"), c.String("s3-endpoint"), c.Bool("s3-no-multipart"), c.Bool("s3-path-style"), logger); err != nil {
panic(err)
} else {
options = append(options, server.UseStorage(storage))
options = append(options, server.UseStorage(store))
}
case "gdrive":
chunkSize := c.Int("gdrive-chunk-size")
chunkSize := c.Int("gdrive-chunk-size") * 1024 * 1024

if clientJsonFilepath := c.String("gdrive-client-json-filepath"); clientJsonFilepath == "" {
panic("client-json-filepath not set.")
if clientJSONFilepath := c.String("gdrive-client-json-filepath"); clientJSONFilepath == "" {
panic("gdrive-client-json-filepath not set.")
} else if localConfigPath := c.String("gdrive-local-config-path"); localConfigPath == "" {
panic("local-config-path not set.")
panic("gdrive-local-config-path not set.")
} else if basedir := c.String("basedir"); basedir == "" {
panic("basedir not set.")
} else if storage, err := server.NewGDriveStorage(clientJsonFilepath, localConfigPath, basedir, chunkSize, logger); err != nil {
} else if store, err := storage.NewGDriveStorage(clientJSONFilepath, localConfigPath, basedir, chunkSize, logger); err != nil {
panic(err)
} else {
options = append(options, server.UseStorage(storage))
options = append(options, server.UseStorage(store))
}
case "storj":
if access := c.String("storj-access"); access == "" {
panic("storj-access not set.")
} else if bucket := c.String("storj-bucket"); bucket == "" {
panic("storj-bucket not set.")
} else if storage, err := server.NewStorjStorage(access, bucket, purgeDays, logger); err != nil {
} else if store, err := storage.NewStorjStorage(access, bucket, purgeDays, logger); err != nil {
panic(err)
} else {
options = append(options, server.UseStorage(storage))
options = append(options, server.UseStorage(store))
}
case "local":
if v := c.String("basedir"); v == "" {
panic("basedir not set.")
} else if storage, err := server.NewLocalStorage(v, logger); err != nil {
} else if store, err := storage.NewLocalStorage(v, logger); err != nil {
panic(err)
} else {
options = append(options, server.UseStorage(storage))
options = append(options, server.UseStorage(store))
}
default:
panic("Provider not set or invalid.")
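The new perform-clamav-prescan guard above panics at startup unless clamav-host is also set. Assuming a locally built `transfersh` binary and an assumed clamd unix-socket path:

```bash
# both flags together; omitting --clamav-host would panic at startup
./transfersh --provider local --basedir /tmp/ \
  --clamav-host /run/clamav/clamd.sock \
  --perform-clamav-prescan
```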
89 examples.md

@@ -5,6 +5,7 @@
* [Archiving and backups](#archiving-and-backups)
* [Encrypting and decrypting](#encrypting-and-decrypting)
* [Scanning for viruses](#scanning-for-viruses)
* [Uploading and copy download command](#uploading-and-copy-download-command)

## Aliases
<a name="aliases"/>

@@ -173,4 +174,90 @@ $ curl -X PUT --upload-file ./eicar.com https://transfer.sh/eicar.com/scan

### Upload malware to VirusTotal, get a permalink in return
```bash
$ curl -X PUT --upload-file nhgbhhj https://transfer.sh/test.txt/virustotal
```

## Uploading and copy download command

Download commands can be automatically copied to the clipboard after files are uploaded using transfer.sh.

It was designed for Linux or macOS.

### 1. Install xclip or xsel (Linux only; macOS skips this step)

- to install xclip, see https://command-not-found.com/xclip

- to install xsel, see https://command-not-found.com/xsel

After installing, add pbcopy and pbpaste aliases to .bashrc or .zshrc or its equivalent.

- If using xclip, paste the following lines:

```sh
alias pbcopy='xclip -selection clipboard'
alias pbpaste='xclip -selection clipboard -o'
```

- If using xsel, paste the following lines:

```sh
alias pbcopy='xsel --clipboard --input'
alias pbpaste='xsel --clipboard --output'
```

### 2. Add the upload-and-copy shell function

1. Open .bashrc or .zshrc or its equivalent.

2. Add the following shell script:

```sh
transfer() {
curl --progress-bar --upload-file "$1" https://transfer.sh/$(basename "$1") | pbcopy;
echo "1) Download link:"
echo "$(pbpaste)"

echo "\n2) Linux or macOS download command:"
linux_macos_download_command="wget $(pbpaste)"
echo $linux_macos_download_command

echo "\n3) Windows download command:"
windows_download_command="Invoke-WebRequest -Uri "$(pbpaste)" -OutFile $(basename $1)"
echo $windows_download_command

case $2 in
l|m) echo $linux_macos_download_command | pbcopy
;;
w) echo $windows_download_command | pbcopy
;;
esac
}
```


### 3. Test

The transfer command takes two parameters:

1. The first parameter is the path of the file to upload.

2. The second parameter (optional) indicates which system's download command to copy:

- Leave it empty to copy just the download link.

- `l` or `m` copies the Linux or macOS command that downloads the file.

- `w` copies the Windows command that downloads the file.

For example, the command to download the file on Windows will be copied:

```sh
$ transfer ~/temp/a.log w
######################################################################## 100.0%
1) Download link:
https://transfer.sh/y0qr2c/a.log

2) Linux or macOS download command:
wget https://transfer.sh/y0qr2c/a.log

3) Windows download command:
Invoke-WebRequest -Uri https://transfer.sh/y0qr2c/a.log -OutFile a.log
```
41 flake.lock (generated, new file)

@@ -0,0 +1,41 @@
{
"nodes": {
"flake-utils": {
"locked": {
"lastModified": 1631561581,
"narHash": "sha256-3VQMV5zvxaVLvqqUrNz3iJelLw30mIVSfZmAaauM3dA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "7e5bf3925f6fbdfaf50a2a7ca0be2879c4261d19",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1632470817,
"narHash": "sha256-tGyOesdpqQEVqlmVeElsC98OJ2GDy+LNaCThSby/GQM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "39e8ec2db68b863543bd377e44fbe02f8d05864e",
"type": "github"
},
"original": {
"id": "nixpkgs",
"type": "indirect"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}
212 flake.nix (new file)

@@ -0,0 +1,212 @@
{
description = "Transfer.sh";

inputs.flake-utils.url = "github:numtide/flake-utils";

outputs = { self, nixpkgs, flake-utils }:
let
transfer-sh = pkgs: pkgs.buildGoModule {
src = self;
name = "transfer.sh";
vendorSha256 = "sha256-bgQUMiC33yVorcKOWhegT1/YU+fvxsz2pkeRvjf3R7g=";
};
in

flake-utils.lib.eachDefaultSystem (
system:
let
pkgs = nixpkgs.legacyPackages.${system};
in
rec {
packages = flake-utils.lib.flattenTree {
transfer-sh = transfer-sh pkgs;
};
defaultPackage = packages.transfer-sh;
apps.transfer-sh = flake-utils.lib.mkApp { drv = packages.transfer-sh; };
defaultApp = apps.transfer-sh;
}
) // rec {

nixosModules = {
transfer-sh = { config, lib, pkgs, ... }: with lib; let
RUNTIME_DIR = "/var/lib/transfer.sh";
cfg = config.services.transfer-sh;

general_options = {

enable = mkEnableOption "Transfer.sh service";
listener = mkOption { default = 80; type = types.int; description = "port to use for http (:80)"; };
profile-listener = mkOption { default = 6060; type = types.int; description = "port to use for profiler (:6060)"; };
force-https = mkOption { type = types.nullOr types.bool; description = "redirect to https"; };
tls-listener = mkOption { default = 443; type = types.int; description = "port to use for https (:443)"; };
tls-listener-only = mkOption { type = types.nullOr types.bool; description = "flag to enable tls listener only"; };
tls-cert-file = mkOption { type = types.nullOr types.str; description = "path to tls certificate"; };
tls-private-key = mkOption { type = types.nullOr types.str; description = "path to tls private key "; };
http-auth-user = mkOption { type = types.nullOr types.str; description = "user for basic http auth on upload"; };
http-auth-pass = mkOption { type = types.nullOr types.str; description = "pass for basic http auth on upload"; };
ip-whitelist = mkOption { type = types.nullOr types.str; description = "comma separated list of ips allowed to connect to the service"; };
ip-blacklist = mkOption { type = types.nullOr types.str; description = "comma separated list of ips not allowed to connect to the service"; };
temp-path = mkOption { type = types.nullOr types.str; description = "path to temp folder"; };
web-path = mkOption { type = types.nullOr types.str; description = "path to static web files (for development or custom front end)"; };
proxy-path = mkOption { type = types.nullOr types.str; description = "path prefix when service is run behind a proxy"; };
proxy-port = mkOption { type = types.nullOr types.str; description = "port of the proxy when the service is run behind a proxy"; };
ga-key = mkOption { type = types.nullOr types.str; description = "google analytics key for the front end"; };
email-contact = mkOption { type = types.nullOr types.str; description = "email contact for the front end"; };
uservoice-key = mkOption { type = types.nullOr types.str; description = "user voice key for the front end"; };
lets-encrypt-hosts = mkOption { type = types.nullOr (types.listOf types.str); description = "hosts to use for lets encrypt certificates"; };
log = mkOption { type = types.nullOr types.str; description = "path to log file"; };
cors-domains = mkOption { type = types.nullOr (types.listOf types.str); description = "comma separated list of domains for CORS, setting it enable CORS "; };
clamav-host = mkOption { type = types.nullOr types.str; description = "host for clamav feature"; };
rate-limit = mkOption { type = types.nullOr types.int; description = "request per minute"; };
max-upload-size = mkOption { type = types.nullOr types.int; description = "max upload size in kilobytes "; };
purge-days = mkOption { type = types.nullOr types.int; description = "number of days after the uploads are purged automatically "; };
random-token-length = mkOption { type = types.nullOr types.int; description = "length of the random token for the upload path (double the size for delete path)"; };

};

provider_options = {

aws = {
enable = mkEnableOption "Enable AWS backend";
aws-access-key = mkOption { type = types.str; description = "aws access key"; };
aws-secret-key = mkOption { type = types.str; description = "aws secret key"; };
bucket = mkOption { type = types.str; description = "aws bucket "; };
s3-endpoint = mkOption {
type = types.nullOr types.str;
description = ''
Custom S3 endpoint.
If you specify the s3-region, you don't need to set the endpoint URL since the correct endpoint will used automatically.
'';
};
s3-region = mkOption { type = types.str; description = "region of the s3 bucket eu-west-"; };
s3-no-multipart = mkOption { type = types.nullOr types.bool; description = "disables s3 multipart upload "; };
s3-path-style = mkOption { type = types.nullOr types.str; description = "Forces path style URLs, required for Minio. "; };
};

storj = {
enable = mkEnableOption "Enable storj backend";
storj-access = mkOption { type = types.str; description = "Access for the project"; };
storj-bucket = mkOption { type = types.str; description = "Bucket to use within the project"; };
};

gdrive = {
enable = mkEnableOption "Enable gdrive backend";
gdrive-client-json = mkOption { type = types.str; description = "oauth client json config for gdrive provider"; };
gdrive-chunk-size = mkOption { default = 8; type = types.nullOr types.int; description = "chunk size for gdrive upload in megabytes, must be lower than available memory (8 MB)"; };
basedir = mkOption { type = types.str; description = "path storage for gdrive provider"; default = "${cfg.stateDir}/store"; };
purge-interval = mkOption { type = types.nullOr types.int; description = "interval in hours to run the automatic purge for (not applicable to S3 and Storj)"; };

};

local = {
enable = mkEnableOption "Enable local backend";
basedir = mkOption { type = types.str; description = "path storage for local provider"; default = "${cfg.stateDir}/store"; };
purge-interval = mkOption { type = types.nullOr types.int; description = "interval in hours to run the automatic purge for (not applicable to S3 and Storj)"; };
};

};
in
{
options.services.transfer-sh = fold recursiveUpdate {} [
general_options
{
provider = provider_options;
user = mkOption {
type = types.str;
description = "User to run the service under";
default = "transfer.sh";
};
group = mkOption {
type = types.str;
description = "Group to run the service under";
default = "transfer.sh";
};
stateDir = mkOption {
type = types.path;
description = "Variable state directory";
default = RUNTIME_DIR;
};
}
];

config = let

mkFlags = cfg: options:
let
mkBoolFlag = option: if cfg.${option} then [ "--${option}" ] else [];
mkFlag = option:
if isBool cfg.${option}
then mkBoolFlag option
else [ "--${option}" "${cfg.${option}}" ];

in
lists.flatten (map (mkFlag) (filter (option: cfg.${option} != null && option != "enable") options));

aws-config = (mkFlags cfg.provider.aws (attrNames provider_options)) ++ [ "--provider" "aws" ];
gdrive-config = mkFlags cfg.provider.gdrive (attrNames provider_options.gdrive) ++ [ "--provider" "gdrive" ];
storj-config = mkFlags cfg.provider.storj (attrNames provider_options.storj) ++ [ "--provider" "storj" ];
local-config = mkFlags cfg.provider.local (attrNames provider_options.local) ++ [ "--provider" "local" ];

general-config = concatStringsSep " " (mkFlags cfg (attrNames general_options));
provider-config = concatStringsSep " " (
if cfg.provider.aws.enable && !cfg.provider.storj.enable && !cfg.provider.gdrive.enable && !cfg.provider.local.enable then aws-config
else if !cfg.provider.aws.enable && cfg.provider.storj.enable && !cfg.provider.gdrive.enable && !cfg.provider.local.enable then storj-config
else if !cfg.provider.aws.enable && !cfg.provider.storj.enable && cfg.provider.gdrive.enable && !cfg.provider.local.enable then gdrive-config
else if !cfg.provider.aws.enable && !cfg.provider.storj.enable && !cfg.provider.gdrive.enable && cfg.provider.local.enable then local-config
else throw "transfer.sh requires exactly one provider (aws, storj, gdrive, local)"
);

in
lib.mkIf cfg.enable
{
systemd.tmpfiles.rules = [
"d ${cfg.stateDir} 0750 ${cfg.user} ${cfg.group} - -"
] ++ optional cfg.provider.gdrive.enable cfg.provider.gdrive.basedir
++ optional cfg.provider.local.enable cfg.provider.local.basedir;

systemd.services.transfer-sh = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
User = cfg.user;
Group = cfg.group;
ExecStart = "${transfer-sh pkgs}/bin/transfer.sh ${general-config} ${provider-config} ";
};
};

networking.firewall.allowedTCPPorts = [ cfg.listener cfg.profile-listener cfg.tls-listener ];
};
};

default = { self, pkgs, ... }: {
imports = [ nixosModules.transfer-sh ];
# Network configuration.

# useDHCP is generally considered to better be turned off in favor
# of <adapter>.useDHCP
networking.useDHCP = false;
networking.firewall.allowedTCPPorts = [];

# Enable the inventaire server.
services.transfer-sh = {
enable = true;
provider.local = {
enable = true;
};
};

nixpkgs.config.allowUnfree = true;
};
};

nixosConfigurations."container" = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
nixosModules.default
({ ... }: { boot.isContainer = true; })
];
};

};
}
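With the flake above, the package and app outputs can be exercised directly, assuming a Nix installation with flakes enabled and running from the repository root:

```bash
nix build .                                    # builds defaultPackage (transfer-sh)
nix run . -- --provider local --basedir /tmp/  # runs defaultApp with server flags
```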
29 go.mod

@@ -1,37 +1,40 @@
module github.com/dutchcoders/transfer.sh

go 1.13
go 1.15

require (
cloud.google.com/go v0.77.0 // indirect
cloud.google.com/go/compute v1.18.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/PuerkitoBio/ghost v0.0.0-20160324114900-206e6e460e14
github.com/VojtechVitek/ratelimit v0.0.0-20160722140851-dc172bc0f6d2
github.com/aws/aws-sdk-go v1.37.14
github.com/calebcase/tmpfile v1.0.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
github.com/dutchcoders/go-virustotal v0.0.0-20140923143438-24cc8e6fa329
github.com/dutchcoders/transfer.sh-web v0.0.0-20210723094506-f0946ebceb7a
github.com/dutchcoders/transfer.sh-web v0.0.0-20220824020025-7240e75c3bb8
github.com/elazarl/go-bindata-assetfs v1.0.1
github.com/fatih/color v1.10.0
github.com/garyburd/redigo v1.6.2 // indirect
github.com/golang/gddo v0.0.0-20210115222349-20d68f94ee1f
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.2 // indirect
github.com/gorilla/handlers v1.5.1
github.com/gorilla/mux v1.8.0
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/microcosm-cc/bluemonday v1.0.5
github.com/microcosm-cc/bluemonday v1.0.16
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
github.com/russross/blackfriday/v2 v2.1.0
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
github.com/urfave/cli v1.22.5
go.opencensus.io v0.22.6 // indirect
golang.org/x/crypto v0.0.0-20210415154028-4f45737414dc
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99
google.golang.org/api v0.40.0
google.golang.org/genproto v0.0.0-20210218151259-fe80b386bf06 // indirect
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838
golang.org/x/net v0.6.0 // indirect
golang.org/x/oauth2 v0.5.0
google.golang.org/api v0.109.0
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc // indirect
google.golang.org/grpc v1.53.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15
storj.io/common v0.0.0-20210504141454-bcb03a80052f
storj.io/uplink v1.5.0-rc.1.0.20210512164354-e2e5889614a9
storj.io/common v0.0.0-20220405183405-ffdc3ab808c6
storj.io/uplink v1.8.2
)
12 main.go

@@ -1,8 +1,16 @@
package main

import "github.com/dutchcoders/transfer.sh/cmd"
import (
    "log"
    "os"

    "github.com/dutchcoders/transfer.sh/cmd"
)

func main() {
    app := cmd.New()
    app.RunAndExitOnError()
    err := app.Run(os.Args)
    if err != nil {
        log.Fatal(err)
    }
}

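The change above drops cli.App's RunAndExitOnError, which urfave/cli marks as deprecated, in favor of returning the error from app.Run(os.Args) and handling it explicitly. A minimal standalone sketch of the same pattern (assuming urfave/cli v1; the app name is illustrative):

package main

import (
    "log"
    "os"

    "github.com/urfave/cli"
)

func main() {
    app := cli.NewApp()
    app.Name = "example" // illustrative only
    // Run returns an error instead of exiting the process itself,
    // so the caller decides how failures are logged and reported.
    if err := app.Run(os.Args); err != nil {
        log.Fatal(err)
    }
}
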
@@ -27,19 +27,19 @@ THE SOFTWARE.
package server

import (
    // _ "transfer.sh/app/handlers"
    // _ "transfer.sh/app/utils"

    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "time"

    clamd "github.com/dutchcoders/go-clamd"

    "github.com/dutchcoders/go-clamd"
    "github.com/gorilla/mux"
)

const clamavScanStatusOK = "OK"

func (s *Server) scanHandler(w http.ResponseWriter, r *http.Request) {
    vars := mux.Vars(r)

@@ -50,26 +50,53 @@ func (s *Server) scanHandler(w http.ResponseWriter, r *http.Request) {

    s.logger.Printf("Scanning %s %d %s", filename, contentLength, contentType)

    var reader io.Reader

    reader = r.Body

    c := clamd.NewClamd(s.ClamAVDaemonHost)

    abort := make(chan bool)
    response, err := c.ScanStream(reader, abort)
    file, err := ioutil.TempFile(s.tempPath, "clamav-")
    defer s.cleanTmpFile(file)
    if err != nil {
        s.logger.Printf("%s", err.Error())
        http.Error(w, err.Error(), 500)
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    select {
    case s := <-response:
        w.Write([]byte(fmt.Sprintf("%v\n", s.Status)))
    case <-time.After(time.Second * 60):
        abort <- true
    _, err = io.Copy(file, r.Body)
    if err != nil {
        s.logger.Printf("%s", err.Error())
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    close(abort)
    status, err := s.performScan(file.Name())
    if err != nil {
        s.logger.Printf("%s", err.Error())
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    _, _ = w.Write([]byte(fmt.Sprintf("%v\n", status)))
}

func (s *Server) performScan(path string) (string, error) {
    c := clamd.NewClamd(s.ClamAVDaemonHost)

    responseCh := make(chan chan *clamd.ScanResult)
    errCh := make(chan error)
    go func(responseCh chan chan *clamd.ScanResult, errCh chan error) {
        response, err := c.ScanFile(path)
        if err != nil {
            errCh <- err
            return
        }

        responseCh <- response
    }(responseCh, errCh)

    select {
    case err := <-errCh:
        return "", err
    case response := <-responseCh:
        st := <-response
        return st.Status, nil
    case <-time.After(time.Second * 60):
        return "", errors.New("clamav scan timeout")
    }
}

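The new performScan isolates a blocking clamd call behind channels so it can be raced against a 60-second timer. A minimal sketch of that pattern, with a hypothetical slowOperation standing in for clamd's ScanFile:

package main

import (
    "errors"
    "fmt"
    "time"
)

// slowOperation stands in for a blocking call such as clamd's ScanFile.
func slowOperation() (string, error) {
    time.Sleep(2 * time.Second)
    return "OK", nil
}

// runWithTimeout races a blocking call against a deadline, the same
// shape performScan uses above. Buffered channels let the worker
// goroutine finish and exit even if the timeout fires first.
func runWithTimeout(timeout time.Duration) (string, error) {
    resultCh := make(chan string, 1)
    errCh := make(chan error, 1)

    go func() {
        res, err := slowOperation()
        if err != nil {
            errCh <- err
            return
        }
        resultCh <- res
    }()

    select {
    case err := <-errCh:
        return "", err
    case res := <-resultCh:
        return res, nil
    case <-time.After(timeout):
        return "", errors.New("operation timed out")
    }
}

func main() {
    status, err := runWithTimeout(time.Minute)
    fmt.Println(status, err)
}
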
File diff suppressed because it is too large
@@ -13,16 +13,16 @@ import (
func Test(t *testing.T) { TestingT(t) }

var (
    _ = Suite(&SuiteRedirectWithForceHTTPs{})
    _ = Suite(&SuiteRedirectWithoutForceHTTPs{})
    _ = Suite(&suiteRedirectWithForceHTTPS{})
    _ = Suite(&suiteRedirectWithoutForceHTTPS{})
)

type SuiteRedirectWithForceHTTPs struct {
type suiteRedirectWithForceHTTPS struct {
    handler http.HandlerFunc
}

func (s *SuiteRedirectWithForceHTTPs) SetUpTest(c *C) {
    srvr, err := New(ForceHTTPs())
func (s *suiteRedirectWithForceHTTPS) SetUpTest(c *C) {
    srvr, err := New(ForceHTTPS())
    c.Assert(err, IsNil)

    handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -32,7 +32,7 @@ func (s *SuiteRedirectWithForceHTTPs) SetUpTest(c *C) {
    s.handler = srvr.RedirectHandler(handler)
}

func (s *SuiteRedirectWithForceHTTPs) TestHTTPs(c *C) {
func (s *suiteRedirectWithForceHTTPS) TestHTTPs(c *C) {
    req := httptest.NewRequest("GET", "https://test/test", nil)

    w := httptest.NewRecorder()
@@ -42,7 +42,7 @@ func (s *SuiteRedirectWithForceHTTPs) TestHTTPs(c *C) {
    c.Assert(resp.StatusCode, Equals, http.StatusOK)
}

func (s *SuiteRedirectWithForceHTTPs) TestOnion(c *C) {
func (s *suiteRedirectWithForceHTTPS) TestOnion(c *C) {
    req := httptest.NewRequest("GET", "http://test.onion/test", nil)

    w := httptest.NewRecorder()
@@ -52,7 +52,7 @@ func (s *SuiteRedirectWithForceHTTPs) TestOnion(c *C) {
    c.Assert(resp.StatusCode, Equals, http.StatusOK)
}

func (s *SuiteRedirectWithForceHTTPs) TestXForwardedFor(c *C) {
func (s *suiteRedirectWithForceHTTPS) TestXForwardedFor(c *C) {
    req := httptest.NewRequest("GET", "http://127.0.0.1/test", nil)
    req.Header.Set("X-Forwarded-Proto", "https")

@@ -63,7 +63,7 @@ func (s *SuiteRedirectWithForceHTTPs) TestXForwardedFor(c *C) {
    c.Assert(resp.StatusCode, Equals, http.StatusOK)
}

func (s *SuiteRedirectWithForceHTTPs) TestHTTP(c *C) {
func (s *suiteRedirectWithForceHTTPS) TestHTTP(c *C) {
    req := httptest.NewRequest("GET", "http://127.0.0.1/test", nil)

    w := httptest.NewRecorder()
@@ -74,11 +74,11 @@ func (s *SuiteRedirectWithForceHTTPs) TestHTTP(c *C) {
    c.Assert(resp.Header.Get("Location"), Equals, "https://127.0.0.1/test")
}

type SuiteRedirectWithoutForceHTTPs struct {
type suiteRedirectWithoutForceHTTPS struct {
    handler http.HandlerFunc
}

func (s *SuiteRedirectWithoutForceHTTPs) SetUpTest(c *C) {
func (s *suiteRedirectWithoutForceHTTPS) SetUpTest(c *C) {
    srvr, err := New()
    c.Assert(err, IsNil)

@@ -89,7 +89,7 @@ func (s *SuiteRedirectWithoutForceHTTPs) SetUpTest(c *C) {
    s.handler = srvr.RedirectHandler(handler)
}

func (s *SuiteRedirectWithoutForceHTTPs) TestHTTP(c *C) {
func (s *suiteRedirectWithoutForceHTTPS) TestHTTP(c *C) {
    req := httptest.NewRequest("GET", "http://127.0.0.1/test", nil)

    w := httptest.NewRecorder()
@@ -99,7 +99,7 @@ func (s *SuiteRedirectWithoutForceHTTPs) TestHTTP(c *C) {
    c.Assert(resp.StatusCode, Equals, http.StatusOK)
}

func (s *SuiteRedirectWithoutForceHTTPs) TestHTTPs(c *C) {
func (s *suiteRedirectWithoutForceHTTPS) TestHTTPs(c *C) {
    req := httptest.NewRequest("GET", "https://127.0.0.1/test", nil)

    w := httptest.NewRecorder()

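The renames above do two things: the HTTPs suffix becomes HTTPS, matching Go's convention that initialisms keep a consistent case, and the suite types lose their leading capital so they become unexported. A two-line sketch of the visibility rule (illustrative names):

package example

// Exported: a leading capital makes the identifier visible to importing packages.
type PublicSuite struct{}

// Unexported: a lower-case first letter keeps it package-private, which is
// all a test suite such as suiteRedirectWithForceHTTPS needs.
type privateSuite struct{}
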
@@ -21,7 +21,7 @@ import (
    "github.com/tomasen/realip"
)

//IPFilterOptions for IPFilter. Allowed takes precendence over Blocked.
//IPFilterOptions for ipFilter. Allowed takes precedence over Blocked.
//IPs can be IPv4 or IPv6 and can optionally contain subnet
//masks (/24). Note however, determining if a given IP is
//included in a subnet requires a linear scan so is less performant
@@ -43,7 +43,8 @@ type IPFilterOptions struct {
    }
}

type IPFilter struct {
// ipFilter
type ipFilter struct {
    opts IPFilterOptions
    //mut protects the below
    //rw since writes are rare
@@ -59,13 +60,12 @@ type subnet struct {
    allowed bool
}

//New constructs IPFilter instance.
func NewIPFilter(opts IPFilterOptions) *IPFilter {
func newIPFilter(opts IPFilterOptions) *ipFilter {
    if opts.Logger == nil {
        flags := log.LstdFlags
        opts.Logger = log.New(os.Stdout, "", flags)
    }
    f := &IPFilter{
    f := &ipFilter{
        opts: opts,
        ips: map[string]bool{},
        defaultAllowed: !opts.BlockByDefault,
@@ -79,19 +79,19 @@ func NewIPFilter(opts IPFilterOptions) *IPFilter {
    return f
}

func (f *IPFilter) AllowIP(ip string) bool {
func (f *ipFilter) AllowIP(ip string) bool {
    return f.ToggleIP(ip, true)
}

func (f *IPFilter) BlockIP(ip string) bool {
func (f *ipFilter) BlockIP(ip string) bool {
    return f.ToggleIP(ip, false)
}

func (f *IPFilter) ToggleIP(str string, allowed bool) bool {
    //check if has subnet
    if ip, net, err := net.ParseCIDR(str); err == nil {
func (f *ipFilter) ToggleIP(str string, allowed bool) bool {
    //check if provided string describes a subnet
    if ip, network, err := net.ParseCIDR(str); err == nil {
        // containing only one ip?
        if n, total := net.Mask.Size(); n == total {
        if n, total := network.Mask.Size(); n == total {
            f.mut.Lock()
            f.ips[ip.String()] = allowed
            f.mut.Unlock()
@@ -110,7 +110,7 @@ func (f *IPFilter) ToggleIP(str string, allowed bool) bool {
        if !found {
            f.subnets = append(f.subnets, &subnet{
                str: str,
                ipnet: net,
                ipnet: network,
                allowed: allowed,
            })
        }
@@ -128,19 +128,19 @@ func (f *IPFilter) ToggleIP(str string, allowed bool) bool {
}

//ToggleDefault alters the default setting
func (f *IPFilter) ToggleDefault(allowed bool) {
func (f *ipFilter) ToggleDefault(allowed bool) {
    f.mut.Lock()
    f.defaultAllowed = allowed
    f.mut.Unlock()
}

//Allowed returns if a given IP can pass through the filter
func (f *IPFilter) Allowed(ipstr string) bool {
func (f *ipFilter) Allowed(ipstr string) bool {
    return f.NetAllowed(net.ParseIP(ipstr))
}

//NetAllowed returns if a given net.IP can pass through the filter
func (f *IPFilter) NetAllowed(ip net.IP) bool {
func (f *ipFilter) NetAllowed(ip net.IP) bool {
    //invalid ip
    if ip == nil {
        return false
@@ -173,35 +173,35 @@ func (f *IPFilter) NetAllowed(ip net.IP) bool {
}

//Blocked returns if a given IP can NOT pass through the filter
func (f *IPFilter) Blocked(ip string) bool {
func (f *ipFilter) Blocked(ip string) bool {
    return !f.Allowed(ip)
}

//NetBlocked returns if a given net.IP can NOT pass through the filter
func (f *IPFilter) NetBlocked(ip net.IP) bool {
func (f *ipFilter) NetBlocked(ip net.IP) bool {
    return !f.NetAllowed(ip)
}

//WrapIPFilter the provided handler with simple IP blocking middleware
//Wrap the provided handler with simple IP blocking middleware
//using this IP filter and its configuration
func (f *IPFilter) Wrap(next http.Handler) http.Handler {
    return &ipFilterMiddleware{IPFilter: f, next: next}
func (f *ipFilter) Wrap(next http.Handler) http.Handler {
    return &ipFilterMiddleware{ipFilter: f, next: next}
}

//WrapIPFilter is equivalent to NewIPFilter(opts) then Wrap(next)
//WrapIPFilter is equivalent to newIPFilter(opts) then Wrap(next)
func WrapIPFilter(next http.Handler, opts IPFilterOptions) http.Handler {
    return NewIPFilter(opts).Wrap(next)
    return newIPFilter(opts).Wrap(next)
}

type ipFilterMiddleware struct {
    *IPFilter
    *ipFilter
    next http.Handler
}

func (m *ipFilterMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    remoteIP := realip.FromRequest(r)

    if !m.IPFilter.Allowed(remoteIP) {
    if !m.ipFilter.Allowed(remoteIP) {
        //show simple forbidden text
        http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
        return

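ToggleIP above treats a CIDR whose mask covers every address bit (a /32 for IPv4, /128 for IPv6) as a single IP rather than a subnet, which is why the rename from net to network also matters: the shadowed net package becomes usable again for net.ParseCIDR. A small sketch of the mask-size check:

package main

import (
    "fmt"
    "net"
)

func main() {
    for _, s := range []string{"10.0.0.1/32", "10.0.0.0/24"} {
        ip, network, err := net.ParseCIDR(s)
        if err != nil {
            fmt.Println(s, "not a CIDR:", err)
            continue
        }
        // ones == bits means the mask covers the whole address,
        // so the "subnet" contains exactly one IP.
        ones, bits := network.Mask.Size()
        fmt.Printf("%s -> ip=%s singleIP=%v\n", s, ip, ones == bits)
    }
}
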
141 server/server.go
@@ -25,62 +25,68 @@ THE SOFTWARE.
package server

import (
    crypto_rand "crypto/rand"
    "context"
    cryptoRand "crypto/rand"
    "crypto/tls"
    "encoding/binary"
    "errors"
    gorillaHandlers "github.com/gorilla/handlers"
    "log"
    "math/rand"
    "mime"
    "net/http"
    _ "net/http/pprof"
    "net/url"
    "os"
    "os/signal"
    "path/filepath"
    "strings"
    "sync"
    "syscall"
    "time"

    context "golang.org/x/net/context"

    "github.com/PuerkitoBio/ghost/handlers"
    "github.com/VojtechVitek/ratelimit"
    "github.com/VojtechVitek/ratelimit/memory"
    gorillaHandlers "github.com/gorilla/handlers"
    "github.com/gorilla/mux"

    _ "net/http/pprof"

    "crypto/tls"
    "golang.org/x/crypto/acme/autocert"

    web "github.com/dutchcoders/transfer.sh-web"
    "github.com/dutchcoders/transfer.sh/server/storage"
    assetfs "github.com/elazarl/go-bindata-assetfs"

    autocert "golang.org/x/crypto/acme/autocert"
    "path/filepath"
)

const SERVER_INFO = "transfer.sh"

// parse request with maximum memory of _24Kilobits
const _24K = (1 << 3) * 24

// parse request with maximum memory of _5Megabytes
const _5M = (1 << 20) * 5

// OptionFn is the option function type
type OptionFn func(*Server)

// ClamavHost sets clamav host
func ClamavHost(s string) OptionFn {
    return func(srvr *Server) {
        srvr.ClamAVDaemonHost = s
    }
}

// PerformClamavPrescan enables clamav prescan on upload
func PerformClamavPrescan(b bool) OptionFn {
    return func(srvr *Server) {
        srvr.performClamavPrescan = b
    }
}

// VirustotalKey sets virus total key
func VirustotalKey(s string) OptionFn {
    return func(srvr *Server) {
        srvr.VirusTotalKey = s
    }
}

// Listener set listener
func Listener(s string) OptionFn {
    return func(srvr *Server) {
        srvr.ListenerString = s
@@ -88,6 +94,7 @@ func Listener(s string) OptionFn {

}

// CorsDomains sets CORS domains
func CorsDomains(s string) OptionFn {
    return func(srvr *Server) {
        srvr.CorsDomains = s
@@ -95,18 +102,28 @@ func CorsDomains(s string) OptionFn {

}

// EmailContact sets email contact
func EmailContact(emailContact string) OptionFn {
    return func(srvr *Server) {
        srvr.emailContact = emailContact
    }
}

// GoogleAnalytics sets GA key
func GoogleAnalytics(gaKey string) OptionFn {
    return func(srvr *Server) {
        srvr.gaKey = gaKey
    }
}

// UserVoice sets UV key
func UserVoice(userVoiceKey string) OptionFn {
    return func(srvr *Server) {
        srvr.userVoiceKey = userVoiceKey
    }
}

// TLSListener sets TLS listener and option
func TLSListener(s string, t bool) OptionFn {
    return func(srvr *Server) {
        srvr.TLSListenerString = s
@@ -115,12 +132,14 @@ func TLSListener(s string, t bool) OptionFn {

}

// ProfileListener sets profile listener
func ProfileListener(s string) OptionFn {
    return func(srvr *Server) {
        srvr.ProfileListenerString = s
    }
}

// WebPath sets web path
func WebPath(s string) OptionFn {
    return func(srvr *Server) {
        if s[len(s)-1:] != "/" {
@@ -131,6 +150,7 @@ func WebPath(s string) OptionFn {
    }
}

// ProxyPath sets proxy path
func ProxyPath(s string) OptionFn {
    return func(srvr *Server) {
        if s[len(s)-1:] != "/" {
@@ -141,12 +161,14 @@ func ProxyPath(s string) OptionFn {
    }
}

// ProxyPort sets proxy port
func ProxyPort(s string) OptionFn {
    return func(srvr *Server) {
        srvr.proxyPort = s
    }
}

// TempPath sets temp path
func TempPath(s string) OptionFn {
    return func(srvr *Server) {
        if s[len(s)-1:] != "/" {
@@ -157,6 +179,7 @@ func TempPath(s string) OptionFn {
    }
}

// LogFile sets log file
func LogFile(logger *log.Logger, s string) OptionFn {
    return func(srvr *Server) {
        f, err := os.OpenFile(s, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
@@ -169,30 +192,36 @@ func LogFile(logger *log.Logger, s string) OptionFn {
    }
}

// Logger sets logger
func Logger(logger *log.Logger) OptionFn {
    return func(srvr *Server) {
        srvr.logger = logger
    }
}

// MaxUploadSize sets max upload size
func MaxUploadSize(kbytes int64) OptionFn {
    return func(srvr *Server) {
        srvr.maxUploadSize = kbytes * 1024
    }

}

// RateLimit set rate limit
func RateLimit(requests int) OptionFn {
    return func(srvr *Server) {
        srvr.rateLimitRequests = requests
    }
}

// RandomTokenLength sets random token length
func RandomTokenLength(length int) OptionFn {
    return func(srvr *Server) {
        srvr.randomTokenLength = length
    }
}

// Purge sets purge days and option
func Purge(days, interval int) OptionFn {
    return func(srvr *Server) {
        srvr.purgeDays = time.Duration(days) * time.Hour * 24
@@ -200,24 +229,28 @@ func Purge(days, interval int) OptionFn {
    }
}

func ForceHTTPs() OptionFn {
// ForceHTTPS sets forcing https
func ForceHTTPS() OptionFn {
    return func(srvr *Server) {
        srvr.forceHTTPs = true
        srvr.forceHTTPS = true
    }
}

// EnableProfiler sets enable profiler
func EnableProfiler() OptionFn {
    return func(srvr *Server) {
        srvr.profilerEnabled = true
    }
}

func UseStorage(s Storage) OptionFn {
// UseStorage set storage to use
func UseStorage(s storage.Storage) OptionFn {
    return func(srvr *Server) {
        srvr.storage = s
    }
}

// UseLetsEncrypt set letsencrypt usage
func UseLetsEncrypt(hosts []string) OptionFn {
    return func(srvr *Server) {
        cacheDir := "./cache/"
@@ -246,6 +279,7 @@ func UseLetsEncrypt(hosts []string) OptionFn {
    }
}

// TLSConfig sets TLS config
func TLSConfig(cert, pk string) OptionFn {
    certificate, err := tls.LoadX509KeyPair(cert, pk)
    return func(srvr *Server) {
@@ -257,13 +291,15 @@ func TLSConfig(cert, pk string) OptionFn {
    }
}

func HttpAuthCredentials(user string, pass string) OptionFn {
// HTTPAuthCredentials sets basic http auth credentials
func HTTPAuthCredentials(user string, pass string) OptionFn {
    return func(srvr *Server) {
        srvr.AuthUser = user
        srvr.AuthPass = pass
    }
}

// FilterOptions sets ip filtering
func FilterOptions(options IPFilterOptions) OptionFn {
    for i, allowedIP := range options.AllowedIPs {
        options.AllowedIPs[i] = strings.TrimSpace(allowedIP)
@@ -278,6 +314,7 @@ func FilterOptions(options IPFilterOptions) OptionFn {
    }
}

// Server is the main application
type Server struct {
    AuthUser string
    AuthPass string
@@ -296,22 +333,24 @@ type Server struct {
    purgeDays time.Duration
    purgeInterval time.Duration

    storage Storage
    storage storage.Storage

    forceHTTPs bool
    forceHTTPS bool

    randomTokenLength int

    ipFilterOptions *IPFilterOptions

    VirusTotalKey string
    ClamAVDaemonHost string
    VirusTotalKey        string
    ClamAVDaemonHost     string
    performClamavPrescan bool

    tempPath string

    webPath string
    proxyPath string
    proxyPort string
    emailContact string
    gaKey string
    userVoiceKey string

@@ -327,6 +366,7 @@ type Server struct {
    LetsEncryptCache string
}

// New is the factory for Server
func New(options ...OptionFn) (*Server, error) {
    s := &Server{
        locks: sync.Map{},
@@ -341,12 +381,13 @@ func New(options ...OptionFn) (*Server, error) {

func init() {
    var seedBytes [8]byte
    if _, err := crypto_rand.Read(seedBytes[:]); err != nil {
    if _, err := cryptoRand.Read(seedBytes[:]); err != nil {
        panic("cannot obtain cryptographically secure seed")
    }
    rand.Seed(int64(binary.LittleEndian.Uint64(seedBytes[:])))
}

// Run starts Server
func (s *Server) Run() {
    listening := false

@@ -356,7 +397,7 @@ func (s *Server) Run() {
    go func() {
        s.logger.Println("Profiled listening at: :6060")

        http.ListenAndServe(":6060", nil)
        _ = http.ListenAndServe(":6060", nil)
    }()
}

@@ -387,8 +428,18 @@ func (s *Server) Run() {
    s.logger.Panicf("Unable to parse: path=%s, err=%s", path, err)
}

    htmlTemplates.New(stripPrefix(path)).Parse(string(bytes))
    textTemplates.New(stripPrefix(path)).Parse(string(bytes))
    if strings.HasSuffix(path, ".html") {
        _, err = htmlTemplates.New(stripPrefix(path)).Parse(string(bytes))
        if err != nil {
            s.logger.Println("Unable to parse html template", err)
        }
    }
    if strings.HasSuffix(path, ".txt") {
        _, err = textTemplates.New(stripPrefix(path)).Parse(string(bytes))
        if err != nil {
            s.logger.Println("Unable to parse text template", err)
        }
    }
}
}

@@ -402,7 +453,7 @@ func (s *Server) Run() {
    r.HandleFunc("/favicon.ico", staticHandler.ServeHTTP).Methods("GET")
    r.HandleFunc("/robots.txt", staticHandler.ServeHTTP).Methods("GET")

    r.HandleFunc("/{filename:(?:favicon\\.ico|robots\\.txt|health\\.html)}", s.BasicAuthHandler(http.HandlerFunc(s.putHandler))).Methods("PUT")
    r.HandleFunc("/{filename:(?:favicon\\.ico|robots\\.txt|health\\.html)}", s.basicAuthHandler(http.HandlerFunc(s.putHandler))).Methods("PUT")

    r.HandleFunc("/health.html", healthHandler).Methods("GET")
    r.HandleFunc("/", s.viewHandler).Methods("GET")
@@ -424,7 +475,7 @@ func (s *Server) Run() {
    return false
}

    match = (r.Referer() == "")
    match = r.Referer() == ""

    u, err := url.Parse(r.Referer())
    if err != nil {
@@ -446,17 +497,17 @@ func (s *Server) Run() {

    r.HandleFunc("/{filename}/virustotal", s.virusTotalHandler).Methods("PUT")
    r.HandleFunc("/{filename}/scan", s.scanHandler).Methods("PUT")
    r.HandleFunc("/put/{filename}", s.BasicAuthHandler(http.HandlerFunc(s.putHandler))).Methods("PUT")
    r.HandleFunc("/upload/{filename}", s.BasicAuthHandler(http.HandlerFunc(s.putHandler))).Methods("PUT")
    r.HandleFunc("/{filename}", s.BasicAuthHandler(http.HandlerFunc(s.putHandler))).Methods("PUT")
    r.HandleFunc("/", s.BasicAuthHandler(http.HandlerFunc(s.postHandler))).Methods("POST")
    r.HandleFunc("/put/{filename}", s.basicAuthHandler(http.HandlerFunc(s.putHandler))).Methods("PUT")
    r.HandleFunc("/upload/{filename}", s.basicAuthHandler(http.HandlerFunc(s.putHandler))).Methods("PUT")
    r.HandleFunc("/{filename}", s.basicAuthHandler(http.HandlerFunc(s.putHandler))).Methods("PUT")
    r.HandleFunc("/", s.basicAuthHandler(http.HandlerFunc(s.postHandler))).Methods("POST")
    // r.HandleFunc("/{page}", viewHandler).Methods("GET")

    r.HandleFunc("/{token}/{filename}/{deletionToken}", s.deleteHandler).Methods("DELETE")

    r.NotFoundHandler = http.HandlerFunc(s.notFoundHandler)

    mime.AddExtensionType(".md", "text/x-markdown")
    _ = mime.AddExtensionType(".md", "text/x-markdown")

    s.logger.Printf("Transfer.sh server started.\nusing temp folder: %s\nusing storage provider: %s", s.tempPath, s.storage.Type())

@@ -474,7 +525,7 @@ func (s *Server) Run() {
    }

    h := handlers.PanicHandler(
        IPFilterHandler(
        ipFilterHandler(
            handlers.LogHandler(
                LoveHandler(
                    s.RedirectHandler(cors(r))),
@@ -486,32 +537,34 @@ func (s *Server) Run() {
    )

    if !s.TLSListenerOnly {
        srvr := &http.Server{
            Addr: s.ListenerString,
            Handler: h,
        }

        listening = true
        s.logger.Printf("listening on port: %v\n", s.ListenerString)
        s.logger.Printf("starting to listen on: %v\n", s.ListenerString)

        go func() {
            srvr.ListenAndServe()
            srvr := &http.Server{
                Addr: s.ListenerString,
                Handler: h,
            }

            if err := srvr.ListenAndServe(); err != nil {
                s.logger.Fatal(err)
            }
        }()
    }

    if s.TLSListenerString != "" {
        listening = true
        s.logger.Printf("listening on port: %v\n", s.TLSListenerString)
        s.logger.Printf("starting to listen for TLS on: %v\n", s.TLSListenerString)

        go func() {
            s := &http.Server{
            srvr := &http.Server{
                Addr: s.TLSListenerString,
                Handler: h,
                TLSConfig: s.tlsConfig,
            }

            if err := s.ListenAndServeTLS("", ""); err != nil {
                panic(err)
            if err := srvr.ListenAndServeTLS("", ""); err != nil {
                s.logger.Fatal(err)
            }
        }()
    }

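server/server.go configures Server entirely through OptionFn values such as ForceHTTPS and Listener: each option is a closure that mutates the server while it is being constructed, which is why the diff can add options without touching New's signature. A self-contained sketch of the pattern with illustrative names:

package main

import "fmt"

// Config stands in for Server; WithAddr and WithHTTPS mirror the shape
// of Listener and ForceHTTPS above. Names here are illustrative only.
type Config struct {
    addr  string
    https bool
}

type Option func(*Config)

func WithAddr(addr string) Option {
    return func(c *Config) { c.addr = addr }
}

func WithHTTPS() Option {
    return func(c *Config) { c.https = true }
}

func New(opts ...Option) *Config {
    c := &Config{addr: ":8080"} // defaults first
    for _, opt := range opts {
        opt(c) // each option overrides a default
    }
    return c
}

func main() {
    cfg := New(WithAddr(":443"), WithHTTPS())
    fmt.Printf("%+v\n", cfg)
}
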
@@ -1,750 +0,0 @@
package server

import (
    "encoding/json"
    "errors"
    "fmt"
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3manager"
    "golang.org/x/net/context"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
    "google.golang.org/api/drive/v3"
    "google.golang.org/api/googleapi"
    "io"
    "io/ioutil"
    "log"
    "net/http"
    "os"
    "path/filepath"
    "strings"
    "time"

    "storj.io/common/storj"
    "storj.io/uplink"
)

type Storage interface {
    Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error)
    Head(token string, filename string) (contentLength uint64, err error)
    Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error
    Delete(token string, filename string) error
    IsNotExist(err error) bool
    Purge(days time.Duration) error

    Type() string
}

type LocalStorage struct {
    Storage
    basedir string
    logger *log.Logger
}

func NewLocalStorage(basedir string, logger *log.Logger) (*LocalStorage, error) {
    return &LocalStorage{basedir: basedir, logger: logger}, nil
}

func (s *LocalStorage) Type() string {
    return "local"
}

func (s *LocalStorage) Head(token string, filename string) (contentLength uint64, err error) {
    path := filepath.Join(s.basedir, token, filename)

    var fi os.FileInfo
    if fi, err = os.Lstat(path); err != nil {
        return
    }

    contentLength = uint64(fi.Size())

    return
}

func (s *LocalStorage) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
    path := filepath.Join(s.basedir, token, filename)

    // content type , content length
    if reader, err = os.Open(path); err != nil {
        return
    }

    var fi os.FileInfo
    if fi, err = os.Lstat(path); err != nil {
        return
    }

    contentLength = uint64(fi.Size())

    return
}

func (s *LocalStorage) Delete(token string, filename string) (err error) {
    metadata := filepath.Join(s.basedir, token, fmt.Sprintf("%s.metadata", filename))
    os.Remove(metadata)

    path := filepath.Join(s.basedir, token, filename)
    err = os.Remove(path)
    return
}

func (s *LocalStorage) Purge(days time.Duration) (err error) {
    err = filepath.Walk(s.basedir,
        func(path string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            if info.IsDir() {
                return nil
            }

            if info.ModTime().Before(time.Now().Add(-1 * days)) {
                err = os.Remove(path)
                return err
            }

            return nil
        })

    return
}

func (s *LocalStorage) IsNotExist(err error) bool {
    if err == nil {
        return false
    }

    return os.IsNotExist(err)
}

func (s *LocalStorage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error {
    var f io.WriteCloser
    var err error

    path := filepath.Join(s.basedir, token)

    if err = os.MkdirAll(path, 0700); err != nil && !os.IsExist(err) {
        return err
    }

    if f, err = os.OpenFile(filepath.Join(path, filename), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600); err != nil {
        return err
    }

    defer f.Close()

    if _, err = io.Copy(f, reader); err != nil {
        return err
    }

    return nil
}

type S3Storage struct {
    Storage
    bucket string
    session *session.Session
    s3 *s3.S3
    logger *log.Logger
    purgeDays time.Duration
    noMultipart bool
}

func NewS3Storage(accessKey, secretKey, bucketName string, purgeDays int, region, endpoint string, disableMultipart bool, forcePathStyle bool, logger *log.Logger) (*S3Storage, error) {
    sess := getAwsSession(accessKey, secretKey, region, endpoint, forcePathStyle)

    return &S3Storage{
        bucket: bucketName,
        s3: s3.New(sess),
        session: sess,
        logger: logger,
        noMultipart: disableMultipart,
        purgeDays: time.Duration(purgeDays*24) * time.Hour,
    }, nil
}

func (s *S3Storage) Type() string {
    return "s3"
}

func (s *S3Storage) Head(token string, filename string) (contentLength uint64, err error) {
    key := fmt.Sprintf("%s/%s", token, filename)

    headRequest := &s3.HeadObjectInput{
        Bucket: aws.String(s.bucket),
        Key: aws.String(key),
    }

    // content type , content length
    response, err := s.s3.HeadObject(headRequest)
    if err != nil {
        return
    }

    if response.ContentLength != nil {
        contentLength = uint64(*response.ContentLength)
    }

    return
}

func (s *S3Storage) Purge(days time.Duration) (err error) {
    // NOOP expiration is set at upload time
    return nil
}

func (s *S3Storage) IsNotExist(err error) bool {
    if err == nil {
        return false
    }

    if aerr, ok := err.(awserr.Error); ok {
        switch aerr.Code() {
        case s3.ErrCodeNoSuchKey:
            return true
        }
    }

    return false
}

func (s *S3Storage) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
    key := fmt.Sprintf("%s/%s", token, filename)

    getRequest := &s3.GetObjectInput{
        Bucket: aws.String(s.bucket),
        Key: aws.String(key),
    }

    response, err := s.s3.GetObject(getRequest)
    if err != nil {
        return
    }

    if response.ContentLength != nil {
        contentLength = uint64(*response.ContentLength)
    }

    reader = response.Body
    return
}

func (s *S3Storage) Delete(token string, filename string) (err error) {
    metadata := fmt.Sprintf("%s/%s.metadata", token, filename)
    deleteRequest := &s3.DeleteObjectInput{
        Bucket: aws.String(s.bucket),
        Key: aws.String(metadata),
    }

    _, err = s.s3.DeleteObject(deleteRequest)
    if err != nil {
        return
    }

    key := fmt.Sprintf("%s/%s", token, filename)
    deleteRequest = &s3.DeleteObjectInput{
        Bucket: aws.String(s.bucket),
        Key: aws.String(key),
    }

    _, err = s.s3.DeleteObject(deleteRequest)

    return
}

func (s *S3Storage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) (err error) {
    key := fmt.Sprintf("%s/%s", token, filename)

    s.logger.Printf("Uploading file %s to S3 Bucket", filename)
    var concurrency int
    if !s.noMultipart {
        concurrency = 20
    } else {
        concurrency = 1
    }

    // Create an uploader with the session and custom options
    uploader := s3manager.NewUploader(s.session, func(u *s3manager.Uploader) {
        u.Concurrency = concurrency // default is 5
        u.LeavePartsOnError = false
    })

    var expire *time.Time
    if s.purgeDays.Hours() > 0 {
        expire = aws.Time(time.Now().Add(s.purgeDays))
    }

    _, err = uploader.Upload(&s3manager.UploadInput{
        Bucket: aws.String(s.bucket),
        Key: aws.String(key),
        Body: reader,
        Expires: expire,
    })

    return
}

type GDrive struct {
    service *drive.Service
    rootId string
    basedir string
    localConfigPath string
    chunkSize int
    logger *log.Logger
}

func NewGDriveStorage(clientJsonFilepath string, localConfigPath string, basedir string, chunkSize int, logger *log.Logger) (*GDrive, error) {
    b, err := ioutil.ReadFile(clientJsonFilepath)
    if err != nil {
        return nil, err
    }

    // If modifying these scopes, delete your previously saved client_secret.json.
    config, err := google.ConfigFromJSON(b, drive.DriveScope, drive.DriveMetadataScope)
    if err != nil {
        return nil, err
    }

    srv, err := drive.New(getGDriveClient(config, localConfigPath, logger))
    if err != nil {
        return nil, err
    }

    chunkSize = chunkSize * 1024 * 1024
    storage := &GDrive{service: srv, basedir: basedir, rootId: "", localConfigPath: localConfigPath, chunkSize: chunkSize, logger: logger}
    err = storage.setupRoot()
    if err != nil {
        return nil, err
    }

    return storage, nil
}

const GDriveRootConfigFile = "root_id.conf"
const GDriveTokenJsonFile = "token.json"
const GDriveDirectoryMimeType = "application/vnd.google-apps.folder"

func (s *GDrive) setupRoot() error {
    rootFileConfig := filepath.Join(s.localConfigPath, GDriveRootConfigFile)

    rootId, err := ioutil.ReadFile(rootFileConfig)
    if err != nil && !os.IsNotExist(err) {
        return err
    }

    if string(rootId) != "" {
        s.rootId = string(rootId)
        return nil
    }

    dir := &drive.File{
        Name: s.basedir,
        MimeType: GDriveDirectoryMimeType,
    }

    di, err := s.service.Files.Create(dir).Fields("id").Do()
    if err != nil {
        return err
    }

    s.rootId = di.Id
    err = ioutil.WriteFile(rootFileConfig, []byte(s.rootId), os.FileMode(0600))
    if err != nil {
        return err
    }

    return nil
}

func (s *GDrive) hasChecksum(f *drive.File) bool {
    return f.Md5Checksum != ""
}

func (s *GDrive) list(nextPageToken string, q string) (*drive.FileList, error) {
    return s.service.Files.List().Fields("nextPageToken, files(id, name, mimeType)").Q(q).PageToken(nextPageToken).Do()
}

func (s *GDrive) findId(filename string, token string) (string, error) {
    filename = strings.Replace(filename, `'`, `\'`, -1)
    filename = strings.Replace(filename, `"`, `\"`, -1)

    fileId, tokenId, nextPageToken := "", "", ""

    q := fmt.Sprintf("'%s' in parents and name='%s' and mimeType='%s' and trashed=false", s.rootId, token, GDriveDirectoryMimeType)
    l, err := s.list(nextPageToken, q)
    if err != nil {
        return "", err
    }

    for 0 < len(l.Files) {
        for _, fi := range l.Files {
            tokenId = fi.Id
            break
        }

        if l.NextPageToken == "" {
            break
        }

        l, err = s.list(l.NextPageToken, q)
    }

    if filename == "" {
        return tokenId, nil
    } else if tokenId == "" {
        return "", fmt.Errorf("Cannot find file %s/%s", token, filename)
    }

    q = fmt.Sprintf("'%s' in parents and name='%s' and mimeType!='%s' and trashed=false", tokenId, filename, GDriveDirectoryMimeType)
    l, err = s.list(nextPageToken, q)
    if err != nil {
        return "", err
    }

    for 0 < len(l.Files) {
        for _, fi := range l.Files {

            fileId = fi.Id
            break
        }

        if l.NextPageToken == "" {
            break
        }

        l, err = s.list(l.NextPageToken, q)
    }

    if fileId == "" {
        return "", fmt.Errorf("Cannot find file %s/%s", token, filename)
    }

    return fileId, nil
}

func (s *GDrive) Type() string {
    return "gdrive"
}

func (s *GDrive) Head(token string, filename string) (contentLength uint64, err error) {
    var fileId string
    fileId, err = s.findId(filename, token)
    if err != nil {
        return
    }

    var fi *drive.File
    if fi, err = s.service.Files.Get(fileId).Fields("size").Do(); err != nil {
        return
    }

    contentLength = uint64(fi.Size)

    return
}

func (s *GDrive) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
    var fileId string
    fileId, err = s.findId(filename, token)
    if err != nil {
        return
    }

    var fi *drive.File
    fi, err = s.service.Files.Get(fileId).Fields("size", "md5Checksum").Do()
    if !s.hasChecksum(fi) {
        err = fmt.Errorf("Cannot find file %s/%s", token, filename)
        return
    }

    contentLength = uint64(fi.Size)

    ctx := context.Background()
    var res *http.Response
    res, err = s.service.Files.Get(fileId).Context(ctx).Download()
    if err != nil {
        return
    }

    reader = res.Body

    return
}

func (s *GDrive) Delete(token string, filename string) (err error) {
    metadata, _ := s.findId(fmt.Sprintf("%s.metadata", filename), token)
    s.service.Files.Delete(metadata).Do()

    var fileId string
    fileId, err = s.findId(filename, token)
    if err != nil {
        return
    }

    err = s.service.Files.Delete(fileId).Do()
    return
}

func (s *GDrive) Purge(days time.Duration) (err error) {
    nextPageToken := ""

    expirationDate := time.Now().Add(-1 * days).Format(time.RFC3339)
    q := fmt.Sprintf("'%s' in parents and modifiedTime < '%s' and mimeType!='%s' and trashed=false", s.rootId, expirationDate, GDriveDirectoryMimeType)
    l, err := s.list(nextPageToken, q)
    if err != nil {
        return err
    }

    for 0 < len(l.Files) {
        for _, fi := range l.Files {
            err = s.service.Files.Delete(fi.Id).Do()
            if err != nil {
                return
            }
        }

        if l.NextPageToken == "" {
            break
        }

        l, err = s.list(l.NextPageToken, q)
    }

    return
}

func (s *GDrive) IsNotExist(err error) bool {
    if err != nil {
        if e, ok := err.(*googleapi.Error); ok {
            return e.Code == http.StatusNotFound
        }
    }

    return false
}

func (s *GDrive) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error {
    dirId, err := s.findId("", token)
    if err != nil {
        return err
    }

    if dirId == "" {
        dir := &drive.File{
            Name: token,
            Parents: []string{s.rootId},
            MimeType: GDriveDirectoryMimeType,
        }

        di, err := s.service.Files.Create(dir).Fields("id").Do()
        if err != nil {
            return err
        }

        dirId = di.Id
    }

    // Instantiate empty drive file
    dst := &drive.File{
        Name: filename,
        Parents: []string{dirId},
        MimeType: contentType,
    }

    ctx := context.Background()
    _, err = s.service.Files.Create(dst).Context(ctx).Media(reader, googleapi.ChunkSize(s.chunkSize)).Do()

    if err != nil {
        return err
    }

    return nil
}

// Retrieve a token, saves the token, then returns the generated client.
func getGDriveClient(config *oauth2.Config, localConfigPath string, logger *log.Logger) *http.Client {
    tokenFile := filepath.Join(localConfigPath, GDriveTokenJsonFile)
    tok, err := gDriveTokenFromFile(tokenFile)
    if err != nil {
        tok = getGDriveTokenFromWeb(config, logger)
        saveGDriveToken(tokenFile, tok, logger)
    }

    return config.Client(context.Background(), tok)
}

// Request a token from the web, then returns the retrieved token.
func getGDriveTokenFromWeb(config *oauth2.Config, logger *log.Logger) *oauth2.Token {
    authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
    fmt.Printf("Go to the following link in your browser then type the "+
        "authorization code: \n%v\n", authURL)

    var authCode string
    if _, err := fmt.Scan(&authCode); err != nil {
        logger.Fatalf("Unable to read authorization code %v", err)
    }

    tok, err := config.Exchange(context.TODO(), authCode)
    if err != nil {
        logger.Fatalf("Unable to retrieve token from web %v", err)
    }
    return tok
}

// Retrieves a token from a local file.
func gDriveTokenFromFile(file string) (*oauth2.Token, error) {
    f, err := os.Open(file)
    defer f.Close()
    if err != nil {
        return nil, err
    }
    tok := &oauth2.Token{}
    err = json.NewDecoder(f).Decode(tok)
    return tok, err
}

// Saves a token to a file path.
func saveGDriveToken(path string, token *oauth2.Token, logger *log.Logger) {
    logger.Printf("Saving credential file to: %s\n", path)
    f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
    defer f.Close()
    if err != nil {
        logger.Fatalf("Unable to cache oauth token: %v", err)
    }

    json.NewEncoder(f).Encode(token)
}

type StorjStorage struct {
    Storage
    project *uplink.Project
    bucket *uplink.Bucket
    purgeDays time.Duration
    logger *log.Logger
}

func NewStorjStorage(access, bucket string, purgeDays int, logger *log.Logger) (*StorjStorage, error) {
    var instance StorjStorage
    var err error

    ctx := context.TODO()

    parsedAccess, err := uplink.ParseAccess(access)
    if err != nil {
        return nil, err
    }

    instance.project, err = uplink.OpenProject(ctx, parsedAccess)
    if err != nil {
        return nil, err
    }

    instance.bucket, err = instance.project.EnsureBucket(ctx, bucket)
    if err != nil {
        //Ignoring the error to return the one that occurred first, but try to clean up.
        _ = instance.project.Close()
        return nil, err
    }

    instance.purgeDays = time.Duration(purgeDays*24) * time.Hour

    instance.logger = logger

    return &instance, nil
}

func (s *StorjStorage) Type() string {
    return "storj"
}

func (s *StorjStorage) Head(token string, filename string) (contentLength uint64, err error) {
    key := storj.JoinPaths(token, filename)

    ctx := context.TODO()

    obj, err := s.project.StatObject(ctx, s.bucket.Name, key)
    if err != nil {
        return 0, err
    }

    contentLength = uint64(obj.System.ContentLength)

    return
}

func (s *StorjStorage) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
    key := storj.JoinPaths(token, filename)

    s.logger.Printf("Getting file %s from Storj Bucket", filename)

    ctx := context.TODO()

    download, err := s.project.DownloadObject(ctx, s.bucket.Name, key, nil)
    if err != nil {
        return nil, 0, err
    }

    contentLength = uint64(download.Info().System.ContentLength)

    reader = download
    return
}

func (s *StorjStorage) Delete(token string, filename string) (err error) {
    key := storj.JoinPaths(token, filename)

    s.logger.Printf("Deleting file %s from Storj Bucket", filename)

    ctx := context.TODO()

    _, err = s.project.DeleteObject(ctx, s.bucket.Name, key)

    return
}

func (s *StorjStorage) Purge(days time.Duration) (err error) {
    // NOOP expiration is set at upload time
    return nil
}

func (s *StorjStorage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) (err error) {
    key := storj.JoinPaths(token, filename)

    s.logger.Printf("Uploading file %s to Storj Bucket", filename)

    ctx := context.TODO()

    var uploadOptions *uplink.UploadOptions
    if s.purgeDays.Hours() > 0 {
        uploadOptions = &uplink.UploadOptions{Expires: time.Now().Add(s.purgeDays)}
    }

    writer, err := s.project.UploadObject(ctx, s.bucket.Name, key, uploadOptions)
    if err != nil {
        return err
    }

    n, err := io.Copy(writer, reader)
    if err != nil || uint64(n) != contentLength {
        //Ignoring the error to return the one that occurred first, but try to clean up.
        _ = writer.Abort()
        return err
    }
    err = writer.SetCustomMetadata(ctx, uplink.CustomMetadata{"content-type": contentType})
    if err != nil {
        //Ignoring the error to return the one that occurred first, but try to clean up.
        _ = writer.Abort()
        return err
    }

    err = writer.Commit()
    return err
}

func (s *StorjStorage) IsNotExist(err error) bool {
    return errors.Is(err, uplink.ErrObjectNotFound)
}
120 server/storage/common.go Normal file
@@ -0,0 +1,120 @@
package storage

import (
    "context"
    "fmt"
    "io"
    "strconv"
    "time"

    "regexp"
)

type Range struct {
    Start uint64
    Limit uint64
    contentRange string
}

// Range Reconstructs Range header and returns it
func (r *Range) Range() string {
    if r.Limit > 0 {
        return fmt.Sprintf("bytes=%d-%d", r.Start, r.Start+r.Limit-1)
    } else {
        return fmt.Sprintf("bytes=%d-", r.Start)
    }
}

// AcceptLength Tries to accept given range
// returns newContentLength if range was satisfied, otherwise returns given contentLength
func (r *Range) AcceptLength(contentLength uint64) (newContentLength uint64) {
    newContentLength = contentLength
    if r.Limit == 0 {
        r.Limit = newContentLength - r.Start
    }
    if contentLength < r.Start {
        return
    }
    if r.Limit > contentLength-r.Start {
        return
    }
    r.contentRange = fmt.Sprintf("bytes %d-%d/%d", r.Start, r.Start+r.Limit-1, contentLength)
    newContentLength = r.Limit
    return
}

func (r *Range) SetContentRange(cr string) {
    r.contentRange = cr
}

// Returns accepted Content-Range header. If range wasn't accepted empty string is returned
func (r *Range) ContentRange() string {
    return r.contentRange
}

var rexp *regexp.Regexp = regexp.MustCompile(`^bytes=([0-9]+)-([0-9]*)$`)

// Parses HTTP Range header and returns struct on success
// only bytes=start-finish supported
func ParseRange(rng string) *Range {
    if rng == "" {
        return nil
    }

    matches := rexp.FindAllStringSubmatch(rng, -1)
    if len(matches) != 1 || len(matches[0]) != 3 {
        return nil
    }
    if len(matches[0][0]) != len(rng) || len(matches[0][1]) == 0 {
        return nil
    }

    start, err := strconv.ParseUint(matches[0][1], 10, 64)
    if err != nil {
        return nil
    }

    if len(matches[0][2]) == 0 {
        return &Range{Start: start, Limit: 0}
    }

    finish, err := strconv.ParseUint(matches[0][2], 10, 64)
    if err != nil {
        return nil
    }
    if finish < start || finish+1 < finish {
        return nil
    }

    return &Range{Start: start, Limit: finish - start + 1}
}

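ParseRange and AcceptLength work as a pair: ParseRange turns a Range request header into a start/limit window, and AcceptLength clips that window against the real object size, recording the Content-Range value to send back. A worked sketch (assuming the package imports as github.com/dutchcoders/transfer.sh/server/storage):

package main

import (
    "fmt"

    "github.com/dutchcoders/transfer.sh/server/storage"
)

func main() {
    // "bytes=100-199" parses to Start=100, Limit=100.
    rng := storage.ParseRange("bytes=100-199")
    fmt.Println(rng.Range()) // bytes=100-199

    // For a 1000-byte object the range is satisfiable: the new
    // content length is the 100-byte window, and ContentRange()
    // now reports "bytes 100-199/1000".
    n := rng.AcceptLength(1000)
    fmt.Println(n, rng.ContentRange())

    // An open-ended range keeps everything from the offset on:
    // Limit becomes 1000-500, so 500 is returned.
    open := storage.ParseRange("bytes=500-")
    fmt.Println(open.AcceptLength(1000))
}
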
// Storage is the interface for storage operation
type Storage interface {
    // Get retrieves a file from storage
    Get(ctx context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error)
    // Head retrieves content length of a file from storage
    Head(ctx context.Context, token string, filename string) (contentLength uint64, err error)
    // Put saves a file on storage
    Put(ctx context.Context, token string, filename string, reader io.Reader, contentType string, contentLength uint64) error
    // Delete removes a file from storage
    Delete(ctx context.Context, token string, filename string) error
    // IsNotExist indicates if a file doesn't exist on storage
    IsNotExist(err error) bool
    // Purge cleans up the storage
    Purge(ctx context.Context, days time.Duration) error
    // Whether storage supports Get with Range header
    IsRangeSupported() bool
    // Type returns the storage type
    Type() string
}

func CloseCheck(c io.Closer) {
    if c == nil {
        return
    }

    if err := c.Close(); err != nil {
        fmt.Println("Received close error:", err)
    }
}
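Compared with the deleted server/storage.go interface, every method now takes a context and Get accepts an optional *Range. A hedged sketch of how a download path might drive the new interface; serveFile is a hypothetical helper, not code from the diff:

package example

import (
    "context"
    "io"
    "net/http"
    "strconv"

    "github.com/dutchcoders/transfer.sh/server/storage"
)

// serveFile shows one plausible flow: parse the Range header only when
// the backend supports it, map IsNotExist to 404, and close the reader
// through the CloseCheck helper above.
func serveFile(ctx context.Context, s storage.Storage, w http.ResponseWriter, r *http.Request, token, filename string) {
    var rng *storage.Range
    if s.IsRangeSupported() {
        rng = storage.ParseRange(r.Header.Get("Range"))
    }

    reader, contentLength, err := s.Get(ctx, token, filename, rng)
    if err != nil {
        if s.IsNotExist(err) {
            http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
            return
        }
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer storage.CloseCheck(reader)

    w.Header().Set("Content-Length", strconv.FormatUint(contentLength, 10))
    if rng != nil && rng.ContentRange() != "" {
        // Backends such as GDrive call AcceptLength inside Get,
        // so an accepted range carries its Content-Range here.
        w.Header().Set("Content-Range", rng.ContentRange())
        w.WriteHeader(http.StatusPartialContent)
    }
    _, _ = io.Copy(w, reader)
}
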
394 server/storage/gdrive.go Normal file
@@ -0,0 +1,394 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/drive/v3"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
// GDrive is a storage backed by GDrive
|
||||
type GDrive struct {
|
||||
service *drive.Service
|
||||
rootID string
|
||||
basedir string
|
||||
localConfigPath string
|
||||
chunkSize int
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
const gDriveRootConfigFile = "root_id.conf"
|
||||
const gDriveTokenJSONFile = "token.json"
|
||||
const gDriveDirectoryMimeType = "application/vnd.google-apps.folder"
|
||||
|
||||
// NewGDriveStorage is the factory for GDrive
|
||||
func NewGDriveStorage(clientJSONFilepath string, localConfigPath string, basedir string, chunkSize int, logger *log.Logger) (*GDrive, error) {
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
b, err := ioutil.ReadFile(clientJSONFilepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If modifying these scopes, delete your previously saved client_secret.json.
|
||||
config, err := google.ConfigFromJSON(b, drive.DriveScope, drive.DriveMetadataScope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
httpClient := getGDriveClient(ctx, config, localConfigPath, logger)
|
||||
|
||||
srv, err := drive.NewService(ctx, option.WithHTTPClient(httpClient))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
storage := &GDrive{service: srv, basedir: basedir, rootID: "", localConfigPath: localConfigPath, chunkSize: chunkSize, logger: logger}
|
||||
err = storage.setupRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
func (s *GDrive) setupRoot() error {
|
||||
rootFileConfig := filepath.Join(s.localConfigPath, gDriveRootConfigFile)
|
||||
|
||||
rootID, err := ioutil.ReadFile(rootFileConfig)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if string(rootID) != "" {
|
||||
s.rootID = string(rootID)
|
||||
return nil
|
||||
}
|
||||
|
||||
dir := &drive.File{
|
||||
Name: s.basedir,
|
||||
MimeType: gDriveDirectoryMimeType,
|
||||
}
|
||||
|
||||
di, err := s.service.Files.Create(dir).Fields("id").Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.rootID = di.Id
|
||||
err = ioutil.WriteFile(rootFileConfig, []byte(s.rootID), os.FileMode(0600))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *GDrive) hasChecksum(f *drive.File) bool {
|
||||
return f.Md5Checksum != ""
|
||||
}
|
||||
|
||||
func (s *GDrive) list(nextPageToken string, q string) (*drive.FileList, error) {
|
||||
return s.service.Files.List().Fields("nextPageToken, files(id, name, mimeType)").Q(q).PageToken(nextPageToken).Do()
|
||||
}
|
||||
|
||||
func (s *GDrive) findID(filename string, token string) (string, error) {
|
||||
filename = strings.Replace(filename, `'`, `\'`, -1)
|
||||
filename = strings.Replace(filename, `"`, `\"`, -1)
|
||||
|
||||
fileID, tokenID, nextPageToken := "", "", ""
|
||||
|
||||
q := fmt.Sprintf("'%s' in parents and name='%s' and mimeType='%s' and trashed=false", s.rootID, token, gDriveDirectoryMimeType)
|
||||
l, err := s.list(nextPageToken, q)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for 0 < len(l.Files) {
|
||||
for _, fi := range l.Files {
|
||||
tokenID = fi.Id
|
||||
break
|
||||
}
|
||||
|
||||
if l.NextPageToken == "" {
|
||||
break
|
||||
}
|
||||
|
||||
l, err = s.list(l.NextPageToken, q)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
if filename == "" {
|
||||
return tokenID, nil
|
||||
} else if tokenID == "" {
|
||||
return "", fmt.Errorf("cannot find file %s/%s", token, filename)
|
||||
}
|
||||
|
||||
q = fmt.Sprintf("'%s' in parents and name='%s' and mimeType!='%s' and trashed=false", tokenID, filename, gDriveDirectoryMimeType)
|
||||
l, err = s.list(nextPageToken, q)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for 0 < len(l.Files) {
|
||||
for _, fi := range l.Files {
|
||||
|
||||
fileID = fi.Id
|
||||
break
|
||||
}
|
||||
|
||||
if l.NextPageToken == "" {
|
||||
break
|
||||
}
|
||||
|
||||
l, err = s.list(l.NextPageToken, q)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
if fileID == "" {
|
||||
return "", fmt.Errorf("cannot find file %s/%s", token, filename)
|
||||
}
|
||||
|
||||
return fileID, nil
|
||||
}
|
||||
|
||||
// Type returns the storage type
func (s *GDrive) Type() string {
	return "gdrive"
}

// Head retrieves content length of a file from storage
func (s *GDrive) Head(ctx context.Context, token string, filename string) (contentLength uint64, err error) {
	var fileID string
	fileID, err = s.findID(filename, token)
	if err != nil {
		return
	}

	var fi *drive.File
	if fi, err = s.service.Files.Get(fileID).Context(ctx).Fields("size").Do(); err != nil {
		return
	}

	contentLength = uint64(fi.Size)

	return
}

// Get retrieves a file from storage
func (s *GDrive) Get(ctx context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error) {
	var fileID string
	fileID, err = s.findID(filename, token)
	if err != nil {
		return
	}

	var fi *drive.File
	fi, err = s.service.Files.Get(fileID).Fields("size", "md5Checksum").Do()
	if err != nil {
		return
	}
	if !s.hasChecksum(fi) {
		err = fmt.Errorf("cannot find file %s/%s", token, filename)
		return
	}

	contentLength = uint64(fi.Size)

	fileGetCall := s.service.Files.Get(fileID)
	if rng != nil {
		header := fileGetCall.Header()
		header.Set("Range", rng.Range())
	}

	var res *http.Response
	res, err = fileGetCall.Context(ctx).Download()
	if err != nil {
		return
	}

	if rng != nil {
		reader = res.Body
		rng.AcceptLength(contentLength)
		return
	}

	reader = res.Body

	return
}

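The Range support above piggybacks on the Drive media-download endpoint: the call's HTTP headers are set directly before Download is invoked. A minimal sketch, assuming an already-authenticated *drive.Service and a valid file ID (rng.Range() is presumed to yield a standard bytes=start-end value):

package main

import (
	"fmt"
	"net/http"

	drive "google.golang.org/api/drive/v3"
)

// downloadFirstKiB requests only the first 1024 bytes of a stored file,
// using the same Header().Set("Range", ...) mechanism as Get above.
func downloadFirstKiB(svc *drive.Service, fileID string) (*http.Response, error) {
	call := svc.Files.Get(fileID)
	call.Header().Set("Range", "bytes=0-1023")
	return call.Download()
}

func main() {
	fmt.Println("wire downloadFirstKiB up to an authenticated *drive.Service to try it")
}
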
// Delete removes a file from storage
func (s *GDrive) Delete(ctx context.Context, token string, filename string) (err error) {
	metadata, _ := s.findID(fmt.Sprintf("%s.metadata", filename), token)
	_ = s.service.Files.Delete(metadata).Do()

	var fileID string
	fileID, err = s.findID(filename, token)
	if err != nil {
		return
	}

	err = s.service.Files.Delete(fileID).Context(ctx).Do()
	return
}

// Purge cleans up the storage
func (s *GDrive) Purge(ctx context.Context, days time.Duration) (err error) {
	nextPageToken := ""

	expirationDate := time.Now().Add(-1 * days).Format(time.RFC3339)
	q := fmt.Sprintf("'%s' in parents and modifiedTime < '%s' and mimeType!='%s' and trashed=false", s.rootID, expirationDate, gDriveDirectoryMimeType)
	l, err := s.list(nextPageToken, q)
	if err != nil {
		return err
	}

	for 0 < len(l.Files) {
		for _, fi := range l.Files {
			err = s.service.Files.Delete(fi.Id).Context(ctx).Do()
			if err != nil {
				return
			}
		}

		if l.NextPageToken == "" {
			break
		}

		l, err = s.list(l.NextPageToken, q)
		if err != nil {
			return
		}
	}

	return
}

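The cutoff in Purge is plain time arithmetic (note the parameter is a time.Duration despite its "days" name): subtract the retention window from now and format it as RFC 3339 for the modifiedTime comparison. A tiny sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	days := 7 * 24 * time.Hour // hypothetical retention window passed as "days"
	expirationDate := time.Now().Add(-1 * days).Format(time.RFC3339)
	fmt.Printf("modifiedTime < '%s'\n", expirationDate)
}
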
// IsNotExist indicates if a file doesn't exist on storage
func (s *GDrive) IsNotExist(err error) bool {
	if err == nil {
		return false
	}

	if e, ok := err.(*googleapi.Error); ok {
		return e.Code == http.StatusNotFound
	}

	return false
}

// Put saves a file on storage
func (s *GDrive) Put(ctx context.Context, token string, filename string, reader io.Reader, contentType string, contentLength uint64) error {
	dirID, err := s.findID("", token)
	if err != nil {
		return err
	}

	if dirID == "" {
		dir := &drive.File{
			Name:     token,
			Parents:  []string{s.rootID},
			MimeType: gDriveDirectoryMimeType,
		}

		di, err := s.service.Files.Create(dir).Fields("id").Do()
		if err != nil {
			return err
		}

		dirID = di.Id
	}

	// Instantiate empty drive file
	dst := &drive.File{
		Name:     filename,
		Parents:  []string{dirID},
		MimeType: contentType,
	}

	_, err = s.service.Files.Create(dst).Context(ctx).Media(reader, googleapi.ChunkSize(s.chunkSize)).Do()

	if err != nil {
		return err
	}

	return nil
}

func (s *GDrive) IsRangeSupported() bool { return true }

// Retrieves a token, saves it, and returns the generated client.
func getGDriveClient(ctx context.Context, config *oauth2.Config, localConfigPath string, logger *log.Logger) *http.Client {
	tokenFile := filepath.Join(localConfigPath, gDriveTokenJSONFile)
	tok, err := gDriveTokenFromFile(tokenFile)
	if err != nil {
		tok = getGDriveTokenFromWeb(ctx, config, logger)
		saveGDriveToken(tokenFile, tok, logger)
	}

	return config.Client(ctx, tok)
}

// Requests a token from the web, then returns the retrieved token.
func getGDriveTokenFromWeb(ctx context.Context, config *oauth2.Config, logger *log.Logger) *oauth2.Token {
	authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
	fmt.Printf("Go to the following link in your browser then type the "+
		"authorization code: \n%v\n", authURL)

	var authCode string
	if _, err := fmt.Scan(&authCode); err != nil {
		logger.Fatalf("Unable to read authorization code %v", err)
	}

	tok, err := config.Exchange(ctx, authCode)
	if err != nil {
		logger.Fatalf("Unable to retrieve token from web %v", err)
	}
	return tok
}

// Retrieves a token from a local file.
func gDriveTokenFromFile(file string) (*oauth2.Token, error) {
	f, err := os.Open(file)
	defer CloseCheck(f)
	if err != nil {
		return nil, err
	}
	tok := &oauth2.Token{}
	err = json.NewDecoder(f).Decode(tok)
	return tok, err
}

// Saves a token to a file path.
func saveGDriveToken(path string, token *oauth2.Token, logger *log.Logger) {
	logger.Printf("Saving credential file to: %s\n", path)
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	defer CloseCheck(f)
	if err != nil {
		logger.Fatalf("Unable to cache oauth token: %v", err)
	}

	err = json.NewEncoder(f).Encode(token)
	if err != nil {
		logger.Fatalf("Unable to encode oauth token: %v", err)
	}
}

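Together, gDriveTokenFromFile and saveGDriveToken amount to a JSON round-trip of the oauth2.Token. A self-contained sketch of that round-trip (file name and token value are placeholders):

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{AccessToken: "demo-access-token"} // placeholder token

	// Save, as saveGDriveToken does.
	f, err := os.OpenFile("token.json", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		panic(err)
	}
	if err := json.NewEncoder(f).Encode(tok); err != nil {
		panic(err)
	}
	_ = f.Close()

	// Load, as gDriveTokenFromFile does.
	f, err = os.Open("token.json")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	loaded := &oauth2.Token{}
	if err := json.NewDecoder(f).Decode(loaded); err != nil {
		panic(err)
	}
	fmt.Println(loaded.AccessToken)
}
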
138 server/storage/local.go Normal file
@@ -0,0 +1,138 @@
package storage

import (
	"context"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
	"time"
)

// LocalStorage is a local storage
type LocalStorage struct {
	Storage
	basedir string
	logger  *log.Logger
}

// NewLocalStorage is the factory for LocalStorage
func NewLocalStorage(basedir string, logger *log.Logger) (*LocalStorage, error) {
	return &LocalStorage{basedir: basedir, logger: logger}, nil
}

// Type returns the storage type
func (s *LocalStorage) Type() string {
	return "local"
}

// Head retrieves content length of a file from storage
func (s *LocalStorage) Head(_ context.Context, token string, filename string) (contentLength uint64, err error) {
	path := filepath.Join(s.basedir, token, filename)

	var fi os.FileInfo
	if fi, err = os.Lstat(path); err != nil {
		return
	}

	contentLength = uint64(fi.Size())

	return
}

// Get retrieves a file from storage
func (s *LocalStorage) Get(_ context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error) {
	path := filepath.Join(s.basedir, token, filename)

	var file *os.File

	// content type, content length
	if file, err = os.Open(path); err != nil {
		return
	}
	reader = file

	var fi os.FileInfo
	if fi, err = os.Lstat(path); err != nil {
		return
	}

	contentLength = uint64(fi.Size())
	if rng != nil {
		contentLength = rng.AcceptLength(contentLength)
		if _, err = file.Seek(int64(rng.Start), 0); err != nil {
			return
		}
	}

	return
}

// Delete removes a file from storage
func (s *LocalStorage) Delete(_ context.Context, token string, filename string) (err error) {
	metadata := filepath.Join(s.basedir, token, fmt.Sprintf("%s.metadata", filename))
	_ = os.Remove(metadata)

	path := filepath.Join(s.basedir, token, filename)
	err = os.Remove(path)
	return
}

// Purge cleans up the storage
func (s *LocalStorage) Purge(_ context.Context, days time.Duration) (err error) {
	err = filepath.Walk(s.basedir,
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.IsDir() {
				return nil
			}

			if info.ModTime().Before(time.Now().Add(-1 * days)) {
				err = os.Remove(path)
				return err
			}

			return nil
		})

	return
}

// IsNotExist indicates if a file doesn't exist on storage
func (s *LocalStorage) IsNotExist(err error) bool {
	if err == nil {
		return false
	}

	return os.IsNotExist(err)
}

// Put saves a file on storage
func (s *LocalStorage) Put(_ context.Context, token string, filename string, reader io.Reader, contentType string, contentLength uint64) error {
	var f io.WriteCloser
	var err error

	path := filepath.Join(s.basedir, token)

	if err = os.MkdirAll(path, 0700); err != nil && !os.IsExist(err) {
		return err
	}

	f, err = os.OpenFile(filepath.Join(path, filename), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	defer CloseCheck(f)

	if err != nil {
		return err
	}

	if _, err = io.Copy(f, reader); err != nil {
		return err
	}

	return nil
}

func (s *LocalStorage) IsRangeSupported() bool { return true }

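A round-trip sketch against the LocalStorage API above: Put, then Get, then Delete. The import path is assumed from the repository layout, and the token, filename, and base directory are placeholders:

package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"os"
	"strings"

	storage "github.com/dutchcoders/transfer.sh/server/storage" // assumed import path
)

func main() {
	logger := log.New(os.Stdout, "", log.LstdFlags)
	s, err := storage.NewLocalStorage("/tmp/transfersh-demo", logger)
	if err != nil {
		logger.Fatal(err)
	}

	ctx := context.Background()
	body := "hello, storage"
	if err := s.Put(ctx, "abc7de", "hello.txt", strings.NewReader(body), "text/plain", uint64(len(body))); err != nil {
		logger.Fatal(err)
	}

	reader, contentLength, err := s.Get(ctx, "abc7de", "hello.txt", nil)
	if err != nil {
		logger.Fatal(err)
	}
	defer reader.Close()

	data, _ := io.ReadAll(reader)
	fmt.Printf("%d bytes: %s\n", contentLength, data)

	_ = s.Delete(ctx, "abc7de", "hello.txt")
}
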
188 server/storage/s3.go Normal file
@@ -0,0 +1,188 @@
package storage

import (
	"context"
	"fmt"
	"io"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// S3Storage is a storage backed by AWS S3
type S3Storage struct {
	Storage
	bucket      string
	session     *session.Session
	s3          *s3.S3
	logger      *log.Logger
	purgeDays   time.Duration
	noMultipart bool
}

// NewS3Storage is the factory for S3Storage
func NewS3Storage(accessKey, secretKey, bucketName string, purgeDays int, region, endpoint string, disableMultipart bool, forcePathStyle bool, logger *log.Logger) (*S3Storage, error) {
	sess := getAwsSession(accessKey, secretKey, region, endpoint, forcePathStyle)

	return &S3Storage{
		bucket:      bucketName,
		s3:          s3.New(sess),
		session:     sess,
		logger:      logger,
		noMultipart: disableMultipart,
		purgeDays:   time.Duration(purgeDays*24) * time.Hour,
	}, nil
}

// Type returns the storage type
func (s *S3Storage) Type() string {
	return "s3"
}

// Head retrieves content length of a file from storage
func (s *S3Storage) Head(ctx context.Context, token string, filename string) (contentLength uint64, err error) {
	key := fmt.Sprintf("%s/%s", token, filename)

	headRequest := &s3.HeadObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(key),
	}

	// content type, content length
	response, err := s.s3.HeadObjectWithContext(ctx, headRequest)
	if err != nil {
		return
	}

	if response.ContentLength != nil {
		contentLength = uint64(*response.ContentLength)
	}

	return
}

// Purge cleans up the storage
func (s *S3Storage) Purge(context.Context, time.Duration) (err error) {
	// NOOP: expiration is set at upload time
	return nil
}

// IsNotExist indicates if a file doesn't exist on storage
func (s *S3Storage) IsNotExist(err error) bool {
	if err == nil {
		return false
	}

	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case s3.ErrCodeNoSuchKey:
			return true
		}
	}

	return false
}

// Get retrieves a file from storage
func (s *S3Storage) Get(ctx context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error) {
	key := fmt.Sprintf("%s/%s", token, filename)

	getRequest := &s3.GetObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(key),
	}

	if rng != nil {
		getRequest.Range = aws.String(rng.Range())
	}

	response, err := s.s3.GetObjectWithContext(ctx, getRequest)
	if err != nil {
		return
	}

	if response.ContentLength != nil {
		contentLength = uint64(*response.ContentLength)
	}
	if rng != nil && response.ContentRange != nil {
		rng.SetContentRange(*response.ContentRange)
	}

	reader = response.Body
	return
}

// Delete removes a file from storage
func (s *S3Storage) Delete(ctx context.Context, token string, filename string) (err error) {
	metadata := fmt.Sprintf("%s/%s.metadata", token, filename)
	deleteRequest := &s3.DeleteObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(metadata),
	}

	_, err = s.s3.DeleteObjectWithContext(ctx, deleteRequest)
	if err != nil {
		return
	}

	key := fmt.Sprintf("%s/%s", token, filename)
	deleteRequest = &s3.DeleteObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(key),
	}

	_, err = s.s3.DeleteObjectWithContext(ctx, deleteRequest)

	return
}

// Put saves a file on storage
func (s *S3Storage) Put(ctx context.Context, token string, filename string, reader io.Reader, contentType string, _ uint64) (err error) {
	key := fmt.Sprintf("%s/%s", token, filename)

	s.logger.Printf("Uploading file %s to S3 Bucket", filename)
	var concurrency int
	if !s.noMultipart {
		concurrency = 20
	} else {
		concurrency = 1
	}

	// Create an uploader with the session and custom options
	uploader := s3manager.NewUploader(s.session, func(u *s3manager.Uploader) {
		u.Concurrency = concurrency // default is 5
		u.LeavePartsOnError = false
	})

	var expire *time.Time
	if s.purgeDays.Hours() > 0 {
		expire = aws.Time(time.Now().Add(s.purgeDays))
	}

	_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
		Bucket:      aws.String(s.bucket),
		Key:         aws.String(key),
		Body:        reader,
		Expires:     expire,
		ContentType: aws.String(contentType),
	})

	return
}

func (s *S3Storage) IsRangeSupported() bool { return true }

func getAwsSession(accessKey, secretKey, region, endpoint string, forcePathStyle bool) *session.Session {
	return session.Must(session.NewSession(&aws.Config{
		Region:           aws.String(region),
		Endpoint:         aws.String(endpoint),
		Credentials:      credentials.NewStaticCredentials(accessKey, secretKey, ""),
		S3ForcePathStyle: aws.Bool(forcePathStyle),
	}))
}

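Constructing an S3Storage against an S3-compatible endpoint such as MinIO, where path-style addressing is typically required; every credential, bucket, and endpoint below is a placeholder, and the import path is assumed from the repository layout:

package main

import (
	"log"
	"os"

	storage "github.com/dutchcoders/transfer.sh/server/storage" // assumed import path
)

func main() {
	logger := log.New(os.Stdout, "", log.LstdFlags)

	s, err := storage.NewS3Storage(
		"ACCESS_KEY", "SECRET_KEY", // placeholder static credentials
		"transfersh-bucket",     // bucket name
		7,                       // purge after 7 days, applied as Expires at upload time
		"us-east-1",             // region
		"http://127.0.0.1:9000", // custom endpoint
		false,                   // keep multipart uploads enabled
		true,                    // force path-style URLs for the custom endpoint
		logger,
	)
	if err != nil {
		logger.Fatal(err)
	}
	logger.Printf("storage type: %s", s.Type())
}
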
163 server/storage/storj.go Normal file
@@ -0,0 +1,163 @@
package storage

import (
	"context"
	"errors"
	"io"
	"log"
	"time"

	"storj.io/common/fpath"
	"storj.io/common/storj"
	"storj.io/uplink"
)

// StorjStorage is a storage backed by Storj
type StorjStorage struct {
	Storage
	project   *uplink.Project
	bucket    *uplink.Bucket
	purgeDays time.Duration
	logger    *log.Logger
}

// NewStorjStorage is the factory for StorjStorage
func NewStorjStorage(access, bucket string, purgeDays int, logger *log.Logger) (*StorjStorage, error) {
	var instance StorjStorage
	var err error

	pCtx := context.TODO()

	ctx := fpath.WithTempData(pCtx, "", true)

	uplConf := &uplink.Config{
		UserAgent: "transfer-sh",
	}

	parsedAccess, err := uplink.ParseAccess(access)
	if err != nil {
		return nil, err
	}

	instance.project, err = uplConf.OpenProject(ctx, parsedAccess)
	if err != nil {
		return nil, err
	}

	instance.bucket, err = instance.project.EnsureBucket(ctx, bucket)
	if err != nil {
		// Ignoring the error to return the one that occurred first, but try to clean up.
		_ = instance.project.Close()
		return nil, err
	}

	instance.purgeDays = time.Duration(purgeDays*24) * time.Hour

	instance.logger = logger

	return &instance, nil
}

// Type returns the storage type
func (s *StorjStorage) Type() string {
	return "storj"
}

// Head retrieves content length of a file from storage
func (s *StorjStorage) Head(ctx context.Context, token string, filename string) (contentLength uint64, err error) {
	key := storj.JoinPaths(token, filename)

	obj, err := s.project.StatObject(fpath.WithTempData(ctx, "", true), s.bucket.Name, key)
	if err != nil {
		return 0, err
	}

	contentLength = uint64(obj.System.ContentLength)

	return
}

// Get retrieves a file from storage
func (s *StorjStorage) Get(ctx context.Context, token string, filename string, rng *Range) (reader io.ReadCloser, contentLength uint64, err error) {
	key := storj.JoinPaths(token, filename)

	s.logger.Printf("Getting file %s from Storj Bucket", filename)

	options := uplink.DownloadOptions{}
	if rng != nil {
		options.Offset = int64(rng.Start)
		if rng.Limit > 0 {
			options.Length = int64(rng.Limit)
		}
	}

	download, err := s.project.DownloadObject(fpath.WithTempData(ctx, "", true), s.bucket.Name, key, &options)
	if err != nil {
		return nil, 0, err
	}

	contentLength = uint64(download.Info().System.ContentLength)
	if rng != nil {
		contentLength = rng.AcceptLength(contentLength)
	}

	reader = download
	return
}

// Delete removes a file from storage
func (s *StorjStorage) Delete(ctx context.Context, token string, filename string) (err error) {
	key := storj.JoinPaths(token, filename)

	s.logger.Printf("Deleting file %s from Storj Bucket", filename)

	_, err = s.project.DeleteObject(fpath.WithTempData(ctx, "", true), s.bucket.Name, key)

	return
}

// Purge cleans up the storage
func (s *StorjStorage) Purge(context.Context, time.Duration) (err error) {
	// NOOP: expiration is set at upload time
	return nil
}

// Put saves a file on storage
func (s *StorjStorage) Put(ctx context.Context, token string, filename string, reader io.Reader, contentType string, contentLength uint64) (err error) {
	key := storj.JoinPaths(token, filename)

	s.logger.Printf("Uploading file %s to Storj Bucket", filename)

	var uploadOptions *uplink.UploadOptions
	if s.purgeDays.Hours() > 0 {
		uploadOptions = &uplink.UploadOptions{Expires: time.Now().Add(s.purgeDays)}
	}

	writer, err := s.project.UploadObject(fpath.WithTempData(ctx, "", true), s.bucket.Name, key, uploadOptions)
	if err != nil {
		return err
	}

	n, err := io.Copy(writer, reader)
	if err != nil || uint64(n) != contentLength {
		// Ignoring the error to return the one that occurred first, but try to clean up.
		_ = writer.Abort()
		return err
	}
	err = writer.SetCustomMetadata(ctx, uplink.CustomMetadata{"content-type": contentType})
	if err != nil {
		// Ignoring the error to return the one that occurred first, but try to clean up.
		_ = writer.Abort()
		return err
	}

	err = writer.Commit()
	return err
}

func (s *StorjStorage) IsRangeSupported() bool { return true }

// IsNotExist indicates if a file doesn't exist on storage
func (s *StorjStorage) IsNotExist(err error) bool {
	return errors.Is(err, uplink.ErrObjectNotFound)
}

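And the same shape for StorjStorage; the access grant comes from an environment variable here, and the bucket, token, and filename are placeholders. Because Put sets Expires when purgeDays > 0, Purge can stay a no-op:

package main

import (
	"context"
	"log"
	"os"
	"strings"

	storage "github.com/dutchcoders/transfer.sh/server/storage" // assumed import path
)

func main() {
	logger := log.New(os.Stdout, "", log.LstdFlags)

	s, err := storage.NewStorjStorage(os.Getenv("STORJ_ACCESS_GRANT"), "transfersh-demo", 7, logger)
	if err != nil {
		logger.Fatal(err)
	}

	body := "hello, storj"
	err = s.Put(context.Background(), "abc7de", "hello.txt", strings.NewReader(body), "text/plain", uint64(len(body)))
	if err != nil {
		logger.Fatal(err)
	}
}
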
@@ -29,12 +29,12 @@ import (
 )

 const (
-	// characters used for short-urls
+	// SYMBOLS characters used for short-urls
 	SYMBOLS = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
 )

 // generate a token
-func Token(length int) string {
+func token(length int) string {
 	result := ""
 	for i := 0; i < length; i++ {
 		x := rand.Intn(len(SYMBOLS) - 1)

@@ -4,12 +4,12 @@ import "testing"

 func BenchmarkTokenConcat(b *testing.B) {
 	for i := 0; i < b.N; i++ {
-		_ = Token(5) + Token(5)
+		_ = token(5) + token(5)
 	}
 }

 func BenchmarkTokenLonger(b *testing.B) {
 	for i := 0; i < b.N; i++ {
-		_ = Token(10)
+		_ = token(10)
 	}
 }

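A stand-alone sketch of the generator renamed above (the loop body past the rand.Intn line is not shown in the hunk, so the concatenation here is an assumption). Note that rand.Intn(len(SYMBOLS) - 1) can never select the final symbol, 'Z'; rand.Intn(len(SYMBOLS)) would cover the whole alphabet:

package main

import (
	"fmt"
	"math/rand"
)

const symbols = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

func token(length int) string {
	result := ""
	for i := 0; i < length; i++ {
		x := rand.Intn(len(symbols) - 1) // as in the hunk above; excludes the last symbol
		result += string(symbols[x])     // assumed: the hidden loop body appends the symbol
	}
	return result
}

func main() {
	fmt.Println(token(5) + token(5)) // e.g. the concatenation benchmarked above
}
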
@@ -30,27 +30,14 @@ import (
 	"fmt"
 	"math"
 	"net/http"
 	"net/mail"
 	"strconv"
 	"strings"

-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/golang/gddo/httputil/header"
 )

-func getAwsSession(accessKey, secretKey, region, endpoint string, forcePathStyle bool) *session.Session {
-	return session.Must(session.NewSession(&aws.Config{
-		Region:           aws.String(region),
-		Endpoint:         aws.String(endpoint),
-		Credentials:      credentials.NewStaticCredentials(accessKey, secretKey, ""),
-		S3ForcePathStyle: aws.Bool(forcePathStyle),
-	}))
-}
-
 func formatNumber(format string, s uint64) string {
-	return RenderFloat(format, float64(s))
+	return renderFloat(format, float64(s))
 }

 var renderFloatPrecisionMultipliers = [10]float64{

@@ -79,7 +66,7 @@ var renderFloatPrecisionRounders = [10]float64{
 	0.0000000005,
 }

-func RenderFloat(format string, n float64) string {
+func renderFloat(format string, n float64) string {
 	// Special cases:
 	// NaN = "NaN"
 	// +Inf = "+Infinity"

@@ -127,7 +114,7 @@ func RenderFloat(format string, n float64) string {
 	// +0000
 	if formatDirectiveIndices[0] == 0 {
 		if formatDirectiveChars[formatDirectiveIndices[0]] != '+' {
-			panic("RenderFloat(): invalid positive sign directive")
+			panic("renderFloat(): invalid positive sign directive")
 		}
 		positiveStr = "+"
 		formatDirectiveIndices = formatDirectiveIndices[1:]

@@ -141,7 +128,7 @@ func RenderFloat(format string, n float64) string {
 	// 000,000.00
 	if len(formatDirectiveIndices) == 2 {
 		if (formatDirectiveIndices[1] - formatDirectiveIndices[0]) != 4 {
-			panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
+			panic("renderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
 		}
 		thousandStr = string(formatDirectiveChars[formatDirectiveIndices[0]])
 		formatDirectiveIndices = formatDirectiveIndices[1:]

@@ -201,10 +188,6 @@ func RenderFloat(format string, n float64) string {
 	return signStr + intStr + decimalStr + fracStr
 }

-func RenderInteger(format string, n int) string {
-	return RenderFloat(format, float64(n))
-}
-
 // Request.RemoteAddress contains port, which we want to remove i.e.:
 // "[::1]:58292" => "[::1]"
 func ipAddrFromRemoteAddr(s string) string {

@@ -215,45 +198,16 @@ func ipAddrFromRemoteAddr(s string) string {
 	return s[:idx]
 }

-func getIPAddress(r *http.Request) string {
-	hdr := r.Header
-	hdrRealIP := hdr.Get("X-Real-Ip")
-	hdrForwardedFor := hdr.Get("X-Forwarded-For")
-	if hdrRealIP == "" && hdrForwardedFor == "" {
-		return ipAddrFromRemoteAddr(r.RemoteAddr)
-	}
-	if hdrForwardedFor != "" {
-		// X-Forwarded-For is potentially a list of addresses separated with ","
-		parts := strings.Split(hdrForwardedFor, ",")
-		for i, p := range parts {
-			parts[i] = strings.TrimSpace(p)
-		}
-
-		// TODO: should return first non-local address
-		return parts[0]
-	}
-	return hdrRealIP
-}
-
-func encodeRFC2047(s string) string {
-	// use mail's rfc2047 to encode any string
-	addr := mail.Address{
-		Name:    s,
-		Address: "",
-	}
-	return strings.Trim(addr.String(), " <>")
-}
-
 func acceptsHTML(hdr http.Header) bool {
 	actual := header.ParseAccept(hdr, "Accept")

 	for _, s := range actual {
 		if s.Value == "text/html" {
-			return (true)
+			return true
 		}
 	}

-	return (false)
+	return false
 }

 func formatSize(size int64) string {

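For reference, a self-contained version of the getIPAddress logic removed above (presumably relocated in this refactor); the port-stripping in ipAddrFromRemoteAddr is reconstructed here with strings.LastIndex, which matches the "[::1]:58292" => "[::1]" comment:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func ipAddrFromRemoteAddr(s string) string {
	idx := strings.LastIndex(s, ":")
	if idx == -1 {
		return s
	}
	return s[:idx]
}

func getIPAddress(r *http.Request) string {
	hdrRealIP := r.Header.Get("X-Real-Ip")
	hdrForwardedFor := r.Header.Get("X-Forwarded-For")
	if hdrRealIP == "" && hdrForwardedFor == "" {
		return ipAddrFromRemoteAddr(r.RemoteAddr)
	}
	if hdrForwardedFor != "" {
		// X-Forwarded-For is potentially a list of addresses separated with ","
		parts := strings.Split(hdrForwardedFor, ",")
		for i, p := range parts {
			parts[i] = strings.TrimSpace(p)
		}
		return parts[0]
	}
	return hdrRealIP
}

func main() {
	r := httptest.NewRequest("GET", "/", nil)
	r.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")
	fmt.Println(getIPAddress(r)) // prints the first forwarded address: 203.0.113.7
}
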
@@ -26,13 +26,11 @@ package server

 import (
 	"fmt"
 	"io"
 	"net/http"

-	_ "github.com/PuerkitoBio/ghost/handlers"
 	"github.com/gorilla/mux"

-	virustotal "github.com/dutchcoders/go-virustotal"
+	"github.com/dutchcoders/go-virustotal"
 )

 func (s *Server) virusTotalHandler(w http.ResponseWriter, r *http.Request) {

@@ -47,18 +45,16 @@ func (s *Server) virusTotalHandler(w http.ResponseWriter, r *http.Request) {

 	vt, err := virustotal.NewVirusTotal(s.VirusTotalKey)
 	if err != nil {
-		http.Error(w, err.Error(), 500)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
 	}

-	var reader io.Reader
-
-	reader = r.Body
+	reader := r.Body

 	result, err := vt.Scan(filename, reader)
 	if err != nil {
-		http.Error(w, err.Error(), 500)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
 	}

 	s.logger.Println(result)
-	w.Write([]byte(fmt.Sprintf("%v\n", result.Permalink)))
+	_, _ = w.Write([]byte(fmt.Sprintf("%v\n", result.Permalink)))
 }

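One thing the hunk above does not change: neither error branch returns after http.Error, so the handler keeps executing against a nil result. A conventional early-return shape, with a hypothetical doScan standing in for vt.Scan:

package main

import (
	"fmt"
	"net/http"
)

func doScan(_ *http.Request) (string, error) { // hypothetical stand-in for vt.Scan
	return "https://example.test/permalink", nil
}

func handler(w http.ResponseWriter, r *http.Request) {
	result, err := doScan(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return // stop here instead of falling through
	}
	_, _ = w.Write([]byte(fmt.Sprintf("%v\n", result)))
}

func main() {
	http.HandleFunc("/scan", handler)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
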