Mirror of https://github.com/dutchcoders/transfer.sh.git
Synced 2026-02-08 08:19:54 +00:00

Compare commits (19 commits)
SHA1:
3bad0912c4
7b00a41d49
92055f1b3c
9430e53689
a26b32dd86
0a6b5817a9
5e7e3a1b39
42adceb4c6
f909ad3ce2
d830bf1afc
f366e8217e
8a5c737140
b920eb842a
45e0967a37
28614c991d
ef28bcb28f
2dd23bff3c
663c59e754
9297c253aa
Dockerfile

@@ -10,9 +10,11 @@ ADD . /go/src/github.com/dutchcoders/transfer.sh

WORKDIR /go/src/github.com/dutchcoders/transfer.sh

ENV GO111MODULE=on
ENV GOOS=${GOOS}
ENV GOARCH=${GOARCH}

# build & install server
RUN go get -u ./... && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags -a -tags netgo -ldflags '-w -extldflags "-static"' -o /go/bin/transfersh github.com/dutchcoders/transfer.sh
RUN go get -u ./... && CGO_ENABLED=0 go build -ldflags -a -tags netgo -ldflags '-w -extldflags "-static"' -o /go/bin/transfersh github.com/dutchcoders/transfer.sh

FROM scratch AS final
LABEL maintainer="Andrea Spacca <andrea.spacca@gmail.com>"
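With GOOS and GOARCH turned into build-time variables instead of hardcoded values, the image can be cross-built; the hooks/build script added in this comparison passes exactly these build args. A minimal sketch of a manual invocation, assuming the matching ARG declarations exist earlier in the Dockerfile (the arm64 target and tag are illustrative):

```bash
# Cross-build the transfer.sh image for linux/arm64.
# GOOS/GOARCH flow into the `go build` step via the ENV lines above.
docker build \
  --build-arg GOOS=linux \
  --build-arg GOARCH=arm64 \
  --tag transfersh:arm64-test \
  .
```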
66 README.md

@@ -76,40 +76,44 @@ https://transfer.sh/1lDau/test.txt --> https://transfer.sh/inline/1lDau/test.txt

Parameter | Description | Value | Env
--- | --- | --- | ---
listener | port to use for http (:80) | |
profile-listener | port to use for profiler (:6060)| |
force-https | redirect to https | false |
tls-listener | port to use for https (:443) | |
tls-listener-only | flag to enable tls listener only | |
tls-cert-file | path to tls certificate | |
tls-private-key | path to tls private key | |
http-auth-user | user for basic http auth on upload | |
http-auth-pass | pass for basic http auth on upload | |
ip-whitelist | comma separated list of ips allowed to connect to the service | |
ip-blacklist | comma separated list of ips not allowed to connect to the service | |
temp-path | path to temp folder | system temp |
web-path | path to static web files (for development or custom front end) | |
proxy-path | path prefix when service is run behind a proxy | |
ga-key | google analytics key for the front end | |
uservoice-key | user voice key for the front end | |
provider | which storage provider to use | (s3, gdrive or local) |
aws-access-key | aws access key | | AWS_ACCESS_KEY
aws-secret-key | aws access key | | AWS_SECRET_KEY
bucket | aws bucket | | BUCKET
s3-endpoint | Custom S3 endpoint. | |
s3-region | region of the s3 bucket | eu-west-1 | S3_REGION
s3-no-multipart | disables s3 multipart upload | false | |
s3-path-style | Forces path style URLs, required for Minio. | false | |
basedir | path storage for local/gdrive provider| |
gdrive-client-json-filepath | path to oauth client json config for gdrive provider| |
gdrive-local-config-path | path to store local transfer.sh config cache for gdrive provider| |
gdrive-chunk-size | chunk size for gdrive upload in megabytes, must be lower than available memory (8 MB) | |
lets-encrypt-hosts | hosts to use for lets encrypt certificates (comma seperated) | |
log | path to log file| |

listener | port to use for http (:80) | | LISTENER |
profile-listener | port to use for profiler (:6060) | | PROFILE_LISTENER |
force-https | redirect to https | false | FORCE_HTTPS
tls-listener | port to use for https (:443) | | TLS_LISTENER |
tls-listener-only | flag to enable tls listener only | | TLS_LISTENER_ONLY |
tls-cert-file | path to tls certificate | | TLS_CERT_FILE |
tls-private-key | path to tls private key | | TLS_PRIVATE_KEY |
http-auth-user | user for basic http auth on upload | | HTTP_AUTH_USER |
http-auth-pass | pass for basic http auth on upload | | HTTP_AUTH_PASS |
ip-whitelist | comma separated list of ips allowed to connect to the service | | IP_WHITELIST |
ip-blacklist | comma separated list of ips not allowed to connect to the service | | IP_BLACKLIST |
temp-path | path to temp folder | system temp | TEMP_PATH |
web-path | path to static web files (for development or custom front end) | | WEB_PATH |
proxy-path | path prefix when service is run behind a proxy | | PROXY_PATH |
proxy-port | port of the proxy when the service is run behind a proxy | | PROXY_PORT |
ga-key | google analytics key for the front end | | GA_KEY |
uservoice-key | user voice key for the front end | | USERVOICE_KEY |
provider | which storage provider to use | (s3, gdrive or local) | PROVIDER |
aws-access-key | aws access key | | AWS_ACCESS_KEY |
aws-secret-key | aws access key | | AWS_SECRET_KEY |
bucket | aws bucket | | BUCKET |
s3-endpoint | Custom S3 endpoint. | | S3_ENDPOINT |
s3-region | region of the s3 bucket | eu-west-1 | S3_REGION |
s3-no-multipart | disables s3 multipart upload | false | S3_NO_MULTIPART |
s3-path-style | Forces path style URLs, required for Minio. | false | S3_PATH_STYLE |
basedir | path storage for local/gdrive provider | | BASEDIR |
gdrive-client-json-filepath | path to oauth client json config for gdrive provider | | GDRIVE_CLIENT_JSON_FILEPATH |
gdrive-local-config-path | path to store local transfer.sh config cache for gdrive provider| | GDRIVE_LOCAL_CONFIG_PATH |
gdrive-chunk-size | chunk size for gdrive upload in megabytes, must be lower than available memory (8 MB) | | GDRIVE_CHUNK_SIZE |
lets-encrypt-hosts | hosts to use for lets encrypt certificates (comma seperated) | | HOSTS |
log | path to log file| | LOG |
cors-domains | comma separated list of domains for CORS, setting it enable CORS | | CORS_DOMAINS |
clamav-host | host for clamav feature | | CLAMAV_HOST |
rate-limit | request per minute | | RATE_LIMIT |

If you want to use TLS using lets encrypt certificates, set lets-encrypt-hosts to your domain, set tls-listener to :443 and enable force-https.

If you want to use TLS using your own certificates, set tls-listener to :443, force-https, tls-cert=file and tls-private-key.
If you want to use TLS using your own certificates, set tls-listener to :443, force-https, tls-cert-file and tls-private-key.
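The updated table documents an environment variable for every option and adds proxy-port, cors-domains, clamav-host and rate-limit. A hedged sketch of an invocation combining the TLS instructions above with some of the new options, first via flags and then via the environment variables from the table (domain, paths and values are illustrative; the binary name follows the Dockerfile's build output):

```bash
# TLS via Let's Encrypt plus the new CORS and rate-limit options.
transfersh --provider local --basedir /tmp/transfersh/ \
  --lets-encrypt-hosts example.com --tls-listener :443 --force-https \
  --cors-domains https://example.com --rate-limit 60

# The same configuration expressed through environment variables.
PROVIDER=local BASEDIR=/tmp/transfersh/ \
HOSTS=example.com TLS_LISTENER=:443 FORCE_HTTPS=true \
CORS_DOMAINS=https://example.com RATE_LIMIT=60 transfersh
```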
## Development
49 cmd/cmd.go

@@ -12,7 +12,7 @@ import (
    "google.golang.org/api/googleapi"
)

var Version = "1.1.4"
var Version = "1.1.7"
var helpTemplate = `NAME:
  {{.Name}} - {{.Usage}}

@@ -37,6 +37,7 @@ var globalFlags = []cli.Flag{
        Name: "listener",
        Usage: "127.0.0.1:8080",
        Value: "127.0.0.1:8080",
        EnvVar: "LISTENER",
    },
    // redirect to https?
    // hostnames
@@ -44,57 +45,75 @@ var globalFlags = []cli.Flag{
        Name: "profile-listener",
        Usage: "127.0.0.1:6060",
        Value: "",
        EnvVar: "PROFILE_LISTENER",
    },
    cli.BoolFlag{
        Name: "force-https",
        Usage: "",
        EnvVar: "FORCE_HTTPS",
    },
    cli.StringFlag{
        Name: "tls-listener",
        Usage: "127.0.0.1:8443",
        Value: "",
        EnvVar: "TLS_LISTENER",
    },
    cli.BoolFlag{
        Name: "tls-listener-only",
        Usage: "",
        EnvVar: "TLS_LISTENER_ONLY",
    },
    cli.StringFlag{
        Name: "tls-cert-file",
        Value: "",
        EnvVar: "TLS_CERT_FILE",
    },
    cli.StringFlag{
        Name: "tls-private-key",
        Value: "",
        EnvVar: "TLS_PRIVATE_KEY",
    },
    cli.StringFlag{
        Name: "temp-path",
        Usage: "path to temp files",
        Value: os.TempDir(),
        EnvVar: "TEMP_PATH",
    },
    cli.StringFlag{
        Name: "web-path",
        Usage: "path to static web files",
        Value: "",
        EnvVar: "WEB_PATH",
    },
    cli.StringFlag{
        Name: "proxy-path",
        Usage: "path prefix when service is run behind a proxy",
        Value: "",
        EnvVar: "PROXY_PATH",
    },
    cli.StringFlag{
        Name: "proxy-port",
        Usage: "port of the proxy when the service is run behind a proxy",
        Value: "",
        EnvVar: "PROXY_PORT",
    },
    cli.StringFlag{
        Name: "ga-key",
        Usage: "key for google analytics (front end)",
        Value: "",
        EnvVar: "GA_KEY",
    },
    cli.StringFlag{
        Name: "uservoice-key",
        Usage: "key for user voice (front end)",
        Value: "",
        EnvVar: "USERVOICE_KEY",
    },
    cli.StringFlag{
        Name: "provider",
        Usage: "s3|gdrive|local",
        Value: "",
        EnvVar: "PROVIDER",
    },
    cli.StringFlag{
        Name: "s3-endpoint",
@@ -129,31 +148,36 @@ var globalFlags = []cli.Flag{
    cli.BoolFlag{
        Name: "s3-no-multipart",
        Usage: "Disables S3 Multipart Puts",
        EnvVar: "S3_NO_MULTIPART",
    },
    cli.BoolFlag{
        Name: "s3-path-style",
        Usage: "Forces path style URLs, required for Minio.",
        EnvVar: "S3_PATH_STYLE",
    },
    cli.StringFlag{
        Name: "gdrive-client-json-filepath",
        Usage: "",
        Value: "",
        EnvVar: "GDRIVE_CLIENT_JSON_FILEPATH",
    },
    cli.StringFlag{
        Name: "gdrive-local-config-path",
        Usage: "",
        Value: "",
        EnvVar: "GDRIVE_LOCAL_CONFIG_PATH",
    },
    cli.IntFlag{
        Name: "gdrive-chunk-size",
        Usage: "",
        Value: googleapi.DefaultUploadChunkSize / 1024 / 1024,
        EnvVar: "GDRIVE_CHUNK_SIZE",
    },
    cli.IntFlag{
        Name: "rate-limit",
        Usage: "requests per minute",
        Value: 0,
        EnvVar: "",
        EnvVar: "RATE_LIMIT",
    },
    cli.StringFlag{
        Name: "lets-encrypt-hosts",
@@ -165,11 +189,13 @@ var globalFlags = []cli.Flag{
        Name: "log",
        Usage: "/var/log/transfersh.log",
        Value: "",
        EnvVar: "LOG",
    },
    cli.StringFlag{
        Name: "basedir",
        Usage: "path to storage",
        Value: "",
        EnvVar: "BASEDIR",
    },
    cli.StringFlag{
        Name: "clamav-host",
@@ -186,26 +212,37 @@ var globalFlags = []cli.Flag{
    cli.BoolFlag{
        Name: "profiler",
        Usage: "enable profiling",
        EnvVar: "PROFILER",
    },
    cli.StringFlag{
        Name: "http-auth-user",
        Usage: "user for http basic auth",
        Value: "",
        EnvVar: "HTTP_AUTH_USER",
    },
    cli.StringFlag{
        Name: "http-auth-pass",
        Usage: "pass for http basic auth",
        Value: "",
        EnvVar: "HTTP_AUTH_PASS",
    },
    cli.StringFlag{
        Name: "ip-whitelist",
        Usage: "comma separated list of ips allowed to connect to the service",
        Value: "",
        EnvVar: "IP_WHITELIST",
    },
    cli.StringFlag{
        Name: "ip-blacklist",
        Usage: "comma separated list of ips not allowed to connect to the service",
        Value: "",
        EnvVar: "IP_BLACKLIST",
    },
    cli.StringFlag{
        Name: "cors-domains",
        Usage: "comma separated list of domains allowed for CORS requests",
        Value: "",
        EnvVar: "CORS_DOMAINS",
    },
}

@@ -245,6 +282,10 @@ func New() *Cmd {
        options = append(options, server.Listener(v))
    }

    if v := c.String("cors-domains"); v != "" {
        options = append(options, server.CorsDomains(v))
    }

    if v := c.String("tls-listener"); v == "" {
    } else if c.Bool("tls-listener-only") {
        options = append(options, server.TLSListener(v, true))
@@ -264,6 +305,10 @@ func New() *Cmd {
        options = append(options, server.ProxyPath(v))
    }

    if v := c.String("proxy-port"); v != "" {
        options = append(options, server.ProxyPort(v))
    }

    if v := c.String("ga-key"); v != "" {
        options = append(options, server.GoogleAnalytics(v))
    }
1 go.mod

@@ -16,6 +16,7 @@ require (
    github.com/garyburd/redigo v1.6.0 // indirect
    github.com/golang/gddo v0.0.0-20200310004957-95ce5a452273
    github.com/golang/protobuf v1.3.5 // indirect
    github.com/gorilla/handlers v1.4.2
    github.com/gorilla/mux v1.7.4
    github.com/gorilla/securecookie v1.1.1 // indirect
    github.com/hashicorp/golang-lru v0.5.3 // indirect
2 go.sum

@@ -127,6 +127,8 @@ github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
54 hooks/.config Normal file

@@ -0,0 +1,54 @@
#!/usr/bin/env bash

set +u
echo "variables (see https://docs.docker.com/docker-hub/builds/advanced/):"
echo "SOURCE_BRANCH: $SOURCE_BRANCH"
echo "SOURCE_COMMIT: $SOURCE_COMMIT"
echo "COMMIT_MSG: $COMMIT_MSG"
echo "DOCKER_REPO: $DOCKER_REPO"
echo "DOCKERFILE_PATH: $DOCKERFILE_PATH"
echo "CACHE_TAG: $CACHE_TAG"
echo "IMAGE_NAME: $IMAGE_NAME"
echo

: "${DOCKERFILE_PATH:=./Dockerfile}"
: "${IMAGE_NAME:=dutchcoders/transer.sh}"

echo "variables after applying defaults:"
echo "DOCKERFILE_PATH: $DOCKERFILE_PATH"
echo "IMAGE_NAME: $IMAGE_NAME"
echo

export PATH="$PWD/docker:$PATH"

# =>
# https://hub.docker.com/u/arm64v8/
# https://hub.docker.com/u/arm32v7/
# https://hub.docker.com/u/arm32v6/
# https://hub.docker.com/u/arm32v5/
declare -A base_image_prefix_map=( ["aarch64"]="arm64v8/" ["arm"]="arm32v5/" ["amd64"]="")

# => dpkg -L qemu-user-static | grep /usr/bin/
declare -A docker_qemu_arch_map=( ["aarch64"]="aarch64" ["arm"]="arm" ["amd64"]="x86_64")

# => https://github.com/docker/docker-ce/blob/76ac3a4952a9c03f04f26fc88d3160acd51d1702/components/cli/cli/command/manifest/util.go#L22
declare -A docker_to_manifest_map=( ["aarch64"]="arm64" ["arm"]="arm" ["amd64"]="amd64")

# what we want to build
build_architectures=(amd64 aarch64 arm)
verified_build_architectures=()
verified_build_architectures+=("$(docker version -f '{{.Server.Arch}}')")

# what we can build
for arch in ${build_architectures[@]}; do
    if [ -f "qemu-${docker_qemu_arch_map[${arch}]}-static" ]; then
        echo "qemu binary for $arch found";
        verified_build_architectures+=($arch)
    fi
done

echo $verified_build_architectures
set -u

docker -v
echo
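The three associative arrays are what the other hooks key off per architecture. A quick hedged sanity check of the lookups (expected values are taken from the declarations above; sourcing the file also prints the Docker Hub variables and calls docker, so a Docker daemon must be reachable):

```bash
#!/usr/bin/env bash
# Inspect the arch maps from hooks/.config without running a build.
source hooks/.config

echo "${base_image_prefix_map[aarch64]}"   # arm64v8/
echo "${docker_qemu_arch_map[aarch64]}"    # aarch64
echo "${docker_to_manifest_map[aarch64]}"  # arm64
```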
57 hooks/build Normal file

@@ -0,0 +1,57 @@
#!/usr/bin/env bash
set -eu

echo "build"
source hooks/.config

echo "Will build the following architectures: $verified_build_architectures"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"

for arch in ${verified_build_architectures[@]}; do
    echo "building $arch"
    echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"

    BASE_IMAGE_PREFIX="${base_image_prefix_map[${arch}]}"
    docker build \
        --build-arg GOOS=linux \
        --build-arg GOARCH=${arch} \
        --file $DOCKERFILE_PATH \
        --tag "${IMAGE_NAME}-${arch}" \
        .
done

echo "images built:"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"
docker image ls

# https://github.com/moby/moby/issues/36552
#
tempdir=$(mktemp -d -t yolo.XXXXXXXX)
cd $tempdir

for arch in ${verified_build_architectures[@]}; do
    echo "yolo fixing platform $arch"
    echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"

    manifest_arch=${docker_to_manifest_map[${arch}]}
    docker save "${IMAGE_NAME}-${arch}"| tar xv

    for filename in */json; do
        [ -e "$filename" ] || continue
        jq --compact-output 'del(.architecture)' < "$filename" | sponge "$filename"
    done

    for filename in *.json; do
        [ -e "$filename" ] || continue
        ! [ $filename = "manifest.json" ] || continue

        jq --arg architecture "$manifest_arch" \
            --compact-output '.architecture=$architecture' < "$filename" | sponge "$filename"
    done

    tar cv . | docker load
    rm -rf $tempdir/*
done

trap "exit 1" HUP INT PIPE QUIT TERM
trap "rm -rf $tempdir" EXIT
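After the docker save / jq / docker load round trip, each per-arch image should report its real architecture rather than the build host's. A hedged way to verify this on the build machine (the tag follows the "${IMAGE_NAME}-${arch}" convention used above):

```bash
# Check the architecture recorded in a freshly built per-arch image.
docker image inspect --format '{{.Architecture}}' "${IMAGE_NAME}-aarch64"   # expected: arm64
```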
15 hooks/get_qemu.sh Normal file

@@ -0,0 +1,15 @@
#!/bin/bash
set -ex

# NOTE: this url will change regularly because it's unstable
PACKAGE=http://ftp.de.debian.org/debian/pool/main/q/qemu/qemu-user-static_4.2-2_amd64.deb

mkdir tmp/
cd tmp/

curl $PACKAGE -o $(basename ${PACKAGE})
dpkg-deb -X $(basename ${PACKAGE}) .
cp usr/bin/qemu-aarch64-static ..
cp usr/bin/qemu-arm-static ..
cd ..
rm -rf tmp
48 hooks/post_checkout Normal file

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
set -eu

echo "post_checkout"
source hooks/.config

echo "Install qemu + binfmt support"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"
# it's an Ubuntu VM and you can install stuff.
apt-get update
apt-get install -y curl qemu-user-static binfmt-support jq moreutils

# Sadly docker itself uses Docker EE 17.06 on Dockerhub which does not support
# manifests.
echo "Install a fresh docker cli binary"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"

curl https://download.docker.com/linux/static/stable/x86_64/docker-19.03.9.tgz | \
    tar xvz docker/docker

echo "Build a usable config.json file"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"
# Manifests are still experimental and enabled by a config file flag.
# Interestingly, there is no config file and the credential parts to push
# images is available in an environment variable. Let's create a config file to
# combine the two things:
#
mkdir -p ~/.docker
jq --null-input --argjson auths "$DOCKERCFG" '. + {auths: $auths}' | \
    jq --arg experimental enabled '. + {experimental: $experimental}' | \
    sponge ~/.docker/config.json

echo "copy qemu binaries into docker build context"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"
# The current setup copies the qemu binary into the image (see Dockerfile)
# Pro:
# - it's easy to run non-amd64 images on amd64 systems for debugging
# Contra:
# - it's dead weight in the "destination" architecture and consumes space
# Alternative:
# - use a multistage Dockerfile (no RUN in the last stage possible of course)
# - wait for https://github.com/moby/moby/issues/14080
#
for arch in ${build_architectures[@]}; do
    cp /usr/bin/qemu-${docker_qemu_arch_map[${arch}]}-static qemu-${arch}-static
done

ls -la
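The jq pipeline above merges the Docker Hub credentials from $DOCKERCFG with the experimental flag that `docker manifest` requires. Assuming DOCKERCFG holds the usual auths object, the merged file should look roughly like the sketch below (registry key and token are placeholders):

```bash
# Roughly what the merged file ends up containing (values are placeholders):
cat ~/.docker/config.json
# {"auths":{"https://index.docker.io/v1/":{"auth":"<base64 token>"}},"experimental":"enabled"}
```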
10 hooks/pre_build Normal file

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
set -eu

echo "pre_build"
source hooks/.config

echo "Register qemu-*-static for all supported processors except current"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"

docker run --rm --privileged multiarch/qemu-user-static:register --reset
53 hooks/push Normal file

@@ -0,0 +1,53 @@
#!/usr/bin/env bash
set -eu

echo "push"
source hooks/.config

# 1. push all images
IMAGE_NAME="${IMAGE_NAME//index.docker.io\/}"

for arch in ${verified_build_architectures[@]}; do
    echo "Pushing ${IMAGE_NAME}-${arch}"
    echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"
    echo
    docker push ${IMAGE_NAME}-${arch}
done

docker image ls

# 2. build and push manifest
#DOCKER_REPO="index.docker.io/${DOCKER_REPO}"
manifests=""

for arch in ${verified_build_architectures[@]}; do
    manifests="${manifests} ${IMAGE_NAME}-${arch}"
done

echo "Creating manifest ${IMAGE_NAME}"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"
docker manifest create ${IMAGE_NAME} \
    $manifests
echo

echo "Annotating manifest"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"
for arch in ${verified_build_architectures[@]}; do
    docker manifest annotate ${IMAGE_NAME} \
        ${IMAGE_NAME}-${arch} \
        --os linux \
        --arch ${docker_to_manifest_map[${arch}]}
done

echo "Inspecting manifest ${IMAGE_NAME}-${arch}"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"
docker manifest inspect ${IMAGE_NAME}-${arch}
echo

echo "Pushing manifest ${IMAGE_NAME}"
echo "⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯⎯"
docker manifest push --purge "${IMAGE_NAME}"
echo

echo
echo "Done"
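Once the manifest list is pushed, pulling the bare image name resolves to the entry matching the local platform. A hedged check from any machine with Docker; the repository name is a placeholder for whatever IMAGE_NAME resolves to in hooks/.config:

```bash
# List the per-architecture entries of the published manifest list, then pull it.
IMAGE="dutchcoders/transfer.sh"   # placeholder repository name
docker manifest inspect "$IMAGE"
docker pull "$IMAGE"              # Docker selects the entry for this machine's architecture
```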
@@ -158,9 +158,9 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {
    }

    relativeURL, _ := url.Parse(path.Join(s.proxyPath, token, filename))
    resolvedURL := resolveURL(r, relativeURL)
    resolvedURL := resolveURL(r, relativeURL, s.proxyPort)
    relativeURLGet, _ := url.Parse(path.Join(s.proxyPath, getPathPart, token, filename))
    resolvedURLGet := resolveURL(r, relativeURLGet)
    resolvedURLGet := resolveURL(r, relativeURLGet, s.proxyPort)
    var png []byte
    png, err = qrcode.Encode(resolvedURL, qrcode.High, 150)
    if err != nil {
@@ -170,8 +170,8 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {

    qrCode := base64.StdEncoding.EncodeToString(png)

    hostname := getURL(r).Host
    webAddress := resolveWebAddress(r, s.proxyPath)
    hostname := getURL(r, s.proxyPort).Host
    webAddress := resolveWebAddress(r, s.proxyPath, s.proxyPort)

    data := struct {
        ContentType string
@@ -212,8 +212,8 @@ func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {
func (s *Server) viewHandler(w http.ResponseWriter, r *http.Request) {
    // vars := mux.Vars(r)

    hostname := getURL(r).Host
    webAddress := resolveWebAddress(r, s.proxyPath)
    hostname := getURL(r, s.proxyPort).Host
    webAddress := resolveWebAddress(r, s.proxyPath, s.proxyPort)

    data := struct {
        Hostname string
@@ -339,7 +339,7 @@ func (s *Server) postHandler(w http.ResponseWriter, r *http.Request) {

    filename = url.PathEscape(filename)
    relativeURL, _ := url.Parse(path.Join(s.proxyPath, token, filename))
    fmt.Fprintln(w, getURL(r).ResolveReference(relativeURL).String())
    fmt.Fprintln(w, getURL(r, s.proxyPort).ResolveReference(relativeURL).String())

    cleanTmpFile(file)
}
@@ -500,15 +500,15 @@ func (s *Server) putHandler(w http.ResponseWriter, r *http.Request) {
    relativeURL, _ := url.Parse(path.Join(s.proxyPath, token, filename))
    deleteURL, _ := url.Parse(path.Join(s.proxyPath, token, filename, metadata.DeletionToken))

    w.Header().Set("X-Url-Delete", resolveURL(r, deleteURL))
    w.Header().Set("X-Url-Delete", resolveURL(r, deleteURL, s.proxyPort))

    fmt.Fprint(w, resolveURL(r, relativeURL))
    fmt.Fprint(w, resolveURL(r, relativeURL, s.proxyPort))
}

func resolveURL(r *http.Request, u *url.URL) string {
func resolveURL(r *http.Request, u *url.URL, proxyPort string) string {
    r.URL.Path = ""

    return getURL(r).ResolveReference(u).String()
    return getURL(r, proxyPort).ResolveReference(u).String()
}

func resolveKey(key, proxyPath string) string {
@@ -525,8 +525,8 @@ func resolveKey(key, proxyPath string) string {
    return key
}

func resolveWebAddress(r *http.Request, proxyPath string) string {
    url := getURL(r)
func resolveWebAddress(r *http.Request, proxyPath string, proxyPort string) string {
    url := getURL(r, proxyPort)

    var webAddress string

@@ -544,7 +544,7 @@ func resolveWebAddress(r *http.Request, proxyPath string) string {
    return webAddress
}

func getURL(r *http.Request) *url.URL {
func getURL(r *http.Request, proxyPort string) *url.URL {
    u, _ := url.Parse(r.URL.String())

    if r.TLS != nil {
@@ -555,16 +555,25 @@ func getURL(r *http.Request) *url.URL {
        u.Scheme = "http"
    }

    if u.Host != "" {
    } else if host, port, err := net.SplitHostPort(r.Host); err != nil {
        u.Host = r.Host
    } else {
        if port == "80" && u.Scheme == "http" {
            u.Host = host
        } else if port == "443" && u.Scheme == "https" {
    if u.Host == "" {
        host, port, err := net.SplitHostPort(r.Host)
        if err != nil {
            host = r.Host
            port = ""
        }
        if len(proxyPort) != 0 {
            port = proxyPort
        }
        if len(port) == 0 {
            u.Host = host
        } else {
            u.Host = net.JoinHostPort(host, port)
            if port == "80" && u.Scheme == "http" {
                u.Host = host
            } else if port == "443" && u.Scheme == "https" {
                u.Host = host
            } else {
                u.Host = net.JoinHostPort(host, port)
            }
        }
    }

@@ -614,9 +623,7 @@ func (s *Server) CheckMetadata(token, filename string, increaseDownload bool) (M
    var metadata Metadata

    r, _, err := s.storage.Get(token, fmt.Sprintf("%s.metadata", filename))
    if s.storage.IsNotExist(err) {
        return metadata, nil
    } else if err != nil {
    if err != nil {
        return metadata, err
    }

@@ -1011,7 +1018,7 @@ func (s *Server) RedirectHandler(h http.Handler) http.HandlerFunc {
    } else if r.Header.Get("X-Forwarded-Proto") == "https" {
    } else if r.URL.Scheme == "https" {
    } else {
        u := getURL(r)
        u := getURL(r, s.proxyPort)
        u.Scheme = "https"

        http.Redirect(w, r, u.String(), http.StatusPermanentRedirect)
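The thread through these hunks is that getURL, resolveURL and resolveWebAddress now take the configured proxy port, so generated links and the X-Url-Delete header carry the externally visible port instead of the one the listener binds to. A hedged end-to-end check against a local instance (host, file name and the assumption that --proxy-port points at a reverse proxy on 443 are illustrative):

```bash
# Upload a file and inspect the returned URL and the X-Url-Delete header.
# With --proxy-port 443 set, both should use the proxy's port, not :8080.
echo "hello" > /tmp/hello.txt
curl -sD - --upload-file /tmp/hello.txt http://127.0.0.1:8080/hello.txt
```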
@@ -26,7 +26,10 @@ package server

import (
    "errors"
    gorillaHandlers "github.com/gorilla/handlers"
    "log"
    crypto_rand "crypto/rand"
    "encoding/binary"
    "math/rand"
    "mime"
    "net/http"
@@ -85,6 +88,13 @@ func Listener(s string) OptionFn {

}

func CorsDomains(s string) OptionFn {
    return func(srvr *Server) {
        srvr.CorsDomains = s
    }

}

func GoogleAnalytics(gaKey string) OptionFn {
    return func(srvr *Server) {
        srvr.gaKey = gaKey
@@ -131,6 +141,12 @@ func ProxyPath(s string) OptionFn {
    }
}

func ProxyPort(s string) OptionFn {
    return func(srvr *Server) {
        srvr.proxyPort = s
    }
}

func TempPath(s string) OptionFn {
    return func(srvr *Server) {
        if s[len(s)-1:] != "/" {
@@ -270,11 +286,13 @@ type Server struct {

    webPath string
    proxyPath string
    proxyPort string
    gaKey string
    userVoiceKey string

    TLSListenerOnly bool

    CorsDomains string
    ListenerString string
    TLSListenerString string
    ProfileListenerString string
@@ -297,7 +315,11 @@ func New(options ...OptionFn) (*Server, error) {
}

func init() {
    rand.Seed(time.Now().UTC().UnixNano())
    var seedBytes [8]byte
    if _, err := crypto_rand.Read(seedBytes[:]); err != nil {
        panic("cannot obtain cryptographically secure seed")
    }
    rand.Seed(int64(binary.LittleEndian.Uint64(seedBytes[:])))
}

func (s *Server) Run() {
@@ -413,11 +435,24 @@ func (s *Server) Run() {

    s.logger.Printf("Transfer.sh server started.\nusing temp folder: %s\nusing storage provider: %s", s.tempPath, s.storage.Type())

    var cors func(http.Handler) http.Handler
    if len(s.CorsDomains) > 0 {
        cors = gorillaHandlers.CORS(
            gorillaHandlers.AllowedHeaders([]string{"*"}),
            gorillaHandlers.AllowedOrigins(strings.Split(s.CorsDomains, ",")),
            gorillaHandlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"}),
        )
    } else {
        cors = func(h http.Handler) http.Handler {
            return h
        }
    }

    h := handlers.PanicHandler(
        IPFilterHandler(
            handlers.LogHandler(
                LoveHandler(
                    s.RedirectHandler(r)),
                    s.RedirectHandler(cors(r))),
                handlers.NewLogOptions(s.logger.Printf, "_default_"),
            ),
        s.ipFilterOptions,
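With cors-domains set, gorilla/handlers wraps the router and answers preflight requests for the configured origins. A hedged way to check against a local instance started with --cors-domains https://example.com; note that the Origin header has to match one of the configured entries exactly, and the port and origin below are illustrative:

```bash
# Send a CORS preflight and look for the Access-Control-* response headers.
curl -si -X OPTIONS http://127.0.0.1:8080/ \
  -H 'Origin: https://example.com' \
  -H 'Access-Control-Request-Method: PUT' | grep -i '^access-control-'
```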