Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions pci-4464-investigation/flooding-one-worker/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# How To

1. Deploy the stack:
```
docker compose -f docker-compose.yml -f prometheus.yml up
```

2. Run the script
```
./script.sh
```

# Results

If two (or more) tasks land on the same worker at the same time, it is a bug.
150 changes: 150 additions & 0 deletions pci-4464-investigation/flooding-one-worker/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
# NOTES
# - Some environment variables are actually set in the file `.env` in the same directory.
#   This allows, if you wish, to change some values without having to modify this file.
#   See https://docs.docker.com/compose/environment-variables/ for details.
# - The funky notation &foo and *foo are YAML anchors and references, used to keep the
#   configuration DRYer. See for example
#   https://docs.ansible.com/ansible/latest/user_guide/playbooks_advanced_syntax.html
# - Port mappings are quoted: unquoted host:container pairs are scalars that some
#   YAML 1.1 parsers read as sexagesimal integers.


services:
  # S3-compatible object store used as the Concourse artifact backend.
  minio:
    image: minio/minio
    command: server /minio-storage --console-address ":9001"
    ports:
      - "9000:9000"  # S3 API
      - "9001:9001"  # web console
    environment:
      MINIO_REGION_NAME: &minio-region ${MINIO_REGION_NAME}
      MINIO_ROOT_USER: ${MINIO_ROOT_USER}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}

  # One-shot job: creates buckets/users via mc. This container will stop
  # as soon as the script has run.
  minio-setup:
    image: minio/mc
    depends_on:
      - minio
    volumes:
      - "./scripts:/scripts"
    environment:
      MINIO_ADDR: &minio-addr http://minio:9000
      MINIO_ROOT_USER: ${MINIO_ROOT_USER}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
    entrypoint: /scripts/minio-setup.sh

  # Secret store for Concourse credentials (dev mode: in-memory, unsealed).
  vault:
    image: vault:1.13.1
    command: server -dev
    ports:
      - "8200:8200"
    environment:
      VAULT_DEV_ROOT_TOKEN_ID: ${VAULT_DEV_ROOT_TOKEN_ID}
    cap_add:
      - IPC_LOCK  # lets Vault mlock memory so secrets are not swapped to disk

  # One-shot job: seeds Vault with the MinIO credentials. This container
  # will stop as soon as the script has run.
  vault-setup:
    image: vault:1.13.1
    depends_on:
      - vault
    volumes:
      - "./scripts:/scripts"
    environment:
      VAULT_ADDR: &vault-addr http://vault:8200
      # NOTE In production you would NOT pass the root token!
      VAULT_DEV_ROOT_TOKEN_ID: ${VAULT_DEV_ROOT_TOKEN_ID}
      MINIO_ADDR: *minio-addr
      MINIO_REGION_NAME: *minio-region
      MINIO_ROOT_USER: ${MINIO_ROOT_USER}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
    entrypoint: /scripts/vault-setup.sh

  # Concourse's Postgres database.
  db:
    image: postgres:14.11
    shm_size: 1gb
    ports: ["6543:5432"]  # host 6543 to avoid clashing with a local Postgres
    environment:
      POSTGRES_DB: &postgres-db concourse
      POSTGRES_USER: &postgres-user dev
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}

  # Concourse ATC/TSA ("web") node.
  web:
    image: &concourse-image concourse/concourse:7.13.2
    command: web
    depends_on:
      - db
      - minio-setup
      - vault-setup
    ports: ["8080:8080"]
    volumes:
      - "./concourse-keys:/concourse-keys"
    environment:
      CONCOURSE_SESSION_SIGNING_KEY: /concourse-keys/session_signing_key
      CONCOURSE_TSA_AUTHORIZED_KEYS: /concourse-keys/authorized_worker_keys
      CONCOURSE_TSA_HOST_KEY: /concourse-keys/tsa_host_key

      CONCOURSE_LOG_LEVEL: error
      CONCOURSE_TSA_LOG_LEVEL: error
      CONCOURSE_POSTGRES_HOST: db
      CONCOURSE_POSTGRES_USER: *postgres-user
      CONCOURSE_POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      CONCOURSE_POSTGRES_DATABASE: *postgres-db
      CONCOURSE_EXTERNAL_URL: http://localhost:8080
      CONCOURSE_ADD_LOCAL_USER: ${CONCOURSE_MAIN_USER}:${CONCOURSE_MAIN_PASSWORD},${CONCOURSE_NORMAL_USER}:${CONCOURSE_NORMAL_PASSWORD},${CONCOURSE_READONLY_USER}:${CONCOURSE_READONLY_PASSWORD}
      CONCOURSE_MAIN_TEAM_LOCAL_USER: ${CONCOURSE_MAIN_USER}
      CONCOURSE_CLUSTER_NAME: dev
      CONCOURSE_ENABLE_PIPELINE_INSTANCES: "true"
      CONCOURSE_ENABLE_ACROSS_STEP: "true"
      # NOTE this connection is unencrypted!
      CONCOURSE_VAULT_URL: *vault-addr
      # NOTE In production you would NOT pass the root token!
      CONCOURSE_VAULT_CLIENT_TOKEN: ${VAULT_DEV_ROOT_TOKEN_ID}
      CONCOURSE_ENABLE_CACHE_STREAMED_VOLUMES: "true"
      CONCOURSE_ENABLE_RESOURCE_CAUSALITY: "true"
      # The settings under investigation: chained placement strategy with a
      # hard cap of one active task per worker.
      CONCOURSE_CONTAINER_PLACEMENT_STRATEGY: limit-active-tasks,volume-locality
      CONCOURSE_MAX_ACTIVE_TASKS_PER_WORKER: "1"

  # One-shot job: waits for the web node and applies initial setup. This
  # container will stop as soon as the script has run.
  web-setup:
    image: alpine/curl
    depends_on:
      - web
    volumes:
      - "./scripts:/scripts"
    environment:
      CONCOURSE_ADDR: web:8080
    entrypoint: /scripts/concourse-setup.sh

  # Single Concourse worker (the flooding target).
  worker:
    image: *concourse-image
    command: worker
    privileged: true  # required by the containerd runtime
    depends_on: [web]
    ports:
      - "7777:7777"  # garden
      - "7788:7788"  # baggageclaim
    volumes:
      - "./concourse-keys:/concourse-keys"
    # SIGUSR2 asks the worker to retire/land gracefully instead of dying hard.
    stop_signal: SIGUSR2
    environment:
      CONCOURSE_RUNTIME: containerd
      CONCOURSE_NAME: worker-1

      CONCOURSE_TSA_PUBLIC_KEY: /concourse-keys/tsa_host_key.pub
      CONCOURSE_TSA_WORKER_PRIVATE_KEY: /concourse-keys/worker_key

      CONCOURSE_LOG_LEVEL: error
      CONCOURSE_TSA_HOST: web:2222

      CONCOURSE_BAGGAGECLAIM_LOG_LEVEL: error
      CONCOURSE_CONTAINERD_LOG_LEVEL: error
      CONCOURSE_BIND_IP: 0.0.0.0
      CONCOURSE_BAGGAGECLAIM_BIND_IP: 0.0.0.0

      # avoid using loopbacks
      CONCOURSE_BAGGAGECLAIM_DRIVER: overlay

      # work with docker-compose's dns
      CONCOURSE_CONTAINERD_DNS_PROXY_ENABLE: "true"
18 changes: 18 additions & 0 deletions pci-4464-investigation/flooding-one-worker/pipeline.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
---
# A single-job pipeline whose task announces which worker it runs on and
# then sleeps for 30 seconds, keeping the worker's active-task slot busy.
# script.sh instantiates this pipeline 60 times and triggers them all at
# once to flood the placement strategy.
jobs:
  - name: loop
    plan:
      - task: loop
        config:
          platform: linux
          image_resource:
            type: registry-image
            source:
              repository: busybox
          run:
            path: sh
            args:
              - -c
              - |
                echo "Executing on worker: `hostname`"
                for i in `seq 1 30`; do sleep 1; echo "Slept for ${i} seconds."; done
40 changes: 40 additions & 0 deletions pci-4464-investigation/flooding-one-worker/prometheus.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# prometheus.yml - a docker-compose override that adds 'prometheus' to the stack
#
# once running, head to `localhost:9090` to get access to the Prometheus console.
#
# ref: https://prometheus.io/
# ref: https://docs.docker.com/compose/extends/
#
# NOTE: the top-level `version` key is omitted — it is obsolete in the
# Compose Specification and only produces a warning.

services:
  # Extend the base 'web' service so Concourse exposes its Prometheus metrics.
  web:
    environment:
      CONCOURSE_PROMETHEUS_BIND_IP: "0.0.0.0"
      CONCOURSE_PROMETHEUS_BIND_PORT: "9100"
    ports:
      - '9100:9100'

  prometheus:
    image: prom/prometheus
    # Write a minimal scrape config at startup, then exec Prometheus with it.
    # The 1s scrape interval gives fine-grained data for the investigation.
    entrypoint:
      - /bin/sh
      - -c
      - |
        echo "
        global:
          scrape_interval: '1s'
          evaluation_interval: '1s'

        scrape_configs:
          - job_name: 'concourse'
            static_configs:
              - targets:
                - 'web:9100'
        " > config.yml

        exec prometheus \
          --config.file=config.yml \
          --storage.tsdb.path=/prometheus
    ports:
      - '9090:9090'
11 changes: 11 additions & 0 deletions pci-4464-investigation/flooding-one-worker/script.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
#!/bin/bash -ex
# Flood the scheduler: create 60 copies of the pipeline, then trigger all of
# their jobs in parallel so many tasks compete for worker placement at once.

# Set (-n = non-interactive "yes") and unpause one pipeline per iteration.
for i in $(seq 1 60); do
  fly -t ci sp -p "parallel${i}-second-linux" -c pipeline.yml -n \
    && fly -t ci up -p "parallel${i}-second-linux"
done

# Trigger the 'loop' job of pipeline number $1.
# (The original read the global `i` instead of its argument, which only
# worked because `&` forks a subshell that inherits i at fork time.)
task() {
  fly -t ci tj -j "parallel${1}-second-linux/loop"
}

# Fire all 60 triggers concurrently.
for i in $(seq 1 60); do
  task "$i" &
done

# Don't exit until every background trigger has completed.
wait
27 changes: 27 additions & 0 deletions pci-4464-investigation/two-workers-with-flock/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# How To

1. Deploy the stack:
```
docker compose -f docker-compose.yml -f prometheus.yml up
```

2. Generate a big file
`dd bs=1024 count=1048576 </dev/urandom > bigfile-1`

3. Upload the file to minio
- connect to minio http://localhost:9000 with user minio and password in .env
- upload bigfile-1 to concourse bucket

4. Set the pipeline and trigger job1
```
$ fly --target=ci login --concourse-url=http://localhost:8080 --open-browser
$ fly -t ci sp -p two-workers-with-flock -c pipeline.yml
$ fly -t ci up -p two-workers-with-flock
$ fly -t ci tj -j two-workers-with-flock/job1
```

5. Look at the pipeline, when job1/use-big-file is streaming the file, trigger job2

# Results

If two (or more) tasks land on the same worker at the same time, it is a bug.
Loading