docs/modules/druid/examples/getting_started/druid.yaml (16 additions, 7 deletions)

@@ -7,17 +7,18 @@ spec:
   image:
     productVersion: 30.0.0
   clusterConfig:
+    listenerClass: external-stable # This exposes your Stacklet outside of Kubernetes. Remove this configuration if this is not desired
+    zookeeperConfigMapName: simple-druid-znode
     deepStorage:
       hdfs:
         configMapName: simple-hdfs
-        directory: /data
+        directory: /druid
     metadataStorageDatabase:
-      dbType: derby
-      connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
-      host: localhost
-      port: 1527
-      tls: null
-    zookeeperConfigMapName: simple-druid-znode
+      dbType: postgresql
+      connString: jdbc:postgresql://postgresql-druid/druid
+      host: postgresql-druid
+      port: 5432
+      credentialsSecret: druid-db-credentials
   brokers:
     roleGroups:
       default:
@@ -38,3 +39,11 @@ spec:
     roleGroups:
       default:
         replicas: 1
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: druid-db-credentials
+stringData:
+  username: druid
+  password: druid
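A quick way to sanity-check the new PostgreSQL metadata store once the Stacklet is running, sketched here rather than taken from the diff; it assumes the Bitnami release creates the pod `postgresql-druid-0` and that the `druid`/`druid` credentials from the Secret above are unchanged:

----
# List the tables Druid created in the metadata database (druid_segments etc. appear after startup).
# Assumes pod postgresql-druid-0 from the Bitnami release and the credentials from the Secret above.
kubectl exec postgresql-druid-0 -- \
  env PGPASSWORD=druid psql -U druid -d druid -c '\dt'
----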
docs/modules/druid/examples/getting_started/druid.yaml.j2 (16 additions, 7 deletions)

@@ -7,17 +7,18 @@ spec:
   image:
     productVersion: 30.0.0
   clusterConfig:
+    listenerClass: external-stable # This exposes your Stacklet outside of Kubernetes. Remove this configuration if this is not desired
+    zookeeperConfigMapName: simple-druid-znode
     deepStorage:
       hdfs:
         configMapName: simple-hdfs
-        directory: /data
+        directory: /druid
     metadataStorageDatabase:
-      dbType: derby
-      connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
-      host: localhost
-      port: 1527
-      tls: null
-    zookeeperConfigMapName: simple-druid-znode
+      dbType: postgresql
+      connString: jdbc:postgresql://postgresql-druid/druid
+      host: postgresql-druid
+      port: 5432
+      credentialsSecret: druid-db-credentials
   brokers:
     roleGroups:
       default:
@@ -38,3 +39,11 @@ spec:
     roleGroups:
       default:
         replicas: 1
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: druid-db-credentials
+stringData:
+  username: druid
+  password: druid
docs/modules/druid/examples/getting_started/getting_started.sh (17 additions, 6 deletions)

@@ -100,6 +100,17 @@ kubectl rollout status --watch statefulset/simple-hdfs-journalnode-default --timeout=300s
 kubectl rollout status --watch statefulset/simple-hdfs-namenode-default --timeout=300s
 # end::watch-hdfs-rollout[]

+echo "Installing PostgreSQL for Druid"
+# tag::helm-install-postgres[]
+helm install postgresql-druid \
+  --repo https://charts.bitnami.com/bitnami postgresql \
+  --version 16.1.2 \
+  --set auth.database=druid \
+  --set auth.username=druid \
+  --set auth.password=druid \
+  --wait
+# end::helm-install-postgres[]
+
 echo "Install DruidCluster from druid.yaml"
 # tag::install-druid[]
 kubectl apply --server-side -f druid.yaml
@@ -124,10 +135,10 @@ kubectl rollout status --watch statefulset/simple-druid-middlemanager-default --timeout=300s
 kubectl rollout status --watch statefulset/simple-druid-router-default --timeout=300s
 # end::watch-druid-rollout[]

-echo "Starting port-forwarding of port 8888"
+echo "Starting port-forwarding of port 9088"
 # shellcheck disable=2069 # we want all output to be blackholed
 # tag::port-forwarding[]
-kubectl port-forward svc/simple-druid-router 8888 > /dev/null 2>&1 &
+kubectl port-forward svc/simple-druid-router 9088 > /dev/null 2>&1 &
 # end::port-forwarding[]
 PORT_FORWARD_PID=$!
 # shellcheck disable=2064 # we want the PID evaluated now, not at the time the trap is
@@ -136,15 +147,15 @@ sleep 5

 submit_job() {
   # tag::submit-job[]
-  curl -s -X 'POST' -H 'Content-Type:application/json' -d @ingestion_spec.json http://localhost:8888/druid/indexer/v1/task
+  curl -s -k -X 'POST' -H 'Content-Type:application/json' -d @ingestion_spec.json https://localhost:9088/druid/indexer/v1/task
   # end::submit-job[]
 }

 echo "Submitting job"
 task_id=$(submit_job | sed -e 's/.*":"\([^"]\+\).*/\1/g')

 request_job_status() {
-  curl -s "http://localhost:8888/druid/indexer/v1/task/${task_id}/status" | sed -e 's/.*statusCode":"\([^"]\+\).*/\1/g'
+  curl -s -k "https://localhost:9088/druid/indexer/v1/task/${task_id}/status" | sed -e 's/.*statusCode":"\([^"]\+\).*/\1/g'
 }

 while [ "$(request_job_status)" == "RUNNING" ]; do
@@ -162,7 +173,7 @@ else
 fi

 segment_load_status() {
-  curl -s http://localhost:8888/druid/coordinator/v1/loadstatus | sed -e 's/.*wikipedia":\([0-9\.]\+\).*/\1/g'
+  curl -s -k https://localhost:9088/druid/coordinator/v1/loadstatus | sed -e 's/.*wikipedia":\([0-9\.]\+\).*/\1/g'
 }

 while [ "$(segment_load_status)" != "100.0" ]; do
@@ -172,7 +183,7 @@ done

 query_data() {
   # tag::query-data[]
-  curl -s -X 'POST' -H 'Content-Type:application/json' -d @query.json http://localhost:8888/druid/v2/sql
+  curl -s -k -X 'POST' -H 'Content-Type:application/json' -d @query.json https://localhost:9088/druid/v2/sql
   # end::query-data[]
 }
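Since the script now goes through TLS with `curl -k`, the fixed `sleep 5` after starting the port-forward could be replaced by a small readiness poll. This is only a sketch on top of the change, not part of it; it relies on Druid's standard `/status/health` endpoint, which returns `true` once the router is up:

----
# Poll the forwarded router port until it reports healthy (sketch, not part of the diff).
until curl -s -k https://localhost:9088/status/health | grep -q true; do
  echo "Waiting for the Druid router to become reachable..."
  sleep 2
done
----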
docs/modules/druid/examples/getting_started/getting_started.sh.j2 (17 additions, 6 deletions)

@@ -100,6 +100,17 @@ kubectl rollout status --watch statefulset/simple-hdfs-journalnode-default --timeout=300s
 kubectl rollout status --watch statefulset/simple-hdfs-namenode-default --timeout=300s
 # end::watch-hdfs-rollout[]

+echo "Installing PostgreSQL for Druid"
+# tag::helm-install-postgres[]
+helm install postgresql-druid \
+  --repo https://charts.bitnami.com/bitnami postgresql \
+  --version 16.1.2 \
+  --set auth.database=druid \
+  --set auth.username=druid \
+  --set auth.password=druid \
+  --wait
+# end::helm-install-postgres[]
+
 echo "Install DruidCluster from druid.yaml"
 # tag::install-druid[]
 kubectl apply --server-side -f druid.yaml
@@ -124,10 +135,10 @@ kubectl rollout status --watch statefulset/simple-druid-middlemanager-default --timeout=300s
 kubectl rollout status --watch statefulset/simple-druid-router-default --timeout=300s
 # end::watch-druid-rollout[]

-echo "Starting port-forwarding of port 8888"
+echo "Starting port-forwarding of port 9088"
 # shellcheck disable=2069 # we want all output to be blackholed
 # tag::port-forwarding[]
-kubectl port-forward svc/simple-druid-router 8888 > /dev/null 2>&1 &
+kubectl port-forward svc/simple-druid-router 9088 > /dev/null 2>&1 &
 # end::port-forwarding[]
 PORT_FORWARD_PID=$!
 # shellcheck disable=2064 # we want the PID evaluated now, not at the time the trap is
@@ -136,15 +147,15 @@ sleep 5

 submit_job() {
   # tag::submit-job[]
-  curl -s -X 'POST' -H 'Content-Type:application/json' -d @ingestion_spec.json http://localhost:8888/druid/indexer/v1/task
+  curl -s -k -X 'POST' -H 'Content-Type:application/json' -d @ingestion_spec.json https://localhost:9088/druid/indexer/v1/task
   # end::submit-job[]
 }

 echo "Submitting job"
 task_id=$(submit_job | sed -e 's/.*":"\([^"]\+\).*/\1/g')

 request_job_status() {
-  curl -s "http://localhost:8888/druid/indexer/v1/task/${task_id}/status" | sed -e 's/.*statusCode":"\([^"]\+\).*/\1/g'
+  curl -s -k "https://localhost:9088/druid/indexer/v1/task/${task_id}/status" | sed -e 's/.*statusCode":"\([^"]\+\).*/\1/g'
 }

 while [ "$(request_job_status)" == "RUNNING" ]; do
@@ -162,7 +173,7 @@ else
 fi

 segment_load_status() {
-  curl -s http://localhost:8888/druid/coordinator/v1/loadstatus | sed -e 's/.*wikipedia":\([0-9\.]\+\).*/\1/g'
+  curl -s -k https://localhost:9088/druid/coordinator/v1/loadstatus | sed -e 's/.*wikipedia":\([0-9\.]\+\).*/\1/g'
 }

 while [ "$(segment_load_status)" != "100.0" ]; do
@@ -172,7 +183,7 @@ done

 query_data() {
   # tag::query-data[]
-  curl -s -X 'POST' -H 'Content-Type:application/json' -d @query.json http://localhost:8888/druid/v2/sql
+  curl -s -k -X 'POST' -H 'Content-Type:application/json' -d @query.json https://localhost:9088/druid/v2/sql
   # end::query-data[]
 }
docs/modules/druid/examples/getting_started/hdfs.yaml (4 additions, 0 deletions)

@@ -10,10 +10,14 @@ spec:
     dfsReplication: 1
     zookeeperConfigMapName: simple-hdfs-znode
   nameNodes:
+    config:
+      listenerClass: external-stable # This exposes your Stacklet outside of Kubernetes. Remove this configuration if this is not desired
     roleGroups:
       default:
         replicas: 2
   dataNodes:
+    config:
+      listenerClass: external-unstable # This exposes your Stacklet outside of Kubernetes. Remove this configuration if this is not desired
     roleGroups:
       default:
         replicas: 1
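To see which addresses the `external-stable` and `external-unstable` listener classes end up exposing, the Stacklet listing or the generated Services can be inspected. A sketch, assuming `stackablectl` is installed and the cluster is named `simple-hdfs` as in the guide:

----
# Show the endpoints exposed for all Stacklets, including the HDFS name/data nodes.
stackablectl stacklet list

# Or look at the Services the operator created for this cluster.
kubectl get svc | grep simple-hdfs
----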
docs/modules/druid/examples/getting_started/hdfs.yaml.j2 (4 additions, 0 deletions)

@@ -10,10 +10,14 @@ spec:
     dfsReplication: 1
     zookeeperConfigMapName: simple-hdfs-znode
   nameNodes:
+    config:
+      listenerClass: external-stable # This exposes your Stacklet outside of Kubernetes. Remove this configuration if this is not desired
     roleGroups:
       default:
         replicas: 2
   dataNodes:
+    config:
+      listenerClass: external-unstable # This exposes your Stacklet outside of Kubernetes. Remove this configuration if this is not desired
     roleGroups:
       default:
         replicas: 1
docs/modules/druid/pages/getting_started/first_steps.adoc (16 additions, 2 deletions)

@@ -10,6 +10,7 @@ Three things need to be installed to have a Druid cluster:

 * A ZooKeeper instance for internal use by Druid
 * An HDFS instance to be used as a backend for deep storage
+* A PostgreSQL database to store the metadata of Druid
 * The Druid cluster itself

 Create them in this order, each one is created by applying a manifest file.
@@ -47,6 +48,17 @@ And apply it:
 include::example$getting_started/getting_started.sh[tag=install-hdfs]
 ----

+
+=== PostgreSQL
+
+Install a PostgreSQL database using `helm`.
+If you already have a PostgreSQL instance, you can skip this step and use your own below.
+
+[source,bash]
+----
+include::example$getting_started/getting_started.sh[tag=helm-install-postgres]
+----
+
 === Druid

 Create a file named `druid.yaml` with the following contents:
@@ -94,7 +106,9 @@ simple-hdfs-namenode-default       2/2     6m
 simple-zk-server-default           3/3     7m
 ----

-Then, create a port-forward for the Druid Router:
+Ideally you use `stackablectl stacklet list` to find out the address the Druid router is reachable at and use that address.
+
+As an alternative, you can create a port-forward for the Druid Router:

 ----
 include::example$getting_started/getting_started.sh[tag=port-forwarding]
@@ -126,7 +140,7 @@ Continue with the <<_query_the_data,next section>>.

 ====

-To open the web interface navigate your browser to https://localhost:8888/ to find the dashboard:
+To open the web interface navigate your browser to https://localhost:9088/ to find the dashboard:

 image::getting_started/dashboard.png[]
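The guide now points readers at the web console on https://localhost:9088/; the same router endpoint can also be exercised from the shell once the wikipedia data is loaded. A sketch with an illustrative ad-hoc query (not the guide's `query.json`), using `-k` for the same self-signed-TLS reason as the script:

----
# Run an ad-hoc Druid SQL query through the forwarded router port (illustrative, not part of the diff).
curl -s -k -X POST -H 'Content-Type: application/json' \
  -d '{"query": "SELECT COUNT(*) AS edits FROM wikipedia"}' \
  https://localhost:9088/druid/v2/sql
----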