diff --git a/.fossa.yml b/.fossa.yml new file mode 100755 index 00000000..17efe363 --- /dev/null +++ b/.fossa.yml @@ -0,0 +1,38 @@ +# Generated by FOSSA CLI (https://github.com/fossas/fossa-cli) +# Visit https://fossa.com to learn more + +version: 2 +cli: + server: https://app.fossa.com + fetcher: custom + project: management-api-for-apache-cassandra +analyze: + modules: + - name: DataStax Management API for Apache Cassandra + type: mvn + target: pom.xml + path: . + - name: datastax-mgmtapi-agent-3.x + type: mvn + target: pom.xml + path: management-api-agent-3.x + - name: datastax-mgmtapi-agent-4.x + type: mvn + target: pom.xml + path: management-api-agent-4.x + - name: datastax-mgmtapi-agent-common + type: mvn + target: pom.xml + path: management-api-agent-common + - name: datastax-mgmtapi-agent-dse-6.8 + type: mvn + target: pom.xml + path: management-api-agent-dse-6.8 + - name: datastax-mgmtapi-common + type: mvn + target: pom.xml + path: management-api-common + - name: datastax-mgmtapi-server + type: mvn + target: pom.xml + path: management-api-server diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 98c34f87..e73c632f 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,6 +1,6 @@ name: Java CI -on: [push] +on: [push, pull_request] jobs: build: @@ -43,7 +43,7 @@ jobs: uses: docker/setup-buildx-action@v1 with: version: latest - - name: Build with Maven + - name: Build with Maven and run tests run: | cat < ~/.m2/settings.xml @@ -62,19 +62,10 @@ jobs: EOF cp ~/.m2/settings.xml settings.xml - set -e if [[ "${{ matrix.runDSEtests }}" == "true" ]] then - mvn -B -q package -DskipTests --file pom.xml -P dse + MAVEN_OPTS="-P dse -DrunDSEtests=true" else - mvn -B -q package -DskipTests --file pom.xml - fi - - name: Run Integration Tests - run: | - set -e - if [[ "${{ matrix.runDSEtests }}" == "true" ]] - then - mvn -B -q integration-test --file pom.xml -P dse -DrunDSEtests=true - else - mvn -B -q integration-test --file pom.xml -Drun311tests=${{ matrix.run311tests }} -Drun40tests=${{ matrix.run40tests }} + MAVEN_OPTS="-Drun311tests=${{ matrix.run311tests }} -Drun40tests=${{ matrix.run40tests }}" fi + mvn -B -q install --file pom.xml $MAVEN_OPTS diff --git a/.github/workflows/docker-release.yaml b/.github/workflows/docker-release.yaml index a19e1e68..69c9fc07 100644 --- a/.github/workflows/docker-release.yaml +++ b/.github/workflows/docker-release.yaml @@ -6,7 +6,7 @@ on: - 'v*.*.*' jobs: - build-dse: + build-dse-6_8: runs-on: ubuntu-latest steps: - uses: actions/checkout@master @@ -30,25 +30,64 @@ jobs: EOF cp ~/.m2/settings.xml settings.xml - docker build -t management-api-for-dse-builder -f ./Dockerfile-build-dse ./ - docker tag management-api-for-dse-builder management-api-for-apache-cassandra-builder + - name: Get Release Version + id: get_version + run: echo "RELEASE_VERSION=$(echo ${GITHUB_REF##*/})" >> $GITHUB_ENV - name: Publish DSE 6.8 to Registry - uses: elgohr/Publish-Docker-Github-Action@master + uses: elgohr/Publish-Docker-Github-Action@v5 with: name: datastax/dse-mgmtapi-6_8 - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - tag_names: true + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_PASSWORD }} + tags: '${{ env.RELEASE_VERSION }}' dockerfile: Dockerfile-dse-68 - - name: Publish 4.0 to Registry - uses: elgohr/Publish-Docker-Github-Action@master + build-oss-4_0_1: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Set up QEMU + uses: 
docker/setup-qemu-action@v1 + - name: Setup Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + with: + version: latest + - name: Login to Docker Hub + run: echo "${{ secrets.DOCKER_HUB_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_HUB_USERNAME }}" --password-stdin + - name: Publish 4.0.1 to Registry + run: | + RELEASE_VERSION="${GITHUB_REF##*/}" + docker buildx build --push \ + --build-arg CASSANDRA_VERSION=4.0.1 \ + --tag k8ssandra/cass-management-api:4.0.1 \ + --tag k8ssandra/cass-management-api:4.0.1-$RELEASE_VERSION \ + --file Dockerfile-4_0 \ + --target oss40 \ + --platform linux/amd64,linux/arm64 . + build-oss-4_0_0: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Setup Buildx + id: buildx + uses: docker/setup-buildx-action@v1 with: - name: datastax/cassandra-mgmtapi-4_0_0 - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - tag_names: true - dockerfile: Dockerfile-4_0 - build-oss: + version: latest + - name: Login to Docker Hub + run: echo "${{ secrets.DOCKER_HUB_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_HUB_USERNAME }}" --password-stdin + - name: Publish 4.0.0 to Registry + run: | + RELEASE_VERSION="${GITHUB_REF##*/}" + docker buildx build --push \ + --build-arg CASSANDRA_VERSION=4.0.0 \ + --tag k8ssandra/cass-management-api:4.0.0 \ + --tag k8ssandra/cass-management-api:4.0.0-$RELEASE_VERSION \ + --file Dockerfile-4_0 \ + --target oss40 \ + --platform linux/amd64,linux/arm64 . + build-oss-3_11_7: runs-on: ubuntu-latest steps: - uses: actions/checkout@master @@ -60,40 +99,60 @@ jobs: with: version: latest - name: Login to Docker Hub - run: echo "${{ secrets.DOCKER_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_USERNAME }}" --password-stdin + run: echo "${{ secrets.DOCKER_HUB_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_HUB_USERNAME }}" --password-stdin - name: Publish 3.11.7 to Registry run: | RELEASE_VERSION="${GITHUB_REF##*/}" docker buildx build --push \ --build-arg CASSANDRA_VERSION=3.11.7 \ - --tag datastax/cassandra-mgmtapi-3_11_7:$RELEASE_VERSION \ + --tag k8ssandra/cass-management-api:3.11.7 \ + --tag k8ssandra/cass-management-api:3.11.7-$RELEASE_VERSION \ --file Dockerfile-oss \ --target oss311 \ --platform linux/amd64,linux/arm64 . + build-oss-3_11_8: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Setup Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + with: + version: latest + - name: Login to Docker Hub + run: echo "${{ secrets.DOCKER_HUB_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_HUB_USERNAME }}" --password-stdin - name: Publish 3.11.8 to Registry run: | RELEASE_VERSION="${GITHUB_REF##*/}" docker buildx build --push \ --build-arg CASSANDRA_VERSION=3.11.8 \ - --tag datastax/cassandra-mgmtapi-3_11_8:$RELEASE_VERSION \ + --tag k8ssandra/cass-management-api:3.11.8 \ + --tag k8ssandra/cass-management-api:3.11.8-$RELEASE_VERSION \ --file Dockerfile-oss \ --target oss311 \ --platform linux/amd64,linux/arm64 . 
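Each publish job in this workflow derives its image tag suffix from the pushed Git tag using shell parameter expansion. A minimal sketch of how that expansion behaves, using a hypothetical tag value for illustration:

    # The workflow triggers on tags matching 'v*.*.*'; for a tag push GITHUB_REF has
    # the form refs/tags/<tag>, and ${GITHUB_REF##*/} keeps only the part after the
    # last '/'.
    GITHUB_REF=refs/tags/v0.1.28            # hypothetical tag value, for illustration only
    RELEASE_VERSION="${GITHUB_REF##*/}"     # -> v0.1.28
    echo "k8ssandra/cass-management-api:3.11.8-${RELEASE_VERSION}"
    # -> k8ssandra/cass-management-api:3.11.8-v0.1.28
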
- - name: Publish 3.11.9 to Registry + build-oss-3_11_11: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Setup Buildx + id: buildx + uses: docker/setup-buildx-action@v1 + with: + version: latest + - name: Login to Docker Hub + run: echo "${{ secrets.DOCKER_HUB_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_HUB_USERNAME }}" --password-stdin + - name: Publish 3.11.11 to Registry run: | RELEASE_VERSION="${GITHUB_REF##*/}" docker buildx build --push \ - --build-arg CASSANDRA_VERSION=3.11.9 \ - --tag datastax/cassandra-mgmtapi-3_11_9:$RELEASE_VERSION \ + --build-arg CASSANDRA_VERSION=3.11.11 \ + --tag k8ssandra/cass-management-api:3.11.11 \ + --tag k8ssandra/cass-management-api:3.11.11-$RELEASE_VERSION \ --file Dockerfile-oss \ --target oss311 \ --platform linux/amd64,linux/arm64 . - - name: Publish 3.11.10 to Registry - run: | - RELEASE_VERSION="${GITHUB_REF##*/}" - docker buildx build --push \ - --build-arg CASSANDRA_VERSION=3.11.10 \ - --tag datastax/cassandra-mgmtapi-3_11_10:$RELEASE_VERSION \ - --file Dockerfile-oss \ - --target oss311 \ - --platform linux/amd64 . diff --git a/.github/workflows/license-check.yaml b/.github/workflows/license-check.yaml new file mode 100644 index 00000000..71795675 --- /dev/null +++ b/.github/workflows/license-check.yaml @@ -0,0 +1,17 @@ +name: Dependency and License Scan +on: + push: + branches: + - master +jobs: + scan-repo: + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v2 + - name: Install Fossa CLI + run: | + curl -H 'Cache-Control: no-cache' https://raw.githubusercontent.com/fossas/fossa-cli/master/install.sh | bash -s -- -b . + - name: Scan for dependencies and licenses + run: | + FOSSA_API_KEY=${{ secrets.FOSSA_PUSH_ONLY_API_KEY }} ./fossa analyze diff --git a/.gitignore b/.gitignore index 1fbee957..2caeaadf 100644 --- a/.gitignore +++ b/.gitignore @@ -34,6 +34,7 @@ local.properties *.launch .classpath .project +.cassandra-bin # Intellij .idea diff --git a/Dockerfile-4_0 b/Dockerfile-4_0 index eeb15c4e..b0a964af 100644 --- a/Dockerfile-4_0 +++ b/Dockerfile-4_0 @@ -1,6 +1,6 @@ -ARG CASSANDRA_VERSION=4.0-beta4 +ARG CASSANDRA_VERSION=4.0.1 -FROM maven:3.6.3-jdk-8-slim as builder +FROM --platform=$BUILDPLATFORM maven:3.6.3-jdk-8-slim as builder ARG METRICS_COLLECTOR_VERSION=0.2.0 @@ -29,7 +29,9 @@ RUN mkdir ${MCAC_PATH} && \ if test ! 
-e datastax-mcac-agent-${METRICS_COLLECTOR_VERSION}.tar.gz; then curl -L -O "https://github.com/datastax/metric-collector-for-apache-cassandra/releases/download/v${METRICS_COLLECTOR_VERSION}/datastax-mcac-agent-${METRICS_COLLECTOR_VERSION}.tar.gz"; fi && \ tar --directory ${MCAC_PATH} --strip-components 1 --gzip --extract --file datastax-mcac-agent-${METRICS_COLLECTOR_VERSION}.tar.gz -FROM cassandra:${CASSANDRA_VERSION} +FROM cassandra:${CASSANDRA_VERSION} as oss40 + +ARG TARGETARCH ENV CASSANDRA_PATH /opt/cassandra ENV MAAC_PATH /opt/management-api @@ -43,6 +45,7 @@ ENV CASSANDRA_LOGS ${CASSANDRA_PATH}/logs # https://datastax.jira.com/browse/DB-4627 # https://issues.apache.org/jira/browse/CASSANDRA-16027 ENV MGMT_API_LOG_DIR /var/log/cassandra +ENV MGMT_API_HEAP_SIZE 16m COPY --from=builder /build/management-api-agent-4.x/target/datastax-mgmtapi-agent-4.x-0.1.0-SNAPSHOT.jar ${MAAC_PATH}/datastax-mgmtapi-agent-0.1.0-SNAPSHOT.jar COPY --from=builder /build/management-api-server/target/datastax-mgmtapi-server-0.1.0-SNAPSHOT.jar ${MAAC_PATH}/ @@ -54,7 +57,7 @@ RUN mkdir ${USER_HOME_PATH} && \ chmod -R g+w ${CASSANDRA_PATH} ${MAAC_PATH} ${MCAC_PATH} ENV TINI_VERSION v0.18.0 -ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini +ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${TARGETARCH} /tini RUN chmod +x /tini RUN set -eux; \ diff --git a/Dockerfile-dse-68 b/Dockerfile-dse-68 index 283286a0..10dd8984 100644 --- a/Dockerfile-dse-68 +++ b/Dockerfile-dse-68 @@ -41,6 +41,7 @@ ENV CASSANDRA_LOGS ${CASSANDRA_PATH}/logs # https://datastax.jira.com/browse/DB-4627 # https://issues.apache.org/jira/browse/CASSANDRA-16027 ENV MGMT_API_LOG_DIR /var/log/cassandra +ENV MGMT_API_HEAP_SIZE 16m COPY --from=builder /build/management-api-agent-dse-6.8/target/datastax-mgmtapi-agent-dse-6.8-0.1.0-SNAPSHOT.jar ${MAAC_PATH}/datastax-mgmtapi-agent-0.1.0-SNAPSHOT.jar COPY --from=builder /build/management-api-server/target/datastax-mgmtapi-server-0.1.0-SNAPSHOT.jar ${MAAC_PATH}/ diff --git a/Dockerfile-oss b/Dockerfile-oss index af1fd787..58948191 100644 --- a/Dockerfile-oss +++ b/Dockerfile-oss @@ -1,4 +1,4 @@ -ARG CASSANDRA_VERSION=3.11.9 +ARG CASSANDRA_VERSION=3.11.11 FROM --platform=$BUILDPLATFORM maven:3.6.3-jdk-8-slim as builder @@ -56,6 +56,7 @@ ENV CASSANDRA_LOGS ${CASSANDRA_PATH}/logs # https://datastax.jira.com/browse/DB-4627 # https://issues.apache.org/jira/browse/CASSANDRA-16027 ENV MGMT_API_LOG_DIR /var/log/cassandra +ENV MGMT_API_HEAP_SIZE 16m COPY --from=builder /build/management-api-agent-3.x/target/datastax-mgmtapi-agent-3.x-0.1.0-SNAPSHOT.jar ${MAAC_PATH}/datastax-mgmtapi-agent-0.1.0-SNAPSHOT.jar COPY --from=builder /build/management-api-server/target/datastax-mgmtapi-server-0.1.0-SNAPSHOT.jar ${MAAC_PATH}/ diff --git a/README.md b/README.md index 42586986..6cd4fd9f 100644 --- a/README.md +++ b/README.md @@ -1,117 +1,155 @@ # Management API for Apache Cassandra® -![Java CI](https://github.com/datastax/management-api-for-apache-cassandra/workflows/Java%20CI/badge.svg) -![Docker Release](https://github.com/datastax/management-api-for-apache-cassandra/workflows/Docker%20Release/badge.svg) +![Java CI](https://github.com/k8ssandra/management-api-for-apache-cassandra/workflows/Java%20CI/badge.svg) +![Docker Release](https://github.com/k8ssandra/management-api-for-apache-cassandra/workflows/Docker%20Release/badge.svg) ## Introduction - Cassandra® operations have historically been command line driven. 
- The management of operational tools for Apache Cassandra® have been mostly - outsourced to teams who manage their specific environments. - + Cassandra operations have historically been command line driven. + The management of operational tools for Apache Cassandra have been mostly + outsourced to teams who manage their specific environments. + The result is a fragmented and tribal set of best practices, workarounds, and edge cases. - + The Management API is a sidecar service layer that attempts to build a well supported - set of operational actions on Cassandra® nodes that can be administered centrally. - It currently works with official [Apache Cassandra®](https://cassandra.apache.org) 3.11.x an 4.0 + set of operational actions on Cassandra nodes that can be administered centrally. + It currently works with official [Apache Cassandra](https://cassandra.apache.org) 3.11.x and 4.0 via a drop in java agent. - - * Lifecycle Management + + * Lifecycle Management * Start Node * Stop Node * Configuration Management (alpha) * Change YAML * Change jvm-opts - * Health Checks + * Health Checks * Kubernetes liveness/readiness checks - * Consistency level checks - * Per node actions + * Consistency level checks + * Per node actions * All nodetool commands - + ## Design Principles * Secure by default * Simple to use and extend * CQL Only for all C* interactions * Operations: Use `CALL` method for invoking via CQL - * Observations: Rely on System Views - - The Management API has no configuration file. Rather, it can only be configured from a - small list of command line flags. Communication by default can only be via **unix socket** + * Observations: Rely on System Views + + The Management API has no configuration file. Rather, it can only be configured from a + small list of command line flags. Communication by default can only be via **unix socket** or via a **http(s) endpoint** with optional TLS client auth. - - In a containerized setting the Management API represents **PID 1** and will be - responsible for the lifecycle of Cassandra® via the API. - - Communication between the Management API and Cassandra® is via a local **unix socket** using - CQL as it's only protocol. This means, out of the box Cassandra® can be started + + In a containerized setting the Management API represents **PID 1** and will be + responsible for the lifecycle of Cassandra via the API. + + Communication between the Management API and Cassandra is via a local **unix socket** using + CQL as it's only protocol. This means, out of the box Cassandra can be started securely with no open ports! Also, using CQL only means operators can execute operations via CQL directly if they wish. - + Each Management API is responsible for the local node only. Coordination across nodes is up to the caller. That being said, complex health checks can be added via CQL. - + ## Building ### Containers - -First you need to build the Management API base image: - + +First you need to build the Management API base image + +(*Deprecated: For Cassandra 3.11 and 4.0 images, as well as DSE 6.8 images, you do not need to build the Management API builder image*): + docker build -t management-api-for-apache-cassandra-builder -f ./Dockerfile-build . 
-Then you need to build the image based on the actual Cassandra® version, either the 3.11 or 4.0: +Then you need to build the image based on the actual Cassandra version, either the 3.11 or 4.0: -**NOTE:** For building 3.11 images, you will need to have the [Docker buildx plugin](https://docs.docker.com/buildx/working-with-buildx/) installed. +**NOTE:** For building 3.11 and 4.0 images, you will need to have the [Docker buildx plugin](https://docs.docker.com/buildx/working-with-buildx/) installed. - #Create a docker image with management api and C* 3.11 (version 3.11.7 and newer are supported, replace `3.11.10` with the version you want below) - docker buildx build --load --build-arg CASSANDRA_VERSION=3.11.10 --tag mgmtapi-3_11 --file Dockerfile-oss --target oss311 --platform linux/amd64 . - - #Create a docker image with management api and C* 4.0 - docker build -t mgmtapi-4_0 -f Dockerfile-4_0 . + #Create a docker image with management api and C* 3.11 (version 3.11.7 and newer are supported, replace `3.11.11` with the version you want below) + docker buildx build --load --build-arg CASSANDRA_VERSION=3.11.11 --tag mgmtapi-3_11 --file Dockerfile-oss --target oss311 --platform linux/amd64 . + + #Create a docker image with management api and C* 4.0 (version 4.0.0 and 4.0.1 are supported) + docker buildx build --load --build-arg CASSANDRA_VERSION=4.0.1 --tag mgmtapi-4_0 --file Dockerfile-4_0 --target oss40 --platform linux/amd64 . + +You can also build an image based on Datastax Astra Cassandra 4.0 sources. First checkout [sources](https://github.com/datastax/cassandra/tree/astra) and build a tgz distribution: -You can also build an image based on Datastax Astra Cassandra® 4.0 sources. First checkout [sources](https://github.com/datastax/cassandra/tree/astra) and build a tgz distribution: - ant artifacts - + Then copy the tgz archive into the astra-4.0 directory of the Management API sources and run: - + cd astra-4.0 docker build -t datastax/astra:4.0 . - + Finally build the Management API image: - + cd .. docker build -t mgmtapi-astra-4_0 -f Dockerfile-astra-4_0 . - + ### Standalone - + mvn -DskipTests package mvn test - + mvn integration-test -Drun311tests=true -Drun40tests=true + +**NOTE 1:** Running ````integration-test````s will also run unit tests. + +**NOTE 2:** Running ````integration-test````s requires at least one of ````-Drun311tests````, ````-Drun40tests```` or ````-DrunDSEtests```` to be set to ````true```` (you can set any combination of them to ````true````). + +**NOTE 3:** In order to run DSE integration tests, you must also enable the ````dse```` profile: + + mvn integration-test -P dse -DrunDSEtests=true + ## REST API - [The current Swagger/OpenAPI documentation](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/datastax/management-api-for-apache-cassandra/master/management-api-server/doc/openapi.json&nocors) - + [The current Swagger/OpenAPI documentation](https://redocly.github.io/redoc/?url=https://raw.githubusercontent.com/k8ssandra/management-api-for-apache-cassandra/master/management-api-server/doc/openapi.json&nocors) + Also readable from url root: ````/openapi.json```` - + ## Usage - The latest releases are on Docker Hub: - [Management API for Apache Cassandra® 3.11.7](https://hub.docker.com/repository/docker/datastax/cassandra-mgmtapi-3_11_7) and - [Management API for Apache Cassandra® 4.0 alpha3](https://hub.docker.com/repository/docker/datastax/cassandra-mgmtapi-4_0_0). 
- + As of v0.1.24, Management API Docker images for Apache Cassandra are consolidated into a single image repository here: + + - [Management API for Apache Cassandra](https://hub.docker.com/repository/docker/k8ssandra/cass-management-api) + + For different Cassandra versions, you will need to specify the Cassandra version as an image tag. The following lists the currently supported versions + + k8ssandra/cass-management-api:3.11.7 + k8ssandra/cass-management-api:3.11.8 + k8ssandra/cass-management-api:3.11.9 (**Deprecated: last version is v0.1.27**) + k8ssandra/cass-management-api:3.11.10 (**Deprecated: last version is v0.1.27**) + k8ssandra/cass-management-api:3.11.11 + k8ssandra/cass-management-api:4.0.0 + k8ssandra/cass-management-api:4.0.1 + + Each of the above examples will always point to the **latest** Management API version for the associated Cassandra version. If you want a specific + Management API version, you can append the desired version to the Cassandra version tag. For example, if you want v0.1.24 of Management API for Cassandra version 3.11.9: + + docker pull k8ssandra/cass-management-api:3.11.9-v0.1.24 + + For Management API versions v0.1.23 and lower, you will need to use the old Docker repositories, which are Cassandra version specific: + + - [Management API for Apache Cassandra 3.11.7](https://hub.docker.com/repository/docker/datastax/cassandra-mgmtapi-3_11_7) + - [Management API for Apache Cassandra 3.11.8](https://hub.docker.com/repository/docker/datastax/cassandra-mgmtapi-3_11_8) + - [Management API for Apache Cassandra 3.11.9](https://hub.docker.com/repository/docker/datastax/cassandra-mgmtapi-3_11_9) + - [Management API for Apache Cassandra 3.11.10](https://hub.docker.com/repository/docker/datastax/cassandra-mgmtapi-3_11_10) + - [Management API for Apache Cassandra 4.0-beta4](https://hub.docker.com/repository/docker/datastax/cassandra-mgmtapi-4_0_0). + + For DSE Docker images, the location remains unchanged + + - [Management API for DSE 6.8](https://hub.docker.com/repository/docker/datastax/dse-mgmtapi-6_8) + For running standalone the jars can be downloaded from the github release: - [Management API Releases Zip](https://github.com/datastax/management-api-for-apache-cassandra/releases) + [Management API Releases Zip](https://github.com/k8ssandra/management-api-for-apache-cassandra/releases) + + The Management API can be run as a standalone service or along with the Kubernetes + [cass-operator](https://github.com/datastax/cass-operator). - The Management API can be run as a standalone service or along with the Kubernetes - [cass-operator](https://github.com/datastax/cass-operator). - The Management API is configured from the CLI. To start the service with a C* version built above, run: - - > docker run -p 8080:8080 -it --rm mgmtapi-4_0 - + + > docker run -p 8080:8080 -it --rm mgmtapi-4_0 + > curl http://localhost:8080/api/v0/probes/liveness OK - + # Check service and C* are running > curl http://localhost:8080/api/v0/probes/readiness OK @@ -124,8 +162,6 @@ Once you have DSE jars published locally, follow these steps: # The builder image needs to have Maven settings.xml (that provides access to Artifactory): cp $HOME/.m2/settings.xml $PWD -docker build -t management-api-for-dse-builder -f ./Dockerfile-build-dse . - docker build -t mgmtapi-dse -f Dockerfile-dse-68 . 
docker run -p 8080:8080 -it --rm mgmtapi-dse @@ -137,34 +173,34 @@ docker run -p 8080:8080 -it --rm mgmtapi-dse To start the service with a locally installed C* or DSE instance, you would run the below commands. The Management API will figure out through `--db-home` whether it points to a C* or DSE folder - + # REQUIRED: Add management api agent to C*/DSE startup > export JVM_EXTRA_OPTS="-javaagent:$PWD/management-api-agent/target/datastax-mgmtapi-agent-0.1.0-SNAPSHOT.jar" - + > alias mgmtapi="java -jar management-api-server/target/datastax-mgmtapi-server-0.1.0-SNAPSHOT.jar" - + # Start the service with a local unix socket only, you could also pass -H http://localhost:8080 to expose a port > mgmtapi --db-socket=/tmp/db.sock --host=unix:///tmp/mgmtapi.sock --db-home= - + # Cassandra/DSE will be started by the service by default unless you pass --explicit-start flag - + # Check the service is up > curl --unix-socket /tmp/mgmtapi.sock http://localhost/api/v0/probes/liveness - OK - + OK + # Check C*/DSE is up - > curl --unix-socket /tmp/mgmtapi.sock http://localhost/api/v0/probes/readiness + > curl --unix-socket /tmp/mgmtapi.sock http://localhost/api/v0/probes/readiness OK - + # Stop C*/DSE curl -XPOST --unix-socket /tmp/mgmtapi.sock http://localhost/api/v0/lifecycle/stop OK # CLI Help The CLI help covers the different options: - + mgmtapi --help - + NAME cassandra-management-api - REST service for managing an Apache Cassandra or DSE node @@ -238,15 +274,15 @@ docker run -p 8080:8080 -it --rm mgmtapi-dse This options value must be a path on the file system that must be readable. - + COPYRIGHT Copyright (c) DataStax 2020 - + LICENSE Please see https://www.apache.org/licenses/LICENSE-2.0 for more information - + ## Roadmap * CQL based configuration changes * Configuration as system table @@ -260,3 +296,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +## Dependencies + +For information on the packaged dependencies of the Management API for Apache Cassandra® and their licenses, check out our [open source report](https://app.fossa.com/reports/1aa499fc-6878-4ad3-b61c-350258c0605b). 
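Beyond the probe endpoints documented in the README above, this change adds a keyspace-listing endpoint and a repair endpoint to the server. A short usage sketch, assuming the service is published on localhost:8080 with the same /api/v0 prefix as the probe examples (paths and JSON field names are taken from KeyspaceOpsResources, NodeOpsResources and RepairRequest in this diff):

    # List all keyspaces, or filter to a single keyspace by name
    curl http://localhost:8080/api/v0/ops/keyspace
    curl "http://localhost:8080/api/v0/ops/keyspace?keyspaceName=system_auth"

    # Trigger a repair of one keyspace; "tables" may be null to repair all tables,
    # and "full" selects a full rather than incremental repair. The server answers
    # with plain-text "OK" once the repair has been submitted.
    curl -X POST http://localhost:8080/api/v0/ops/node/repair \
         -H "Content-Type: application/json" \
         -d '{"keyspace_name": "system_auth", "tables": null, "full": true}'

Under the covers the server translates these requests into CQL CALL statements (CALL NodeOps.getKeyspaces() and CALL NodeOps.repair(?, ?, ?)) over the local unix socket, consistent with the design principles described earlier in the README.
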
diff --git a/management-api-agent-3.x/pom.xml b/management-api-agent-3.x/pom.xml index 06c7a606..df5aa86b 100644 --- a/management-api-agent-3.x/pom.xml +++ b/management-api-agent-3.x/pom.xml @@ -12,10 +12,9 @@ datastax-mgmtapi-agent-3.x - 3.11.10 + 3.11.11 1.10.10 3.1.5 - 4.10.0 build_version.sh 4.13.1 @@ -50,7 +49,7 @@ org.apache.cassandra cassandra-all - ${cassandra.version} + ${cassandra3.version} commons-codec @@ -59,11 +58,6 @@ provided - - com.datastax.oss - java-driver-query-builder - ${driver.version} - junit junit diff --git a/management-api-agent-4.x/pom.xml b/management-api-agent-4.x/pom.xml index 39ac6552..3af44fab 100644 --- a/management-api-agent-4.x/pom.xml +++ b/management-api-agent-4.x/pom.xml @@ -12,45 +12,54 @@ datastax-mgmtapi-agent-4.x - 4.0-beta4 + 4.0.0 1.10.10 3.1.5 - 4.10.0 build_version.sh 4.13.1 + + + com.datastax + datastax-mgmtapi-common + ${project.version} + + + com.datastax + datastax-mgmtapi-agent-common + ${project.version} + + + net.bytebuddy + byte-buddy + ${bytebuddy.version} + + + net.bytebuddy + byte-buddy-agent + ${bytebuddy.version} + + + junit + junit + ${junit.version} + test + + + + default true - - com.datastax - datastax-mgmtapi-common - ${project.version} - - - com.datastax - datastax-mgmtapi-agent-common - ${project.version} - - - net.bytebuddy - byte-buddy - ${bytebuddy.version} - - - net.bytebuddy - byte-buddy-agent - ${bytebuddy.version} - org.apache.cassandra cassandra-all - ${cassandra.version} + ${cassandra4.version} commons-codec @@ -59,16 +68,25 @@ provided + + + + dse-db-all + + false + + - com.datastax.oss - java-driver-query-builder - ${driver.version} - - - junit - junit - ${junit.version} - test + com.datastax.dse + dse-db-all + ${cassandra4.version} + + + commons-codec + * + + + provided diff --git a/management-api-agent-4.x/src/main/java/com/datastax/mgmtapi/shim/CassandraAPI4x.java b/management-api-agent-4.x/src/main/java/com/datastax/mgmtapi/shim/CassandraAPI4x.java index 2b69b33f..ba182451 100644 --- a/management-api-agent-4.x/src/main/java/com/datastax/mgmtapi/shim/CassandraAPI4x.java +++ b/management-api-agent-4.x/src/main/java/com/datastax/mgmtapi/shim/CassandraAPI4x.java @@ -30,7 +30,6 @@ import io.netty.channel.ChannelInitializer; import org.apache.cassandra.auth.IRoleManager; import org.apache.cassandra.config.DatabaseDescriptor; -import org.apache.cassandra.cql3.CQLStatement; import org.apache.cassandra.db.ConsistencyLevel; import org.apache.cassandra.db.Keyspace; import org.apache.cassandra.db.compaction.CompactionManager; @@ -111,7 +110,7 @@ public Map, List> checkConsistencyLevel(String consistencyLev Token midpoint = partitioner.midpoint(range.left, range.right); EndpointsForRange endpoints = mockStrategy.calculateNaturalReplicas(midpoint, tokenMetadata); - if (!ReplicaPlans.isSufficientLiveReplicasForRead(mockKs, cl, endpoints)) + if (!ReplicaPlans.isSufficientLiveReplicasForRead(mockKs.getReplicationStrategy(), cl, endpoints)) { List downEndpoints = new ArrayList<>(); for (InetAddressAndPort endpoint : endpoints.endpoints()) @@ -122,7 +121,7 @@ public Map, List> checkConsistencyLevel(String consistencyLev downEndpoints.add(endpoint.toString()); } - int blockFor = cl.blockFor(mockKs); + int blockFor = cl.blockFor(mockKs.getReplicationStrategy()); if (downEndpoints.isEmpty() && endpoints.size() < blockFor) downEndpoints.add(String.format("%d replicas required, but only %d nodes in the ring", blockFor, endpoints.size())); diff --git 
a/management-api-agent-4.x/src/main/java/com/datastax/mgmtapi/shim/RpcStatement.java b/management-api-agent-4.x/src/main/java/com/datastax/mgmtapi/shim/RpcStatement.java index 72b4c63c..cf30b8f4 100644 --- a/management-api-agent-4.x/src/main/java/com/datastax/mgmtapi/shim/RpcStatement.java +++ b/management-api-agent-4.x/src/main/java/com/datastax/mgmtapi/shim/RpcStatement.java @@ -1,16 +1,8 @@ package com.datastax.mgmtapi.shim; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; - import com.datastax.mgmtapi.shims.RpcStatementShim; import org.apache.cassandra.audit.AuditLogContext; -import org.apache.cassandra.cql3.CQLStatement; -import org.apache.cassandra.cql3.ColumnSpecification; import org.apache.cassandra.cql3.QueryOptions; -import org.apache.cassandra.exceptions.InvalidRequestException; import org.apache.cassandra.service.ClientState; import org.apache.cassandra.service.QueryState; import org.apache.cassandra.transport.messages.ResultMessage; diff --git a/management-api-agent-common/README.md b/management-api-agent-common/README.md index da77f02d..12dfad7d 100644 --- a/management-api-agent-common/README.md +++ b/management-api-agent-common/README.md @@ -1,6 +1,6 @@ # Management API Agent -The Agent is the bridge between the Management API and the instance of Cassandra or DSE it controls. +The Agent is the bridge between the Management API and the instance of Apache Cassandra™ or DSE it controls. This is accomplished by adding the agent jarfile to the startup options of the server with the `-javaagent` directive. See the [main README](../README.md#using-the-service-with-a-locally-installed-c-or-dse-instance) as an example. diff --git a/management-api-agent-common/pom.xml b/management-api-agent-common/pom.xml index a3ab7ed0..9f59438c 100644 --- a/management-api-agent-common/pom.xml +++ b/management-api-agent-common/pom.xml @@ -12,10 +12,9 @@ datastax-mgmtapi-agent-common - 3.11.10 + 3.11.11 1.10.10 3.1.5 - 4.10.0 build_version.sh 4.13.1 @@ -45,7 +44,7 @@ org.apache.cassandra cassandra-all - ${cassandra.version} + ${cassandra3.version} commons-codec diff --git a/management-api-agent-common/src/main/java/com/datastax/mgmtapi/NodeOpsProvider.java b/management-api-agent-common/src/main/java/com/datastax/mgmtapi/NodeOpsProvider.java index 8561607a..e0ac9c70 100644 --- a/management-api-agent-common/src/main/java/com/datastax/mgmtapi/NodeOpsProvider.java +++ b/management-api-agent-common/src/main/java/com/datastax/mgmtapi/NodeOpsProvider.java @@ -38,6 +38,8 @@ import org.apache.cassandra.auth.RoleOptions; import org.apache.cassandra.auth.RoleResource; import org.apache.cassandra.db.ConsistencyLevel; +import org.apache.cassandra.repair.RepairParallelism; +import org.apache.cassandra.repair.messages.RepairOption; /** * Replace JMX calls with CQL 'CALL' methods via the the Rpc framework @@ -328,6 +330,12 @@ public List>>> getStreamInfo() return ShimLoader.instance.get().getStreamInfo(); } + @Rpc(name = "getKeyspaces") + public List getKeyspaces() + { + return ShimLoader.instance.get().getKeyspaces(); + } + @Rpc(name = "createKeyspace") public void createKeyspace(@RpcParam(name="keyspaceName") String keyspaceName, @RpcParam(name="replicationSettings") Map replicationSettings) throws IOException { @@ -452,4 +460,31 @@ public void clearSnapshots(@RpcParam(name="snapshotNames") List snapshot } } + @Rpc(name = "repair") + public void repair(@RpcParam(name="keyspaceName") String keyspace, @RpcParam(name="tables") List tables, 
@RpcParam(name="full") Boolean full) throws IOException + { + // At least one keyspace is required + if (keyspace != null) + { + // create the repair spec + Map repairSpec = new HashMap<>(); + + // add any specified tables to the repair spec + if (tables != null && !tables.isEmpty()) + { + // set the tables/column families + repairSpec.put(RepairOption.COLUMNFAMILIES_KEY, String.join(",", tables)); + } + + // handle incremental vs full + boolean isIncremental = Boolean.FALSE.equals(full); + repairSpec.put(RepairOption.INCREMENTAL_KEY, Boolean.toString(isIncremental)); + if (isIncremental) + { + // incremental repairs will fail if parallelism is not set + repairSpec.put(RepairOption.PARALLELISM_KEY, RepairParallelism.PARALLEL.getName()); + } + ShimLoader.instance.get().getStorageService().repairAsync(keyspace, repairSpec); + } + } } diff --git a/management-api-agent-common/src/main/java/com/datastax/mgmtapi/interceptors/SystemDistributedReplicationInterceptor.java b/management-api-agent-common/src/main/java/com/datastax/mgmtapi/interceptors/SystemDistributedReplicationInterceptor.java index a4d53c91..44f943e6 100644 --- a/management-api-agent-common/src/main/java/com/datastax/mgmtapi/interceptors/SystemDistributedReplicationInterceptor.java +++ b/management-api-agent-common/src/main/java/com/datastax/mgmtapi/interceptors/SystemDistributedReplicationInterceptor.java @@ -15,6 +15,7 @@ import com.google.common.collect.ImmutableMap; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import net.bytebuddy.agent.builder.AgentBuilder; @@ -34,19 +35,78 @@ public class SystemDistributedReplicationInterceptor { + private static final Logger LOGGER = LoggerFactory.getLogger(SystemDistributedReplicationInterceptor.class); private static final String SYSTEM_DISTRIBUTED_NTS_DC_OVERRIDE_PROPERTY = "cassandra.system_distributed_replication_dc_names"; private static final String SYSTEM_DISTRIBUTED_NTS_RF_OVERRIDE_PROPERTY = "cassandra.system_distributed_replication_per_dc"; + /** + * This DC_RF property is used to specify different RF per DC, instead of a single RF for all DCs + * The format of the property value should be :,:,.... + * + * ex. cassandra.system_distributed_replication=dc1:1,dc2:3,dc3:3 + * + * If both this override and either of the above overrides are present, this value will take + * precedence. 
+ */ + private static final String SYSTEM_DISTRIBUTED_NTS_DC_RF_OVERRIDE_PROPERTY = "cassandra.system_distributed_replication"; + + private static Map parseDcRfOverrides() + { + Map dcRfOverrides = null; + try + { + if (System.getProperty(SYSTEM_DISTRIBUTED_NTS_DC_RF_OVERRIDE_PROPERTY) != null) + { + dcRfOverrides = new HashMap<>(); + String mappings = System.getProperty(SYSTEM_DISTRIBUTED_NTS_DC_RF_OVERRIDE_PROPERTY); + for (String mapping : mappings.split(",")) + { + String map = mapping.trim(); + List parts = Arrays.stream(map.split(":")) + .map(String::trim) + .collect(Collectors.toList()); + if (parts.size() != 2) + { + LOGGER.error("Invalid dc-rf mapping for {}: {}", + SYSTEM_DISTRIBUTED_NTS_DC_RF_OVERRIDE_PROPERTY, + mapping); + } + else { + String dc = parts.get(0); + Integer rf = Integer.parseInt(parts.get(1)); + if (rf <= 0 || rf > 5) + { + LOGGER.error("Invalid repliction factor specified for {}: {}", + SYSTEM_DISTRIBUTED_NTS_DC_RF_OVERRIDE_PROPERTY, + mapping); + } + else + { + dcRfOverrides.put(dc, rf.toString()); + } + } + } + } + } + catch (Throwable t) + { + LOGGER.error("Error parsing system distributed replication override properties", t); + } + + return dcRfOverrides; + } private static final Map SYSTEM_DISTRIBUTED_NTS_OVERRIDE; static { Integer rfOverride = null; List dcOverride = Collections.emptyList(); + Map dcRfOverrides = null; Map ntsOverride = new HashMap<>(); ntsOverride.put(ReplicationParams.CLASS, NetworkTopologyStrategy.class.getSimpleName()); try { + dcRfOverrides = parseDcRfOverrides(); rfOverride = Integer.getInteger(SYSTEM_DISTRIBUTED_NTS_RF_OVERRIDE_PROPERTY, null); dcOverride = Arrays.stream(System.getProperty(SYSTEM_DISTRIBUTED_NTS_DC_OVERRIDE_PROPERTY, "") .split(",")) @@ -55,22 +115,27 @@ public class SystemDistributedReplicationInterceptor } catch (Throwable t) { - LoggerFactory.getLogger(SystemDistributedReplicationInterceptor.class).error("Error parsing system distributed replication override properties", t); + LOGGER.error("Error parsing system distributed replication override properties", t); } - if (rfOverride != null && !dcOverride.isEmpty()) + if (dcRfOverrides != null && !dcRfOverrides.isEmpty()) + { + ntsOverride.putAll(dcRfOverrides); + LOGGER.info("Using override for distributed system keyspaces: {}", ntsOverride); + } + else if (rfOverride != null && !dcOverride.isEmpty()) { //Validate reasonable defaults if (rfOverride <= 0 || rfOverride > 5) { - LoggerFactory.getLogger(SystemDistributedReplicationInterceptor.class).error("Invalid value for {}", SYSTEM_DISTRIBUTED_NTS_RF_OVERRIDE_PROPERTY); + LOGGER.error("Invalid value for {}", SYSTEM_DISTRIBUTED_NTS_RF_OVERRIDE_PROPERTY); } else { for (String dc : dcOverride) ntsOverride.put(dc, String.valueOf(rfOverride)); - LoggerFactory.getLogger(SystemDistributedReplicationInterceptor.class).info("Using override for distributed system keyspaces: {}", ntsOverride); + LOGGER.info("Using override for distributed system keyspaces: {}", ntsOverride); } } diff --git a/management-api-agent-common/src/main/java/com/datastax/mgmtapi/rpc/Rpc.java b/management-api-agent-common/src/main/java/com/datastax/mgmtapi/rpc/Rpc.java index ac557237..b7101f58 100644 --- a/management-api-agent-common/src/main/java/com/datastax/mgmtapi/rpc/Rpc.java +++ b/management-api-agent-common/src/main/java/com/datastax/mgmtapi/rpc/Rpc.java @@ -8,9 +8,6 @@ import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; -import org.apache.cassandra.auth.Permission; - - /** * Method annotation for methods that can 
be called via RPC. * diff --git a/management-api-agent-common/src/main/java/com/datastax/mgmtapi/rpc/RpcExecutionException.java b/management-api-agent-common/src/main/java/com/datastax/mgmtapi/rpc/RpcExecutionException.java index bd737ec2..1174ac8c 100644 --- a/management-api-agent-common/src/main/java/com/datastax/mgmtapi/rpc/RpcExecutionException.java +++ b/management-api-agent-common/src/main/java/com/datastax/mgmtapi/rpc/RpcExecutionException.java @@ -7,7 +7,6 @@ import java.lang.reflect.InvocationTargetException; import java.security.AccessControlException; -import java.util.Optional; import com.google.common.base.Throwables; import org.slf4j.Logger; diff --git a/management-api-agent-dse-6.8/src/main/java/org/apache/cassandra/locator/K8SeedProviderDse68.java b/management-api-agent-dse-6.8/src/main/java/org/apache/cassandra/locator/K8SeedProviderDse68.java index 7a8c7c87..47c0041d 100644 --- a/management-api-agent-dse-6.8/src/main/java/org/apache/cassandra/locator/K8SeedProviderDse68.java +++ b/management-api-agent-dse-6.8/src/main/java/org/apache/cassandra/locator/K8SeedProviderDse68.java @@ -11,7 +11,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/management-api-agent-dse-6.8/src/main/java/org/apache/cassandra/transport/UnixSocketServerDse68.java b/management-api-agent-dse-6.8/src/main/java/org/apache/cassandra/transport/UnixSocketServerDse68.java index 93df686b..08d1b6f5 100644 --- a/management-api-agent-dse-6.8/src/main/java/org/apache/cassandra/transport/UnixSocketServerDse68.java +++ b/management-api-agent-dse-6.8/src/main/java/org/apache/cassandra/transport/UnixSocketServerDse68.java @@ -11,7 +11,6 @@ import io.netty.channel.ChannelPipeline; import io.netty.channel.SimpleChannelInboundHandler; import io.reactivex.Single; -import org.apache.cassandra.auth.AuthManager; import org.apache.cassandra.auth.IAuthenticator; import org.apache.cassandra.config.DatabaseDescriptor; import org.apache.cassandra.service.ClientState; diff --git a/management-api-common/pom.xml b/management-api-common/pom.xml index 887f6aa6..11a8e561 100644 --- a/management-api-common/pom.xml +++ b/management-api-common/pom.xml @@ -15,7 +15,7 @@ 1.7.25 1.2.3 4.1.50.Final - 3.11.10 + 3.11.11 4.13.1 3.5.13 @@ -45,7 +45,7 @@ org.apache.cassandra cassandra-all - ${cassandra.version} + ${cassandra3.version} commons-codec diff --git a/management-api-common/src/main/java/com/datastax/mgmtapi/shims/CassandraAPI.java b/management-api-common/src/main/java/com/datastax/mgmtapi/shims/CassandraAPI.java index 15c057a2..13a09bcf 100644 --- a/management-api-common/src/main/java/com/datastax/mgmtapi/shims/CassandraAPI.java +++ b/management-api-common/src/main/java/com/datastax/mgmtapi/shims/CassandraAPI.java @@ -66,4 +66,8 @@ default Object handleRpcResult(Callable rpcResult) throws Exception RpcStatementShim makeRpcStatement(String method, String[] params); HintsService getHintsService(); + + default List getKeyspaces() { + return StorageService.instance.getKeyspaces(); + } } diff --git a/management-api-server/doc/openapi.json b/management-api-server/doc/openapi.json index 2c1af01a..977bbc7e 100644 --- a/management-api-server/doc/openapi.json +++ b/management-api-server/doc/openapi.json @@ -1,8 +1,8 @@ { "openapi" : "3.0.1", "info" : { - "title" : "Management API for Apache Cassandra", - "description" : "This is a Restful service for operating Apache Cassandra. 
You can find out more about the Management API on [Github](http://github.com/datastax/management-api-for-apache-cassandra)", + "title" : "Management API for Apache Cassandraâ„¢", + "description" : "This is a Restful service for operating Apache Cassandra. You can find out more about the Management API on [Github](http://github.com/k8ssandra/management-api-for-apache-cassandra)", "license" : { "name" : "Apache 2.0", "url" : "http://www.apache.org/licenses/LICENSE-2.0.html" diff --git a/management-api-server/pom.xml b/management-api-server/pom.xml index 66072182..839c84d8 100644 --- a/management-api-server/pom.xml +++ b/management-api-server/pom.xml @@ -2,14 +2,6 @@ 4.0.0 - - - jcenter - jcenter - https://jcenter.bintray.com/ - - - com.datastax datastax-mgmtapi @@ -23,14 +15,13 @@ 1.7.25 1.2.3 2.1.1 - 19.0 + 30.1.1-jre 2.7.0 2.1.6 4.5.9.Final 4.1.50.Final - 4.10.0 - 3.11.5 - 3.1.1 + 3.11.11 + 3.2.8 4.0.3 3.17.2 4.13.1 @@ -99,17 +90,25 @@ ${driver.version} - com.aries - docker-java-shaded + com.github.docker-java + docker-java ${docker.java.version} test + + + com.github.docker-java + docker-java-transport-jersey + + + com.github.docker-java + docker-java-transport-netty + + - org.apache.cassandra - apache-cassandra - ${cassandra.version} - bin - tar.gz + com.github.docker-java + docker-java-transport-zerodep + ${docker.java.version} test @@ -165,20 +164,43 @@ 1.4.0 - org.apache.maven.plugins - maven-dependency-plugin - 2.5.1 + maven-antrun-plugin + 3.0.0 - process-test-resources + check.cassandra.bin + process-resources + + + + + + + + + true + - unpack-dependencies + run + + + download.cassandra.bin + process-test-resources - apache-cassandra - tar.gz - target + + + + + + + ${skipTests} + + run + @@ -190,7 +212,7 @@ ${dseIncluded} ${basedir}/.. - ${basedir}/target/apache-cassandra-${cassandra.version} + ${basedir}/.cassandra-bin/apache-cassandra-${cassandra3.version} -javaagent:${basedir}/../management-api-agent/target/datastax-mgmtapi-agent-3.x-${project.version}.jar diff --git a/management-api-server/src/main/java/com/datastax/mgmtapi/resources/KeyspaceOpsResources.java b/management-api-server/src/main/java/com/datastax/mgmtapi/resources/KeyspaceOpsResources.java index b4c5f6bb..1b0ade04 100644 --- a/management-api-server/src/main/java/com/datastax/mgmtapi/resources/KeyspaceOpsResources.java +++ b/management-api-server/src/main/java/com/datastax/mgmtapi/resources/KeyspaceOpsResources.java @@ -7,7 +7,10 @@ import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; + import javax.ws.rs.Consumes; +import javax.ws.rs.GET; import javax.ws.rs.POST; import javax.ws.rs.Path; import javax.ws.rs.Produces; @@ -24,6 +27,10 @@ import com.datastax.mgmtapi.ManagementApplication; import com.datastax.mgmtapi.resources.models.CreateOrAlterKeyspaceRequest; import com.datastax.mgmtapi.resources.models.KeyspaceRequest; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.fasterxml.jackson.databind.ObjectMapper; + import io.swagger.v3.oas.annotations.Operation; import org.apache.http.HttpStatus; @@ -31,6 +38,7 @@ public class KeyspaceOpsResources { private static final Logger logger = LoggerFactory.getLogger(KeyspaceOpsResources.class); + private static final ObjectMapper jsonMapper = new ObjectMapper(); private final ManagementApplication app; private final CqlService cqlService; @@ -147,4 +155,27 @@ public Response alter(CreateOrAlterKeyspaceRequest createOrAlterKeyspaceRequest) return Response.ok("OK").build(); }); 
} + + @GET + @Produces(MediaType.APPLICATION_JSON) + @Consumes(MediaType.APPLICATION_JSON) + @Operation(summary = "List the keyspaces existing in the cluster") + public Response list(@QueryParam(value="keyspaceName")String keyspaceName) + { + return NodeOpsResources.handle(() -> + { + ResultSet result = cqlService.executePreparedStatement(app.dbUnixSocketFile, "CALL NodeOps.getKeyspaces()"); + Row row = result.one(); + List keyspaces = null; + if (row != null) + { + List allKeyspaces = row.getList(0, String.class); + keyspaces = allKeyspaces.stream() + .filter(ks -> ks.equals(keyspaceName) || StringUtils.isBlank(keyspaceName)) + .collect(Collectors.toList()); + } + + return Response.ok(jsonMapper.writeValueAsString(keyspaces), MediaType.APPLICATION_JSON).build(); + }); + } } diff --git a/management-api-server/src/main/java/com/datastax/mgmtapi/resources/NodeOpsResources.java b/management-api-server/src/main/java/com/datastax/mgmtapi/resources/NodeOpsResources.java index 48faf4f1..4a65508b 100644 --- a/management-api-server/src/main/java/com/datastax/mgmtapi/resources/NodeOpsResources.java +++ b/management-api-server/src/main/java/com/datastax/mgmtapi/resources/NodeOpsResources.java @@ -30,6 +30,7 @@ import com.datastax.mgmtapi.CqlService; import com.datastax.mgmtapi.ManagementApplication; +import com.datastax.mgmtapi.resources.models.RepairRequest; import com.datastax.mgmtapi.resources.models.TakeSnapshotRequest; import com.google.common.collect.ImmutableList; @@ -324,6 +325,29 @@ public Response clearSnapshots(@QueryParam(value="snapshotNames") List s }); } + @POST + @Path("/repair") + @Produces(MediaType.TEXT_PLAIN) + @Operation(summary = "Perform a nodetool repair") + public Response repair(RepairRequest repairRequest) + { + return handle(() -> + { + if (repairRequest.keyspaceName == null) + { + return Response.status(Response.Status.BAD_REQUEST).entity("keyspaceName must be specified").build(); + } + cqlService.executePreparedStatement( + app.dbUnixSocketFile, + "CALL NodeOps.repair(?, ?, ?)", + repairRequest.keyspaceName, + repairRequest.tables, + repairRequest.full); + + return Response.ok("OK").build(); + }); + } + static Response handle(Callable action) { try @@ -340,5 +364,4 @@ static Response handle(Callable action) return Response.status(HttpStatus.SC_INTERNAL_SERVER_ERROR).entity(t.getLocalizedMessage()).build(); } } - } diff --git a/management-api-server/src/main/java/com/datastax/mgmtapi/resources/models/RepairRequest.java b/management-api-server/src/main/java/com/datastax/mgmtapi/resources/models/RepairRequest.java new file mode 100644 index 00000000..225b4a22 --- /dev/null +++ b/management-api-server/src/main/java/com/datastax/mgmtapi/resources/models/RepairRequest.java @@ -0,0 +1,73 @@ +/** + * Copyright DataStax, Inc. + * + * Please see the included license file for details. 
+ */ +package com.datastax.mgmtapi.resources.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.List; +import java.util.Objects; + + +public class RepairRequest +{ + + @JsonProperty(value = "keyspace_name", required = true) + public final String keyspaceName; + + @JsonProperty(value = "tables", required = false) + public final List tables; + + @JsonProperty(value = "full", required = false) + public final Boolean full; + + @JsonCreator + public RepairRequest(@JsonProperty("keyspace_name") String keyspaceName, @JsonProperty("tables") List tables, @JsonProperty(value = "full") Boolean full) + { + this.keyspaceName = keyspaceName; + this.tables = tables; + this.full = full == null ? Boolean.FALSE : full; + } + + @Override + public int hashCode() { + int hash = 7; + hash = 89 * hash + Objects.hashCode(this.keyspaceName); + hash = 89 * hash + Objects.hashCode(this.tables); + hash = 89 * hash + Objects.hashCode(this.full); + return hash; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + { + return true; + } + if (obj == null) + { + return false; + } + if (getClass() != obj.getClass()) + { + return false; + } + + final RepairRequest other = (RepairRequest) obj; + if (!Objects.equals(this.keyspaceName, other.keyspaceName)) + { + return false; + } + if (!Objects.equals(this.tables, other.tables)) + { + return false; + } + if (!Objects.equals(this.full, other.full)) + { + return false; + } + return true; + } +} diff --git a/management-api-server/src/main/resources/openapi-configuration.json b/management-api-server/src/main/resources/openapi-configuration.json index b0b01ea8..b1bb045a 100644 --- a/management-api-server/src/main/resources/openapi-configuration.json +++ b/management-api-server/src/main/resources/openapi-configuration.json @@ -7,8 +7,8 @@ "openAPI": { "info": { "version": "0.1", - "title": "Management API for Apache Cassandra", - "description": "This is a Restful service for operating Apache Cassandra. You can find out more about the Management API on [Github](http://github.com/datastax/management-api-for-apache-cassandra)", + "title": "Management API for Apache Cassandraâ„¢", + "description": "This is a Restful service for operating Apache Cassandra. 
You can find out more about the Management API on [Github](http://github.com/k8ssandra/management-api-for-apache-cassandra)", "license": { "name": "Apache 2.0", "url": "http://www.apache.org/licenses/LICENSE-2.0.html" diff --git a/management-api-server/src/test/java/com/datastax/mgmtapi/K8OperatorResourcesTest.java b/management-api-server/src/test/java/com/datastax/mgmtapi/K8OperatorResourcesTest.java index 79ac67d6..cb1ac0ee 100644 --- a/management-api-server/src/test/java/com/datastax/mgmtapi/K8OperatorResourcesTest.java +++ b/management-api-server/src/test/java/com/datastax/mgmtapi/K8OperatorResourcesTest.java @@ -25,6 +25,7 @@ import com.datastax.mgmtapi.resources.models.CompactRequest; import com.datastax.mgmtapi.resources.models.CreateOrAlterKeyspaceRequest; import com.datastax.mgmtapi.resources.models.KeyspaceRequest; +import com.datastax.mgmtapi.resources.models.RepairRequest; import com.datastax.mgmtapi.resources.models.ReplicationSetting; import com.datastax.mgmtapi.resources.models.ScrubRequest; import com.datastax.mgmtapi.resources.models.TakeSnapshotRequest; @@ -1237,4 +1238,77 @@ public void testDeleteSnapshotDetails() throws Exception Assert.assertEquals(HttpStatus.SC_OK, response.getStatus()); verify(context.cqlService).executePreparedStatement(any(), eq("CALL NodeOps.clearSnapshots(?, ?)"), any()); } + + @Test + public void testGetKeyspaces() throws Exception + { + Context context = setup(); + ResultSet mockResultSet = mock(ResultSet.class); + Row mockRow = mock(Row.class); + + MockHttpRequest request = MockHttpRequest.get(ROOT_PATH + "/ops/keyspace"); + when(context.cqlService.executePreparedStatement(any(), anyString())).thenReturn(mockResultSet); + when(mockResultSet.one()).thenReturn(mockRow); + List result = Arrays.asList("system_auth", "system", "system_distributed"); + String resultAsJSON = WriterUtility.asString(result, MediaType.APPLICATION_JSON); + when(mockRow.getList(0, String.class)).thenReturn(result); + + MockHttpResponse response = context.invoke(request); + + Assert.assertEquals(HttpStatus.SC_OK, response.getStatus()); + Assert.assertTrue(response.getContentAsString().contains(resultAsJSON)); + verify(context.cqlService).executePreparedStatement(any(), eq("CALL NodeOps.getKeyspaces()")); + } + + @Test + public void testGetKeyspacesWithFilter() throws Exception + { + Context context = setup(); + ResultSet mockResultSet = mock(ResultSet.class); + Row mockRow = mock(Row.class); + + MockHttpRequest request = MockHttpRequest.get(ROOT_PATH + "/ops/keyspace?keyspaceName=system"); + when(context.cqlService.executePreparedStatement(any(), anyString())).thenReturn(mockResultSet); + when(mockResultSet.one()).thenReturn(mockRow); + List result = Arrays.asList("system_auth", "system", "system_distributed"); + List filteredResult = Arrays.asList("system"); + String filteredResultAsJSON = WriterUtility.asString(filteredResult, MediaType.APPLICATION_JSON); + when(mockRow.getList(0, String.class)).thenReturn(result); + + MockHttpResponse response = context.invoke(request); + + Assert.assertEquals(HttpStatus.SC_OK, response.getStatus()); + Assert.assertTrue(response.getContentAsString().contains(filteredResultAsJSON)); + verify(context.cqlService).executePreparedStatement(any(), eq("CALL NodeOps.getKeyspaces()"), any()); + } + + @Test + public void testRepair() throws Exception + { + Context context = setup(); + when(context.cqlService.executePreparedStatement(any(), anyString())).thenReturn(null); + + RepairRequest repairRequest = new RepairRequest("test_ks", null, 
Boolean.TRUE); + String repairRequestAsJSON = WriterUtility.asString(repairRequest, MediaType.APPLICATION_JSON); + + MockHttpResponse response = postWithBody("/ops/node/repair", repairRequestAsJSON, context); + + assertThat(response.getStatus()).isEqualTo(HttpStatus.SC_OK); + verify(context.cqlService).executePreparedStatement(any(), eq("CALL NodeOps.repair(?, ?, ?)"), eq("test_ks"), eq(null), eq(true)); + } + + @Test + public void testRepairRequiresKeyspaceName() throws Exception + { + Context context = setup(); + when(context.cqlService.executePreparedStatement(any(), anyString())).thenReturn(null); + + RepairRequest repairRequest = new RepairRequest(null, null, Boolean.TRUE); + String repairRequestAsJSON = WriterUtility.asString(repairRequest, MediaType.APPLICATION_JSON); + + MockHttpResponse response = postWithBody("/ops/node/repair", repairRequestAsJSON, context); + + assertThat(response.getStatus()).isEqualTo(HttpStatus.SC_BAD_REQUEST); + assertThat(response.getContentAsString()).isEqualTo("keyspaceName must be specified"); + } } diff --git a/management-api-server/src/test/java/com/datastax/mgmtapi/KeepAliveIT.java b/management-api-server/src/test/java/com/datastax/mgmtapi/KeepAliveIT.java index d5c4fa9b..a9473d69 100644 --- a/management-api-server/src/test/java/com/datastax/mgmtapi/KeepAliveIT.java +++ b/management-api-server/src/test/java/com/datastax/mgmtapi/KeepAliveIT.java @@ -16,10 +16,8 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import com.datastax.mgmtapi.helpers.DockerHelper; import com.datastax.mgmtapi.helpers.IntegrationTestUtils; import com.datastax.mgmtapi.helpers.NettyHttpClient; -import com.datastax.mgmtapi.util.ShellUtils; import org.apache.http.HttpStatus; import org.awaitility.Awaitility; diff --git a/management-api-server/src/test/java/com/datastax/mgmtapi/LifecycleIT.java b/management-api-server/src/test/java/com/datastax/mgmtapi/LifecycleIT.java index 1744f4a6..a3f164a7 100644 --- a/management-api-server/src/test/java/com/datastax/mgmtapi/LifecycleIT.java +++ b/management-api-server/src/test/java/com/datastax/mgmtapi/LifecycleIT.java @@ -210,7 +210,7 @@ public void testSuperuserWasNotSet() throws IOException ResultSet rs = session.execute(String.format("select replication from system_schema.keyspaces where keyspace_name='%s'", systemKeyspace)); Map params = rs.one().getMap("replication", String.class, String.class); - assertEquals(params.get("dc1"), "1"); + assertEquals("1", params.get("dc1")); } } @@ -223,4 +223,75 @@ public void testSuperuserWasNotSet() throws IOException assertTrue(stopped); } } + + @Test + public void testDcReplicationFatorOverrides() throws IOException + { + assumeTrue(IntegrationTestUtils.shouldRun()); + + boolean ready = false; + NettyHttpClient client = null; + try + { + client = getClient(); + + //Configure + boolean configured = client.post(URI.create( BASE_PATH + "/lifecycle/configure?profile=dcrftest").toURL(), + FileUtils.readFileToString(IntegrationTestUtils.getFile(this.getClass(), "dcrf-override-1.yaml")), "application/yaml") + .thenApply(r -> r.status().code() == HttpStatus.SC_OK).join(); + + assertTrue(configured); + + //Startup + boolean started = client.post(URI.create( BASE_PATH + "/lifecycle/start?profile=dcrftest").toURL(), null) + .thenApply(r -> r.status().code() == HttpStatus.SC_CREATED).join(); + + assertTrue(started); + + int tries = 0; + while (tries++ < 10) + { + ready = client.get(URI.create(BASE_PATH + "/probes/readiness").toURL()) + .thenApply(r -> r.status().code() == 
+
+                if (ready)
+                    break;
+
+                Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS);
+            }
+
+            assertTrue(ready);
+
+            //addRole
+            boolean roleAdded = client.post(URI.create(BASE_PATH + "/ops/auth/role?username=dcrftest&password=dcrftest&is_superuser=true&can_login=true").toURL(), null)
+                    .thenApply(r -> r.status().code() == HttpStatus.SC_OK).join();
+
+            // create a session
+            CqlSession session = new TestgCqlSessionBuilder()
+                    .withConfigLoader(DriverConfigLoader.programmaticBuilder()
+                            .withString(AUTH_PROVIDER_CLASS, PlainTextAuthProvider.class.getCanonicalName())
+                            .withString(AUTH_PROVIDER_USER_NAME, "dcrftest")
+                            .withString(AUTH_PROVIDER_PASSWORD, "dcrftest")
+                            .withString(LOAD_BALANCING_LOCAL_DATACENTER, "dc1")
+                            .build())
+                    .addContactPoint(new InetSocketAddress("127.0.0.1", 9042))
+                    .build();
+
+            for (String systemKeyspace : Arrays.asList("system_auth", "system_distributed", "system_traces"))
+            {
+                ResultSet rs = session.execute(String.format("select replication from system_schema.keyspaces where keyspace_name='%s'", systemKeyspace));
+
+                Map<String, String> params = rs.one().getMap("replication", String.class, String.class);
+                assertEquals("1", params.get("dc1"));
+            }
+        }
+        finally
+        {
+            //Stop before next test starts
+            boolean stopped = client.post(URI.create("http://localhost/api/v0/lifecycle/stop").toURL(), null)
+                    .thenApply(r -> r.status().code() == HttpStatus.SC_OK).join();
+
+            assertTrue(stopped);
+        }
+    }
 }
diff --git a/management-api-server/src/test/java/com/datastax/mgmtapi/NonDestructiveOpsIT.java b/management-api-server/src/test/java/com/datastax/mgmtapi/NonDestructiveOpsIT.java
index cbf9169c..2233ef97 100644
--- a/management-api-server/src/test/java/com/datastax/mgmtapi/NonDestructiveOpsIT.java
+++ b/management-api-server/src/test/java/com/datastax/mgmtapi/NonDestructiveOpsIT.java
@@ -27,9 +27,13 @@
 import com.datastax.mgmtapi.resources.models.CompactRequest;
 import com.datastax.mgmtapi.resources.models.CreateOrAlterKeyspaceRequest;
 import com.datastax.mgmtapi.resources.models.KeyspaceRequest;
+import com.datastax.mgmtapi.resources.models.RepairRequest;
 import com.datastax.mgmtapi.resources.models.ReplicationSetting;
 import com.datastax.mgmtapi.resources.models.ScrubRequest;
 import com.datastax.mgmtapi.resources.models.TakeSnapshotRequest;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
 import io.netty.handler.codec.http.FullHttpResponse;
 import org.apache.http.HttpStatus;
 import org.apache.http.client.utils.URIBuilder;
@@ -469,6 +473,39 @@ public void testAlterKeyspace() throws IOException, URISyntaxException
         assertTrue(requestSuccessful);
     }
 
+    @Test
+    public void testGetKeyspaces() throws IOException, URISyntaxException
+    {
+        assumeTrue(IntegrationTestUtils.shouldRun());
+        ensureStarted();
+
+        NettyHttpClient client = new NettyHttpClient(BASE_URL);
+        String localDc = client.get(new URIBuilder(BASE_PATH + "/metadata/localdc").build().toURL())
+                .thenApply(this::responseAsString).join();
+
+        String ks = "getkeyspacestest";
+        createKeyspace(client, localDc, ks);
+
+
+        URI uri = new URIBuilder(BASE_PATH + "/ops/keyspace").build();
+        String response = client.get(uri.toURL())
+                .thenApply(this::responseAsString).join();
+        assertNotNull(response);
+        assertNotEquals("", response);
+        assertTrue(response.contains(ks));
+
+        URI uriFilter = new URIBuilder(BASE_PATH + "/ops/keyspace?keyspaceName=" + ks).build();
+        String responseFilter = client.get(uriFilter.toURL())
+                .thenApply(this::responseAsString).join();
+        assertNotNull(responseFilter);
+        assertNotEquals("", responseFilter);
+
+        final ObjectMapper jsonMapper = new ObjectMapper();
+        List<String> keyspaces = jsonMapper.readValue(responseFilter, new TypeReference<List<String>>(){});
+        assertEquals(1, keyspaces.size());
+        assertEquals(ks, keyspaces.get(0));
+    }
+
     @Test
     public void testGetSnapshotDetails() throws IOException, URISyntaxException, InterruptedException
     {
@@ -528,6 +565,25 @@ public void testGetSnapshotDetails() throws IOException, URISyntaxException, Int
         assertTrue(entities.isEmpty());
     }
 
+    @Test
+    public void testRepair() throws IOException, URISyntaxException, InterruptedException
+    {
+        assumeTrue(IntegrationTestUtils.shouldRun());
+        ensureStarted();
+
+        NettyHttpClient client = new NettyHttpClient(BASE_URL);
+
+        URIBuilder uriBuilder = new URIBuilder(BASE_PATH + "/ops/node/repair");
+        URI repairUri = uriBuilder.build();
+
+        // execute repair
+        RepairRequest repairRequest = new RepairRequest("system_auth", null, Boolean.TRUE);
+        String requestAsJSON = WriterUtility.asString(repairRequest, MediaType.APPLICATION_JSON);
+
+        boolean repairSuccessful = client.post(repairUri.toURL(), requestAsJSON).thenApply(r -> r.status().code() == HttpStatus.SC_OK).join();
+        assertTrue("Repair request was not successful", repairSuccessful);
+    }
+
     private void createKeyspace(NettyHttpClient client, String localDc, String keyspaceName) throws IOException, URISyntaxException
     {
         CreateOrAlterKeyspaceRequest request = new CreateOrAlterKeyspaceRequest(keyspaceName, Arrays.asList(new ReplicationSetting(localDc, 1)));
diff --git a/management-api-server/src/test/java/com/datastax/mgmtapi/helpers/DockerHelper.java b/management-api-server/src/test/java/com/datastax/mgmtapi/helpers/DockerHelper.java
index 30b845b8..35318572 100644
--- a/management-api-server/src/test/java/com/datastax/mgmtapi/helpers/DockerHelper.java
+++ b/management-api-server/src/test/java/com/datastax/mgmtapi/helpers/DockerHelper.java
@@ -26,7 +26,10 @@
 import org.slf4j.LoggerFactory;
 
 import com.github.dockerjava.api.DockerClient;
+import com.github.dockerjava.api.async.ResultCallback.Adapter;
+import com.github.dockerjava.api.command.BuildImageResultCallback;
 import com.github.dockerjava.api.command.CreateContainerResponse;
+import com.github.dockerjava.api.command.InspectExecCmd;
 import com.github.dockerjava.api.command.InspectExecResponse;
 import com.github.dockerjava.api.command.ListContainersCmd;
 import com.github.dockerjava.api.command.ListImagesCmd;
@@ -43,9 +46,8 @@
 import com.github.dockerjava.core.DefaultDockerClientConfig;
 import com.github.dockerjava.core.DockerClientBuilder;
 import com.github.dockerjava.core.DockerClientConfig;
-import com.github.dockerjava.core.command.BuildImageResultCallback;
-import com.github.dockerjava.core.command.ExecStartResultCallback;
-import com.github.dockerjava.core.command.LogContainerResultCallback;
+import com.github.dockerjava.zerodep.ZerodepDockerHttpClient;
+import java.io.Closeable;
 
 import org.apache.http.HttpStatus;
 
@@ -56,6 +58,17 @@ public class DockerHelper
     // Keep track of Docker images built during test runs
     private static final Set<String> IMAGE_NAMES = new HashSet<>();
 
+    private static final DockerClientConfig CONFIG = DefaultDockerClientConfig.createDefaultConfigBuilder().build();
+
+    private static final DockerClient DOCKER_CLIENT = DockerClientBuilder.getInstance(CONFIG)
+            .withDockerHttpClient(
+                    new ZerodepDockerHttpClient.Builder()
+                            .dockerHost(CONFIG.getDockerHost())
+                            .sslConfig(CONFIG.getSSLConfig())
+                            .maxConnections(100)
+                            .build())
+            .build();
+
     // Cleanup hook to remove Docker images built for tests
     static {
         Runtime.getRuntime().addShutdownHook(new Thread() {
@@ -63,12 +76,11 @@ public void run() {
             if (!Boolean.getBoolean("skip_test_docker_image_cleanup")) {
                 logger.info("Cleaning up test Docker images");
-                DockerClient dockerClient = DockerClientBuilder.getInstance(DefaultDockerClientConfig.createDefaultConfigBuilder().build()).build();
 
                 for (String imageName : IMAGE_NAMES) {
-                    Image image = searchImages(imageName, dockerClient);
+                    Image image = searchImages(imageName);
                     if (image != null) {
                         try {
-                            dockerClient.removeImageCmd(image.getId()).exec();
+                            DOCKER_CLIENT.removeImageCmd(image.getId()).exec();
                         } catch (Throwable e) {
                             logger.info(String.format("Removing image %s did not complete cleanly", imageName));
                         }
@@ -82,44 +94,23 @@ public void run() {
             }
         });
     }
 
-    private DockerClientConfig config;
-    private DockerClient dockerClient;
     private String container;
     private File dataDir;
 
     public DockerHelper(File dataDir) {
-        this.config = DefaultDockerClientConfig.createDefaultConfigBuilder().build();
-        this.dockerClient = DockerClientBuilder.getInstance(config).build();
         this.dataDir = dataDir;
     }
 
     public String getIpAddressOfContainer() {
-        return dockerClient.inspectContainerCmd(container).exec().getNetworkSettings().getIpAddress();
+        return DOCKER_CLIENT.inspectContainerCmd(container).exec().getNetworkSettings().getIpAddress();
     }
 
     public void startManagementAPI(String version, List<String> envVars)
     {
-        File baseDir = new File(System.getProperty("dockerFileRoot","."));
-        File dockerFile;
-        String target;
-        boolean useBuildx;
-
-        if ("3_11".equals(version))
-        {
-            dockerFile = Paths.get(baseDir.getPath(), "Dockerfile-oss").toFile();
-            target = "oss311";
-            useBuildx = true;
-        }
-        else
-        {
-            dockerFile = Paths.get(baseDir.getPath(), "Dockerfile-" + version).toFile();
-            target = null;
-            useBuildx = false;
-        }
-
-        if (!dockerFile.exists())
-            throw new RuntimeException("Missing " + dockerFile.getAbsolutePath());
+        DockerBuildConfig config = DockerBuildConfig.getConfig(version);
+        if (!config.dockerFile.exists())
+            throw new RuntimeException("Missing " + config.dockerFile.getAbsolutePath());
 
         String name = "mgmtapi";
         List<Integer> ports = Arrays.asList(9042, 8080);
@@ -130,7 +121,7 @@ public void startManagementAPI(String version, List<String> envVars)
         if (envVars != null)
             envList.addAll(envVars);
 
-        this.container = startDocker(dockerFile, baseDir, target, name, ports, volumeDescList, envList, cmdList, useBuildx);
+        this.container = startDocker(config, name, ports, volumeDescList, envList, cmdList);
 
         waitForPort("localhost",8080, Duration.ofMillis(50000), logger, false);
     }
@@ -140,8 +131,8 @@ public String runCommand(String... commandAndArgs)
         if (container == null)
             throw new IllegalStateException("Container not started");
 
-        String execId = dockerClient.execCreateCmd(container).withCmd(commandAndArgs).withAttachStderr(true).withAttachStdout(true).exec().getId();
-        dockerClient.execStartCmd(execId).exec(null);
+        String execId = DOCKER_CLIENT.execCreateCmd(container).withCmd(commandAndArgs).withAttachStderr(true).withAttachStdout(true).exec().getId();
+        DOCKER_CLIENT.execStartCmd(execId).exec(new Adapter<Frame>());
 
         return execId;
     }
@@ -151,22 +142,28 @@ public void tailSystemLog(int numberOfLines)
         if (container == null)
             throw new IllegalStateException("Container not started");
 
-        String execId = dockerClient.execCreateCmd(container).withTty(true).withCmd("tail", "-n " + numberOfLines, "/var/log/cassandra/system.log").withAttachStderr(true).withAttachStdout(true).exec().getId();
-        dockerClient.execStartCmd(execId).withTty(true).exec(new ExecStartResultCallback(System.out, System.err) {});
+        String execId = DOCKER_CLIENT.execCreateCmd(container).withTty(true).withCmd("tail", "-n " + numberOfLines, "/var/log/cassandra/system.log").withAttachStderr(true).withAttachStdout(true).exec().getId();
+        DOCKER_CLIENT.execStartCmd(execId).withTty(true).exec(new Adapter<Frame>() {
+            @Override
+            public void onNext(Frame item) {
+                System.out.print(new String(item.getPayload()));
+            }
+        });
     }
 
     public void waitTillFinished(String execId)
     {
-        InspectExecResponse r = dockerClient.inspectExecCmd(execId).exec();
-
+        InspectExecCmd cmd = DOCKER_CLIENT.inspectExecCmd(execId);
+        InspectExecResponse r = cmd.exec();
         while (r.isRunning())
         {
             Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
             logger.info("SLEEPING");
+            r = cmd.exec();
         }
 
-        if (r.getExitCode() != null && r.getExitCode() != 0)
-            throw new RuntimeException("Process error code " + r.getExitCode());
+        if (r.getExitCodeLong() != null && r.getExitCodeLong() != 0L)
+            throw new RuntimeException("Process error code " + r.getExitCodeLong());
 
         logger.info("PROCESS finished!");
     }
@@ -210,15 +207,15 @@ public boolean started()
         return container != null;
     }
 
-    private void buildImageWithBuildx(File dockerFile, File baseDir, String target, String name) throws Exception {
+    private void buildImageWithBuildx(DockerBuildConfig config, String name) throws Exception {
         ProcessBuilder pb = new ProcessBuilder("docker", "buildx", "build",
                 "--load",
                 "--progress", "plain",
                 "--tag", name,
-                "--file", dockerFile.getPath(),
-                "--target", target,
+                "--file", config.dockerFile.getPath(),
+                "--target", config.target,
                 "--platform", "linux/amd64",
-                baseDir.getPath());
+                config.baseDir.getPath());
 
         Process p = pb.inheritIO().start();
         int exitCode = p.waitFor();
@@ -229,9 +226,9 @@ private void buildImageWithBuildx(File dockerFile, File baseDir, String target,
         }
     }
 
-    private String startDocker(File dockerFile, File baseDir, String target, String name, List<Integer> ports, List<String> volumeDescList, List<String> envList, List<String> cmdList, boolean useBuildx)
+    private String startDocker(DockerBuildConfig config, String name, List<Integer> ports, List<String> volumeDescList, List<String> envList, List<String> cmdList)
     {
-        ListContainersCmd listContainersCmd = dockerClient.listContainersCmd();
+        ListContainersCmd listContainersCmd = DOCKER_CLIENT.listContainersCmd();
         listContainersCmd.getFilters().put("name", Arrays.asList(name));
         try
         {
@@ -240,8 +237,8 @@ private String startDocker(File dockerFile, File baseDir, String target, String
             {
                 String id = namedContainer.getId();
                 logger.info("Removing container: " + id);
-                dockerClient.stopContainerCmd(id).exec();
-                dockerClient.removeContainerCmd(id).exec();
+                DOCKER_CLIENT.stopContainerCmd(id).exec();
+                DOCKER_CLIENT.removeContainerCmd(id).exec();
             }
         }
         catch (Exception e)
@@ -260,8 +257,8 @@ private String startDocker(File dockerFile, File baseDir, String target, String
         }
 
         // see if we have the image already built
-        final String imageName = String.format("%s-%s-test", name, dockerFile.getName()).toLowerCase();
-        Image image = searchImages(imageName, dockerClient);
+        final String imageName = String.format("%s-%s-test", name, config.dockerFile.getName()).toLowerCase();
+        Image image = searchImages(imageName);
         if (image == null)
         {
             BuildImageResultCallback callback = new BuildImageResultCallback()
@@ -276,12 +273,12 @@ public void onNext(BuildResponseItem item)
                 }
             };
 
-            logger.info(String.format("Building container: name=%s, Dockerfile=%s, image name=%s", name, dockerFile.getPath(), imageName));
-            if (useBuildx)
+            logger.info(String.format("Building container: name=%s, Dockerfile=%s, image name=%s", name, config.dockerFile.getPath(), imageName));
+            if (config.useBuildx)
             {
                 try
                 {
-                    buildImageWithBuildx(dockerFile, baseDir, target, imageName);
+                    buildImageWithBuildx(config, imageName);
                 }
                 catch (Exception e)
                 {
@@ -291,9 +288,9 @@ public void onNext(BuildResponseItem item)
             }
             else
             {
-                dockerClient.buildImageCmd()
-                        .withBaseDirectory(baseDir)
-                        .withDockerfile(dockerFile)
+                DOCKER_CLIENT.buildImageCmd()
+                        .withBaseDirectory(config.baseDir)
+                        .withDockerfile(config.dockerFile)
                         .withTags(Sets.newHashSet(imageName))
                         .exec(callback)
                         .awaitImageId();
@@ -328,7 +325,7 @@ public void onNext(BuildResponseItem item)
         CreateContainerResponse containerResponse;
         logger.warn("Binding a local temp directory to /var/log/cassandra can cause permissions issues on startup. Skipping volume bindings.");
 
-        containerResponse = dockerClient.createContainerCmd(imageName)
+        containerResponse = DOCKER_CLIENT.createContainerCmd(imageName)
                 .withCmd(cmdList)
                 .withEnv(envList)
                 .withExposedPorts(tcpPorts)
@@ -343,13 +340,18 @@ public void onNext(BuildResponseItem item)
                 .exec();
 
-        dockerClient.startContainerCmd(containerResponse.getId()).exec();
-        dockerClient.logContainerCmd(containerResponse.getId()).withStdOut(true).withStdErr(true).withFollowStream(true).withTailAll().exec(new LogContainerResultCallback() {
+        DOCKER_CLIENT.startContainerCmd(containerResponse.getId()).exec();
+        DOCKER_CLIENT.logContainerCmd(containerResponse.getId()).withStdOut(true).withStdErr(true).withFollowStream(true).withTailAll().exec(new Adapter<Frame>() {
             @Override
             public void onNext(Frame item)
            {
                 System.out.print(new String(item.getPayload()));
            }
+
+            @Override
+            public void onStart(Closeable stream) {
+                System.out.println("Starting container " + name);
+            }
         });
 
         return containerResponse.getId();
@@ -357,7 +359,7 @@ public void onNext(Frame item)
 
     private Container searchContainer(String name)
     {
-        ListContainersCmd listContainersCmd = dockerClient.listContainersCmd().withStatusFilter(Collections.singletonList("running"));
+        ListContainersCmd listContainersCmd = DOCKER_CLIENT.listContainersCmd().withStatusFilter(Collections.singletonList("running"));
         listContainersCmd.getFilters().put("name", Arrays.asList(name));
         List<Container> runningContainers = null;
         try {
@@ -377,9 +379,9 @@ private Container searchContainer(String name)
         return null;
     }
 
-    private static Image searchImages(String imageName, DockerClient dockerClient)
+    private static Image searchImages(String imageName)
     {
-        ListImagesCmd listImagesCmd = dockerClient.listImagesCmd();
+        ListImagesCmd listImagesCmd = DOCKER_CLIENT.listImagesCmd();
         List<Image> images = null;
         logger.info(String.format("Searching for image named %s", imageName));
         try {
@@ -415,9 +417,39 @@ public void stopManagementAPI()
     {
         if (container != null)
         {
-            dockerClient.stopContainerCmd(container).exec();
-            dockerClient.removeContainerCmd(container).exec();
+            DOCKER_CLIENT.stopContainerCmd(container).exec();
+            DOCKER_CLIENT.removeContainerCmd(container).exec();
             container = null;
         }
     }
+
+    private static class DockerBuildConfig
+    {
+        static final File baseDir = new File(System.getProperty("dockerFileRoot","."));
+
+        File dockerFile;
+        String target = null;
+        boolean useBuildx = false;
+
+        static DockerBuildConfig getConfig(String version)
+        {
+            DockerBuildConfig config = new DockerBuildConfig();
+            switch (version) {
+                case "3_11" :
+                    config.dockerFile = Paths.get(baseDir.getPath(), "Dockerfile-oss").toFile();
+                    config.target = "oss311";
+                    config.useBuildx = true;
+                    break;
+                case "4_0" :
+                    config.dockerFile = Paths.get(baseDir.getPath(), "Dockerfile-4_0").toFile();
+                    config.target = "oss40";
+                    config.useBuildx = true;
+                    break;
+                default : // DSE 6.8
+                    config.dockerFile = Paths.get(baseDir.getPath(), "Dockerfile-dse-68").toFile();
+                    break;
+            }
+            return config;
+        }
+    }
 }
diff --git a/management-api-server/src/test/resources/com/datastax/mgmtapi/dcrf-override-1.yaml b/management-api-server/src/test/resources/com/datastax/mgmtapi/dcrf-override-1.yaml
new file mode 100644
index 00000000..4ea15bb1
--- /dev/null
+++ b/management-api-server/src/test/resources/com/datastax/mgmtapi/dcrf-override-1.yaml
@@ -0,0 +1,62 @@
+apiVersion: datastax.com/v1alpha1
+kind: DseDatacenter
+metadata:
+  name: dc1
+spec:
+  clusterName: testcluster
+  size: 3
+  resources:
+    requests:
+      # TODO this should be a template var and dependent
+      # on the t-shirt size of the cluster
+      memory: 1000
+      cpu: 1000
+  storageClaim:
+    storageclassname: idk
+    resources:
+      requests:
+        storage: idk
+  racks:
+    - name: rack0
+      zone: idk
+  allowMultipleNodesPerWorker: false
+  serviceAccount: "dse"
+  parked: false
+  canaryUpgrade: false
+  configBuilderImage: idk
+  dseVersion: "6.8.0"
+  dseImage: idk
+  dseSuperuserSecret: dse-credentials
+  managementApiAuth:
+    # insecure: {}
+    manual:
+      clientSecretName: mgmt-api-client-credentials
+      serverSecretName: mgmt-api-server-credentials
+  config:
+    node-topology:
+      dc: dc1
+      rack: rack1
+    prometheus:
+      enabled: true
+      port: 9103
+      staleness-delta: 300
+    cassandra-yaml:
+      authenticator: org.apache.cassandra.auth.PasswordAuthenticator
+      authorizer: org.apache.cassandra.auth.CassandraAuthorizer
+      auto_snapshot: false # default is true, but we don't have a good way to use or clean up snapshots when someone drops a table
+      compaction_throughput_mb_per_sec: 64 # default is 16
+      concurrent_compactors: 2 # this is based on tuning for four cores / 15 GB
+      file_cache_size_in_mb: 500 # this is based on tuning for four cores / 15 GB
+      hinted_handoff_throttle_in_kb: 512 # this is based on tuning for four cores / 15 GB
+      memtable_flush_writers: 2 # this is based on tuning for four cores / 15 GB
+      num_tokens: 256 # default is 1
+      phi_convict_threshold: 12 # default is 8
+      role_manager: org.apache.cassandra.auth.CassandraRoleManager
+      endpoint_snitch: org.apache.cassandra.locator.GossipingPropertyFileSnitch
+    jvm-server-options:
+      heap_new_size: 100M
+      max_heap_size: 500M
+      additional-jvm-opts:
+        - "-Dcassandra.skip_default_role_setup=true"
+        - "-Dcassandra.superuser_setup_delay_ms=100"
+        - "-Dcassandra.system_distributed_replication=dc1:1"
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index 677af333..c011e6e2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -11,6 +11,7 @@
         build_version.sh
         0.1.0-SNAPSHOT
+        4.11.1
diff --git a/scripts/docker-entrypoint.sh b/scripts/docker-entrypoint.sh
index 4e7d52b9..b8e40467 100755
--- a/scripts/docker-entrypoint.sh
+++ b/scripts/docker-entrypoint.sh
@@ -68,10 +68,11 @@ if [ "$1" = 'mgmtapi' ]; then
         echo "data_dir_max_size_in_mb: 100" >> ${MCAC_PATH}/config/metric-collector.yaml
     fi
 
-    if ! grep -qxF "JVM_OPTS=\"\$JVM_OPTS -javaagent:${MAAC_PATH}/datastax-mgmtapi-agent-0.1.0-SNAPSHOT.jar\"" < ${CASSANDRA_CONF}/cassandra-env.sh ; then
+    MGMT_AGENT_JAR="$(find "${MAAC_PATH}" -name *datastax-mgmtapi-agent*.jar)"
+    if ! grep -qxF "JVM_OPTS=\"\$JVM_OPTS -javaagent:${MGMT_AGENT_JAR}\"" < ${CASSANDRA_CONF}/cassandra-env.sh ; then
         # ensure newline at end of file
         echo "" >> ${CASSANDRA_CONF}/cassandra-env.sh
-        echo "JVM_OPTS=\"\$JVM_OPTS -javaagent:${MAAC_PATH}/datastax-mgmtapi-agent-0.1.0-SNAPSHOT.jar\"" >> ${CASSANDRA_CONF}/cassandra-env.sh
+        echo "JVM_OPTS=\"\$JVM_OPTS -javaagent:${MGMT_AGENT_JAR}\"" >> ${CASSANDRA_CONF}/cassandra-env.sh
     fi
 
     # Set this if you want to ignore default env variables, i.e. when running inside an operator
@@ -168,7 +169,7 @@ if [ "$1" = 'mgmtapi' ]; then
         MGMT_API_ARGS="$MGMT_API_ARGS $MGMT_API_PID_FILE"
     fi
 
-    MGMT_API_CASSANDRA_HOME="--cassandra-home /var/lib/cassandra/"
+    MGMT_API_CASSANDRA_HOME="--cassandra-home ${CASSANDRA_HOME}"
     MGMT_API_ARGS="$MGMT_API_ARGS $MGMT_API_CASSANDRA_HOME"
 
     if [ ! -z "$MGMT_API_NO_KEEP_ALIVE" ]; then
@@ -178,8 +179,10 @@ if [ "$1" = 'mgmtapi' ]; then
 
     MGMT_API_JAR="$(find "${MAAC_PATH}" -name *server*.jar)"
 
-    echo "Running" java ${MGMT_API_JAVA_OPTS} -Xms128m -Xmx128m -jar "$MGMT_API_JAR" $MGMT_API_ARGS
-    java ${MGMT_API_JAVA_OPTS} -Xms128m -Xmx128m -jar "$MGMT_API_JAR" $MGMT_API_ARGS
+    # use default of 128m heap if env variable not set
+    : "${MGMT_API_HEAP_SIZE:=128m}"
+    echo "Running" java ${MGMT_API_JAVA_OPTS} -Xms${MGMT_API_HEAP_SIZE} -Xmx${MGMT_API_HEAP_SIZE} -jar "$MGMT_API_JAR" $MGMT_API_ARGS
+    java ${MGMT_API_JAVA_OPTS} -Xms${MGMT_API_HEAP_SIZE} -Xmx${MGMT_API_HEAP_SIZE} -jar "$MGMT_API_JAR" $MGMT_API_ARGS
 fi
 
 exec "$@"
diff --git a/scripts/dse-6.8-docker-entrypoint.sh b/scripts/dse-6.8-docker-entrypoint.sh
index 3d2f216a..046423f9 100755
--- a/scripts/dse-6.8-docker-entrypoint.sh
+++ b/scripts/dse-6.8-docker-entrypoint.sh
@@ -42,10 +42,11 @@ _sed-in-place() {
 if [ "$1" = 'mgmtapi' ]; then
     echo "Starting Management API"
 
-    if ! grep -qxF "JVM_OPTS=\"\$JVM_OPTS -javaagent:${MAAC_PATH}/datastax-mgmtapi-agent-0.1.0-SNAPSHOT.jar\"" < ${CASSANDRA_CONF}/cassandra-env.sh ; then
+    MGMT_AGENT_JAR="$(find "${MAAC_PATH}" -name *datastax-mgmtapi-agent*.jar)"
+    if ! grep -qxF "JVM_OPTS=\"\$JVM_OPTS -javaagent:${MGMT_AGENT_JAR}\"" < ${CASSANDRA_CONF}/cassandra-env.sh ; then
         # ensure newline at end of file
         echo "" >> ${CASSANDRA_CONF}/cassandra-env.sh
-        echo "JVM_OPTS=\"\$JVM_OPTS -javaagent:${MAAC_PATH}/datastax-mgmtapi-agent-0.1.0-SNAPSHOT.jar\"" >> ${CASSANDRA_CONF}/cassandra-env.sh
+        echo "JVM_OPTS=\"\$JVM_OPTS -javaagent:${MGMT_AGENT_JAR}\"" >> ${CASSANDRA_CONF}/cassandra-env.sh
     fi
 
     CASSANDRA_NATIVE_TRANSPORT_ADDRESS='0.0.0.0'
@@ -146,8 +147,10 @@ fi
 
     MGMT_API_JAR="$(find "${MAAC_PATH}" -name *server*.jar)"
 
-    echo "Running" java ${MGMT_API_JAVA_OPTS} -Xms128m -Xmx128m -jar "$MGMT_API_JAR" $MGMT_API_ARGS
-    exec java ${MGMT_API_JAVA_OPTS} -Xms128m -Xmx128m -jar "$MGMT_API_JAR" $MGMT_API_ARGS
+    # use default of 128m heap if env variable not set
+    : "${MGMT_API_HEAP_SIZE:=128m}"
+    echo "Running" java ${MGMT_API_JAVA_OPTS} -Xms${MGMT_API_HEAP_SIZE} -Xmx${MGMT_API_HEAP_SIZE} -jar "$MGMT_API_JAR" $MGMT_API_ARGS
+    exec java ${MGMT_API_JAVA_OPTS} -Xms${MGMT_API_HEAP_SIZE} -Xmx${MGMT_API_HEAP_SIZE} -jar "$MGMT_API_JAR" $MGMT_API_ARGS
 fi