Commit a1b8cb15 authored by Patrick Einheber

Merge branch 'confluence-lts-alpine-9.2.0' into 'development'

Confluence lts alpine 9.2.0

See merge request !30
parents 3eccd67c c87ec9a1
# Default ignored files
/shelf/
/workspace.xml
<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" languageLevel="JDK_18" default="true" project-jdk-name="18" project-jdk-type="JavaSDK">
<output url="file://$PROJECT_DIR$/out" />
</component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/confluence-lts-alpine.iml" filepath="$PROJECT_DIR$/.idea/confluence-lts-alpine.iml" />
</modules>
</component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
</component>
</project>
ARG BASE_REGISTRY=registry1.dso.mil
ARG BASE_IMAGE=ironbank/opensource/alpinelinux/alpine
ARG BASE_TAG=3.21
FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}
ARG CONFLUENCE_VERSION=9.2.0
ENV APP_NAME confluence
ENV RUN_USER confluence
ENV RUN_GROUP confluence
ENV RUN_UID 2002
ENV RUN_GID 2002
ENV CONFLUENCE_HOME /var/atlassian/application-data/confluence
ENV CONFLUENCE_INSTALL_DIR /opt/atlassian/confluence
ENV CONFLUENCE_LOG_STDOUT false
ENV CONFLUENCE_VERSION ${CONFLUENCE_VERSION}
WORKDIR $CONFLUENCE_HOME
USER root
COPY atlassian-confluence-${CONFLUENCE_VERSION}.tar.gz /opt/
COPY scripts/entrypoint.py scripts/entrypoint_helpers.py scripts/shutdown-wait.sh /
COPY scripts/support /opt/atlassian/support
COPY scripts/config/* /opt/atlassian/etc/
RUN apk update && apk upgrade --no-cache \
&& apk add bash fontconfig python3 py3-jinja2 openjdk17-jdk tar gcompat dumb-init tini \
&& addgroup -g ${RUN_GID} ${RUN_GROUP} \
&& adduser -u ${RUN_UID} -G ${RUN_GROUP} -h ${CONFLUENCE_HOME} -s /bin/bash -S ${RUN_USER} \
&& echo PATH=$PATH > /etc/environment \
&& mkdir -p ${CONFLUENCE_INSTALL_DIR} \
&& tar -xzf /opt/atlassian-confluence-${CONFLUENCE_VERSION}.tar.gz --strip-components 1 -C ${CONFLUENCE_INSTALL_DIR} \
&& chmod -R "u=rwX,g=rX,o=rX" ${CONFLUENCE_INSTALL_DIR}/ \
&& chown -R ${RUN_USER}:${RUN_GROUP} ${CONFLUENCE_INSTALL_DIR}/ \
&& chown -R ${RUN_USER}:${RUN_GROUP} ${CONFLUENCE_HOME} \
&& sed -i -e 's/-Xms\([0-9]\+[kmg]\) -Xmx\([0-9]\+[kmg]\)/-Xms\${JVM_MINIMUM_MEMORY:=\1} -Xmx\${JVM_MAXIMUM_MEMORY:=\2} -Dconfluence.home=\${CONFLUENCE_HOME}/g' ${CONFLUENCE_INSTALL_DIR}/bin/setenv.sh \
&& sed -i -e 's/-XX:ReservedCodeCacheSize=\([0-9]\+[kmg]\)/-XX:ReservedCodeCacheSize=${JVM_RESERVED_CODE_CACHE_SIZE:=\1}/g' ${CONFLUENCE_INSTALL_DIR}/bin/setenv.sh \
&& sed -i -e 's/export CATALINA_OPTS/CATALINA_OPTS="\${CATALINA_OPTS} \${JVM_SUPPORT_RECOMMENDED_ARGS} -DConfluenceHomeLogAppender.disabled=${CONFLUENCE_LOG_STDOUT}"\n\nexport CATALINA_OPTS/g' ${CONFLUENCE_INSTALL_DIR}/bin/setenv.sh \
&& mkdir -p /opt/java/openjdk/lib/fonts/fallback/ \
&& ln -sf /usr/share/fonts/truetype/noto/* /opt/java/openjdk/lib/fonts/fallback/ \
&& chmod a+x /entrypoint.py \
&& chown -R ${RUN_USER}:${RUN_GROUP} /opt/ /entrypoint.py /entrypoint_helpers.py \
&& chmod 0750 ${CONFLUENCE_HOME} \
&& apk cache clean \
&& rm -rf /opt/atlassian-confluence-${CONFLUENCE_VERSION}.tar.gz \
&& rm -rf /var/cache/apk/*
# Must be declared after setting perms
VOLUME ["${CONFLUENCE_HOME}"]
USER ${RUN_USER}
# Expose HTTP port
EXPOSE 8090
# Expose Synchrony port
EXPOSE 8091
CMD ["/entrypoint.py"]
ENTRYPOINT ["/usr/bin/tini", "--"]
A commercial license will be required to run your Confluence instance.
To purchase a license please contact the Atlassian Government Team at: govsales@atlassian.com
# Atlassian Confluence
![Atlassian Confluence](https://wac-cdn.atlassian.com/dam/jcr:5d1374c2-276f-4bca-9ce4-813aba614b7a/confluence-icon-gradient-blue.svg?cdnVersion=696)
Confluence is where you create, organise and discuss work with your
team. Capture the knowledge that's too often lost in email inboxes and shared
network drives in Confluence - where it's easy to find, use, and update. Give
every team, project, or department its own space to create the things they need,
whether it's meeting notes, product requirements, file lists, or project plans.
You can get more done in Confluence.
Learn more about Confluence: <https://www.atlassian.com/software/confluence>
**NOTE:** A CNAP / VPN / private network (air-gapped, etc.) must be used in front of this container (ingress/egress enforcement) due to the volume of critical security findings for environments above IL2.
# Overview
This Docker container makes it easy to get an instance of Confluence up and
running.
# Support
**ALL** support requests **MUST** go through the portals below. Only support requests with a valid license (SEN) are entitled to support. No other method of support is monitored. Please follow the links below to receive support for these products.
For product support, go to:
* https://support.atlassian.com/confluence-server/
**NOTE:** Failure to use the above links to request support may result in delays in obtaining support. The queues outside of the above links are **NOT** monitored and are not official Atlassian support channels. Please use the official support channels above.
# Patch Policy
Atlassian's security bugfix policy is located here: [Security Bugfix Policy | Atlassian](https://www.atlassian.com/trust/security/bug-fix-policy). The extended resolution timeframes apply to this product. An active license is required to obtain support and patches.
# Product Release Notes
Release notes are available here: [Release notes | Confluence Data Center](https://confluence.atlassian.com/doc/issues-resolved-in-7-12-3-1072475372.html)
# Quick Start
For the directory in the environment variable `CONFLUENCE_HOME` that is used
to store Confluence data (amongst other things), we recommend mounting a host
directory as a [data volume][1].
Additionally, if running Confluence in a Data Center cluster it is required that a
shared filesystem is mounted. The mountpoint (inside the container) can be
configured with `CONFLUENCE_SHARED_HOME`.
Start Atlassian Confluence:
    docker run -v /data/your-confluence-home:/var/atlassian/application-data/confluence --name="confluence" -d -p 8090:8090 -p 8091:8091 atlassian/confluence
**Success**. Confluence is now available on <http://localhost:8090>*
Please ensure your container has the necessary resources allocated to it. We
recommend 2GiB of memory allocated to accommodate the application server. See
[Supported Platforms][3] for further information.
_* Note: If you are using `docker-machine` on Mac OS X, please use `open http://$(docker-machine ip default):8090` instead._
# Configuring Confluence
This Docker image is intended to be configured from its environment; the
provided information is used to generate the application configuration files
from templates. This allows containers to be repeatably created and destroyed
on-the-fly, as required in advanced cluster configurations. Most aspects of the
deployment can be configured in this manner; the necessary environment variables
are documented below. However, if your particular deployment scenario is not
covered by these settings, it is possible to override the provided templates
with your own; see the section _Advanced Configuration_ below.
## Memory / Heap Size
If you need to override Confluence Server's default memory allocation, you can
control the minimum heap (Xms) and maximum heap (Xmx) via the below environment
variables.
* `JVM_MINIMUM_MEMORY` (default: 1024m)
The minimum heap size of the JVM
* `JVM_MAXIMUM_MEMORY` (default: 1024m)
The maximum heap size of the JVM
* `JVM_RESERVED_CODE_CACHE_SIZE` (default: 256m)
The reserved code cache size of the JVM
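
For example, to run with a fixed 2 GiB heap (a sketch only; the volume,
container, and upstream `atlassian/confluence` image names follow the examples
elsewhere in this README and should be adjusted for your registry):

    docker run -e JVM_MINIMUM_MEMORY=2048m -e JVM_MAXIMUM_MEMORY=2048m \
        -v confluenceVolume:/var/atlassian/application-data/confluence \
        --name="confluence" -d -p 8090:8090 -p 8091:8091 atlassian/confluence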
## Tomcat and Reverse Proxy Settings
If Confluence is run behind a reverse proxy server (e.g. a load-balancer or
nginx server), then you need to specify extra options to make Confluence aware
of the setup. They can be controlled via the below environment variables.
* `ATL_PROXY_NAME` (default: NONE)
The reverse proxy's fully qualified hostname. `CATALINA_CONNECTOR_PROXYNAME`
is also supported for backwards compatibility.
* `ATL_PROXY_PORT` (default: NONE)
The reverse proxy's port number via which Confluence is
accessed. `CATALINA_CONNECTOR_PROXYPORT` is also supported for backwards
compatibility.
* `ATL_TOMCAT_PORT` (default: 8090)
The port for Tomcat/Confluence to listen on. Depending on your container
deployment method this port may need to be
[exposed and published][docker-expose].
* `ATL_TOMCAT_SCHEME` (default: http)
The protocol via which Confluence is accessed. `CATALINA_CONNECTOR_SCHEME` is also
supported for backwards compatibility.
* `ATL_TOMCAT_SECURE` (default: false)
Set 'true' if `ATL_TOMCAT_SCHEME` is 'https'. `CATALINA_CONNECTOR_SECURE` is
also supported for backwards compatibility.
* `ATL_TOMCAT_CONTEXTPATH` (default: NONE)
The context path the application is served over. `CATALINA_CONTEXT_PATH` is
also supported for backwards compatibility.
* `ATL_TOMCAT_ACCESS_LOG` (default: false)
Whether to enable Tomcat access logging; set to `true` to enable. *NOTE*:
These logs are written to the Container internal volume by default (under
`/opt/atlassian/confluence/logs/`); these are rotated but not removed, and
will grow indefinitely. If you enable this functionality it is recommended
that you map the directory to a volume and perform log ingestion/cleanup with
external tools.
The following Tomcat/Catalina options are also supported. For more information,
see https://tomcat.apache.org/tomcat-7.0-doc/config/index.html
* `ATL_TOMCAT_MGMT_PORT` (default: 8000)
* `ATL_TOMCAT_MAXTHREADS` (default: 48)
* `ATL_TOMCAT_MINSPARETHREADS` (default: 10)
* `ATL_TOMCAT_CONNECTIONTIMEOUT` (default: 20000)
* `ATL_TOMCAT_ENABLELOOKUPS` (default: false)
* `ATL_TOMCAT_PROTOCOL` (default: org.apache.coyote.http11.Http11NioProtocol)
* `ATL_TOMCAT_REDIRECTPORT` (default: 8443)
* `ATL_TOMCAT_ACCEPTCOUNT` (default: 10)
* `ATL_TOMCAT_DEBUG` (default: 0)
* `ATL_TOMCAT_URIENCODING` (default: UTF-8)
* `ATL_TOMCAT_MAXHTTPHEADERSIZE` (default: 8192)
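
As an illustrative sketch (the hostname and ports are placeholders, and the
image and volume names follow the examples elsewhere in this README), a
Confluence instance served at `https://confluence.example.com` behind a
TLS-terminating reverse proxy might be started with:

    docker run -e ATL_PROXY_NAME=confluence.example.com -e ATL_PROXY_PORT=443 \
        -e ATL_TOMCAT_SCHEME=https -e ATL_TOMCAT_SECURE=true \
        -v confluenceVolume:/var/atlassian/application-data/confluence \
        --name="confluence" -d -p 8090:8090 -p 8091:8091 atlassian/confluence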
## JVM configuration
If you need to pass additional JVM arguments to Confluence such as specifying a
custom trust store, you can add them via the below environment variable
* `JVM_SUPPORT_RECOMMENDED_ARGS`
Additional JVM arguments for Confluence
Example:
    docker run -e JVM_SUPPORT_RECOMMENDED_ARGS=-Djavax.net.ssl.trustStore=/var/atlassian/application-data/confluence/cacerts -v confluenceVolume:/var/atlassian/application-data/confluence --name="confluence" -d -p 8090:8090 -p 8091:8091 atlassian/confluence
## Confluence-specific settings
* `ATL_AUTOLOGIN_COOKIE_AGE` (default: 1209600; two weeks, in seconds)
The maximum time a user can remain logged-in with 'Remember Me'.
* `CONFLUENCE_HOME`
The Confluence home directory. This may be on a mounted volume; if so, it
should be writable by the user `confluence`. See the note below about UID
mappings.
* `ATL_LUCENE_INDEX_DIR`
The directory where [Lucene](https://lucene.apache.org/) search indexes should
be stored. Defaults to `index` under the Confluence home directory.
* `ATL_LICENSE_KEY` (from Confluence 7.9 onwards)
The Confluence license string. Providing this will remove the need to supply it through the web startup screen.
* *use with caution* `CONFLUENCE_LOG_STDOUT` `[true, false]` (from Confluence 7.9 onwards)
Prior to Confluence 7.9.0, log files are always stored in the `logs` folder in the Confluence home. From version
7.9.0 the logs can be written directly to `stdout` instead of to a file, which makes it possible to fetch the log messages
via `docker logs <CONTAINER_ID>`. In this setup we recommend using log aggregation tooling (e.g. AWS CloudWatch or an ELK stack).
**Beware: if enabled, the support ZIP produced by the Troubleshooting and Support plugin does not contain the application logs.**
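
For example (a sketch using the same volume and image names as the examples
above), stdout logging can be enabled and the log stream followed with:

    docker run -e CONFLUENCE_LOG_STDOUT=true \
        -v confluenceVolume:/var/atlassian/application-data/confluence \
        --name="confluence" -d -p 8090:8090 -p 8091:8091 atlassian/confluence
    docker logs -f confluence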
## Database configuration
The database can optionally be configured from the environment, avoiding the
need to do so through the web startup screen.
The following variables must all be supplied if using this feature:
* `ATL_JDBC_URL`
The database URL; this is database-specific.
* `ATL_JDBC_USER`
The database user to connect as.
* `ATL_JDBC_PASSWORD`
The password for the database user.
* `ATL_DB_TYPE`
The type of database; valid supported values are:
* `mssql`
* `mysql`
* `oracle12c` (Confluence 7.3.0 or earlier only)
* `oracle` (Confluence 7.3.1 or later only. Compatible with Oracle 12c and Oracle 19c)
* `postgresql`
Note: Due to licensing restrictions, Confluence does not ship with MySQL or
Oracle JDBC drivers. To use these databases you will need to copy a suitable
driver into the container and restart it. For example, to copy the MySQL driver
into a container named "confluence", you would do the following:
    docker cp mysql-connector-java.x.y.z.jar confluence:/opt/atlassian/confluence/confluence/WEB-INF/lib
    docker restart confluence
For more information see the [Database JDBC Drivers](https://confluence.atlassian.com/doc/database-jdbc-drivers-171742.html)
page.
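
As a sketch only (the JDBC URL, credentials, and the `mypostgres` hostname are
placeholders), a PostgreSQL-backed instance configured entirely from the
environment might look like:

    docker run -e ATL_DB_TYPE=postgresql \
        -e ATL_JDBC_URL=jdbc:postgresql://mypostgres:5432/confluence \
        -e ATL_JDBC_USER=confluence -e ATL_JDBC_PASSWORD=changeme \
        -v confluenceVolume:/var/atlassian/application-data/confluence \
        --name="confluence" -d -p 8090:8090 -p 8091:8091 atlassian/confluence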
### Optional database settings
The following variables are for the database connection pool, and are
optional.
* `ATL_DB_POOLMINSIZE` (default: 20)
* `ATL_DB_POOLMAXSIZE` (default: 100)
* `ATL_DB_TIMEOUT` (default: 30)
* `ATL_DB_IDLETESTPERIOD` (default: 100)
* `ATL_DB_MAXSTATEMENTS` (default: 0)
* `ATL_DB_VALIDATE` (default: false)
* `ATL_DB_ACQUIREINCREMENT` (default: 1)
* `ATL_DB_VALIDATIONQUERY` (default: "select 1")
## Data Center configuration
This docker image can be run as part of a [Data Center][4] cluster. You can
specify the following properties to start Confluence as a Data Center node,
instead of manually configuring a cluster. See [Installing Confluence Data
Center][5] for more information.
### Cluster configuration
Confluence Data Center allows clustering via various methods. For more
information on the setting for each type see [this page][6].
**NOTE:** The underlying network should be set up to support the Confluence
clustering type you are using. How to do this depends on the container
management technology, and is beyond the scope of this documentation.
#### Common cluster settings
* `ATL_CLUSTER_TYPE`
The cluster type. Setting this effectively enables clustering. Valid values
are `aws`, `multicast`, and `tcp_ip`.
* `ATL_CLUSTER_NAME`
The cluster name; this should be common across all nodes.
* `ATL_PRODUCT_HOME_SHARED`
The location of the shared home directory for all Confluence nodes. **Note**:
This must be a real shared filesystem that is mounted inside the
container. Additionally, see the note below about UIDs.
* `ATL_CLUSTER_TTL`
The time-to-live for cluster packets. Primarily of use in multicast clusters.
#### AWS cluster settings
The following should be populated from the AWS environment.
* `ATL_HAZELCAST_NETWORK_AWS_IAM_ROLE`
* `ATL_HAZELCAST_NETWORK_AWS_IAM_REGION`
* `ATL_HAZELCAST_NETWORK_AWS_HOST_HEADER`
* `ATL_HAZELCAST_NETWORK_AWS_SECURITY_GROUP`
* `ATL_HAZELCAST_NETWORK_AWS_TAG_KEY`
* `ATL_HAZELCAST_NETWORK_AWS_TAG_VALUE`
#### TCP cluster settings
* `ATL_CLUSTER_PEERS`
A comma-separated list of peer IPs.
#### Multicast cluster settings
* `ATL_CLUSTER_ADDRESS`
The multicast address the cluster will communicate on.
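
For illustration only (the node name, peer IPs, host paths, and the shared-home
mountpoint are placeholders; a real cluster also needs a common external
database and a suitably configured network), a TCP/IP Data Center node might be
started along these lines:

    docker run -e ATL_CLUSTER_TYPE=tcp_ip -e ATL_CLUSTER_NAME=confluence-cluster \
        -e ATL_CLUSTER_PEERS=10.0.0.11,10.0.0.12 \
        -e ATL_PRODUCT_HOME_SHARED=/var/atlassian/application-data/confluence/shared-home \
        -v /mnt/confluence-shared:/var/atlassian/application-data/confluence/shared-home \
        -v /data/confluence-node1:/var/atlassian/application-data/confluence \
        --name="confluence-node1" -d -p 8090:8090 -p 8091:8091 atlassian/confluence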
## Container Configuration
* `SET_PERMISSIONS` (default: true)
Define whether to set home directory permissions on startup. Set to `false` to disable
this behaviour.
## Advanced Configuration
As mentioned at the top of this section, the settings from the environment are
used to populate the application configuration on container startup. However,
in some cases you may wish to customise the settings in ways that are not
supported by the environment variables above. In this case, it is possible to
modify the base templates to add your own configuration. There are three main
ways of doing this: modify our repository and build your own image, build a new
image from the existing one, or provide new templates at startup. We will
briefly outline these methods here, but in practice how you do this will depend
on your needs.
#### Overwrite the templates at runtime
There are two main ways of doing this:
* If your container is going to be long-lived, you can create it, modify the
installed templates under `/opt/atlassian/etc/`, and then run it.
* Alternatively, you can create a volume containing your alternative templates,
and mount it over the provided templates at runtime
with `--volume my-config:/opt/atlassian/etc/`, as shown in the sketch below.
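
For instance (a sketch; the host directory `my-templates` is a placeholder and
must contain the full set of templates you want to provide):

    docker run -v "$(pwd)/my-templates":/opt/atlassian/etc \
        -v confluenceVolume:/var/atlassian/application-data/confluence \
        --name="confluence" -d -p 8090:8090 -p 8091:8091 atlassian/confluence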
# Shared directory and user IDs
By default the Confluence application runs as the user `confluence`, with a UID
and GID of 2002. Consequently, this UID must have write access to the shared
filesystem. If for some reason a different UID must be used, there are a number
of options available:
* The Docker image can be rebuilt with a different UID.
* Under Linux, the UID can be remapped using
[user namespace remapping][7].
To preserve strict permissions for certain configuration files, this container starts as
`root` to perform bootstrapping before running Confluence under a non-privileged user
account. If you wish to start the container as a non-root user, please note that Tomcat
configuration, and the bootstrapping of seraph-config.xml (SSO) &
confluence-init.properties (overriding `$CONFLUENCE_HOME`) will be skipped and a warning
will be logged. You may still apply custom configuration in this situation by mounting a
custom file directly, e.g. by mounting your own server.xml file directly to
`/opt/atlassian/confluence/conf/server.xml`.
Database and Clustering bootstrapping will work as expected when starting this container
as a non-root user.
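
As a sketch of the approach described above (the UID/GID match the `RUN_UID`
and `RUN_GID` defined in the Dockerfile, and the host `server.xml` path is a
placeholder), a pre-rendered `server.xml` can be mounted directly so that
Tomcat configuration does not rely on the root bootstrapping step:

    docker run --user 2002:2002 \
        -v "$(pwd)/server.xml":/opt/atlassian/confluence/conf/server.xml \
        -v confluenceVolume:/var/atlassian/application-data/confluence \
        --name="confluence" -d -p 8090:8090 -p 8091:8091 atlassian/confluence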
# Backup
For evaluating Confluence you can use the built-in database, which will store
its files in the Confluence home directory. In that case it is sufficient to
create a backup archive of the directory on the host that is used as a volume
(`/data/your-confluence-home` in the example above). **Do not use the built-in
database in production.** It is NOT performant. You have been warned.
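
For example (a sketch; the host path follows the Quick Start example and the
container name is assumed to be `confluence`), the mounted home directory can
be archived from the host while the instance is stopped:

    docker stop confluence
    tar -czf confluence-home-backup-$(date +%F).tar.gz -C /data/your-confluence-home .
    docker start confluence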
Confluence's [automatic backup][8] is currently supported in the Docker
setup. You can also use the [Production Backup Strategy][9] approach if you're
using an external database.
Read more about data recovery and backups: [Site Backup and Restore][10]
# Troubleshooting
These images include built-in scripts to assist in performing common JVM diagnostic tasks.
## Thread dumps
`/opt/atlassian/support/thread-dumps.sh` can be run via `docker exec` to easily trigger the collection of thread
dumps from the containerized application. For example:
    docker exec my_container /opt/atlassian/support/thread-dumps.sh
By default this script will collect 10 thread dumps at 5 second intervals. This can
be overridden by passing a custom value for the count and interval, by using `-c` / `--count`
and `-i` / `--interval` respectively. For example, to collect 20 thread dumps at 3 second intervals:
    docker exec my_container /opt/atlassian/support/thread-dumps.sh --count 20 --interval 3
Thread dumps will be written to `$APP_HOME/thread_dumps/<date>`.
Note: By default this script will also capture output from `top` run in
'Thread-mode'. This can be disabled by passing `-n` / `--no-top`.
## Heap dump
`/opt/atlassian/support/heap-dump.sh` can be run via `docker exec` to easily trigger the collection of a heap
dump from the containerized application. For example:
    docker exec my_container /opt/atlassian/support/heap-dump.sh
A heap dump will be written to `$APP_HOME/heap.bin`. If a file already exists at this
location, use `-f` / `--force` to overwrite the existing heap dump file.
## Manual diagnostics
The `jcmd` utility is also included in these images and can be used by starting a `bash` shell
in the running container:
    docker exec -it my_container /bin/bash
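
Once inside the shell, a few common `jcmd` invocations are sketched below
(`<pid>` stands for the Confluence JVM process ID, which `jcmd` run with no
arguments will list; these are standard JDK diagnostic commands, not scripts
shipped in this image):

    jcmd                      # list running JVMs and their PIDs
    jcmd <pid> VM.flags       # show the JVM flags in effect
    jcmd <pid> GC.heap_info   # summarise current heap usage
    jcmd <pid> Thread.print   # print a thread dump to stdout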
# Support
For product support, go to
[support.atlassian.com](https://support.atlassian.com/confluence-server/).
[1]: https://docs.docker.com/userguide/dockervolumes/#mount-a-host-directory-as-a-data-volume
[3]: https://confluence.atlassian.com/display/DOC/Supported+platforms
[4]: https://confluence.atlassian.com/doc/confluence-data-center-technical-overview-790795847.html
[5]: https://confluence.atlassian.com/doc/installing-confluence-data-center-203603.html
[6]: https://confluence.atlassian.com/doc/change-node-discovery-from-multicast-to-tcp-ip-or-aws-792297728.html#ChangeNodeDiscoveryfromMulticasttoTCP/IPorAWS-TochangefromTCP/IPtomulticast
[7]: https://docs.docker.com/engine/security/userns-remap/
[8]: https://confluence.atlassian.com/display/DOC/Configuring+Backups
[9]: https://confluence.atlassian.com/display/DOC/Production+Backup+Strategy
[10]: https://confluence.atlassian.com/display/DOC/Site+Backup+and+Restore
[12]: https://confluence.atlassian.com/doc/confluence-6-13-release-notes-959288785.html
---
apiVersion: v1
# The repository name in registry1, excluding /ironbank/
name: "atlassian/confluence-data-center/confluence-lts-alpine"
# List of tags to push for the repository in registry1
# The most specific version should be the first tag and will be shown
# on ironbank.dsop.io
tags:
- "9.2.0"
- "latest"
# Build args passed to Dockerfile ARGs
args:
  BASE_IMAGE: "opensource/alpinelinux/alpine"
  BASE_TAG: "3.21"
# Docker image labels
labels:
  org.opencontainers.image.title: "confluence-lts-alpine"
  org.opencontainers.image.description: "Create, collaborate, and organize all your work in one place. Confluence is a team workspace where knowledge and collaboration meet. Dynamic pages give your team a place to create, capture, and collaborate on any project or idea. Spaces help your team structure, organize, and share work, so every team member has visibility into institutional knowledge and access to the information they need to do their best work"
  org.opencontainers.image.licenses: "proprietary"
  org.opencontainers.image.url: "https://hub.docker.com/r/atlassian/confluence-server"
  org.opencontainers.image.vendor: "Atlassian"
  org.opencontainers.image.version: "9.2.0"
  mil.dso.ironbank.image.keywords: "confluence,atlassian,team,organise,share,remote,work,wiki,document,knowledge,knowledgebase,culture,devops,devsecops,postmortem"
  mil.dso.ironbank.image.type: "commercial"
  mil.dso.ironbank.product.name: "atlassian"
# List of resources to make available to the offline build context
resources:
- filename: atlassian-confluence-9.2.0.tar.gz
  url: https://product-downloads.atlassian.com/software/confluence/downloads/atlassian-confluence-9.2.0.tar.gz
  validation:
    type: sha512
    value: d08b456aefe5c4772d534259c86ce539fa2de2b4e30daab6fdb5289444ef521474d39810e80db6f897522d45c035c10bd0f0a0ddaa13044ed37517eb6fca475c
# List of project maintainers
maintainers:
- name: "Contegix"
  username: "jhunt"
  email: "jhunt@ascendintegrated.com"
- name: "Contegix"
  username: "dcrum"
  email: "dcrum@ascendintegrated.com"
- name: "Atlassian"
  username: "drathbone"
  email: "support@atlassian.com"
confluence.home = {{ atl_product_home | default(confluence_home) | default('') }}
<?xml version="1.0" encoding="UTF-8"?>
<confluence-configuration>
<setupStep>{{ atl_setup_step | default('setupstart') }}</setupStep>
<setupType>{{ atl_setup_type | default('custom') }}</setupType>
<buildNumber>{{ atl_build_number | default('0') }}</buildNumber>
<properties>
<property name="confluence.database.connection.type">database-type-standard</property>
<property name="struts.multipart.saveDir">${localHome}/temp</property>
<property name="webwork.multipart.saveDir">${localHome}/temp</property>
<property name="attachments.dir">${confluenceHome}/attachments</property>
<property name="lucene.index.dir">{{ atl_lucene_index_dir | default('${confluenceHome}/index') }}</property>
{# We need mappings for "oracle12c" and "oracle" to remain backwards compatible with Confluence 7.3.0 and earlier. Oracle 19c support was added in Confluence 7.3.1 and changed the database type name for oracle databases from "oracle12c" to "oracle" #}
{% if atl_jdbc_url is defined %}
{% set databases = {
"mysql": ["com.mysql.jdbc.Driver", "MySQLDialect"],
"postgresql": ["org.postgresql.Driver", "PostgreSQLDialect"],
"mssql": ["com.microsoft.sqlserver.jdbc.SQLServerDriver", "SQLServerDialect"],
"oracle12c": ["oracle.jdbc.driver.OracleDriver", "OracleDialect"],
"oracle": ["oracle.jdbc.driver.OracleDriver", "OracleDialect"]
} %}
<property name="confluence.database.choice">{{ atl_db_type }}</property>
<property name="hibernate.connection.url">{{ atl_jdbc_url }}</property>
<property name="hibernate.connection.username">{{ atl_jdbc_user }}</property>
{% if atl_jdbc_secret_class is defined %}
<property name="jdbc.password.decrypter.classname">{{ atl_jdbc_secret_class }}</property>
{% endif %}
<property name="hibernate.connection.password">{{ atl_jdbc_password }}</property>
<property name="hibernate.connection.driver_class">{{ databases[atl_db_type][0] }}</property>
<property name="hibernate.dialect">com.atlassian.confluence.impl.hibernate.dialect.{{ databases[atl_db_type][1] }}</property>
{# Confluence 7.13 and earlier use the c3p0 connection pool properties below; later versions use HikariCP (else branch) #}
{% if (confluence_version.split(".")[0] | int() == 7 and confluence_version.split(".")[1] | int() <= 13) or confluence_version.split(".")[0] | int() < 7 %}
<property name="hibernate.c3p0.min_size">{{ atl_db_poolminsize | default('20') }}</property>
<property name="hibernate.c3p0.max_size">{{ atl_db_poolmaxsize | default('100') }}</property>
<property name="hibernate.c3p0.timeout">{{ atl_db_timeout | default('30') }}</property>
<property name="hibernate.c3p0.idle_test_period">{{ atl_db_idletestperiod | default('100') }}</property>
<property name="hibernate.c3p0.max_statements">{{ atl_db_maxstatements | default('0') }}</property>
<property name="hibernate.c3p0.validate">{{ atl_db_validate | default('true') }}</property>
<property name="hibernate.c3p0.acquire_increment">{{ atl_db_acquireincrement | default('1') }}</property>
{% if atl_db_validationquery is defined %}
<property name="hibernate.c3p0.preferredTestQuery">{{ atl_db_validationquery | default('select 1') }}</property>
{% endif %}
{% else %}
<property name="hibernate.hikari.idleTimeout">{{ (atl_db_timeout | default(30) | int) * 1000 }}</property>
<property name="hibernate.hikari.maximumPoolSize">{{ atl_db_poolmaxsize | default('100') }}</property>
<property name="hibernate.hikari.minimumIdle">{{ atl_db_poolminsize | default('20') }}</property>
<property name="hibernate.hikari.registerMbeans">true</property>
<property name="hibernate.connection.provider_class">{{ atl_db_provider_class | default('com.atlassian.confluence.impl.hibernate.DelegatingHikariConnectionProvider') }}</property>
{% endif %}
{% endif %}
{% if atl_cluster_type is defined %}
<property name="confluence.cluster">true</property>
<property name="confluence.cluster.name">{{ atl_cluster_name }}</property>
<property name="confluence.cluster.node.name">{{ atl_cluster_node_name }}</property>
<property name="confluence.cluster.home">{{ atl_product_home_shared | default(confluence_shared_home) | default('') }}</property>
<property name="shared-home">{{ atl_product_home_shared | default(confluence_shared_home) | default('') }}</property>
<property name="confluence.cluster.join.type">{{ atl_cluster_type }}</property>
{% if atl_cluster_type == 'aws' %}
<property name="confluence.cluster.aws.iam.role">{{ atl_hazelcast_network_aws_iam_role }}</property>
<property name="confluence.cluster.aws.region">{{ atl_hazelcast_network_aws_iam_region }}</property>
<property name="confluence.cluster.aws.host.header">{{ atl_hazelcast_network_aws_host_header }}</property>
<property name="confluence.cluster.aws.security.group.name">{{ atl_hazelcast_network_aws_security_group }}</property>
<property name="confluence.cluster.aws.tag.key">{{ atl_hazelcast_network_aws_tag_key }}</property>
<property name="confluence.cluster.aws.tag.value">{{ atl_hazelcast_network_aws_tag_value }}</property>
<property name="confluence.cluster.ttl">{{ atl_cluster_ttl }}</property>
{% elif atl_cluster_type == 'tcp_ip' %}
<property name="confluence.cluster.peers">{{ atl_cluster_peers }}</property>
{% elif atl_cluster_type == 'multicast' %}
<property name="confluence.cluster.address">{{ atl_cluster_address }}</property>
<property name="confluence.cluster.ttl">{{ atl_cluster_ttl }}</property>
{% endif %}
{% endif %}
{% if atl_license_key is defined %}
<property name="atlassian.license.message">{{ atl_license_key }}</property>
{% endif %}
{% if atl_tomcat_contextpath is defined and atl_tomcat_contextpath != '' %}
<property name="confluence.webapp.context.path">/{{ atl_tomcat_contextpath }}</property>
{% endif %}
{% if atl_snapshot_used is defined %}
<property name="hibernate.setup">true</property>
{% endif %}
</properties>
</confluence-configuration>
<security-config>
<parameters>
<init-param>
<param-name>login.url</param-name>
<param-value>/login.action?os_destination=${originalurl}&amp;permissionViolation=true</param-value>
</init-param>
<init-param>
<param-name>link.login.url</param-name>
<param-value>/login.action</param-value>
</init-param>
<init-param>
<param-name>cookie.encoding</param-name>
<param-value>cNf</param-value>
</init-param>
<init-param>
<param-name>login.cookie.key</param-name>
<param-value>seraph.confluence</param-value>
</init-param>
{% if atl_autologin_cookie_age is defined %}
<init-param>
<param-name>autologin.cookie.age</param-name>
<param-value>{{ atl_autologin_cookie_age }}</param-value>
</init-param>
{% endif %}
<!--only basic authentication available-->
<init-param>
<param-name>authentication.type</param-name>
<param-value>os_authType</param-value>
</init-param>
<!-- Invalidate session on login to prevent session fixation attack -->
<init-param>
<param-name>invalidate.session.on.login</param-name>
<param-value>true</param-value>
</init-param>
<!-- Add names for session attributes that must not be copied to a new session when the old one gets invalidated.
Currently it is empty (i.e. all attributes will be copied). -->
<init-param>
<param-name>invalidate.session.exclude.list</param-name>
<param-value></param-value>
</init-param>
</parameters>
<rolemapper class="com.atlassian.confluence.security.ConfluenceRoleMapper"/>
<controller class="com.atlassian.confluence.setup.seraph.ConfluenceSecurityController"/>
<!-- Default Confluence authenticator, which uses the configured user management for authentication. -->
<authenticator class="com.atlassian.confluence.user.ConfluenceAuthenticator"/>
<!-- Custom authenticators appear below. To enable one of them, comment out the default authenticator above and uncomment the one below. -->
<!-- Authenticator with support for Crowd single-sign on (SSO). -->
<!-- <authenticator class="com.atlassian.confluence.user.ConfluenceCrowdSSOAuthenticator"/> -->
<!-- Specialised version of the default authenticator which adds authenticated users to confluence-users if they aren't already a member. -->
<!-- <authenticator class="com.atlassian.confluence.user.ConfluenceGroupJoiningAuthenticator"/> -->
<services>
<service class="com.atlassian.seraph.service.PathService">
<init-param>
<param-name>config.file</param-name>
<param-value>seraph-paths.xml</param-value>
</init-param>
</service>
</services>
<elevatedsecurityguard class="com.atlassian.confluence.security.seraph.ConfluenceElevatedSecurityGuard"/>
</security-config>
<?xml version="1.0" encoding="utf-8"?>
<Server port="{{ atl_tomcat_mgmt_port | default('8000') }}"
shutdown="SHUTDOWN">
<Service name="Catalina">
<Connector port="{{ atl_tomcat_port | default('8090') }}"
maxThreads="{{ atl_tomcat_maxthreads | default('48') }}"
minSpareThreads="{{ atl_tomcat_minsparethreads | default('10') }}"
connectionTimeout="{{ atl_tomcat_connectiontimeout | default('20000') }}"
enableLookups="{{ atl_tomcat_enablelookups | default('false') }}"
protocol="{{ atl_tomcat_protocol | default('org.apache.coyote.http11.Http11NioProtocol') }}"
redirectPort="{{ atl_tomcat_redirectport | default('8443') }}"
acceptCount="{{ atl_tomcat_acceptcount | default('10') }}"
debug="{{ atl_tomcat_debug | default('0') }}"
URIEncoding="{{ atl_tomcat_uriencoding | default('UTF-8') }}"
secure="{{ atl_tomcat_secure | default(catalina_connector_secure) | default('false') }}"
scheme="{{ atl_tomcat_scheme | default(catalina_connector_scheme) | default('http') }}"
proxyName="{{ atl_proxy_name | default(catalina_connector_proxyname) | default('') }}"
proxyPort="{{ atl_proxy_port | default(catalina_connector_proxyport) | default('') }}"
maxHttpHeaderSize="{{ atl_tomcat_maxhttpheadersize | default('8192') }}" />
<Engine name="Standalone"
defaultHost="localhost"
debug="0">
<Host name="localhost"
debug="0"
appBase="webapps"
unpackWARs="true"
autoDeploy="false"
startStopThreads="4">
<Context path="{{ atl_tomcat_contextpath | default(catalina_context_path) | default('') }}"
docBase="../confluence"
debug="0"
reloadable="false"
useHttpOnly="true">
<!-- Logging configuration for Confluence is specified in confluence/WEB-INF/classes/log4j.properties -->
<Manager pathname=""/>
<Valve className="org.apache.catalina.valves.StuckThreadDetectionValve"
threshold="60"/>
{% if ((atl_tomcat_access_log == 'true') or
(atl_tomcat_access_log is not defined and (confluence_version.split('.') | map('int') | list) >= ('7.11.0'.split('.') | map('int') | list)) ) %}
<Valve className="org.apache.catalina.valves.AccessLogValve"
directory="logs"
prefix="confluence_access"
suffix=".log"
rotatable="true"
pattern="%h %{X-AUSERNAME}o %t &quot;%r&quot; %s %b %D %U %I &quot;%{User-Agent}i&quot;"
requestAttributesEnabled="{{ atl_tomcat_requestattributesenabled | default('false') }}"
maxDays="{{ atl_tomcat_access_logs_maxdays | default('-1') }}"/>
{%- endif %}
{%- if atl_tomcat_trustedproxies is defined or atl_tomcat_internalproxies is defined %}
<Valve className="org.apache.catalina.valves.RemoteIpValve"
{%- if atl_tomcat_trustedproxies %}
trustedProxies="{{ atl_tomcat_trustedproxies }}"
{%- endif %}
{%- if atl_tomcat_internalproxies %}
internalProxies="{{ atl_tomcat_internalproxies }}"
{%- endif %}
remoteIpHeader="x-forwarded-for"
proxiesHeader="x-forwarded-by"
protocolHeader="x-forwarded-proto"/>
{%- endif %}
</Context>
<Context path="${confluence.context.path}/synchrony-proxy"
docBase="../synchrony-proxy"
debug="0"
reloadable="false"
useHttpOnly="true">
<Valve className="org.apache.catalina.valves.StuckThreadDetectionValve"
threshold="60"/>
</Context>
</Host>
</Engine>
</Service>
</Server>
from bottle import route, run, static_file
import subprocess
#
# Download confluence-home on the fly from node1 to other node by
# ---------------------------------------------------------------
# curl -o /tmp/confluence-home.tar http://confluence-cluster-node1:8888/download
# tar xfv /tmp/confluence-home.tar -C /
# ---------------------------------------------------------------
@route('/download')
def index():
    p = subprocess.Popen('tar cf /work-private/confluence-home.tar /confluence-home/bundled-plugins /confluence-home/confluence.cfg.xml /confluence-home/index /confluence-home/journal /confluence-home/synchrony-standalone.jar', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    retval = p.wait()
    return static_file('confluence-home.tar', root='/work-private')

run(host='0.0.0.0', port=8888)
#!/usr/bin/python3 -B
from entrypoint_helpers import env, gen_cfg, str2bool_or, exec_app
RUN_USER = env['run_user']
RUN_GROUP = env['run_group']
CONFLUENCE_INSTALL_DIR = env['confluence_install_dir']
CONFLUENCE_HOME = env['confluence_home']
UPDATE_CFG = str2bool_or(env.get('atl_force_cfg_update'), False)
UNSET_SENSITIVE_VARS = str2bool_or(env.get('atl_unset_sensitive_env_vars'), True)
gen_cfg('server.xml.j2', f'{CONFLUENCE_INSTALL_DIR}/conf/server.xml',
        user=RUN_USER, group=RUN_GROUP, overwrite=UPDATE_CFG)
gen_cfg('seraph-config.xml.j2',
        f'{CONFLUENCE_INSTALL_DIR}/confluence/WEB-INF/classes/seraph-config.xml',
        user=RUN_USER, group=RUN_GROUP, overwrite=UPDATE_CFG)
gen_cfg('confluence-init.properties.j2',
        f'{CONFLUENCE_INSTALL_DIR}/confluence/WEB-INF/classes/confluence-init.properties',
        user=RUN_USER, group=RUN_GROUP, overwrite=UPDATE_CFG)
gen_cfg('confluence.cfg.xml.j2', f'{CONFLUENCE_HOME}/confluence.cfg.xml',
        user=RUN_USER, group=RUN_GROUP, overwrite=UPDATE_CFG)

exec_app([f'{CONFLUENCE_INSTALL_DIR}/bin/start-confluence.sh', '-fg'], CONFLUENCE_HOME,
         name='Confluence', env_cleanup=True)
import sys
import os
import pwd
import shutil
import logging
import jinja2 as j2
import uuid
import base64
logging.basicConfig(level=logging.DEBUG)
######################################################################
# Setup inputs and outputs
# Import all ATL_* and Dockerfile environment variables. We lower-case
# these for compatability with Ansible template convention. We also
# support CATALINA variables from older versions of the Docker images
# for backwards compatability, if the new version is not set.
env = {k.lower(): v
for k, v in os.environ.items()}
# Setup Jinja2 for templating
jenv = j2.Environment(
loader=j2.FileSystemLoader('/opt/atlassian/etc/'),
autoescape=j2.select_autoescape(['xml']))
######################################################################
# Utils
def set_perms(path, user, group, mode):
    try:
        shutil.chown(path, user=user, group=group)
    except PermissionError:
        logging.warning(f"Could not chown path {path} to {user}:{group} due to insufficient permissions.")
    try:
        os.chmod(path, mode)
    except PermissionError:
        logging.warning(f"Could not chmod path {path} to {mode} due to insufficient permissions.")

def set_tree_perms(path, user, group, mode):
    set_perms(path, user, group, mode)
    for dirpath, dirnames, filenames in os.walk(path):
        set_perms(dirpath, user, group, mode)
        for filename in filenames:
            set_perms(os.path.join(dirpath, filename), user, group, mode)
def check_perms(path, uid, gid, mode):
    stat = os.stat(path)
    return all([
        stat.st_uid == int(uid),
        stat.st_gid == int(gid),
        stat.st_mode & mode == mode
    ])
def gen_cfg(tmpl, target, user='root', group='root', mode=0o644, overwrite=True):
    if not overwrite and os.path.exists(target):
        logging.info(f"{target} exists; skipping.")
        return
    logging.info(f"Generating {target} from template {tmpl}")
    cfg = jenv.get_template(tmpl).render(env)
    try:
        with open(target, 'w') as fd:
            fd.write(cfg)
    except (OSError, PermissionError):
        logging.warning(f"Permission problem writing '{target}'; skipping")
    else:
        set_tree_perms(target, user, group, mode)
def gen_container_id():
    env['uuid'] = uuid.uuid4().hex
    with open('/etc/container_id') as fd:
        lcid = fd.read()
        if lcid != '':
            env['local_container_id'] = lcid
def str2bool(s):
    if str(s).lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    return False

def str2bool_or(s, default):
    # If the string is set, interpret it as a bool, or fallback to a
    # default.
    if s is None:
        return default
    else:
        return str2bool(s)

def unset_secure_vars():
    secure_keywords = ('PASS', 'SECRET', 'TOKEN')
    for key in os.environ:
        if any(kw in key.upper() for kw in secure_keywords):
            logging.warning(f"Unsetting environment var {key}")
            del os.environ[key]
######################################################################
# Application startup utilities
def check_permissions(home_dir):
    """Ensure the home directory is set to minimal permissions"""
    if str2bool(env.get('set_permissions') or True) and check_perms(home_dir, env['run_uid'], env['run_gid'], 0o700) is False:
        set_tree_perms(home_dir, env['run_user'], env['run_group'], 0o700)
        logging.info(f"User is currently root. Will change directory ownership and downgrade run user to {env['run_user']}")

def drop_root(run_user):
    logging.info(f"User is currently root. Will downgrade run user to {run_user}")
    pwd_entry = pwd.getpwnam(run_user)
    os.environ['USER'] = run_user
    os.environ['HOME'] = pwd_entry.pw_dir
    os.environ['SHELL'] = pwd_entry.pw_shell
    os.environ['LOGNAME'] = run_user
    os.setgid(pwd_entry.pw_gid)
    os.setuid(pwd_entry.pw_uid)
def write_pidfile():
    app_home = env[f"{env['app_name'].lower()}_home"]
    pidfile = f"{app_home}/docker-app.pid"
    with open(pidfile, 'wt', encoding='utf-8') as fd:
        pid = os.getpid()
        fd.write(str(pid))
def exec_app(start_cmd_v, home_dir, name='app', env_cleanup=False):
    """Run the supplied application startup command.

    Arguments:
    start_cmd_v -- A list of the command and its arguments.
    home_dir -- Application home directory.
    name -- (Optional) The name to display in the log message.
    env_cleanup -- (Default: False) Remove possibly sensitive env-vars.
    """
    if os.getuid() == 0:
        check_permissions(home_dir)
        drop_root(env['run_user'])
    write_pidfile()
    if env_cleanup:
        unset_secure_vars()
    cmd = start_cmd_v[0]
    args = start_cmd_v
    logging.info(f"Running {name} with command '{cmd}', arguments {args}")
    os.execv(cmd, args)
#!/bin/bash
##############################################################################
#
# This script will initiate a clean shutdown of the application, and
# then wait for the process to finish before returning. This is
# primarily intended for use in environments that provide an orderly
# shutdown mechanism, in particular the Kubernetes `preStop` hook.
#
# This script will wait for the process to exit indefinitely; however
# most run-time tools (including Docker and Kubernetes) have their own
# shutdown timeouts that will send a SIGKILL if the grace period is
# exceeded.
#
##############################################################################
set -e
source /opt/atlassian/support/common.sh
echo "Shutting down Confluence..."
echo ${JVM_APP_PID} > ${CONFLUENCE_INSTALL_DIR}/work/catalina.pid
if [[ "${UID}" == 0 ]]; then
/bin/su ${RUN_USER} -c ${CONFLUENCE_INSTALL_DIR}/bin/stop-confluence.sh;
else
${CONFLUENCE_INSTALL_DIR}/bin/stop-confluence.sh;
fi
/opt/atlassian/support/wait-pid.sh ${JVM_APP_PID}
# -------------------------------------------------------------------------------------
# Common bootstrapping for support scripts (get app details: home directory, PID, etc.)
# -------------------------------------------------------------------------------------
# Set up Java utils
JCMD="${JAVA_HOME}/bin/jcmd"
# Set up app info
APP_NAME="$(set | grep '_INSTALL_DIR' | awk -F'_' '{print $1}')"
case "${APP_NAME}" in
BITBUCKET )
BOOTSTRAP_PROC="com.atlassian.bitbucket.internal.launcher.BitbucketServerLauncher"
;;
* )
BOOTSTRAP_PROC="org.apache.catalina.startup.Bootstrap"
;;
esac
# Get value of <app>_INSTALL_DIR
function get_app_install_dir {
    local APP_INSTALL_DIR=${APP_NAME}_INSTALL_DIR
    echo ${!APP_INSTALL_DIR}
}

# Get value of <app>_HOME
function get_app_home {
    local APP_HOME=${APP_NAME}_HOME
    echo ${!APP_HOME}
}
# Get app PID. APP_PID is the root process. JVM_APP_PID will generally
# be the same as APP_PID; the exception is Bitbucket running with
# Elasticsearch enabled.
JVM_APP_PID=$(${JCMD} | grep "${BOOTSTRAP_PROC}" | awk '{print $1}')
PIDFILE="$(get_app_home)/docker-app.pid"
if [[ -f $PIDFILE ]]; then
    APP_PID=$(<$PIDFILE)
else
    APP_PID=$JVM_APP_PID
fi
# Set valid getopt options
function set_valid_options {
    OPTS=$(getopt -o "$1" --long "$2" -n 'parse-options' -- "$@")
    if [ $? != 0 ]; then
        echo "Failed parsing options." >&2
        exit 1
    fi
    eval set -- "$OPTS"
}

# Run command(s)
function run_as_runuser {
    if [ $(id -u) = 0 ]; then
        su "${RUN_USER}" -c '"$@"' -- argv0 "$@"
    else
        $@
    fi
}
#!/bin/bash
# -------------------------------------------------------------------------------------
# Heap collector for containerized Atlassian applications
#
# This script can be run via `docker exec` to easily trigger the collection of a heap
# dump from the containerized application. For example:
#
# $ docker exec -it my_jira /opt/atlassian/support/heap-dump.sh
#
# A heap dump will be written to $APP_HOME/heap.bin. If a file already exists at this
# location, use -f/--force to overwrite the existing heap dump file.
#
# -------------------------------------------------------------------------------------
set -euo pipefail
# Set up common vars like APP_NAME, APP_HOME, APP_PID
SCRIPT_DIR=$(dirname "$0")
source "${SCRIPT_DIR}/common.sh"
# Set up script opts
set_valid_options "f" "force"
# Set defaults
OVERWRITE="false"
# Parse opts
while true; do
    case "${1-}" in
        -f | --force ) OVERWRITE="true"; shift ;;
        * ) break ;;
    esac
done
echo "Atlassian heap dump collector"
echo "App: ${APP_NAME}"
echo "Run user: ${RUN_USER}"
echo
OUT_FILE="$(get_app_home)/heap.bin"
if [[ -f "${OUT_FILE}" ]]; then
echo "A previous heap dump already exists at ${OUT_FILE}."
if [[ "${OVERWRITE}" == "true" ]]; then
echo "Removing previous heap dump file"
echo
rm "${OUT_FILE}"
else
echo "Use -f/--force to overwrite the existing heap dump."
exit
fi
fi
echo "Generating heap dump"
run_as_runuser ${JCMD} ${APP_PID} GC.heap_dump -all ${OUT_FILE} > /dev/null
echo
echo "Heap dump has been written to ${OUT_FILE}"
#!/bin/bash
# Send the specified signal to the main application process.
#
# If 'wait' is added as a second parameter, wait for the process to
# terminate. NOTE: This waits indefinitely, but may be killed by
# higher-level processes (e.g. Docker/Kubernetes)
set -e
SIG=$1
WAIT=$2
SHDIR=$(dirname $0)
source ${SHDIR}/common.sh
kill -${SIG} ${JVM_APP_PID}
if [[ "${WAIT}" == "wait" ]]; then
${SHDIR}/wait-pid.sh $JVM_APP_PID
fi