first sync
Hubert Cornet 2025-03-04 07:59:21 +01:00
parent 9cdcf486b6
commit 506716e703
1450 changed files with 577316 additions and 62 deletions

.env

@@ -1 +0,0 @@
# Template

CoPilot/.env Normal file

@@ -0,0 +1,85 @@
################
# Velociraptor #
################
VELOX_USER=admin
VELOX_PASSWORD=admin
VELOX_ROLE=administrator
VELOX_SERVER_URL=https://Velociraptor:8000/
VELOX_FRONTEND_HOSTNAME=Velociraptor
###########
# CoPilot #
###########
# Leave this as is if connecting from a remote machine
SERVER_IP=0.0.0.0
MYSQL_URL=copilot-mysql
# ! Avoid using special characters in the password ! #
MYSQL_ROOT_PASSWORD=REPLACE_WITH_PASSWORD
MYSQL_USER=copilot
# ! Avoid using special characters in the password ! #
MYSQL_PASSWORD=REPLACE_WITH_PASSWORD
MINIO_URL=copilot-minio
MINIO_ROOT_USER=admin
# ! Make the password at least 8 characters long ! #
MINIO_ROOT_PASSWORD=REPLACE_ME
MINIO_SECURE=False
# ! ALERT FORWARDING IP
# Set this to the IP of the host running CoPilot. This is used by Graylog to forward alerts to CoPilot
# ! Not needed anymore since we are reading from the index now
# ! Ensure Graylog is able to reach this IP and port 5000
ALERT_FORWARDING_IP=0.0.0.0
# Connector Credentials
# ! SETTING UP YOUR CONNECTORS: demos at https://www.youtube.com/@taylorwalton_socfortress/videos ! #
WAZUH_INDEXER_URL=https://wazuh.indexer:9200
WAZUH_INDEXER_USERNAME=admin
WAZUH_INDEXER_PASSWORD=SecretPassword
WAZUH_MANAGER_URL=https://wazuh.manager:55000
WAZUH_MANAGER_USERNAME=wazuh-wui
WAZUH_MANAGER_PASSWORD=MyS3cr37P450r.*-
GRAYLOG_URL=http://graylog:9000
GRAYLOG_USERNAME=admin
GRAYLOG_PASSWORD=yourpassword
SHUFFLE_URL=https://127.1.1.1
SHUFFLER_API_KEY=dummy
SHUFFLE_WORKFLOW_ID=dummy
VELOCIRAPTOR_URL=https://velociraptor:8889
VELOCIRAPTOR_API_KEY_PATH=dummy
SUBLIME_URL=http://127.1.1.1
SUBLIME_API_KEY=dummy
INFLUXDB_URL=http://127.1.1.1
INFLUXDB_API_KEY=dummy
INFLUXDB_ORG_AND_BUCKET=dummy,dummy
GRAFANA_URL=http://grafana:3000
GRAFANA_USERNAME=admin
GRAFANA_PASSWORD=admin
WAZUH_WORKER_PROVISIONING_URL=http://127.1.1.1
EVENT_SHIPPER_URL=graylog_host
GELF_INPUT_PORT=gelf_port
ALERT_CREATION_PROVISIONING_URL=http://127.1.1.1
HAPROXY_PROVISIONING_URL=http://127.1.1.1
# VirusTotal
VIRUSTOTAL_URL=https://www.virustotal.com/api/v3
VIRUSTOTAL_API_KEY=REPLACE_ME
# Portainer
PORTAINER_URL=http://127.1.1.1:9000
PORTAINER_USERNAME=admin
PORTAINER_PASSWORD=admin
PORTAINER_ENDPOINT_ID=2

@@ -0,0 +1,58 @@
###########
# CoPilot #
###########
copilot-backend:
  image: ghcr.io/socfortress/copilot-backend:latest
  container_name: copilot-backend
  env_file: .env
  # Expose the Ports for Graylog Alerting and Docs
  ports:
    - "5000:5000"
  volumes:
    - copilot-logs:/opt/logs
    # Mount the copilot.db file to persist the database
    - copilot-data:/opt/copilot/backend/data
  depends_on:
    - copilot-mysql
  restart: unless-stopped
copilot-frontend:
  image: ghcr.io/socfortress/copilot-frontend:latest
  container_name: copilot-frontend
  env_file: .env
  environment:
    - SERVER_HOST=${SERVER_HOST:-localhost} # Set the domain name of your server
  ports:
    - "80:80"
    - "443:443"
  restart: unless-stopped
copilot-mysql:
  image: mysql:8.0.38-debian
  container_name: copilot-mysql
  env_file: .env
  environment:
    MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
    MYSQL_DATABASE: copilot
    MYSQL_USER: ${MYSQL_USER}
    MYSQL_PASSWORD: ${MYSQL_PASSWORD}
  volumes:
    - copilot-mysql_data:/var/lib/mysql
  restart: unless-stopped
copilot-minio:
  image: quay.io/minio/minio:RELEASE.2024-09-13T20-26-02Z
  container_name: copilot-minio
  env_file: .env
  environment:
    MINIO_ROOT_USER: ${MINIO_ROOT_USER}
    MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
  volumes:
    - copilot_minio-data:/data
  command: server /data --console-address ":9001"
  restart: unless-stopped
copilot-nuclei-module:
  image: ghcr.io/socfortress/copilot-nuclei-module:latest
  container_name: copilot-nuclei-module
  restart: unless-stopped

Velociraptor/.env Normal file

@@ -0,0 +1,85 @@
################
# Velociraptor #
################
VELOX_USER=admin
VELOX_PASSWORD=admin
VELOX_ROLE=administrator
VELOX_SERVER_URL=https://Velociraptor:8000/
VELOX_FRONTEND_HOSTNAME=Velociraptor
###########
# CoPilot #
###########
# Leave this as is if connecting from a remote machine
SERVER_IP=0.0.0.0
MYSQL_URL=copilot-mysql
# ! Avoid using special characters in the password ! #
MYSQL_ROOT_PASSWORD=REPLACE_WITH_PASSWORD
MYSQL_USER=copilot
# ! Avoid using special characters in the password ! #
MYSQL_PASSWORD=REPLACE_WITH_PASSWORD
MINIO_URL=copilot-minio
MINIO_ROOT_USER=admin
# ! Make the password at least 8 characters long ! #
MINIO_ROOT_PASSWORD=REPLACE_ME
MINIO_SECURE=False
# ! ALERT FORWARDING IP
# Set this to the IP of the host running CoPilot. This is used by Graylog to forward alerts to CoPilot
# ! Not needed anymore since we are reading from the index now
# ! Ensure Graylog is able to reach this IP and port 5000
ALERT_FORWARDING_IP=0.0.0.0
# Connector Credentials
# ! SETTING UP YOUR CONNECTORS: demos at https://www.youtube.com/@taylorwalton_socfortress/videos ! #
WAZUH_INDEXER_URL=https://wazuh.indexer:9200
WAZUH_INDEXER_USERNAME=admin
WAZUH_INDEXER_PASSWORD=SecretPassword
WAZUH_MANAGER_URL=https://wazuh.manager:55000
WAZUH_MANAGER_USERNAME=wazuh-wui
WAZUH_MANAGER_PASSWORD=MyS3cr37P450r.*-
GRAYLOG_URL=http://graylog:9000
GRAYLOG_USERNAME=admin
GRAYLOG_PASSWORD=yourpassword
SHUFFLE_URL=https://127.1.1.1
SHUFFLER_API_KEY=dummy
SHUFFLE_WORKFLOW_ID=dummy
VELOCIRAPTOR_URL=https://velociraptor:8889
VELOCIRAPTOR_API_KEY_PATH=dummy
SUBLIME_URL=http://127.1.1.1
SUBLIME_API_KEY=dummy
INFLUXDB_URL=http://127.1.1.1
INFLUXDB_API_KEY=dummy
INFLUXDB_ORG_AND_BUCKET=dummy,dummy
GRAFANA_URL=http://grafana:3000
GRAFANA_USERNAME=admin
GRAFANA_PASSWORD=admin
WAZUH_WORKER_PROVISIONING_URL=http://127.1.1.1
EVENT_SHIPPER_URL=graylog_host
GELF_INPUT_PORT=gelf_port
ALERT_CREATION_PROVISIONING_URL=http://127.1.1.1
HAPROXY_PROVISIONING_URL=http://127.1.1.1
# VirusTotal
VIRUSTOTAL_URL=https://www.virustotal.com/api/v3
VIRUSTOTAL_API_KEY=REPLACE_ME
# Portainer
PORTAINER_URL=http://127.1.1.1:9000
PORTAINER_USERNAME=admin
PORTAINER_PASSWORD=admin
PORTAINER_ENDPOINT_ID=2

@@ -0,0 +1,20 @@
################
# Velociraptor #
################
velociraptor:
  container_name: velociraptor
  image: wlambert/velociraptor
  env_file: .env
  volumes:
    - velociraptor:/velociraptor/:rw
  environment:
    - VELOX_USER=${VELOX_USER}
    - VELOX_PASSWORD=${VELOX_PASSWORD}
    - VELOX_ROLE=${VELOX_ROLE}
    - VELOX_SERVER_URL=${VELOX_SERVER_URL}
    - VELOX_FRONTEND_HOSTNAME=${VELOX_FRONTEND_HOSTNAME}
  ports:
    - "8000:8000"
    - "8001:8001"
    - "8889:8889"
  restart: unless-stopped

@@ -1,42 +0,0 @@
#### NETWORKS
networks:
  traefik_front_network:
    external: true
  back_network_:
    driver: bridge
    attachable: true
#### SERVICES
services:
  ### hello_world
  hello_world:
    container_name: gitea-app
    hostname: gitea-app
    image: hello-world
    environment:
    restart: always
    networks:
      # - back_network_gitea
      - traefik_front_network
    volumes:
    labels:
      - "traefik.enable=true"
      - "traefik.docker.network=traefik_front_network"
      # HTTP
      - "traefik.http.routers.hello-world-http.rule=Host(`hello-world.tips-of-mine.com`)"
      - "traefik.http.routers.hello-world-http.entrypoints=http"
      - "traefik.http.routers.hello-world-http.priority=49"
      # HTTPS
      - "traefik.http.routers.hello-world-https.rule=Host(`hello-world.tips-of-mine.com`)"
      - "traefik.http.routers.hello-world-https.entrypoints=https"
      - "traefik.http.routers.hello-world-https.tls=true"
      - "traefik.http.routers.hello-world-https.priority=50"
      - "traefik.http.routers.gitea.service=gitea-https-service"
      # Middleware
      # Service
      # - "traefik.http.services.gitea-https-service.loadbalancer.server.port=3000"
      # - "traefik.http.services.gitea-https-service.loadbalancer.server.scheme=https"
      # - "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.hostname=gitea.traefik.me"
      # - "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.method=foobar"
      # - "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.timeout=10"
      # - "traefik.http.services.gitea-https-service.loadbalancer.healthcheck.interval=30"

@@ -0,0 +1,40 @@
# Use the latest Ubuntu image
FROM ubuntu:latest
# Avoid prompts from apt
ENV DEBIAN_FRONTEND=noninteractive
# Update repositories
RUN apt-get update
# Install SSH server, rsyslog, and other necessary tools
RUN apt-get install -y openssh-server rsyslog iputils-ping sudo nano wget curl
# Install supervisor
RUN apt-get install -y supervisor
# Configure SSH
RUN mkdir /var/run/sshd
RUN echo 'root:password' | chpasswd
RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
# Create supervisord configuration file
RUN echo "[supervisord]" > /etc/supervisor/conf.d/supervisord.conf
RUN echo "nodaemon=true" >> /etc/supervisor/conf.d/supervisord.conf
RUN echo "user=root" >> /etc/supervisor/conf.d/supervisord.conf
RUN echo "[program:sshd]" >> /etc/supervisor/conf.d/supervisord.conf
RUN echo "command=/usr/sbin/sshd -D" >> /etc/supervisor/conf.d/supervisord.conf
RUN echo "[program:rsyslog]" >> /etc/supervisor/conf.d/supervisord.conf
RUN echo "command=/usr/sbin/rsyslogd -n" >> /etc/supervisor/conf.d/supervisord.conf
RUN echo "autorestart=true" >> /etc/supervisor/conf.d/supervisord.conf
# Environment variable for SSH
ENV NOTVISIBLE "in users profile"
RUN echo "export VISIBLE=now" >> /etc/profile
# Expose SSH port
EXPOSE 22
# Run supervisord
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
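For reference, the `RUN echo` lines above assemble a supervisord.conf equivalent to the following (derived directly from the commands, shown here for readability):

[supervisord]
nodaemon=true
user=root
[program:sshd]
command=/usr/sbin/sshd -D
[program:rsyslog]
command=/usr/sbin/rsyslogd -n
autorestart=true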

@@ -0,0 +1,16 @@
version: '3.3'
services:
  ubuntu_ssh:
    networks:
      - shared-network
    build:
      context: .
    ports:
      - "2222:22"
      - "8080:80"
    restart: always
networks:
  shared-network:
    external: true

grafana/config/grafana.ini Normal file

File diff suppressed because it is too large.

@@ -0,0 +1,13 @@
###########
# Grafana #
###########
grafana:
  image: grafana/grafana-enterprise
  container_name: grafana
  hostname: grafana
  ports:
    - 3000:3000
  volumes:
    - grafana-storage:/var/lib/grafana
    - ./config/grafana.ini:/etc/grafana/grafana.ini
  restart: unless-stopped

Binary file not shown.

Binary file not shown.

graylog/config/README.md Normal file

@@ -0,0 +1,11 @@
# Graylog
<br>
I've provided some basic files to make your life easier on initial deployment. Feel free to change, delete, or modify them in any way that is useful to you, but DO NOT skip the next step.
## Pre-Deployment
The Graylog container runs as the graylog user with UID 1100. For these files to be recognized once the container is deployed, you need to modify file ownership before starting the container.
```
sudo chown 1100:1100 *
```
If you have any other files you would like to pass to your Graylog instance, they will also have to go through the same process for Graylog to be able to use them.
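For example, for a hypothetical extra lookup table:
```
sudo chown 1100:1100 /path/to/my_lookup_table.csv
```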

graylog/config/graylog.conf Normal file

@@ -0,0 +1,767 @@
############################
# GRAYLOG CONFIGURATION FILE
############################
#
# This is the Graylog configuration file. The file has to use ISO 8859-1/Latin-1 character encoding.
# Characters that cannot be directly represented in this encoding can be written using Unicode escapes
# as defined in https://docs.oracle.com/javase/specs/jls/se8/html/jls-3.html#jls-3.3, using the \u prefix.
# For example, \u002c.
#
# * Entries are generally expected to be a single line of the form, one of the following:
#
# propertyName=propertyValue
# propertyName:propertyValue
#
# * White space that appears between the property name and property value is ignored,
# so the following are equivalent:
#
# name=Stephen
# name = Stephen
#
# * White space at the beginning of the line is also ignored.
#
# * Lines that start with the comment characters ! or # are ignored. Blank lines are also ignored.
#
# * The property value is generally terminated by the end of the line. White space following the
# property value is not ignored, and is treated as part of the property value.
#
# * A property value can span several lines if each line is terminated by a backslash (\) character.
# For example:
#
# targetCities=\
# Detroit,\
# Chicago,\
# Los Angeles
#
# This is equivalent to targetCities=Detroit,Chicago,Los Angeles (white space at the beginning of lines is ignored).
#
# * The characters newline, carriage return, and tab can be inserted with characters \n, \r, and \t, respectively.
#
# * The backslash character must be escaped as a double backslash. For example:
#
# path=c:\\docs\\doc1
#
# If you are running more than one instances of Graylog server you have to select one of these
# instances as leader. The leader will perform some periodical tasks that non-leaders won't perform.
is_leader = true
# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
# to use an absolute file path here if you are starting Graylog server from init scripts or similar.
node_id_file = /usr/share/graylog/data/config/node-id
# You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
# Generate one by using for example: pwgen -N 1 -s 96
# ATTENTION: This value must be the same on all Graylog nodes in the cluster.
# Changing this value after installation will render all user sessions and encrypted values in the database invalid. (e.g. encrypted access tokens)
password_secret = lNLYcK78TCCDI6xJERGz3cC6u6alzBgrKIbVGWOkgtqm1ZebPm1gYz8FwIuNwQhh8rMYlrzvDsPAeOgKsENnFjctBolYKizM
# The default root user is named 'admin'
#root_username = admin
# You MUST specify a hash password for the root user (which you only need to initially set up the
# system and in case you lose connectivity to your authentication backend)
# This password cannot be changed using the API or via the web interface. If you need to change it,
# modify it in this file.
# Create one by using for example: echo -n yourpassword | shasum -a 256
# and put the resulting hash value into the following line
root_password_sha2 = e3c652f0ba0b4801205814f8b6bc49672c4c74e25b497770bb89b22cdeb4e951
# The email address of the root user.
# Default is empty
#root_email = ""
# The time zone setting of the root user. See http://www.joda.org/joda-time/timezones.html for a list of valid time zones.
# Default is UTC
#root_timezone = UTC
# Set the bin directory here (relative or absolute)
# This directory contains binaries that are used by the Graylog server.
# Default: bin
bin_dir = /usr/share/graylog/bin
# Set the data directory here (relative or absolute)
# This directory is used to store Graylog server state.
data_dir = /usr/share/graylog/data
# Set plugin directory here (relative or absolute)
plugin_dir = /usr/share/graylog/plugin
###############
# HTTP settings
###############
#### HTTP bind address
#
# The network interface used by the Graylog HTTP interface.
#
# This network interface must be accessible by all Graylog nodes in the cluster and by all clients
# using the Graylog web interface.
#
# If the port is omitted, Graylog will use port 9000 by default.
#
# Default: 127.0.0.1:9000
http_bind_address = 0.0.0.0:9000
#http_bind_address = [2001:db8::1]:9000
#### HTTP publish URI
#
# The HTTP URI of this Graylog node which is used to communicate with the other Graylog nodes in the cluster and by all
# clients using the Graylog web interface.
#
# The URI will be published in the cluster discovery APIs, so that other Graylog nodes will be able to find and connect to this Graylog node.
#
# This configuration setting has to be used if this Graylog node is available on another network interface than $http_bind_address,
# for example if the machine has multiple network interfaces or is behind a NAT gateway.
#
# If $http_bind_address contains a wildcard IPv4 address (0.0.0.0), the first non-loopback IPv4 address of this machine will be used.
# This configuration setting *must not* contain a wildcard address!
#
# Default: http://$http_bind_address/
#http_publish_uri = http://192.168.1.1:9000/
#### External Graylog URI
#
# The public URI of Graylog which will be used by the Graylog web interface to communicate with the Graylog REST API.
#
# The external Graylog URI usually has to be specified, if Graylog is running behind a reverse proxy or load-balancer
# and it will be used to generate URLs addressing entities in the Graylog REST API (see $http_bind_address).
#
# When using Graylog Collector, this URI will be used to receive heartbeat messages and must be accessible for all collectors.
#
# This setting can be overridden on a per-request basis with the "X-Graylog-Server-URL" HTTP request header.
#
# Default: $http_publish_uri
#http_external_uri =
#### Enable CORS headers for HTTP interface
#
# This allows browsers to make Cross-Origin requests from any origin.
# This is disabled for security reasons and typically only needed if running graylog
# with a separate server for frontend development.
#
# Default: false
#http_enable_cors = false
#### Enable GZIP support for HTTP interface
#
# This compresses API responses and therefore helps to reduce
# overall round trip times. This is enabled by default. Uncomment the next line to disable it.
#http_enable_gzip = false
# The maximum size of the HTTP request headers in bytes.
#http_max_header_size = 8192
# The size of the thread pool used exclusively for serving the HTTP interface.
#http_thread_pool_size = 64
################
# HTTPS settings
################
#### Enable HTTPS support for the HTTP interface
#
# This secures the communication with the HTTP interface with TLS to prevent request forgery and eavesdropping.
#
# Default: false
#http_enable_tls = true
# The X.509 certificate chain file in PEM format to use for securing the HTTP interface.
#http_tls_cert_file = /path/to/graylog.crt
# The PKCS#8 private key file in PEM format to use for securing the HTTP interface.
#http_tls_key_file = /path/to/graylog.key
# The password to unlock the private key used for securing the HTTP interface.
#http_tls_key_password = secret
# If set to "true", Graylog will periodically investigate indices to figure out which fields are used in which streams.
# It will make field list in Graylog interface show only fields used in selected streams, but can decrease system performance,
# especially on systems with great number of streams and fields.
stream_aware_field_types=false
# Comma separated list of trusted proxies that are allowed to set the client address with X-Forwarded-For
# header. May be subnets, or hosts.
#trusted_proxies = 127.0.0.1/32, 0:0:0:0:0:0:0:1/128
# List of Elasticsearch hosts Graylog should connect to.
# Need to be specified as a comma-separated list of valid URIs for the http ports of your elasticsearch nodes.
# If one or more of your elasticsearch hosts require authentication, include the credentials in each node URI that
# requires authentication.
#
# Default: http://127.0.0.1:9200
elasticsearch_hosts = https://admin:SecretPassword@wazuh.indexer:9200
# Maximum number of attempts to connect to elasticsearch on boot for the version probe.
#
# Default: 0, retry indefinitely with the given delay until a connection could be established
#elasticsearch_version_probe_attempts = 5
# Waiting time in between connection attempts for elasticsearch_version_probe_attempts
#
# Default: 5s
#elasticsearch_version_probe_delay = 5s
# Maximum amount of time to wait for successful connection to Elasticsearch HTTP port.
#
# Default: 10 Seconds
#elasticsearch_connect_timeout = 10s
# Maximum amount of time to wait for reading back a response from an Elasticsearch server.
# (e. g. during search, index creation, or index time-range calculations)
#
# Default: 60 seconds
#elasticsearch_socket_timeout = 60s
# Maximum idle time for an Elasticsearch connection. If this is exceeded, this connection will
# be tore down.
#
# Default: inf
#elasticsearch_idle_timeout = -1s
# Maximum number of total connections to Elasticsearch.
#
# Default: 200
#elasticsearch_max_total_connections = 200
# Maximum number of total connections per Elasticsearch route (normally this means per
# elasticsearch server).
#
# Default: 20
#elasticsearch_max_total_connections_per_route = 20
# Maximum number of times Graylog will retry failed requests to Elasticsearch.
#
# Default: 2
#elasticsearch_max_retries = 2
# Enable automatic Elasticsearch node discovery through Nodes Info,
# see https://www.elastic.co/guide/en/elasticsearch/reference/5.4/cluster-nodes-info.html
#
# WARNING: Automatic node discovery does not work if Elasticsearch requires authentication, e. g. with Shield.
#
# Default: false
#elasticsearch_discovery_enabled = true
# Filter for including/excluding Elasticsearch nodes in discovery according to their custom attributes,
# see https://www.elastic.co/guide/en/elasticsearch/reference/5.4/cluster.html#cluster-nodes
#
# Default: empty
#elasticsearch_discovery_filter = rack:42
# Frequency of the Elasticsearch node discovery.
#
# Default: 30s
# elasticsearch_discovery_frequency = 30s
# Set the default scheme when connecting to Elasticsearch discovered nodes
#
# Default: http (available options: http, https)
#elasticsearch_discovery_default_scheme = http
# Enable payload compression for Elasticsearch requests.
#
# Default: false
#elasticsearch_compression_enabled = true
# Enable use of "Expect: 100-continue" Header for Elasticsearch index requests.
# If this is disabled, Graylog cannot properly handle HTTP 413 Request Entity Too Large errors.
#
# Default: true
#elasticsearch_use_expect_continue = true
# Graylog uses Index Sets to manage settings for groups of indices. The default options for index sets are configurable
# for each index set in Graylog under System > Configuration > Index Set Defaults.
# The following settings are used to initialize in-database defaults on the first Graylog server startup.
# Specify these values if you want the Graylog server and indices to start with specific settings.
# The prefix for the Default Graylog index set.
#
#elasticsearch_index_prefix = graylog
# The name of the index template for the Default Graylog index set.
#
#elasticsearch_template_name = graylog-internal
# The prefix for the for graylog event indices.
#
#default_events_index_prefix = gl-events
# The prefix for graylog system event indices.
#
#default_system_events_index_prefix = gl-system-events
# Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea.
# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
# Elasticsearch documentation: https://www.elastic.co/guide/en/elasticsearch/reference/2.3/analysis.html
# Note that this setting only takes effect on newly created indices.
#
#elasticsearch_analyzer = standard
# How many Elasticsearch shards and replicas should be used per index?
#
#elasticsearch_shards = 1
#elasticsearch_replicas = 0
# Maximum number of attempts to connect to datanode on boot.
# Default: 0, retry indefinitely with the given delay until a connection could be established
#datanode_startup_connection_attempts = 5
# Waiting time in between connection attempts for datanode_startup_connection_attempts
#
# Default: 5s
# datanode_startup_connection_delay = 5s
# Disable the optimization of Elasticsearch indices after index cycling. This may take some load from Elasticsearch
# on heavily used systems with large indices, but it will decrease search performance. The default is to optimize
# cycled indices.
#
#disable_index_optimization = true
# Optimize the index down to <= index_optimization_max_num_segments. A higher number may take some load from Elasticsearch
# on heavily used systems with large indices, but it will decrease search performance. The default is 1.
#
#index_optimization_max_num_segments = 1
# Time interval to trigger a full refresh of the index field types for all indexes. This will query ES for all indexes
# and populate any missing field type information to the database.
#
#index_field_type_periodical_full_refresh_interval = 5m
# You can configure the default strategy used to determine when to rotate the currently active write index.
# Multiple rotation strategies are supported, the default being "time-size-optimizing":
# - "time-size-optimizing" tries to rotate daily, while focussing on optimal sized shards.
# The global default values can be configured with
# "time_size_optimizing_retention_min_lifetime" and "time_size_optimizing_retention_max_lifetime".
# - "count" of messages per index, use elasticsearch_max_docs_per_index below to configure
# - "size" per index, use elasticsearch_max_size_per_index below to configure
# - "time" interval between index rotations, use elasticsearch_max_time_per_index to configure
# A strategy may be disabled by specifying the optional enabled_index_rotation_strategies list and excluding that strategy.
#
#enabled_index_rotation_strategies = count,size,time,time-size-optimizing
# The default index rotation strategy to use.
#rotation_strategy = time-size-optimizing
# (Approximate) maximum number of documents in an Elasticsearch index before a new index
# is being created, also see no_retention and elasticsearch_max_number_of_indices.
# Configure this if you used 'rotation_strategy = count' above.
#
#elasticsearch_max_docs_per_index = 20000000
# (Approximate) maximum size in bytes per Elasticsearch index on disk before a new index is being created, also see
# no_retention and elasticsearch_max_number_of_indices. Default is 30GB.
# Configure this if you used 'rotation_strategy = size' above.
#
#elasticsearch_max_size_per_index = 32212254720
# (Approximate) maximum time before a new Elasticsearch index is being created, also see
# no_retention and elasticsearch_max_number_of_indices. Default is 1 day.
# Configure this if you used 'rotation_strategy = time' above.
# Please note that this rotation period does not look at the time specified in the received messages, but is
# using the real clock value to decide when to rotate the index!
# Specify the time using a duration and a suffix indicating which unit you want:
# 1w = 1 week
# 1d = 1 day
# 12h = 12 hours
# Permitted suffixes are: d for day, h for hour, m for minute, s for second.
#
#elasticsearch_max_time_per_index = 1d
# Controls whether empty indices are rotated. Only applies to the "time" rotation_strategy.
#
#elasticsearch_rotate_empty_index_set=false
# Provides a hard upper limit for the retention period of any index set at configuration time.
#
# This setting is used to validate the value a user chooses for the maximum number of retained indexes, when configuring
# an index set. However, it is only in effect, when a time-based rotation strategy is chosen.
#
# If a rotation strategy other than time-based is selected and/or no value is provided for this setting, no upper limit
# for index retention will be enforced. This is also the default.
# Default: none
#max_index_retention_period = P90d
# Optional upper bound on elasticsearch_max_time_per_index
#
#elasticsearch_max_write_index_age = 1d
# Disable message retention on this node, i. e. disable Elasticsearch index rotation.
#no_retention = false
# Decide what happens with the oldest indices when the maximum number of indices is reached.
# The following strategies are available:
# - delete # Deletes the index completely (Default)
# - close # Closes the index and hides it from the system. Can be re-opened later.
#
#retention_strategy = delete
# This configuration list limits the retention strategies available for user configuration via the UI
# The following strategies can be disabled:
# - delete # Deletes the index completely (Default)
# - close # Closes the index and hides it from the system. Can be re-opened later.
# - none # No operation is performed. The index stays open. (Not recommended)
# WARNING: At least one strategy must be enabled. Be careful when extending this list on existing installations!
disabled_retention_strategies = none,close
# How many indices do you want to keep for the delete and close retention types?
#
#elasticsearch_max_number_of_indices = 20
# Disable checking the version of Elasticsearch for being compatible with this Graylog release.
# WARNING: Using Graylog with unsupported and untested versions of Elasticsearch may lead to data loss!
#
#elasticsearch_disable_version_check = true
# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
# be enabled with care. See also: https://docs.graylog.org/docs/query-language
allow_leading_wildcard_searches = false
# Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and
# should only be enabled after making sure your Elasticsearch cluster has enough memory.
allow_highlighting = false
# Sets field value suggestion mode. The possible values are:
# 1. "off" - field value suggestions are turned off
# 2. "textual_only" - field values are suggested only for textual fields
# 3. "on" (default) - field values are suggested for all field types, even the types where suggestions are inefficient performance-wise
field_value_suggestion_mode = on
# Global timeout for index optimization (force merge) requests.
# Default: 1h
#elasticsearch_index_optimization_timeout = 1h
# Maximum number of concurrently running index optimization (force merge) jobs.
# If you are using lots of different index sets, you might want to increase that number.
# This value should be set lower than elasticsearch_max_total_connections_per_route, otherwise index optimization
# could deplete all the client connections to the search server and block new messages ingestion for prolonged
# periods of time.
# Default: 10
#elasticsearch_index_optimization_jobs = 10
# Mute the logging-output of ES deprecation warnings during REST calls in the ES RestClient
#elasticsearch_mute_deprecation_warnings = true
# Time interval for index range information cleanups. This setting defines how often stale index range information
# is being purged from the database.
# Default: 1h
#index_ranges_cleanup_interval = 1h
# Batch size for the Elasticsearch output. This is the maximum accumulated size of messages that are written to
# Elasticsearch in a batch call. If the configured batch size has not been reached within output_flush_interval seconds,
# everything that is available will be flushed at once.
# Each output buffer processor has to keep an entire batch of messages in memory until it has been sent to
# Elasticsearch, so increasing this value will also increase the memory requirements of the Graylog server.
# Batch sizes can be specified in data units (e.g. bytes, kilobytes, megabytes) or as an absolute number of messages.
# Example: output_batch_size = 10mb
output_batch_size = 500
# Flush interval (in seconds) for the Elasticsearch output. This is the maximum amount of time between two
# batches of messages written to Elasticsearch. It is only effective at all if your minimum number of messages
# for this time period is less than output_batch_size * outputbuffer_processors.
output_flush_interval = 1
# As stream outputs are loaded only on demand, an output which is failing to initialize will be tried over and
# over again. To prevent this, the following configuration options define after how many faults an output will
# not be tried again for an also configurable amount of seconds.
output_fault_count_threshold = 5
output_fault_penalty_seconds = 30
# Number of process buffer processors running in parallel.
# By default, the value will be determined automatically based on the number of CPU cores available to the JVM, using
# the formula (<#cores> * 0.36 + 0.625) rounded to the nearest integer.
# Set this value explicitly to override the dynamically calculated value. Try raising the number if your buffers are
# filling up.
#processbuffer_processors = 5
# Number of output buffer processors running in parallel.
# By default, the value will be determined automatically based on the number of CPU cores available to the JVM, using
# the formula (<#cores> * 0.162 + 0.625) rounded to the nearest integer.
# Set this value explicitly to override the dynamically calculated value. Try raising the number if your buffers are
# filling up.
#outputbuffer_processors = 3
# The size of the thread pool in the output buffer processor.
# Default: 3
#outputbuffer_processor_threads_core_pool_size = 3
# UDP receive buffer size for all message inputs (e. g. SyslogUDPInput).
#udp_recvbuffer_sizes = 1048576
# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
# Possible types:
# - yielding
# Compromise between performance and CPU usage.
# - sleeping
# Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
# - blocking
# High throughput, low latency, higher CPU usage.
# - busy_spinning
# Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
processor_wait_strategy = blocking
# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
# Must be a power of 2. (512, 1024, 2048, ...)
ring_size = 65536
inputbuffer_ring_size = 65536
inputbuffer_wait_strategy = blocking
# Number of input buffer processors running in parallel.
#inputbuffer_processors = 2
# Manually stopped inputs are no longer auto-restarted. To re-enable the previous behavior, set auto_restart_inputs to true.
#auto_restart_inputs = true
# Enable the message journal.
message_journal_enabled = true
# The directory which will be used to store the message journal. The directory must be exclusively used by Graylog and
# must not contain any other files than the ones created by Graylog itself.
#
# ATTENTION:
# If you create a separate partition for the journal files and use a file system creating directories like 'lost+found'
# in the root directory, you need to create a sub directory for your journal.
# Otherwise Graylog will log an error message that the journal is corrupt and Graylog will not start.
# Default: <data_dir>/journal
#message_journal_dir = data/journal
# Journal hold messages before they could be written to Elasticsearch.
# For a maximum of 12 hours or 5 GB whichever happens first.
# During normal operation the journal will be smaller.
#message_journal_max_age = 12h
#message_journal_max_size = 5gb
#message_journal_flush_age = 1m
#message_journal_flush_interval = 1000000
#message_journal_segment_age = 1h
#message_journal_segment_size = 100mb
# Number of threads used exclusively for dispatching internal events. Default is 2.
#async_eventbus_processors = 2
# How many seconds to wait between marking node as DEAD for possible load balancers and starting the actual
# shutdown process. Set to 0 if you have no status checking load balancers in front.
lb_recognition_period_seconds = 3
# Journal usage percentage that triggers requesting throttling for this server node from load balancers. The feature is
# disabled if not set.
#lb_throttle_threshold_percentage = 95
# Every message is matched against the configured streams and it can happen that a stream contains rules which
# take an unusual amount of time to run, for example if its using regular expressions that perform excessive backtracking.
# This will impact the processing of the entire server. To keep such misbehaving stream rules from impacting other
# streams, Graylog limits the execution time for each stream.
# The default values are noted below, the timeout is in milliseconds.
# If the stream matching for one stream took longer than the timeout value, and this happened more than "max_faults" times
# that stream is disabled and a notification is shown in the web interface.
#stream_processing_timeout = 2000
#stream_processing_max_faults = 3
# Since 0.21 the Graylog server supports pluggable output modules. This means a single message can be written to multiple
# outputs. The next setting defines the timeout for a single output module, including the default output module where all
# messages end up.
#
# Time in milliseconds to wait for all message outputs to finish writing a single message.
#output_module_timeout = 10000
# Time in milliseconds after which a detected stale leader node is being rechecked on startup.
#stale_leader_timeout = 2000
# Time in milliseconds which Graylog is waiting for all threads to stop on shutdown.
#shutdown_timeout = 30000
# MongoDB connection string
# See https://docs.mongodb.com/manual/reference/connection-string/ for details
mongodb_uri = mongodb://mongodb:27017/graylog
# Authenticate against the MongoDB server
# '+'-signs in the username or password need to be replaced by '%2B'
#mongodb_uri = mongodb://grayloguser:secret@localhost:27017/graylog
# Use a replica set instead of a single host
#mongodb_uri = mongodb://grayloguser:secret@localhost:27017,localhost:27018,localhost:27019/graylog?replicaSet=rs01
# DNS Seedlist https://docs.mongodb.com/manual/reference/connection-string/#dns-seedlist-connection-format
#mongodb_uri = mongodb+srv://server.example.org/graylog
# Increase this value according to the maximum connections your MongoDB server can handle from a single client
# if you encounter MongoDB connection problems.
mongodb_max_connections = 1000
# Maximum number of attempts to connect to MongoDB on boot for the version probe.
#
# Default: 0, retry indefinitely until a connection can be established
#mongodb_version_probe_attempts = 5
# Email transport
#transport_email_enabled = false
#transport_email_hostname = mail.example.com
#transport_email_port = 587
#transport_email_use_auth = true
#transport_email_auth_username = you@example.com
#transport_email_auth_password = secret
#transport_email_from_email = graylog@example.com
#transport_email_socket_connection_timeout = 10s
#transport_email_socket_timeout = 10s
# Encryption settings
#
# ATTENTION:
# Using SMTP with STARTTLS *and* SMTPS at the same time is *not* possible.
# Use SMTP with STARTTLS, see https://en.wikipedia.org/wiki/Opportunistic_TLS
#transport_email_use_tls = true
# Use SMTP over SSL (SMTPS), see https://en.wikipedia.org/wiki/SMTPS
# This is deprecated on most SMTP services!
#transport_email_use_ssl = false
# Specify and uncomment this if you want to include links to the stream in your stream alert mails.
# This should define the fully qualified base url to your web interface exactly the same way as it is accessed by your users.
#transport_email_web_interface_url = https://graylog.example.com
# The default connect timeout for outgoing HTTP connections.
# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
# Default: 5s
#http_connect_timeout = 5s
# The default read timeout for outgoing HTTP connections.
# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
# Default: 10s
#http_read_timeout = 10s
# The default write timeout for outgoing HTTP connections.
# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds).
# Default: 10s
#http_write_timeout = 10s
# HTTP proxy for outgoing HTTP connections
# ATTENTION: If you configure a proxy, make sure to also configure the "http_non_proxy_hosts" option so internal
# HTTP connections with other nodes does not go through the proxy.
# Examples:
# - http://proxy.example.com:8123
# - http://username:password@proxy.example.com:8123
#http_proxy_uri =
# A list of hosts that should be reached directly, bypassing the configured proxy server.
# This is a list of patterns separated by ",". The patterns may start or end with a "*" for wildcards.
# Any host matching one of these patterns will be reached through a direct connection instead of through a proxy.
# Examples:
# - localhost,127.0.0.1
# - 10.0.*,*.example.com
#http_non_proxy_hosts =
# Connection timeout for a configured LDAP server (e. g. ActiveDirectory) in milliseconds.
#ldap_connection_timeout = 2000
# Disable the use of a native system stats collector (currently OSHI)
#disable_native_system_stats_collector = false
# The default cache time for dashboard widgets. (Default: 10 seconds, minimum: 1 second)
#dashboard_widget_default_cache_time = 10s
# For some cluster-related REST requests, the node must query all other nodes in the cluster. This is the maximum number
# of threads available for this. Increase it, if '/cluster/*' requests take long to complete.
# Should be http_thread_pool_size * average_cluster_size if you have a high number of concurrent users.
#proxied_requests_thread_pool_size = 64
# The default HTTP call timeout for cluster-related REST requests. This timeout might be overriden for some
# resources in code or other configuration values. (some cluster metrics resources use a lower timeout)
#proxied_requests_default_call_timeout = 5s
# The server is writing processing status information to the database on a regular basis. This setting controls how
# often the data is written to the database.
# Default: 1s (cannot be less than 1s)
#processing_status_persist_interval = 1s
# Configures the threshold for detecting outdated processing status records. Any records that haven't been updated
# in the configured threshold will be ignored.
# Default: 1m (one minute)
#processing_status_update_threshold = 1m
# Configures the journal write rate threshold for selecting processing status records. Any records that have a lower
# one minute rate than the configured value might be ignored. (dependent on number of messages in the journal)
# Default: 1
#processing_status_journal_write_rate_threshold = 1
# Automatically load content packs in "content_packs_dir" on the first start of Graylog.
#content_packs_loader_enabled = false
# The directory which contains content packs which should be loaded on the first start of Graylog.
# Default: <data_dir>/contentpacks
#content_packs_dir = data/contentpacks
# A comma-separated list of content packs (files in "content_packs_dir") which should be applied on
# the first start of Graylog.
# Default: empty
#content_packs_auto_install = grok-patterns.json
# The allowed TLS protocols for system wide TLS enabled servers. (e.g. message inputs, http interface)
# Setting this to an empty value, leaves it up to system libraries and the used JDK to chose a default.
# Default: TLSv1.2,TLSv1.3 (might be automatically adjusted to protocols supported by the JDK)
#enabled_tls_protocols = TLSv1.2,TLSv1.3
# Enable Prometheus exporter HTTP server.
# Default: false
#prometheus_exporter_enabled = false
# IP address and port for the Prometheus exporter HTTP server.
# Default: 127.0.0.1:9833
#prometheus_exporter_bind_address = 127.0.0.1:9833
# Path to the Prometheus exporter core mapping file. If this option is enabled, the full built-in core mapping is
# replaced with the mappings in this file.
# This file is monitored for changes and updates will be applied at runtime.
# Default: none
#prometheus_exporter_mapping_file_path_core = prometheus-exporter-mapping-core.yml
# Path to the Prometheus exporter custom mapping file. If this option is enabled, the mappings in this file are
# configured in addition to the built-in core mappings. The mappings in this file cannot overwrite any core mappings.
# This file is monitored for changes and updates will be applied at runtime.
# Default: none
#prometheus_exporter_mapping_file_path_custom = prometheus-exporter-mapping-custom.yml
# Configures the refresh interval for the monitored Prometheus exporter mapping files.
# Default: 60s
#prometheus_exporter_mapping_file_refresh_interval = 60s
# Optional allowed paths for Graylog data files. If provided, certain operations in Graylog will only be permitted
# if the data file(s) are located in the specified paths (for example, with the CSV File lookup adapter).
# All subdirectories of indicated paths are allowed by default. This Provides an additional layer of security,
# and allows administrators to control where in the file system Graylog users can select files from.
#allowed_auxiliary_paths = /etc/graylog/data-files,/etc/custom-allowed-path
# Do not perform any preflight checks when starting Graylog
# Default: false
#skip_preflight_checks = false
# Ignore any exceptions encountered when running migrations
# Use with caution - skipping failing migrations may result in an inconsistent DB state.
# Default: false
#ignore_migration_failures = false
# Comma-separated list of notification types which should not emit a system event.
# Default: SIDECAR_STATUS_UNKNOWN which would create a new event whenever the status of a sidecar becomes "Unknown"
#system_event_excluded_types = SIDECAR_STATUS_UNKNOWN
# RSS settings for content stream
#content_stream_rss_url = https://www.graylog.org/post
#content_stream_refresh_interval = 7d
# Maximum value that can be set for an event limit.
# Default: 1000
#event_definition_max_event_limit = 1000
# Optional limits on scheduling concurrency by job type. No more than the specified number of worker
# threads will be executing jobs of the specified type across the entire cluster.
# Default: no limitation
# Note: Monitor job queue metrics to avoid excessive backlog of unprocessed jobs when using this setting!
# Available job types in Graylog Open:
# check-for-cert-renewal-execution-v1
# event-processor-execution-v1
# notification-execution-v1
#job_scheduler_concurrency_limits = event-processor-execution-v1:2,notification-execution-v1:2

graylog/config/log4j2.xml Normal file

@@ -0,0 +1,45 @@
<?xml version="1.0" encoding="UTF-8"?>
<Configuration packages="org.graylog2.log4j" shutdownHook="disable">
<Appenders>
<Console name="STDOUT" target="SYSTEM_OUT">
<PatternLayout pattern="%d %-5p: %c - %m%n"/>
</Console>
<!-- Internal Graylog log appender. Please do not disable. This makes internal log messages available via REST calls. -->
<Memory name="graylog-internal-logs" bufferSizeBytes="20MB">
<PatternLayout pattern="%d{yyyy-MM-dd'T'HH:mm:ss.SSSXXX} %-5p [%c{1}] %m%n"/>
</Memory>
</Appenders>
<Loggers>
<!-- Application Loggers -->
<Logger name="org.graylog2" level="info"/>
<Logger name="com.github.joschi.jadconfig" level="warn"/>
<!-- Prevent DEBUG message about Lucene Expressions not found. -->
<Logger name="org.elasticsearch.script" level="warn"/>
<!-- Disable messages from the version check -->
<Logger name="org.graylog2.periodical.VersionCheckThread" level="off"/>
<!-- Silence chatty natty -->
<Logger name="com.joestelmach.natty.Parser" level="warn"/>
<!-- Silence Kafka log chatter -->
<Logger name="org.graylog.shaded.kafka09.log.Log" level="warn"/>
<Logger name="org.graylog.shaded.kafka09.log.OffsetIndex" level="warn"/>
<Logger name="org.apache.kafka.clients.consumer.ConsumerConfig" level="warn"/>
<Logger name="org.apache.kafka.clients.producer.ProducerConfig" level="warn"/>
<!-- Silence useless session validation messages -->
<Logger name="org.apache.shiro.session.mgt.AbstractValidatingSessionManager" level="warn"/>
<!-- Silence Azure SDK messages -->
<Logger name="com.azure" level="warn"/>
<Logger name="reactor.core.publisher.Operators" level="off"/>
<Logger name="com.azure.messaging.eventhubs.PartitionPumpManager" level="off"/>
<Logger name="com.azure.core.amqp.implementation.ReactorReceiver" level="off"/>
<Logger name="com.azure.core.amqp.implementation.ReactorDispatcher" level="off"/>
<!-- Silence Apache Hadoop/Avro log chatter -->
<Logger name="org.apache.hadoop" level="warn"/>
<Logger name="org.apache.parquet.hadoop.InternalParquetRecordReader" level="warn"/>
<Logger name="org.apache.avro.Schema" level="error"/>
<Root level="warn">
<AppenderRef ref="STDOUT"/>
<AppenderRef ref="graylog-internal-logs"/>
</Root>
</Loggers>
</Configuration>

@@ -0,0 +1,22 @@
"port","common"
"21", "yes"
"25", "yes"
"22", "yes"
"53", "yes"
"80", "yes"
"135", "yes"
"389", "yes"
"443", "yes"
"445", "yes"
"993", "yes"
"995", "yes"
"1514", "yes"
"1515", "yes"
"3389", "yes"
"5000", "yes"
"5223", "yes"
"8000", "yes"
"8002", "yes"
"8080", "yes"
"8083", "yes"
"8443", "yes"

@@ -0,0 +1,12 @@
"vendor","approved"
"Microsoft Corporation", "yes"
"Sysinternals - www.sysinternals.com", "yes"
"The Git Development Community", "yes"
"Vivaldi Technologies AS", "yes"
"GitHub, Inc.", "yes"
"GitHub", "yes"
"Brave Software, Inc.", "yes"
"Node.js", "yes"
"Avira Operations GmbH &amp; Co. KG", "yes"
"BraveSoftware Inc.", "yes"
"Sysinternals", "yes"

@@ -0,0 +1,46 @@
###########
# Graylog #
###########
# MongoDB: https://hub.docker.com/_/mongo/
mongodb:
  image: mongo:6.0.14
  container_name: mongodb
  hostname: mongodb
  volumes:
    - mongodb_data:/data/db
    - mongodb_config:/data/configdb
  restart: unless-stopped
# Graylog: https://hub.docker.com/r/graylog/graylog/
graylog:
  image: graylog/graylog:6.0.6
  container_name: graylog
  hostname: graylog
  environment:
    GRAYLOG_SERVER_JAVA_OPTS: "-Dlog4j2.formatMsgNoLookups=true -Djavax.net.ssl.trustStore=/usr/share/graylog/data/config/cacerts -Djavax.net.ssl.trustStorePassword=changeit"
  volumes:
    - graylog_data:/usr/share/graylog/data
    - ./graylog/graylog.conf:/usr/share/graylog/data/config/graylog.conf
    - ./graylog/log4j2.xml:/usr/share/graylog/data/config/log4j2.xml
    - ./graylog/root-ca.pem:/usr/share/graylog/data/config/root-ca.pem
    - ./graylog/GeoLite2-City.mmdb:/usr/share/graylog/data/config/GeoLite2-City.mmdb
    - ./graylog/GeoLite2-ASN.mmdb:/usr/share/graylog/data/config/GeoLite2-ASN.mmdb
    - ./config/network_ports.csv:/etc/graylog/network_ports.csv
    - ./config/software_vendors.csv:/etc/graylog/software_vendors.csv
  ports:
    # Graylog web interface and REST API
    - 9000:9000
    # Syslog TCP (Disabled for compatibility with the Wazuh Manager)
    # - 1514:1514
    # Syslog UDP SophosFW
    - 514:514/udp
    # Syslog UDP SophosFW
    - 2514:2514/udp
    # GELF TCP
    - 12201:12201
    # GELF UDP
    - 12201:12201/udp
  depends_on:
    - mongodb
    - wazuh.indexer
  restart: unless-stopped

@@ -1,19 +0,0 @@
#!/bin/sh
# Install Docker and the Docker Compose plugin on Debian
# Add Docker's official GPG key:
apt-get update
apt-get install ca-certificates curl
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc
chmod a+r /etc/apt/keyrings/docker.asc
# Add the repository to Apt sources:
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \
$(. /etc/os-release && echo "VERSION_CODENAME") stable" | \
tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

iris-web/.bumpversion.cfg Normal file

@@ -0,0 +1,30 @@
[bumpversion]
current_version = 2.3.3
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(-(?P<release>.*)-(?P<build>\d+))?
serialize =
{major}.{minor}.{patch}-{release}-{build}
{major}.{minor}.{patch}
[bumpversion:part:release]
first_value = regular
optional_value = regular
values =
alpha
beta
rc
test
regular
[bumpversion:file:source/app/configuration.py]
search = IRIS_VERSION = "v{current_version}"
replace = IRIS_VERSION = "v{new_version}"
[bumpversion:file:README.md]
search = v{current_version}
replace = v{new_version}
[bumpversion:file:docker-compose.yml]
search = :v{current_version}
replace = :v{new_version}

iris-web/.deepsource.toml Normal file

@@ -0,0 +1,15 @@
version = 1
test_patterns = ["source/tests/**"]
exclude_patterns = [
"source/dependencies/**",
"source/app/templates/**",
"source/static/assets/**"
]
[[analyzers]]
name = "python"
[analyzers.meta]
runtime_version = "3.x.x"

iris-web/.env Normal file

@@ -0,0 +1,61 @@
# -- NGINX
SERVER_NAME=iris.app.dev
KEY_FILENAME=iris_dev_key.pem
CERT_FILENAME=iris_dev_cert.pem
# -- DATABASE
POSTGRES_USER=postgres
POSTGRES_PASSWORD=__MUST_BE_CHANGED__
POSTGRES_ADMIN_USER=raptor
POSTGRES_ADMIN_PASSWORD=__MUST_BE_CHANGED__
POSTGRES_DB=iris_db
POSTGRES_SERVER=db
POSTGRES_PORT=5432
# -- IRIS
DOCKERIZED=1
IRIS_SECRET_KEY=AVerySuperSecretKey-SoNotThisOne
IRIS_SECURITY_PASSWORD_SALT=ARandomSalt-NotThisOneEither
IRIS_UPSTREAM_SERVER=app
IRIS_UPSTREAM_PORT=8000
# -- WORKER
CELERY_BROKER=amqp://rabbitmq
# -- AUTH
IRIS_AUTHENTICATION_TYPE=local
## optional
IRIS_ADM_PASSWORD=MySuperAdminPassword!
#IRIS_ADM_API_KEY=B8BA5D730210B50F41C06941582D7965D57319D5685440587F98DFDC45A01594
#IRIS_ADM_EMAIL=admin@localhost
IRIS_ADM_USERNAME=administrator
# requests the just-in-time creation of users with LDAP authentication (see https://github.com/dfir-iris/iris-web/issues/203)
#IRIS_AUTHENTICATION_CREATE_USER_IF_NOT_EXIST=True
# the group to which newly created users are initially added, default value is Analysts
#IRIS_NEW_USERS_DEFAULT_GROUP=
# -- FOR LDAP AUTHENTICATION
#IRIS_AUTHENTICATION_TYPE=ldap
#LDAP_SERVER=127.0.0.1
#LDAP_AUTHENTICATION_TYPE=SIMPLE
#LDAP_PORT=3890
#LDAP_USER_PREFIX=uid=
#LDAP_USER_SUFFIX=ou=people,dc=example,dc=com
#LDAP_USE_SSL=False
# base DN in which to search for users
#LDAP_SEARCH_DN=ou=users,dc=example,dc=org
# unique identifier to search the user
#LDAP_ATTRIBUTE_IDENTIFIER=cn
# name of the attribute to retrieve the user's display name
#LDAP_ATTRIBUTE_DISPLAY_NAME=displayName
# name of the attribute to retrieve the user's email address
#LDAP_ATTRIBUTE_MAIL=mail
#LDAP_VALIDATE_CERTIFICATE=True
#LDAP_TLS_VERSION=1.2
#LDAP_SERVER_CERTIFICATE=
#LDAP_PRIVATE_KEY=
#LDAP_PRIVATE_KEY_PASSWORD=
# -- LISTENING PORT
INTERFACE_HTTPS_PORT=8443

iris-web/.github/FUNDING.yml vendored Normal file

@@ -0,0 +1,2 @@
github: [whikernel]
open_collective: dfir-iris

@@ -0,0 +1,38 @@
---
name: Bug report
about: Create a report to help us improve
title: "[BUG] "
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.

@@ -0,0 +1,22 @@
---
name: Feature request
about: Suggest an idea for this project
title: "[FR]"
labels: enhancement
assignees: ''
---
*Please ensure your feature request is not already on the roadmap or associated with an issue. This can be checked [here](https://github.com/orgs/dfir-iris/projects/1/views/4).*
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

iris-web/.gitignore vendored Normal file

@@ -0,0 +1,25 @@
flask/
*.pyc
dev
node_modules
source/app/database.db
source/app/build
yarn.lock
yarn-error.log
*.psd
test/
source/app/config.priv.ini
source/app/config.test.ini
.idea/
libesedb-*/
orcparser/
.DS_Store
.vscode/
*.code-workspace
nohup.out
celerybeat-schedule.db
.scannerwork/
source/app/static/assets/dist/
source/app/static/assets/img/graph/*
!source/app/static/assets/img/graph/*.png
run_nv_test.py

iris-web/CODESTYLE.md Normal file

@@ -0,0 +1,27 @@
# Coding style
If you wish to develop in DFIR-IRIS, please make sure to read the following tips.
## Commits
Try to follow the repository convention:
- If it's not linked to an issue, use the format `[action] Commit message`, with `action` being a three-letter action related to the commit, e.g. `ADD` for additions, `DEL` for deletions, `IMP` for improvements, etc.
- If it's linked to an issue, prepend the issue ID, i.e. `[#issue_id][action] Commit message`. An example of each form is shown below.
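For example (the issue number is hypothetical):
```
[ADD] Add CSV export to the case timeline
[#42][IMP] Improve loading time of the IOC list
```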
## Code
The code should be pretty easy to apprehend. It's not perfect but it will improve over time.
Some documentation about development is available [here](https://dfir-iris.github.io/development/).
Here are the main points:
- **Routes**: these describe how URIs are handled. Routes are split by category, matching the UI menu.
They are defined in `source > app > blueprints`. A route providing a web page (i.e. non-API) relies on templates.
Each page template is present in the `templates` directory of the target route.
- **Database requests**: we are trying to split the DB code from the routes code. This is partially done and will improve over time. The DB code is provided in `source > app > datamgmt`.
- **HTML pages**: as specified above, each page template lives in the `templates` directory of the corresponding route. These templates are based on layouts, which are defined in `source > app > templates`.
- **Static contents**: images, JS and CSS are defined in `source > app > static > assets`.
If your code involves database changes, please create an Alembic migration script.
```
alembic -c app/alembic.ini revision -m "<What's changed>"
```
Then modify the generated script in `source > app > alembic` so that the migration can run automatically.
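To apply pending migrations manually, the same configuration file can be reused (a sketch, run from the `source` directory):
```
alembic -c app/alembic.ini upgrade head
```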

iris-web/CONFIGURATION.md

@ -0,0 +1,47 @@
# IRIS Configuration
In order to connect to the database and other systems, certain configuration values are needed. This document lists all available configuration options.
## How to set configuration variables
There are three different ways to set configuration variables:
1. Azure Key Vault
2. Environment Variables
3. The config.ini file
### Azure Key Vault
The first option that is checked is the Azure Key Vault. In order to use it, `AZURE_KEY_VAULT_NAME` must be specified.
Since Azure Key Vault does not support underscores in secret names, replace them with hyphens. For example: `POSTGRES_USER` becomes `POSTGRES-USER`.
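As a sketch, such a secret could be created with the Azure CLI (the vault name and value are hypothetical):
```
az keyvault secret set --vault-name my-iris-vault --name POSTGRES-USER --value iris
```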
### Environment Variables
The second option is using environment variables, which gives the most flexibility.
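For example, a minimal sketch with hypothetical values (any variable from the tables below can be set this way before starting IRIS):
```
export POSTGRES_USER=iris
export POSTGRES_PASSWORD='ChangeMe'
export POSTGRES_HOST=db.internal.example
export POSTGRES_PORT=5432
```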
### Config.ini
The last and fallback option is the config.ini. Within the project there is a `config.model.ini`, which is not used itself but shows what the file should look like. If the application is started with the environment variable `DOCKERIZED=1`, then `config.docker.ini` is loaded; otherwise `config.priv.ini` is loaded.
## Environment variable only
A few configs are environment variables only:
- `IRIS_WORKER` - Specifies if the process is the worker
- `DOCKERIZED` - Should be set if running in docker, also loads the other config.ini
## Configuration options
## POSTGRES
The POSTGRES section has the following configurations:
- `POSTGRES_USER` - The user IRIS uses
- `POSTGRES_PASSWORD` - The password for the user IRIS uses
- `POSTGRES_ADMIN_USER` - The user IRIS uses for table migrations
- `POSTGRES_ADMIN_PASSWORD` - The password for the user IRIS uses for table migrations
- `POSTGRES_HOST` - The server address
- `POSTGRES_PORT` - The server port
## CELERY
- `CELERY_BROKER` - The broker address used by [Celery](https://github.com/celery/celery)
## IRIS
- `IRIS_SECRET_KEY` - The secret key used by Flask.
- `IRIS_SECURITY_PASSWORD_SALT` - The salt used when hashing user passwords

iris-web/CONTRIBUTING.md

@ -0,0 +1,20 @@
# Contributing to DFIR-IRIS
*This applies to any repository present in the DFIR-IRIS organisation.*
We are an open project, and we gladly accept contributions of any kind. The two main ways to contribute are by
creating issues or submitting pull requests.
## Issues
Please try to follow the templates that are provided for feature requests and bugs. Also ensure that your feature or issue
is not already mentioned in the [roadmap](https://github.com/orgs/dfir-iris/projects/1/views/4). If an existing issue is similar
but does not fit perfectly with what you have in mind, you can add comments to it, and they will be taken into account.
If you want to report a security issue, please read the [security page](https://github.com/dfir-iris/iris-web/SECURITY.md).
## Pull requests
Please make sure to follow the [code guideline](https://github.com/dfir-iris/iris-web/CODESTYLE.md) when writing your code.
Pull requests must be submitted against the `develop` branch of the project. Ensure that your branch is
up to date with it before submitting.
## Others
If you have any ideas not directly linked to the code itself, you can contact us directly by [email](mailto:contact@dfir-iris.org).

iris-web/LICENSE.txt

@ -0,0 +1,165 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

iris-web/README.md

@ -0,0 +1,118 @@
<p align="center">
<img src="source/app/static/assets/img/logo.ico" />
</p>
<p align="center">
Incident Response Investigation System
<br>
<i>Current Version v2.3.3</i>
<br>
<a href="https://v200.beta.dfir-iris.org">Online Demonstration</a>
</p>
# IRIS
[![License: LGPL v3](https://img.shields.io/badge/License-LGPL_v3-blue.svg)](./LICENSE.txt)
Iris is a collaborative web platform that helps incident responders share technical details during investigations.
![demo_timeline](img/timeline_speed.gif)
## Table of contents
- [Getting Started](#getting-started)
- [Run IrisWeb](#run-irisweb)
- [Configuration](#configuration)
- [Versioning](#versioning)
- [Showcase](#showcase)
- [Documentation](#documentation)
- [Upgrades](#upgrades)
- [API](#api)
- [Help](#help)
- [Considerations](#considerations)
- [License](#license)
## Getting started
Iris is divided into two main parts, IrisWeb and IrisModules.
- IrisWeb is the web application which contains the core of
Iris (web interface, database management, etc.).
- IrisModules are extensions of the core that allow third parties to process
data via Iris (e.g. enriching IOCs with MISP and VT, or uploading and injecting EVTX into Splunk).
IrisWeb can work without any modules, though default ones are preinstalled. Head to ``Manage > Modules`` in the UI
to configure and enable them.
### Running Iris
To ease the installation and upgrades, Iris is shipped in Docker containers. Thanks to Docker compose,
it can be ready in a few minutes.
``` bash
# Clone the iris-web repository
git clone https://github.com/dfir-iris/iris-web.git
cd iris-web
# Checkout to the last tagged version
git checkout v2.3.3
# Copy the environment file
cp .env.model .env
# Build the dockers
docker-compose build
# Run IRIS
docker-compose up
```
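If you prefer to keep the stack running in the background, detached mode works as usual:
``` bash
# Start the containers in the background and follow the logs
docker-compose up -d
docker-compose logs -f
```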
Iris will then be available on the host interface, port 443, protocol HTTPS - ``https://<your_instance_ip>``.
By default, an ``administrator`` account is created. The password is printed to stdout the very first time Iris is started, and never again after that.
``WARNING :: post_init :: create_safe_admin :: >>>`` can be searched in the logs of the ``webapp`` container to find the password, as shown below.
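For example, with the stock Docker Compose setup (the service name is assumed to be ``app``; adjust to your setup):
``` bash
# Print only the line containing the generated admin password
docker-compose logs app | grep "create_safe_admin"
```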
The initial password can be set via the [configuration](https://docs.dfir-iris.org/operations/configuration/).
Iris is split into five Docker services, each with a different role.
- ``app``: The core, including the web server, DB management, module management, etc.
- ``db``: A PostgreSQL database
- ``RabbitMQ``: A RabbitMQ engine to handle jobs queuing and processing
- ``worker``: Jobs handler relying on RabbitMQ
- ``nginx``: A NGINX reverse proxy
### Configuration
There are three different options for configuring the settings and credentials: Azure Key Vault, environment variables and configuration files. This is also the order of priority: if a setting is not found in one source, the next one is checked.
For all available configuration options see [configuration](https://docs.dfir-iris.org/operations/configuration/).
## Versioning
Starting from version 2.0.0, Iris is following the [Semantic Versioning 2.0](https://semver.org/) guidelines.
The code ready for production is always tagged with a version number.
``alpha`` and ``beta`` versions are **not** production-ready.
Do not use the ``master`` branch in production.
## Showcase
You can directly try Iris on our [demo instance](https://v200.beta.dfir-iris.org).
You can also head to the [tutorials](https://docs.dfir-iris.org/operations/tutorials/); we've put some videos there.
## Documentation
Comprehensive documentation is available on [docs.dfir-iris.org](https://docs.dfir-iris.org).
### Upgrades
Please read the release notes when upgrading versions. Most of the time migrations are handled automatically, but some
changes might require manual work depending on the version.
### API
The API reference is available in the [documentation](https://docs.dfir-iris.org/operations/api/#references) or [documentation repository](https://github.com/dfir-iris/iris-doc-src).
## Help
You can reach us on [Discord](https://discord.gg/76tM6QUJza) or by [mail](mailto:contact@dfir-iris.org) if you have any questions, issues or ideas!
We are also on [Twitter](https://twitter.com/dfir_iris) and [Matrix](https://matrix.to/#/#dfir-iris:matrix.org).
## Considerations
Iris is still in its early stage. It can already be used in production, but please set up database backups and DO NOT expose the interface on the Internet. We highly recommend a private, dedicated and secured network.
## License
The contents of this repository are available under the [LGPL3 license](LICENSE.txt).
## Sponsoring
Special thanks to Deutsche Telekom Security GmbH for sponsoring us!

iris-web/SECURITY.md

@ -0,0 +1,6 @@
## Reporting security vulnerabilities
*This applies to any repository present in the DFIR-IRIS organisation.*
In case a security vulnerability is found, we kindly ask you to report it by [email](mailto:report@dfir-iris.org) instead of creating an issue.
This gives us time to patch and create a new release.
We are fully transparent, and any reported security issue will be published in the security advisory section once patched.


@ -0,0 +1,32 @@
-----BEGIN CERTIFICATE-----
MIIFhTCCA22gAwIBAgIUVRU9xRSCLLph9QYUqpRs4DW3634wDQYJKoZIhvcNAQEL
BQAwUjELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxEjAQBgNVBAoM
CURGSVItSVJJUzEaMBgGA1UEAwwRREZJUi1JUklTLVJvb3QtQ0EwHhcNMjIwMTE4
MTAxNjM3WhcNMzIwMTE2MTAxNjM3WjBSMQswCQYDVQQGEwJGUjETMBEGA1UECAwK
U29tZS1TdGF0ZTESMBAGA1UECgwJREZJUi1JUklTMRowGAYDVQQDDBFERklSLUlS
SVMtUm9vdC1DQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANXa9X5Y
Glp4xy6ZFpgB8Db7t9a2qih+vxQ6J2RPHrVIH3LRJKb6Qt813toHG1V+T8a9msQ6
WutlR/hwcmJiP9kEqY1hAQ8fHYMimGt1aHXccTItKkoFRbhlOofwEkRLOk1GUUWC
9wjeYCzJ+ODaQljwC+uYg97lfeNILxXhmMJuhozXG+0LQch27CYUO5HZfJNYuVsh
lrGgrJu+o7aumonBcfyDHkWL9n4aZ4xPKIkzcIbqnfenRLsH8ZQF+PqcIUi9lDMo
B3zJg+31+L3G1/HEVkqO6OW1tM3Az66ihh6PG2+ETj07wMHP5TmbYerBDmk6L5Ft
By0tg+tDQ2oqd2iMXCegR2KeFgdJTjQqR+p+gccAJIYaM1vLZVgE+0phMKtm0ZiA
8E6E73HzmOLV3UO+YySF/E2esv/pB31TxE1Uwt35U11zRWS6tMqfdCE2T48wnHTu
5oUUgA2FO7gbGRUWz71LzdaF9/6tOAR3OY4d1iZYTiQBfeUSvcAQk4vPr+FyDDvM
JMQ5WwAG+VICQa1bn37RnJ3+5vUeYSFlEBRFABZYowzzUiQK1EFTUsA3Ou3i9AEp
jiYYxa4ctQA7zbTWYR8jmpMQ8DKxgZtHtio+3HKcGCndYmD8uV52weMXJNQXqV0c
v9Sr0e1nFMV2okYbhZ3KfI+2lsH1OL9hwuR1AgMBAAGjUzBRMB0GA1UdDgQWBBTF
rVzkgqJCIH/26xCCuQRdCA5L9zAfBgNVHSMEGDAWgBTFrVzkgqJCIH/26xCCuQRd
CA5L9zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBZs0BaFArd
3G42S5wKe/wM+llSLZHiAgCPeVRaGEe4M6mcG7hGOj0IrtoA4j1GvCMch6j65Dw3
wkQB2NSwEesSpnteSYFQ9UJmw/gzw+WSC/SVCRrEJ1/dKbdWQ9VjgwVRgekx2pyb
XXYBl9HTmNXJ55hnJ+3s9+iGJkiQu/zq9d5Yg0AP/4HjH4e6XgRZmN+7MsyCatt1
yJ0ekCJcg5MN/9mHKpYmfdDEfpP8b2/N5WmSqbKhEU43aNdQ68QiCwLDwj3piOwu
tPrj4CnTaVWjRd2AzzrhpZgvRwx7VZpCcwqk2pnNwiklyAsZQo6iEbzxChLJSnIj
Pw2a3wrPmdRgchbLYOr/oMcMmbxWlC7qQQiuzhMXncPJHAghe28GxKVMETArWkj6
L4+QsH7jaC45nTPRXFUmUbL7+Uz/8O17SswL4Thdp8/ZK5uqhx199whivrdhnzGh
MrSKiiDWst6hMo5HfJkPE4/UAXRkfbWAvj6WqEJX3Z1OgiHAvXtU1FPYhB7X9mla
vslWFT0W818GsP71a9TlwvSjVYto4U+kQXD0sk4ewmJuSz9oK2CwJh/gR5WJG4SC
YaVzHS7+8UtJ3m4H4+NQJIo1jHdzIy5nv3v/hDZXsWKE0O4ClDdt/bzqv8f21G9O
aVTSz0kMMqog8lQgdymRLxJ8BU1P9rszoA==
-----END CERTIFICATE-----


@ -0,0 +1 @@
52366C4851D9245D91B07E288C197FE8DA976FA9


@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEA1dr1flgaWnjHLpkWmAHwNvu31raqKH6/FDonZE8etUgfctEk
pvpC3zXe2gcbVX5Pxr2axDpa62VH+HByYmI/2QSpjWEBDx8dgyKYa3VoddxxMi0q
SgVFuGU6h/ASREs6TUZRRYL3CN5gLMn44NpCWPAL65iD3uV940gvFeGYwm6GjNcb
7QtByHbsJhQ7kdl8k1i5WyGWsaCsm76jtq6aicFx/IMeRYv2fhpnjE8oiTNwhuqd
96dEuwfxlAX4+pwhSL2UMygHfMmD7fX4vcbX8cRWSo7o5bW0zcDPrqKGHo8bb4RO
PTvAwc/lOZth6sEOaTovkW0HLS2D60NDaip3aIxcJ6BHYp4WB0lONCpH6n6BxwAk
hhozW8tlWAT7SmEwq2bRmIDwToTvcfOY4tXdQ75jJIX8TZ6y/+kHfVPETVTC3flT
XXNFZLq0yp90ITZPjzCcdO7mhRSADYU7uBsZFRbPvUvN1oX3/q04BHc5jh3WJlhO
JAF95RK9wBCTi8+v4XIMO8wkxDlbAAb5UgJBrVufftGcnf7m9R5hIWUQFEUAFlij
DPNSJArUQVNSwDc67eL0ASmOJhjFrhy1ADvNtNZhHyOakxDwMrGBm0e2Kj7ccpwY
Kd1iYPy5XnbB4xck1BepXRy/1KvR7WcUxXaiRhuFncp8j7aWwfU4v2HC5HUCAwEA
AQKCAgEAn7DTZLcRZsGNquQyFOxNniE1VCYuxfJvaQFL7QGP4rqqkShPgEicquUl
NhXceWjK1ZM8AI+62NBWf9Qn9gN7vehXW/U1vz7y4LtyqbuQd4JXHKrRS1jIiTs+
C8hfO5QZQx7hDVEQexTjKE7hg7Y3mQYXQKQwxL8F1DRQxLwjP/0ciAsRFV80jicP
jBfLq8uF1NmJ/90DFDzw55Ph2EZlq7xCC9c1QaWUOPIqpIFvuZQp0PVdZFMJZcg2
wtv64di4mgLGqbSYcrxfwc/NIJldI3IDJcW1b+LR0lrKOpOuJx+h0xIvAlaeR5ug
hfXbllr3EeibILMTis7UFVey/ZIcex0iq+LDNQCmE9kuxAnf8z9NFAxeXqUu1nt7
26ik/Ay1nBN0CosCIvp86EZipi4nnOLjw31nUCAxbUIyvidbg4+tfHFtAXLBc/Cb
8qljjzKqnKN3fbKkxIZGOeB/HI0Yxm18zhdiNbNONX/ZZ1RNJ1NHJYNEjuACDSWC
PdD9rE5chswA+3Zp+Glz/cEWZdDj8GaHn5Q7az9vVwYykxiRAhM2ew0kIUkG1SGD
z25n0zutV9bZ9evK7PonKDz3xHNodDiWI8uP55w0PAK+6P3CvqFxs4czofL3xhbR
40C9gq5icpN/gSe69HxfTAXFANPxGjotAlhftGxH/AX+mwJefYECggEBAPqDuUGj
4RTb3oAzECJCB25ZfTuelaW19MclvF/wW2SjqOpygablZV1gsWVVYD1hLkNk3YRA
KCcPr13ZCVbvwQZ3GHOJ/zjV1z3FOPdFko2gOgnX9N5rIhYXBxuneoD58rGQQAQs
Q7BMPPqAg+aotD94a+HWBhEHWF0CsbQwWTgdlDe/9u0na4TDrOKRSFbVO28IvJFO
uixVmg+mexPZEHet40F/migM+42lAwMxa1zGMaX59HzLpNMTxxV5om1u7aIjMbVt
gZxT987JBCMAGXmhOd7CUu/oPfnTJD/QdWTv2DeRpGY8aGBMnTRSD6NTWInYwqvA
+m951CWjG1fnqMUCggEBANqJvUyzY41VS7OtGmx12QX3KTZO3w8HrD5P3uoESVGk
ThHdjntQa/s1eV0w6nMqxB+k7ZSBVIoYe96jeJyETvsFjxqVSFexoN/LPM0neKUu
WPruIj+8xPo7Xfzkcu/1CXEVtJpgEF+S6kUxjwkWigol5djwcTHWLcHRWBpE2bR0
DKGXsaZe/xwqOppIjV8qcU3wkN7Kih1xXB3cDHw8KK4hXE2T4jXpMccTAur5k7Eo
mrH/aZ9abup2tIbG2gydnbyizVrSkFnmquZDT3E7tTRBMMM9ik9Hg9i0kBh+eQGe
qGBuqL27Uk90p8WdBbBlcRqTy9PYC/C5boG4rxeLJ/ECggEAHuidtOGZZc9y9nJW
bUOkUxMrhm2cnSOEHgYj5dpsDFC9CKA0Kvlmtky11oDgLPKOmLYbNrQLwDYJNxUO
N/HA6SFMnQTLvqFzuyVYT/n/iQXZ41kH12F0hTE2KU1SqhMsxIe9vkYP2/KsG2Q7
4fuysZoUBXs4qGU1m9Q3RLoqZ+gOX4qJ1tzkQS87Z4DxYfVRLfPwACWshsfRCFlM
GjjP9VQ5E93AkWx4pRNU+dHhI0M44PekGLmvjnOEPrwRNFtZaoXZHj9ynG0nyBW5
MvBSNFWWJEvpm7wV2XsZn94Cff+xt7l6hTqyzh7lzozJbSddZzSdYD+hJpvrdvFW
8FOe2QKCAQEAypJ1G30Job/X/URPQwx2UFZMKGAx2c/F9Li6q/evMvN3vo29/kaM
4X4u8pheKsUQqTiLVWYQxDVv1O945LSsNXlwrjaEqW0o72mIAa894Pe2WVuV+bj0
afPP6pSkihN8Xgu9rn+vjbg0WlFXAhiXelKo3U/7zTN4lLmFzkvV9bTA5KUlck+K
cEQgsFTiXr2L67A7yZi7MBGdTrxkAmENYGPiGLMlM83ma18PDFquccBurOJRuPnt
6H6CVpBLHPiZd3r9mdunHP55mhn4sMCk9jwbhE8uPtDOwXiWPW42oq676y+IUN8r
rCU5Qy+LT3iov/cSMFuKrehlK+/StaMzMQKCAQBK/jSm8RANTNXPemCDYuvitwvv
eGa3tmzUjo7HhNtaw2D6ff4eFSk8hdNzv/aQoGf7qqKjQWFGc2r9wF6GCmRLodDB
j7ob/e6VscYkWXM5DcRnrNQXwsPQK0szaDt6YBvUM/cmriT+xOOwqOJGYoW8lmAv
5q92QSDADLJPQRsLsKeABdXLWq5lnOcK+YCp/ABi5NItHkO9P7DEg5eEOX4c0BpG
LzbjkDNSXSx9VT6tIuCsFe4r/ucQwTqor4HVBmRKFiyrUJq9c1OeWqOYBdv6TLC+
uFBFC/M1QaLwklTBYgzyYHWM3nZYeiCl1+aP7Kc5xyJkR4R8F1SSeIGz5Mhq
-----END RSA PRIVATE KEY-----

View File

@ -0,0 +1,23 @@
-----BEGIN CERTIFICATE-----
MIID5zCCAs+gAwIBAgIUR1vu5qp+qpHjXhQ/6sUnFOtgW2kwDQYJKoZIhvcNAQEL
BQAwgYIxCzAJBgNVBAYTAkZSMRYwFAYDVQQIDA1JbGUgZGUgRnJhbmNlMQ4wDAYD
VQQHDAVQYXJpczEYMBYGA1UECgwPQ1NJUlQtRlIgQWlyYnVzMRowGAYDVQQLDBFJ
bmNpZGVudCBSZXNwb25zZTEVMBMGA1UEAwwMaXJpcy5hcHAuZGV2MB4XDTIxMTIw
OTE0MTUyMloXDTIyMTIwOTE0MTUyMlowgYIxCzAJBgNVBAYTAkZSMRYwFAYDVQQI
DA1JbGUgZGUgRnJhbmNlMQ4wDAYDVQQHDAVQYXJpczEYMBYGA1UECgwPQ1NJUlQt
RlIgQWlyYnVzMRowGAYDVQQLDBFJbmNpZGVudCBSZXNwb25zZTEVMBMGA1UEAwwM
aXJpcy5hcHAuZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlZWm
hypo/ZJMjmqHSBviR9pzYJYaiSlafeEUa/9LlBe4Ecov74XLVy+3TuG3w1YFzD1M
57j+EJrcZcl5E67uIVreAtJNLdgqDyCk6nCk3BdGgEnhcmQCevLXaCsBH+Z9lBRy
ruuTQAihq3QJztosTuI+so9AaZgSmOm17vL45S3QiFIPUB/Pgv60BfYkd0SV1V4Y
709IKvlCXSixryA0hkqT12D6fNFDPqwbn1o7Ifd7qVqVxD0QS8Wf56PUD8J+41A7
WLzSy/fNKAUOSoOyhWvdh7s5uciqJEXDMh1BvrpBSCmkmW8aprWVOr6yaugmBg58
g4oaM0xWOcFFeIcdrQIDAQABo1MwUTAdBgNVHQ4EFgQUNavtDZIB1hMxp7X0pytN
xACnEigwHwYDVR0jBBgwFoAUNavtDZIB1hMxp7X0pytNxACnEigwDwYDVR0TAQH/
BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAlA9xbWAfoGVOLUY/nuQ+0slryO/e
C2ChAHehtNKJa3DOGJXTOp1qJJdfuHAHZgplfSZp36N87eHfygFaR04RaHNNUVpg
1vnADd0QvDwYiEbRyjLN+EFdxDcbcsqljUUfPMx1zjlA1Ff2dbCkOVYYfm5xDzoE
weFx6inCtZ0pHqWdF5R77n4Rg3dmR/98dXM3nXhFevoAI7FqyauYFL0QFLXvIufg
3zywJrolNLZrrbpkSJ9kWzIZn0OK4Q+5dSnpBEimBZSrJKbZhgS/uzCL5flezKTF
LzHY0CRXC7nXO5dY2baBbIqRvYlCgbmaN4J505Fn6YSmwm3deCan2xyGHg==
-----END CERTIFICATE-----

View File

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCVlaaHKmj9kkyO
aodIG+JH2nNglhqJKVp94RRr/0uUF7gRyi/vhctXL7dO4bfDVgXMPUznuP4Qmtxl
yXkTru4hWt4C0k0t2CoPIKTqcKTcF0aASeFyZAJ68tdoKwEf5n2UFHKu65NACKGr
dAnO2ixO4j6yj0BpmBKY6bXu8vjlLdCIUg9QH8+C/rQF9iR3RJXVXhjvT0gq+UJd
KLGvIDSGSpPXYPp80UM+rBufWjsh93upWpXEPRBLxZ/no9QPwn7jUDtYvNLL980o
BQ5Kg7KFa92Huzm5yKokRcMyHUG+ukFIKaSZbxqmtZU6vrJq6CYGDnyDihozTFY5
wUV4hx2tAgMBAAECggEAV+BbvYpvtZAOA5iXswgWjknKgFKOckfmDo99NNj9KJoq
m+Dg+mDqjWTN1ryJ/Wp663qTxIoMT+r6UZ3j0GlzIgtE4/lyN92HD+4IlGXqpBXU
aCd/F3mjb2FcpKim93usCKNeoF5q2jJ378aywF+xqgIF/VZk6+PYARdDt4XsLI4w
vfJSbjRuynnSHl3kD2atcivAxYDu6AggQPsSPmF66z754eKA3BJIAWRUCdx/llTk
ARizLI4DFHKSYZq9pcKNtCrPIOrUkflG9QPZKn9dI0W+AaSroyqOQQvMY1NT3uEo
TsXYoyxHGH7+tkoaSHX6JteDe4YbZFbQ7z1s6ZHMIQKBgQDHYoS5wpCxOpCUCOxU
4s05n88tOJE7PnUQ1j1AosFes6qY1IFnsUss9ikF0aIL+gYvuEIU1z/ITDmBgWao
bJq6ySCHhyqOMZxMK+nuwIJQEmmIImfxEs8Hf891Cej5NO964VWIsBtNln10yLrj
Rc9J8J643O6YLyGuXDyXdxNcqQKBgQDADxnzPcou1wmlMK6zc7UeZ/bhi6hdYsWU
X3znd5jQZ8576A38g1v742A2yfWIftnNiMrCdwnS8x9Rw1ps160E5MvzEUOckqig
zJXn3PvO7tnReu4/Z4HoTUcbRtbBNMaIFgbW62A4S9CyiFZf9dONHoqhpYvbNJPx
kjGp6Ol3ZQKBgEzz2yIO0+VzIwXfg8cnWenZog5j/LmO24PKDA38QwGX+knOCrvI
k6kgwKh8Rjy1HNoiFW8RvI5DzRYMqWBrujRJGAL2yhfjUd2cPUdmiWT6Fjzyeody
qPDOBXW4g3BbW+pjOa3tujvxzy3ZozfAY8a31aqnqnaWCjvPYZtb298xAoGAYbIM
2D+xLhxyqpXV+DC+jAYEfnylG0PYD353MeMTV8fGMB89phpH2xyxX41iGZm1Pyj7
Qup8k9LaNqQxxjX7rAaafD1m8ClmH82R34z4hi3XnQh0UspbOYi9x/FD4qnu52CV
ABRhMKHYOkjB7zRD9X/4svtb5hibvQFJxA1XXUUCgYBaeZ7tZb8lWkd8v9uZ99qX
wpm2bO+RQpOeNkP31VpO3jj9/0S+SJSRc9a3JnNRLKNtKhNZpTlaP0coBqZqwb+u
gWAvdeZinxFwRj6VXvS8+2SP7ImRL1HgOwDQxDWXQxf3e3Zg7QoZLTea9Lq9Zf2g
JLbJbOUpEOe5W4M8xLItlg==
-----END PRIVATE KEY-----


@ -0,0 +1,41 @@
.DEFAULT_GOAL := help
file := $2
IN_DIR = $(PWD)
.PHONY: help
help: ## Print the help message
@awk 'BEGIN {FS = ":.*?## "} /^[0-9a-zA-Z_-]+:.*?## / {printf "\033[36m%s\033[0m : %s\n", $$1, $$2}' $(MAKEFILE_LIST) | \
sort | \
column -s ':' -t
.PHONY: create
create: ## Create the iris app
kubectl apply -k $(IN_DIR)/admin; \
kubectl apply -k $(IN_DIR)/rabbitmq; \
kubectl apply -k $(IN_DIR)/psql; \
kubectl apply -k $(IN_DIR)/app; \
kubectl apply -k $(IN_DIR)/worker
.PHONY: delete
delete: ## Delete the iris app
kubectl delete -k $(IN_DIR)/worker ;\
kubectl delete -k $(IN_DIR)/app ;\
kubectl delete -k $(IN_DIR)/rabbitmq ;\
kubectl delete -k $(IN_DIR)/psql ;\
kubectl delete -k $(IN_DIR)/admin
.PHONY: deploy-specific-kustomization
deploy-specific-kustomization: ## Deploy specific kustomization (ex- make deploy-specific-kustomization ARGS="path of kustomization.yml dir")
kubectl apply -k $(ARGS)
.PHONY: delete-specific-kustomization
delete-specific-kustomization: ## Delete specific kustomization (ex- make delete-specific-kustomization ARGS="path of kustomization.yml dir")
kubectl delete -k $(ARGS)
.PHONY: deploy-specific-manifest
deploy-specific-manifest: ## deploy specific manifest (ex- make deploy-specific-manifest ARGS="path of manifest dir")
kubectl apply -f $(ARGS)
.PHONY: delete-specific-manifest
delete-specific-manifest: ## delete specific manifest (ex- make delete-specific-manifest ARGS="path of manifest dir")
kubectl delete -f $(ARGS)


@ -0,0 +1,80 @@
# The Iris EKS manifest to deploy Iris-web on AWS EKS.
Description:
- These manifests deploy the application on AWS EKS.
## Prerequisites
- Install the AWS [CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html#getting-started-install-instructions)
- Set up an AWS EKS cluster. (terraform example [here](https://github.com/s3lva-kumar/terraform-aws-eks))
- Install the AWS EBS CSI driver add-on on the EKS cluster. (terraform example [here](https://github.com/s3lva-kumar/terraform-eks-plugin/tree/master/terraform-amazon-ebs-csi-driver))
- Install the AWS ALB ingress controller add-on on the EKS cluster. (terraform example [here](https://github.com/s3lva-kumar/terraform-eks-plugin/tree/master/terraform-amazon-alb-ingress))
## Build & push Docker Images
- To build the Docker images, run the following commands:
``` bash
# Clone the iris-web repository
$ git clone https://github.com/dfir-iris/iris-web.git
$ cd iris-web
# Build the Docker images (webApp and db; skip nginx, since we use an AWS ALB instead)
# app & worker:
$ docker build -t webapp:latest -f docker/webApp/Dockerfile .
# DB:
$ docker build -t db:latest -f docker/db/Dockerfile .
```
- Once the Docker images are built, push them to AWS ECR, as sketched below.
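- For example (the account ID, region and repository names are hypothetical, and the ECR repositories are assumed to already exist):
``` bash
# Log Docker in to ECR, then tag and push the images built above
aws ecr get-login-password --region us-east-1 | \
  docker login --username AWS --password-stdin 123456789012.dkr.ecr.us-east-1.amazonaws.com
docker tag webapp:latest 123456789012.dkr.ecr.us-east-1.amazonaws.com/webapp:latest
docker push 123456789012.dkr.ecr.us-east-1.amazonaws.com/webapp:latest
docker tag db:latest 123456789012.dkr.ecr.us-east-1.amazonaws.com/db:latest
docker push 123456789012.dkr.ecr.us-east-1.amazonaws.com/db:latest
```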
## Deploy:
- Before we deploy the manifests, we need to update the Docker image references in them.
*Note: the same Docker image is used for both the app and the worker.*
- ### Update the app image:
- Navigate to the deploy/eks_manifest/app directory.
- Open the *deployment.yml* file and update the image to the one pushed to ECR.
![App Screenshot](./images/app-image-update.png)
- ### Update the worker image:
- Navigate to the deploy/eks_manifest/worker directory.
- Open the *deployment.yml* file and update the image to the one pushed to ECR.
![App Screenshot](./images/worker-image-update.png)
- ### Update the db image:
- Navigate to the deploy/eks_manifest/psql directory.
- Open the *deployment.yml* file and update the image to the one pushed to ECR.
![App Screenshot](./images/db-image-update.png)
- ### Update the SSL certificate and domain name in the app ingress YAML file
- Navigate to the deploy/eks_manifest/app directory.
- Open the *ingress.yml* file and update the SSL certificate and host.
![App Screenshot](./images/ingress.png)
- *Note:*
- SSL:
Provide an ACM certificate ARN.
- HOST:
Give any hostname you want. Additionally, once the ingress is created, an ALB named "iris-alb" is provisioned on AWS. Then configure a DNS CNAME record pointing the hostname *(the one given in the ingress file)* to the ALB's DNS name, as sketched below.
![APP Screenshot](./images/alb-dns.png)
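- The resulting CNAME record would look roughly like this (the names are hypothetical):
```
iris.example.com.   CNAME   iris-alb-1234567890.us-east-1.elb.amazonaws.com.
```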
- ### Once everything mentioned above is updated, run the **Makefile**
- Navigate to the *deploy/eks_manifest*, here you can see the 'Makefile'
- To deploy the app, run
``` bash
$ make
$ make create
```
- To delete the app, run
*Caution: this deletes everything except the DB.*
``` bash
$ make
$ make delete
```
- ### Get the admin username and password
- Once everything is created, you can get the administrator username and password from the app _pod_:
``` bash
$ kubectl get pod -n iris-web
# Copy the pod name and use it in the command below (the pod name looks like "iris-app-deployment-<suffix>")
$ kubectl logs <pod_name> -n iris-web
# The credentials appear at the end of the logs
```
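- To go straight to the password line, a grep like the following should work (the pod name is hypothetical):
``` bash
kubectl logs iris-app-deployment-abc123 -n iris-web | grep "create_safe_admin"
```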


@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: iris-web
name: iris-psql-claim
labels:
site: iris
spec:
accessModes:
- ReadWriteOnce
storageClassName: iris-sc
resources:
requests:
storage: 30Gi


@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
name: admin-kustomize
labels:
site: iris
resources:
- namespace.yml
- storageclass.yml
- claim.yml


@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: iris-web
labels:
site: iris


@ -0,0 +1,13 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: iris-sc
labels:
site: iris
parameters:
fsType: ext4
type: gp2
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
reclaimPolicy: Retain


@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: iris-web
name: app-data
data:
POSTGRES_SERVER: iris-psql-service


@ -0,0 +1,86 @@
# deployment
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: iris-web
name: iris-app-deployment
labels:
site: iris
app: iris-app
spec:
replicas: 1
selector:
matchLabels:
app: iris-app
template:
metadata:
labels:
app: iris-app
spec:
containers:
- name: iris-app
image: iriswebapp_app:v2.2.2
ports:
- containerPort: 8000
command: ['nohup', './iris-entrypoint.sh', 'iriswebapp']
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_USER
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_PASSWORD
- name: POSTGRES_ADMIN_USER
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_ADMIN_USER
- name: POSTGRES_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_ADMIN_PASSWORD
- name: POSTGRES_PORT
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_PORT
- name: DOCKERIZED
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: DOCKERIZED
- name: IRIS_SECRET_KEY
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: IRIS_SECRET_KEY
- name: IRIS_SECURITY_PASSWORD_SALT
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: IRIS_SECURITY_PASSWORD_SALT
- name: POSTGRES_SERVER
valueFrom:
configMapKeyRef:
name: app-data
key: POSTGRES_SERVER
volumeMounts:
- name: iris-pcv
mountPath: /home/iris/downloads
subPath: downloads
- name: iris-pcv
mountPath: /home/iris/user_templates
subPath: user_templates
- name: iris-pcv
mountPath: /home/iris/server_data
subPath: server_data
volumes:
- name: iris-pcv
persistentVolumeClaim:
claimName: iris-psql-claim


@ -0,0 +1,29 @@
apiVersion: networking.k8s.io/v1 #extensions/v1beta1
kind: Ingress
metadata:
name: "iris-ingress"
namespace: "iris-web"
annotations:
alb.ingress.kubernetes.io/scheme: 'internet-facing'
alb.ingress.kubernetes.io/target-type: 'ip'
alb.ingress.kubernetes.io/group.name: 'iris-alb-group'
alb.ingress.kubernetes.io/load-balancer-name: 'iris-alb'
alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]'
alb.ingress.kubernetes.io/certificate-arn: 'arn:aws:acm:us-east-1:650601597349:certificate/4915ba65-ec07-44c7-8f42-897cfe1574bb'
alb.ingress.kubernetes.io/ssl-policy: 'ELBSecurityPolicy-TLS13-1-2-2021-06'
alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}'
labels:
app: 'iris'
spec:
ingressClassName: 'alb'
rules:
- host: 'test.cmcloudlab1727.info'
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: "iris-app-service"
port:
number: 80


@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
labels:
site: iris
resources:
- secrets.yml
- configmap.yml
- deployment.yml
- service.yml
- ingress.yml


@ -0,0 +1,17 @@
apiVersion: v1
kind: Secret
metadata:
name: iris-app-secrets
namespace: iris-web
labels:
site: iris
type: Opaque
data:
POSTGRES_USER: cmFwdG9y
POSTGRES_PASSWORD: YWRtaW4=
POSTGRES_ADMIN_USER: cmFwdG9y
POSTGRES_ADMIN_PASSWORD: YWRtaW4=
POSTGRES_PORT: NTQzMg==
DOCKERIZED: MQ==
IRIS_SECRET_KEY: QVZlcnlTdXBlclNlY3JldEtleS1Tb05vdFRoaXNPbmU=
IRIS_SECURITY_PASSWORD_SALT: QVJhbmRvbVNhbHQtTm90VGhpc09uZUVpdGhlcg==
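The values above are base64-encoded defaults (e.g. `YWRtaW4=` decodes to `admin`) and must be replaced before any real deployment. New values can be generated like this (the password shown is only an example):
``` bash
# Encode a secret value for the manifest
echo -n 'MyStr0ngPassw0rd' | base64
# Decode an existing value to verify it
echo 'YWRtaW4=' | base64 -d
```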


@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
namespace: iris-web
name: iris-app-service
labels:
site: iris
annotations:
alb.ingress.kubernetes.io/healthcheck-path: '/login'
spec:
selector:
app: iris-app
ports:
- protocol: TCP
port: 80
targetPort: 8000
type: ClusterIP


@ -0,0 +1,58 @@
# deployment
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: iris-web
name: iris-psql-db-deployment
labels:
app: iris-psql
site: iris
spec:
replicas: 1
selector:
matchLabels:
app: iris-psql
template:
metadata:
labels:
app: iris-psql
spec:
containers:
- name: iris-psql-db
image: iriswebapp_db:v2.2.2
ports:
- containerPort: 5432
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: iris-psql-secrets
key: POSTGRES_USER
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: iris-psql-secrets
key: POSTGRES_PASSWORD
- name: POSTGRES_ADMIN_USER
valueFrom:
secretKeyRef:
name: iris-psql-secrets
key: POSTGRES_ADMIN_USER
- name: POSTGRES_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: iris-psql-secrets
key: POSTGRES_ADMIN_PASSWORD
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: iris-psql-secrets
key: POSTGRES_DB
volumeMounts:
- name: persistent-storage
mountPath: /var/lib/postgresql/data
subPath: psqldata
volumes:
- name: persistent-storage
persistentVolumeClaim:
claimName: iris-psql-claim


@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
labels:
site: iris
resources:
- secrets.yml
- deployment.yml
- service.yml


@ -0,0 +1,14 @@
apiVersion: v1
kind: Secret
metadata:
name: iris-psql-secrets
namespace: iris-web
labels:
site: iris
type: Opaque
data:
POSTGRES_USER: cG9zdGdyZXM=
POSTGRES_PASSWORD: YWRtaW4=
POSTGRES_ADMIN_USER: cmFwdG9y
POSTGRES_ADMIN_PASSWORD: YWRtaW4=
POSTGRES_DB: aXJpc19kYg==


@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
namespace: iris-web
name: iris-psql-service
labels:
site: iris
spec:
selector:
app: iris-psql
ports:
- protocol: TCP
port: 5432
targetPort: 5432
type: ClusterIP


@ -0,0 +1,25 @@
# deployment
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: iris-web
name: iris-rabbitmq-deployment
labels:
app: iris-rabbitmq
site: iris
spec:
replicas: 1
selector:
matchLabels:
app: iris-rabbitmq
template:
metadata:
labels:
app: iris-rabbitmq
spec:
containers:
- name: iris-rabbitmq
image: rabbitmq:3-management-alpine
ports:
- containerPort: 5672


@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
labels:
site: iris
resources:
- deployment.yml
- service.yml


@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
namespace: iris-web
name: iris-rabbitmq-service
labels:
site: iris
spec:
selector:
app: iris-rabbitmq
ports:
- protocol: TCP
port: 5672
targetPort: 5672
type: ClusterIP


@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: iris-web
name: worker-data
data:
POSTGRES_SERVER: iris-psql-service
CELERY_BROKER: amqp://iris-rabbitmq-service
IRIS_WORKER: iris-worker-service


@ -0,0 +1,94 @@
# deployment
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: iris-web
name: iris-worker-deployment
labels:
app: iris-worker
site: iris
spec:
replicas: 1
selector:
matchLabels:
app: iris-worker
template:
metadata:
labels:
app: iris-worker
spec:
containers:
- name: iris-worker
image: iriswebapp_app:v2.2.2
command: ['./wait-for-iriswebapp.sh', 'iris-app-service:8000', './iris-entrypoint.sh', 'iris-worker']
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_USER
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_PASSWORD
- name: POSTGRES_ADMIN_USER
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_ADMIN_USER
- name: POSTGRES_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_ADMIN_PASSWORD
- name: POSTGRES_PORT
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_PORT
- name: DOCKERIZED
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: DOCKERIZED
- name: IRIS_SECRET_KEY
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: IRIS_SECRET_KEY
- name: IRIS_SECURITY_PASSWORD_SALT
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: IRIS_SECURITY_PASSWORD_SALT
- name: POSTGRES_SERVER
valueFrom:
configMapKeyRef:
name: worker-data
key: POSTGRES_SERVER
- name: CELERY_BROKER
valueFrom:
configMapKeyRef:
name: worker-data
key: CELERY_BROKER
- name: IRIS_WORKER
valueFrom:
configMapKeyRef:
name: worker-data
key: IRIS_WORKER
volumeMounts:
- name: iris-pcv
mountPath: /home/iris/downloads
subPath: downloads
- name: iris-pcv
mountPath: /home/iris/user_templates
subPath: user_templates
- name: iris-pcv
mountPath: /home/iris/server_data
subPath: server_data
volumes:
- name: iris-pcv
persistentVolumeClaim:
claimName: iris-psql-claim


@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
labels:
site: iris
resources:
- secrets.yml
- configmap.yml
- deployment.yml
- service.yml


@ -0,0 +1,17 @@
apiVersion: v1
kind: Secret
metadata:
name: iris-worker-secrets
namespace: iris-web
labels:
site: iris
type: Opaque
data:
POSTGRES_USER: cmFwdG9y
POSTGRES_PASSWORD: YWRtaW4=
POSTGRES_ADMIN_USER: cmFwdG9y
POSTGRES_ADMIN_PASSWORD: YWRtaW4=
POSTGRES_PORT: NTQzMg==
DOCKERIZED: MQ==
IRIS_SECRET_KEY: QVZlcnlTdXBlclNlY3JldEtleS1Tb05vdFRoaXNPbmU=
IRIS_SECURITY_PASSWORD_SALT: QVJhbmRvbVNhbHQtTm90VGhpc09uZUVpdGhlcg==


@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
namespace: iris-web
name: iris-worker-service
labels:
site: iris
spec:
selector:
app: iris-worker
ports:
- protocol: TCP
port: 80
type: ClusterIP


@ -0,0 +1,26 @@
SHELL := /bin/bash
check-helm:
@helm version || $(MAKE) install-helm
check-kubectl:
@kubectl version || $(MAKE) install-kubectl
install-helm:
@curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
@chmod 700 get_helm.sh
@./get_helm.sh
@rm get_helm.sh
install-kubectl:
@curl -LO "https://dl.k8s.io/release/$$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
@sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
@rm kubectl
install-iris:
@helm upgrade --install iris charts/ --values charts/values.yaml -n <name_space>
delete-iris:
@helm delete iris -n <name_space>
check-dependencies: check-helm check-kubectl


@ -0,0 +1,125 @@
# Prerequisites
- A Kubernetes cluster must be up and running (Kubernetes 1.26+)
- Helm 3.1.0
# Installing the Charts
## Installing Nginx Ingress Controller
The Ingress is a Kubernetes resource that lets you configure an HTTP load balancer for applications running on Kubernetes, represented by one or more Services. Such a load balancer is necessary to deliver those applications to clients outside of the Kubernetes cluster.
The Ingress resource supports the following features:
⦿ Content-based routing:
- `Host-based routing:` For example, routing requests with the host header foo.example.com to one group of services and the host header bar.example.com to another group.
- `Path-based routing:` For example, routing requests with the URI that starts with /serviceA to service A and requests with the URI that starts with /serviceB to service B.
⦿ **TLS/SSL** termination for each hostname, such as foo.example.com.
Before installing Iris-web, install the Nginx ingress controller:
```
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm install my-release ingress-nginx/ingress-nginx -n <Name_Space>
```
> **Info**: `my-release` is the name that you choose
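To verify that the controller is up before proceeding (the label below is the standard one from the ingress-nginx chart):
```
kubectl get pods -n <Name_Space> -l app.kubernetes.io/name=ingress-nginx
```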
## Installing Iris Web
Clone this Repository
```bash
$ git clone https://github.com/dfir-iris/iris-web.git
```
To install the chart with the release name `my-release`:
```bash
$ helm install my-release charts/ --values charts/values.yaml -n <Name_Space>
```
The command deploys **iris-web** on the Kubernetes cluster in the default configuration.
## Checking Dependencies
To check if Helm and kubectl are installed, run the following command:
```
make check-dependencies
```
If any of the dependencies are missing, the corresponding installation command will be executed automatically.
## Installing Iris
To install Iris, run the following command:
```
make install-iris
```
This will upgrade or install the Iris application using Helm. The installation uses the provided charts/values.yaml file and installs it in the specified namespace.
Replace `<name_space>` with the desired namespace for the Iris application.
## Deleting Iris
To delete the Iris application, run the following command:
```
make delete-iris
```
This will delete the Iris application using Helm. The application will be removed from the specified namespace.
Replace `<name_space>` with the namespace where the Iris application is installed.
> **Tip**: List all releases using `helm list`
# Uninstalling the Charts
To uninstall/delete the `my-release` deployment:
The command removes all the Kubernetes components associated with the chart and deletes the release.
```bash
$ helm delete my-release -n <Name_Space>
```
# Parameters
The [Parameters](#parameters) section lists the parameters that can be configured during installation.
### Common parameters
| Name | Description | Value |
| --| -- | -- |
| `replicaCount` | Number of Iris replicas to deploy | `1` |
### Label parameters
| Name | Description | Value |
| --| -- | -- |
| `app` | Define metadata app name | `string` |
| `name` | Define label names | `string` |
### Image parameters
Use the Dockerfile or Docker Compose to build the Iris images, and set each image in its respective YAML file.
> **Note**: for Kubernetes, use the modified Dockerfile.k8s file to build the images
| Name | Description | Value |
| --| -- | -- |
| `image.repository` | Iris image repository | `string` |
| `image.tag` | Iris image tag | `latest` |
| `image.pullPolicy` | Iris image pull policy | `string` |
### Service parameters
| Name | Description | Value |
| --| -- | -- |
| `service.type` | Iris service type | `LoadBalancer`|
| `service.port` | Iris service port | `80` |
## Ingress parameters
| Name | Description | Value |
| --| -- | -- |
| `host_name` | Hostname for Iris app | `string`|
## How to expose the application?
List the Ingress resources on the Kubernetes cluster:
```
kubectl get ingress -n <Name_Space>
```
Expose the application with your hostname. A local sketch is shown below.
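Without public DNS, the hostname can be mapped locally for testing (the IP and hostname are hypothetical):
```
# Point the chosen hostname at the ingress controller's external IP
echo "203.0.113.10 iris.example.com" | sudo tee -a /etc/hosts
```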


@ -0,0 +1,11 @@
### Todo
- [ ] ArtifactHub configuration
### In Progress
- [ ] ArtifactHub configuration
### Done ✓
- [ ] ArtifactHub configuration


@ -0,0 +1,24 @@
apiVersion: v2
name: iris-web
description: A Helm chart for Iris Web
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"


@ -0,0 +1,28 @@
Release Name: {{ .Release.Name }}
Chart Name: {{ .Chart.Name }}
Chart Version: {{ .Chart.Version }}
Chart Description: {{ .Chart.Description }}
The following Kubernetes resources have been deployed:
{{- if .Values.ingress.enabled }}
Ingress:
- Name: {{ .Release.Name }}-ingress
Host: {{ index .Values.ingress.hosts 0 "host" }}
Path: {{ index .Values.ingress.hosts 0 "paths" 0 "path" }}
Service Name: {{ index .Values.ingress.hosts 0 "paths" 0 "serviceName" }}
Service Port: {{ index .Values.ingress.hosts 0 "paths" 0 "servicePort" }}
{{- end }}
{{- if eq .Values.ingress.enabled true }}
To access your application, ensure that the necessary configurations are set up in your cluster.
- If you have DNS set up:
- Access your application using the configured domain: http://{{ index .Values.ingress.hosts 0 "host" }}
{{- else }}
No Ingress resources deployed.
{{- end }}
Ensure that your application service ({{ index .Values.ingress.hosts 0 "paths" 0 "serviceName" }}) is up and running on port {{ index .Values.ingress.hosts 0 "paths" 0 "servicePort" }}.
Happy exploring!


@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "iris-web.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "iris-web.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "iris-web.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "iris-web.labels" -}}
helm.sh/chart: {{ include "iris-web.chart" . }}
{{ include "iris-web.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "iris-web.selectorLabels" -}}
app.kubernetes.io/name: {{ include "iris-web.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "iris-web.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "iris-web.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}


@ -0,0 +1,85 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.irisapp.name }}
spec:
replicas: {{ .Values.irisapp.replicaCount }}
selector:
matchLabels:
app: {{ .Values.irisapp.app }}
template:
metadata:
labels:
app: {{ .Values.irisapp.app }}
spec:
containers:
- name: {{ .Values.irisapp.name }}
image: "{{ .Values.irisapp.image}}:{{ .Values.irisapp.tag }}"
imagePullPolicy: "{{ .Values.irisapp.imagePullPolicy }}"
command: ['nohup', './iris-entrypoint.sh', 'iriswebapp']
env:
- name: DOCKERIZED # Run the app in containerized mode
value: {{ .Values.irisapp.DOCKERIZED | quote }}
- name: POSTGRES_USER # Setting Database username
value: {{ .Values.irisapp.POSTGRES_USER| quote }}
- name: POSTGRES_PASSWORD # Setting Database password
value: {{ .Values.irisapp.POSTGRES_PASSWORD | quote }}
- name: POSTGRES_ADMIN_USER # Setting Database admin user
value: {{ .Values.irisapp.POSTGRES_ADMIN_USER | quote }}
- name: POSTGRES_ADMIN_PASSWORD # Setting Database admin password
value: {{ .Values.irisapp.POSTGRES_ADMIN_PASSWORD | quote }}
- name: POSTGRES_PORT # Setting Database port
value: {{ .Values.irisapp.POSTGRES_PORT | quote }}
- name: POSTGRES_SERVER # Setting Database server
value: {{ .Values.irisapp.POSTGRES_SERVER | quote }}
- name: IRIS_SECRET_KEY
value: {{ .Values.irisapp.IRIS_SECRET_KEY | quote }}
- name: IRIS_SECURITY_PASSWORD_SALT
value: {{ .Values.irisapp.IRIS_SECURITY_PASSWORD_SALT | quote }}
ports:
- containerPort: 8000
volumeMounts:
- mountPath: /home/iris/downloads
name: iris-downloads
- mountPath: /home/iris/user_templates
name: user-templates
- mountPath: /home/iris/server_data
name: server-data
volumes:
- name: iris-downloads
emptyDir: {}
- name: user-templates
emptyDir: {}
- name: server-data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.irisapp.name }}
labels:
app: {{ .Values.irisapp.app }}
spec:
type: {{ .Values.irisapp.type }}
ports:
- port: {{ .Values.irisapp.service.port }}
targetPort: {{ .Values.irisapp.service.targetPort }}
selector:
app: {{ .Values.irisapp.app }}
---


@ -0,0 +1,69 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.irisworker.name }}
spec:
replicas: {{ .Values.irisworker.replicaCount }}
selector:
matchLabels:
app: {{ .Values.irisworker.app }}
template:
metadata:
labels:
app: {{ .Values.irisworker.app }}
spec:
containers:
- name: {{ .Values.irisworker.name }}
image: "{{ .Values.irisworker.image}}:{{ .Values.irisworker.tag }}"
imagePullPolicy: "{{ .Values.irisworker.imagePullPolicy }}"
command: ['./wait-for-iriswebapp.sh', 'iriswebapp-app.test.svc.cluster.local:8000', './iris-entrypoint.sh', 'iris-worker']
env:
- name: DOCKERIZED
value: {{ .Values.irisworker.DOCKERIZED | quote }}
- name: POSTGRES_USER
value: {{ .Values.irisworker.POSTGRES_USER | quote }}
- name: POSTGRES_PASSWORD
value: {{ .Values.irisworker.POSTGRES_PASSWORD | quote }}
- name: POSTGRES_ADMIN_USER
value: {{ .Values.irisworker.POSTGRES_ADMIN_USER | quote }}
- name: POSTGRES_ADMIN_PASSWORD
value: {{ .Values.irisworker.POSTGRES_ADMIN_PASSWORD | quote }}
- name: POSTGRES_PORT
value: {{ .Values.irisworker.POSTGRES_PORT | quote }}
- name: POSTGRES_SERVER
value: {{ .Values.irisworker.POSTGRES_SERVER | quote }}
- name: IRIS_SECRET_KEY
value: {{ .Values.irisworker.IRIS_SECRET_KEY | quote }}
- name: IRIS_SECURITY_PASSWORD_SALT
value: {{ .Values.irisworker.IRIS_SECURITY_PASSWORD_SALT | quote }}
ports:
- containerPort: 80
volumeMounts:
- mountPath: /home/iris/downloads
name: iris-downloads
- mountPath: /home/iris/user_templates
name: user-templates
- mountPath: /home/iris/server_data
name: server-data
volumes:
- name: iris-downloads
emptyDir: {}
- name: user-templates
emptyDir: {}
- name: server-data
emptyDir: {}
---


@ -0,0 +1,32 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ .Values.ingress.name }}
annotations:
{{- toYaml .Values.ingress.annotations | nindent 4 }}
spec:
rules:
{{- range $host := .Values.ingress.hosts }}
- host: {{ $host.host }}
http:
paths:
{{- range $path := $host.paths }}
- path: {{ $path.path }}
pathType: Prefix
backend:
service:
name: {{ $path.serviceName }}
port:
number: {{ $path.servicePort }}
{{- end }}
{{- end }}
{{- with .Values.ingress.tls }}
tls:
{{- range . }}
- hosts:
{{- range .hosts }}
- {{ . }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}


@ -0,0 +1,104 @@
---
# Here I have used a hostpath
# Local volumes can only be used as a statically created PersistentVolume. Dynamic provisioning is not supported.
# If you need to go with Dynamic volumes you may choose AWS EBS or EFS
kind: PersistentVolume
apiVersion: v1
metadata:
name: postgres-pv-volume
labels:
app: {{ .Values.postgres.app }}
spec:
storageClassName: pv
capacity:
storage: 5Gi
accessModes:
- ReadWriteMany
hostPath:
path: /var/lib/data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: postgres-pv-claim
labels:
app: {{ .Values.postgres.app }}
spec:
storageClassName: pv
accessModes:
- ReadWriteMany
resources:
requests:
storage: 5Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.postgres.name }}
spec:
replicas: {{ .Values.postgres.replicaCount }}
selector:
matchLabels:
app: {{ .Values.postgres.app }}
template:
metadata:
labels:
app: {{ .Values.postgres.app }}
spec:
containers:
- name: {{ .Values.postgres.name }}
image: "{{ .Values.postgres.image}}:{{ .Values.postgres.tag }}"
imagePullPolicy: "{{ .Values.postgres.imagePullPolicy }}"
env:
- name: POSTGRES_DB # Setting Database name
value: {{ .Values.postgres.POSTGRES_DB | quote }}
- name: POSTGRES_USER # Setting Database username
value: {{ .Values.postgres.POSTGRES_USER | quote }}
- name: POSTGRES_PASSWORD # Setting Database password
value: {{ .Values.postgres.POSTGRES_PASSWORD | quote }}
- name: POSTGRES_ADMIN_USER # Setting Database admin user
value: {{ .Values.postgres.POSTGRES_ADMIN_USER | quote }}
- name: POSTGRES_ADMIN_PASSWORD # Setting Database admin password
value: {{ .Values.postgres.POSTGRES_ADMIN_PASSWORD | quote }}
- name: POSTGRES_PORT # Setting Database port
value: {{ .Values.postgres.POSTGRES_PORT | quote }}
- name: POSTGRES_HOST_AUTH_METHOD
value: trust
ports:
- containerPort: 5432
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: postgredb
volumes:
- name: postgredb
persistentVolumeClaim:
claimName: postgres-pv-claim
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.postgres.name }}
labels:
app: {{ .Values.postgres.app }}
spec:
type: ClusterIP
ports:
- port: {{ .Values.postgres.service.port }}
selector:
app: {{ .Values.postgres.app }}
---


@ -0,0 +1,36 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.rabbitmq.name }}
spec:
selector:
matchLabels:
app: {{ .Values.rabbitmq.app }}
replicas: {{ .Values.rabbitmq.replicaCount }}
template:
metadata:
labels:
app: {{ .Values.rabbitmq.app }}
spec:
containers:
- image: "{{ .Values.rabbitmq.image}}:{{ .Values.rabbitmq.tag}}"
imagePullPolicy: {{ .Values.rabbitmq.imagePullPolicy}}
name: {{ .Values.rabbitmq.name }}
ports:
- containerPort: 5672
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.rabbitmq.name }}
spec:
ports:
- port: 5672
targetPort: 5672
protocol: TCP
type: ClusterIP
selector:
app: {{ .Values.rabbitmq.app }}
---

View File

@ -0,0 +1,159 @@
## @section rabbitmq Configuration
##
rabbitmq:
## @param rabbitmq.app App name for rabbitmq
##
app: rabbitmq
## @param rabbitmq.name Name for rabbitmq
##
name: rabbitmq
## @param rabbitmq.image Image rabbitmq deployment
##
image: rabbitmq
## @param rabbitmq.tag Tag for rabbitmq
##
tag: 3-management-alpine
## @param rabbitmq.imagePullPolicy Policy for rabbitmq
##
imagePullPolicy: "IfNotPresent"
## @param rabbitmq.replicaCount ReplicaCount for rabbitmq
##
replicaCount: 1
## @section PostgreSQL Configuration
##
postgres:
## @param postgres.app PostgreSQL App
##
app: postgres
## @param postgres.name PostgreSQL Name
##
name: postgres
## @param postgres.image PostgreSQL Image
##
image: <postgres_image>
## @param postgres.tag PostgreSQL Tag
tag: <tag>
## @param postgres.imagePullPolicy PostgreSQL PullPolicy
##
imagePullPolicy: "IfNotPresent"
## @param postgres.replicaCount PostgreSQL ReplicaCount
##
replicaCount: 1
## @param postgres.service PostgreSQL Service
##
service:
port: 5432
## @param PostgreSQL Environments
##
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_ADMIN_USER: raptor
POSTGRES_ADMIN_PASSWORD: postgres
POSTGRES_DB: iris_db
POSTGRES_PORT: 5432
## @section Iris Frontend Configuration
##
irisapp:
## @param irisapp.app Iris Frontend App
##
app: iriswebapp-app
## @param irisapp.name Iris Frontend Name
##
name: iriswebapp-app
## @param irisapp.image Iris Frontend Image
##
image: <irisapp_image>
## @param irisapp.tag Iris Frontend Tag
##
tag: <tag>
## @param irisapp.imagePullPolicy Iris Frontend imagePullPolicy
##
imagePullPolicy: "IfNotPresent"
## @param irisapp.replicaCount Iris Frontend replicaCount
##
replicaCount: 1
## @param irisapp.service Iris Frontend Service
##
service:
port: 80
targetPort: 8000
## @param irisapp.type Iris Frontend Service type
##
type: ClusterIP
## @param Iris Frontend Environments
##
POSTGRES_USER: raptor
POSTGRES_PASSWORD: postgres
POSTGRES_ADMIN_USER: raptor
POSTGRES_ADMIN_PASSWORD: postgres
POSTGRES_PORT: 5432
POSTGRES_SERVER: postgres.<name_space>.svc.cluster.local
DOCKERIZED: 1
IRIS_SECRET_KEY: AVerySuperSecretKey-SoNotThisOne
IRIS_SECURITY_PASSWORD_SALT: ARandomSalt-NotThisOneEither
## @section Iris Backend Configuration
##
irisworker:
## @param irisworker.app Iris Backend App
##
app: iriswebapp-worker
## @param irisworker.name Iris Backend Name
##
name: iriswebapp-worker
## @param irisworker.image Iris Backend Image
##
image: <irisworker_image>
## @param irisworker.tag Iris Backend Tag
##
tag: <tag>
## @param irisworker.imagePullPolicy Iris Backend imagePullPolicy
##
imagePullPolicy: "IfNotPresent"
## @param irisworker.replicaCount Iris Backend replicaCount
##
replicaCount: 1
## @param Iris Backend Environments
##
POSTGRES_USER: raptor
POSTGRES_PASSWORD: postgres
POSTGRES_ADMIN_USER: raptor
POSTGRES_ADMIN_PASSWORD: postgres
POSTGRES_PORT: 5432
POSTGRES_SERVER: postgres.<name_space>.svc.cluster.local
DOCKERIZED: 1
IRIS_SECRET_KEY: AVerySuperSecretKey-SoNotThisOne
IRIS_SECURITY_PASSWORD_SALT: ARandomSalt-NotThisOneEither
## @section Nginx Ingress Configuration
##
ingress:
enabled: true
name: iris-ingress
className: nginx
annotations:
# Add any annotations specific to your Ingress controller
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/ssl-redirect: "false"
hosts:
- host: <host_name>
paths:
- path: /
pathType: Prefix
serviceName: iriswebapp-app
servicePort: 80
tls:
- secretName: iris-ingress-tls-secret
hosts:
- <host_name>

158
iris-web/docker-compose.yml Normal file
View File

@ -0,0 +1,158 @@
# IRIS Source Code
# contact@dfir-iris.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
version: "3.5"
services:
rabbitmq:
image: rabbitmq:3-management-alpine
container_name: iriswebapp_rabbitmq
networks:
- iris_backend
db:
build:
context: docker/db
container_name: iriswebapp_db
image: iriswebapp_db:v2.3.3
restart: always
# Used for debugging purposes, should be deleted for production
ports:
- "127.0.0.1:5432:5432"
environment:
- POSTGRES_USER
- POSTGRES_PASSWORD
- POSTGRES_ADMIN_USER
- POSTGRES_ADMIN_PASSWORD
- POSTGRES_DB
networks:
- iris_backend
volumes:
- db_data:/var/lib/postgresql/data
app:
build:
context: .
dockerfile: docker/webApp/Dockerfile
image: iriswebapp_app:v2.3.3
container_name: iriswebapp_app
command: ['nohup', './iris-entrypoint.sh', 'iriswebapp']
volumes:
# RootCA necessary when dealing with an auth server without a trusted CA signed certificate
- ./certificates/rootCA/irisRootCACert.pem:/etc/irisRootCACert.pem:ro
- ./certificates/:/home/iris/certificates/:ro
- ./certificates/ldap/:/iriswebapp/certificates/ldap/:ro
- iris-downloads:/home/iris/downloads
- user_templates:/home/iris/user_templates
- server_data:/home/iris/server_data
restart: always
depends_on:
- "rabbitmq"
- "db"
# Used for debugging purposes, should be deleted for production
ports:
- "127.0.0.1:8000:8000"
env_file:
- .env
environment:
- POSTGRES_USER
- POSTGRES_PASSWORD
- POSTGRES_ADMIN_USER
- POSTGRES_ADMIN_PASSWORD
- POSTGRES_SERVER
- POSTGRES_PORT
- DOCKERIZED
- IRIS_SECRET_KEY
- IRIS_SECURITY_PASSWORD_SALT
networks:
- iris_backend
- iris_frontend
- shared-network
worker:
build:
context: .
dockerfile: docker/webApp/Dockerfile
image: iriswebapp_app:v2.3.3
container_name: iriswebapp_worker
command: ['./wait-for-iriswebapp.sh', 'app:8000', './iris-entrypoint.sh', 'iris-worker']
volumes:
- ./certificates/rootCA/irisRootCACert.pem:/etc/irisRootCACert.pem:ro
- ./certificates/:/home/iris/certificates/:ro
- ./certificates/ldap/:/iriswebapp/certificates/ldap/:ro
- iris-downloads:/home/iris/downloads
- user_templates:/home/iris/user_templates
- server_data:/home/iris/server_data
depends_on:
- "rabbitmq"
- "db"
- "app"
env_file:
- .env
environment:
- POSTGRES_USER
- POSTGRES_PASSWORD
- POSTGRES_ADMIN_USER
- POSTGRES_ADMIN_PASSWORD
- POSTGRES_SERVER
- POSTGRES_PORT
- DOCKERIZED
- IRIS_SECRET_KEY
- IRIS_SECURITY_PASSWORD_SALT
- IRIS_WORKER
networks:
- iris_backend
- shared-network
nginx:
build:
context: ./docker/nginx
args:
NGINX_CONF_GID: 1234
NGINX_CONF_FILE: nginx.conf
image: iriswebapp_nginx:v2.3.3
container_name: iriswebapp_nginx
environment:
- IRIS_UPSTREAM_SERVER
- IRIS_UPSTREAM_PORT
- INTERFACE_HTTPS_PORT
- SERVER_NAME
- CERT_FILENAME
- KEY_FILENAME
- IRIS_AUTHENTICATION_TYPE
networks:
- iris_frontend
- shared-network
ports:
- "${INTERFACE_HTTPS_PORT:-8443}:${INTERFACE_HTTPS_PORT:-8443}"
volumes:
- "./certificates/web_certificates/:/www/certs/:ro"
restart: on-failure:5
depends_on:
- "app"
volumes:
iris-downloads:
user_templates:
server_data:
db_data:
networks:
iris_backend:
name: iris_backend
iris_frontend:
name: iris_frontend
shared-network:
external: true
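# The shared-network is external and must exist before bringing the stack up, e.g.:
#   docker network create shared-network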

View File

@ -0,0 +1,22 @@
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
FROM postgres:12-alpine
COPY create_user.sh /docker-entrypoint-initdb.d/10-create_user.sh
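# Scripts placed in /docker-entrypoint-initdb.d/ run once, on first initialization of the data directory.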

View File

@ -0,0 +1,10 @@
#!/bin/bash
set -e
POSTGRES="psql --username ${POSTGRES_USER}"
echo "Creating database role: ${POSTGRES_ADMIN_USER}"
$POSTGRES <<-EOSQL
CREATE USER ${POSTGRES_ADMIN_USER} WITH CREATEDB SUPERUSER PASSWORD '${POSTGRES_ADMIN_PASSWORD}';
EOSQL

View File

@ -0,0 +1,49 @@
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
FROM nginx:1.21.3
RUN apt-get update && apt-get install -y curl
# Used to pass protected files to the container through volumes
ARG NGINX_CONF_GID
ARG NGINX_CONF_FILE
RUN groupadd -g ${NGINX_CONF_GID} az-app-nginx-conf && usermod -a -G az-app-nginx-conf www-data
COPY entrypoint.sh /entrypoint.sh
RUN chmod 700 /entrypoint.sh
RUN chown www-data:www-data /entrypoint.sh
COPY ${NGINX_CONF_FILE} /etc/nginx/nginx.conf
# log
RUN touch /var/log/nginx/audit_platform_error.log && chown -R www-data:www-data /var/log/nginx/audit_platform_error.log
RUN touch /var/log/nginx/audit_platform_access.log && chown -R www-data:www-data /var/log/nginx/audit_platform_access.log
# Security
RUN touch /var/run/nginx.pid && chown -R www-data:www-data /var/run/nginx.pid /var/cache/nginx /etc/nginx/nginx.conf
RUN mkdir -p /www/certs/
USER www-data
HEALTHCHECK --interval=5s --timeout=3s CMD curl --fail -k https://127.0.0.1:${INTERFACE_HTTPS_PORT:-8443} || exit 1
ENTRYPOINT ["/entrypoint.sh"]

View File

@ -0,0 +1,29 @@
#!/usr/bin/env bash
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
set -e
# envsubst substitutes every $variable in a file. The nginx config contains nginx variables such as $host,
# so we restrict substitution to the set below; otherwise each nginx variable would be replaced by an empty string.
envsubst '${INTERFACE_HTTPS_PORT} ${IRIS_UPSTREAM_SERVER} ${IRIS_UPSTREAM_PORT} ${SERVER_NAME} ${KEY_FILENAME} ${CERT_FILENAME}' < /etc/nginx/nginx.conf > /tmp/nginx.conf
cp /tmp/nginx.conf /etc/nginx/nginx.conf
rm /tmp/nginx.conf
exec nginx -g "daemon off;"

View File

@ -0,0 +1,161 @@
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
worker_processes auto;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
map $request_uri $csp_header {
default "default-src 'self' https://analytics.dfir-iris.org; script-src 'self' 'unsafe-inline' https://analytics.dfir-iris.org; style-src 'self' 'unsafe-inline';";
}
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
error_log /var/log/nginx/error.log debug;
server_tokens off;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
types_hash_max_size 2048;
types_hash_bucket_size 128;
proxy_headers_hash_max_size 2048;
proxy_headers_hash_bucket_size 128;
proxy_buffering on;
proxy_buffers 8 16k;
proxy_buffer_size 4k;
client_header_buffer_size 2k;
large_client_header_buffers 8 64k;
client_body_buffer_size 64k;
client_max_body_size 100M;
reset_timedout_connection on;
keepalive_timeout 90s;
client_body_timeout 90s;
send_timeout 90s;
client_header_timeout 90s;
fastcgi_read_timeout 90s;
# WORKING TIMEOUT FOR PROXY CONF
proxy_read_timeout 90s;
uwsgi_read_timeout 90s;
gzip off;
gzip_disable "MSIE [1-6]\.";
# FORWARD CLIENT IDENTITY TO SERVER
proxy_set_header HOST $http_host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# FULLY DISABLE SERVER CACHE
add_header Last-Modified $date_gmt;
add_header 'Cache-Control' 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0';
if_modified_since off;
expires off;
etag off;
proxy_no_cache 1;
proxy_cache_bypass 1;
# SSL CONF, STRONG CIPHERS ONLY
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_certificate /www/certs/${CERT_FILENAME};
ssl_certificate_key /www/certs/${KEY_FILENAME};
ssl_ecdh_curve secp521r1:secp384r1:prime256v1;
ssl_buffer_size 4k;
# DISABLE SSL SESSION CACHE
ssl_session_tickets off;
ssl_session_cache none;
access_log /var/log/nginx/audit_platform_access.log main;
error_log /var/log/nginx/audit_platform_error.log debug;
server {
listen ${INTERFACE_HTTPS_PORT} ssl;
server_name ${SERVER_NAME};
root /www/data;
index index.html;
error_page 500 502 503 504 /50x.html;
add_header Content-Security-Policy $csp_header;
# SECURITY HEADERS
add_header X-XSS-Protection "1; mode=block";
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
# max-age = 31536000s = 1 year
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header Front-End-Https on;
location / {
proxy_pass http://${IRIS_UPSTREAM_SERVER}:${IRIS_UPSTREAM_PORT};
location ~ ^/(manage/templates/add|manage/cases/upload_files) {
keepalive_timeout 10m;
client_body_timeout 10m;
send_timeout 10m;
proxy_read_timeout 10m;
client_max_body_size 0M;
proxy_request_buffering off;
proxy_pass http://${IRIS_UPSTREAM_SERVER}:${IRIS_UPSTREAM_PORT};
}
location ~ ^/(datastore/file/add|datastore/file/add-interactive) {
keepalive_timeout 10m;
client_body_timeout 10m;
send_timeout 10m;
proxy_read_timeout 10m;
client_max_body_size 0M;
proxy_request_buffering off;
proxy_pass http://${IRIS_UPSTREAM_SERVER}:${IRIS_UPSTREAM_PORT};
}
}
location /socket.io {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_pass http://${IRIS_UPSTREAM_SERVER}:${IRIS_UPSTREAM_PORT}/socket.io;
}
location = /50x.html {
root /usr/share/nginx/html;
}
}
}

View File

@ -0,0 +1,74 @@
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#################
# COMPILE IMAGE #
#################
FROM python:3.9 AS compile-image
RUN apt-get update
RUN python -m venv /opt/venv
# Make sure we use the virtualenv:
ENV PATH="/opt/venv/bin:$PATH"
COPY source/dependencies /dependencies
COPY source/requirements.txt /
RUN pip3 install -r requirements.txt
###############
# BUILD IMAGE #
###############
FROM python:3.9 as iriswebapp
ENV PYTHONUNBUFFERED=1
COPY --from=compile-image /opt/venv /opt/venv
# Make sure we use the virtualenv:
ENV PATH="/opt/venv/bin:$PATH"
# Define specific admin password at creation
#ENV IRIS_ADM_PASSWORD="MySuperFirstPasswordIWant"
RUN apt update
RUN apt install -y p7zip-full pgp rsync postgresql-client
RUN mkdir /iriswebapp/
RUN mkdir -p /home/iris/certificates
RUN mkdir -p /home/iris/user_templates
RUN mkdir -p /home/iris/server_data
RUN mkdir -p /home/iris/server_data/backup
RUN mkdir -p /home/iris/server_data/updates
RUN mkdir -p /home/iris/server_data/custom_assets
RUN mkdir -p /home/iris/server_data/datastore
WORKDIR /iriswebapp
COPY docker/webApp/iris-entrypoint.sh .
COPY docker/webApp/wait-for-iriswebapp.sh .
COPY ./source .
# Add execution right to binaries needed by evtx2splunk for iris_evtx module
RUN chmod +x /iriswebapp/dependencies/evtxdump_binaries/linux/x64/fd
RUN chmod +x /iriswebapp/dependencies/evtxdump_binaries/linux/x64/evtx_dump
RUN chmod +x iris-entrypoint.sh
RUN chmod +x wait-for-iriswebapp.sh
#ENTRYPOINT [ "./iris-entrypoint.sh" ]

View File

@ -0,0 +1,77 @@
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#################
# COMPILE IMAGE #
#################
FROM python:3.9 AS compile-image
RUN apt-get update
RUN python -m venv /opt/venv
# Make sure we use the virtualenv:
ENV PATH="/opt/venv/bin:$PATH"
COPY source/dependencies /dependencies
COPY source/requirements.txt /
RUN pip3 install -r requirements.txt
###############
# BUILD IMAGE #
###############
FROM python:3.9 as iriswebapp
ENV PYTHONUNBUFFERED=1
COPY --from=compile-image /opt/venv /opt/venv
# Make sure we use the virtualenv:
ENV PATH="/opt/venv/bin:$PATH"
# Define specific admin password at creation
#ENV IRIS_ADM_PASSWORD="MySuperFirstPasswordIWant"
RUN apt update
RUN apt install -y p7zip-full pgp rsync postgresql-client
RUN mkdir /iriswebapp/
RUN mkdir -p /home/iris/certificates
RUN mkdir -p /home/iris/user_templates
RUN mkdir -p /home/iris/server_data
RUN mkdir -p /home/iris/server_data/backup
RUN mkdir -p /home/iris/server_data/updates
RUN mkdir -p /home/iris/server_data/custom_assets
RUN mkdir -p /home/iris/server_data/datastore
WORKDIR /iriswebapp
COPY docker/webApp/iris-entrypoint.sh .
COPY docker/webApp/wait-for-iriswebapp.sh .
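# NOTE: COPY cannot reference paths outside the build context; the ../../certificates
# paths below assume the certificates directory is made available inside the build context.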
COPY ../../certificates /home/iris/certificates/
COPY ../../certificates/rootCA/irisRootCACert.pem /etc/irisRootCACert.pem
COPY ../../certificates/ldap/ /iriswebapp/certificates/ldap/
COPY ./source .
# Add execution right to binaries needed by evtx2splunk for iris_evtx module
RUN chmod +x /iriswebapp/dependencies/evtxdump_binaries/linux/x64/fd
RUN chmod +x /iriswebapp/dependencies/evtxdump_binaries/linux/x64/evtx_dump
RUN chmod +x iris-entrypoint.sh
RUN chmod +x wait-for-iriswebapp.sh
#ENTRYPOINT [ "./iris-entrypoint.sh" ]

View File

@ -0,0 +1,35 @@
#!/bin/bash
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
target=${1:-app}
printf "Running ${target} ...\n"
if [[ "${target}" == iris-worker ]] ; then
celery -A app.celery worker -E -B -l INFO &
else
gunicorn app:app --worker-class eventlet --bind 0.0.0.0:8000 --timeout 180 --worker-connections 1000 --log-level=info &
fi
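# Keep the container alive while the background process (celery or gunicorn) runs.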
while true; do sleep 2; done

View File

@ -0,0 +1,34 @@
#!/bin/sh
# wait-for-iriswebapp.sh
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
set -e
host="$1"
shift
sleep 1
until curl "$host" >/dev/null 2>&1; do
>&2 echo "IRISwebapp is unavailable - sleeping"
sleep 1
done
>&2 echo "IRISwebapp is up - executing command"
exec "$@"

Binary file not shown. (image added; 242 KiB)

Binary file not shown. (image added; 5.5 MiB)

View File

@ -0,0 +1,32 @@
#!/usr/bin/env python3
#
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
bind = 'unix:sock'
workers = 4
accesslog = '-'
loglevel = 'warning'
errorlog = '/var/log/iris/errors.log'
timeout = 3000
def worker_exit(server, worker):
sys.exit(4)

View File

@ -0,0 +1,136 @@
#!/usr/bin/env python3
#
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import collections
import json
import logging as logger
import os
import urllib.parse
from flask import Flask
from flask import session
from flask_bcrypt import Bcrypt
from flask_caching import Cache
from flask_login import LoginManager
from flask_marshmallow import Marshmallow
from flask_socketio import SocketIO, Namespace
from flask_sqlalchemy import SQLAlchemy
from functools import partial
from sqlalchemy_imageattach.stores.fs import HttpExposedFileSystemStore
from werkzeug.middleware.proxy_fix import ProxyFix
from app.flask_dropzone import Dropzone
from app.iris_engine.tasker.celery import make_celery
class ReverseProxied(object):
def __init__(self, flask_app):
self._app = flask_app
def __call__(self, environ, start_response):
scheme = environ.get('HTTP_X_FORWARDED_PROTO', None)
if scheme is not None:
environ['wsgi.url_scheme'] = scheme
return self._app(environ, start_response)
class AlertsNamespace(Namespace):
pass
APP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_PATH = os.path.join(APP_PATH, 'templates/')
# Grabs the folder where the script runs.
basedir = os.path.abspath(os.path.dirname(__file__))
LOG_FORMAT = '%(asctime)s :: %(levelname)s :: %(module)s :: %(funcName)s :: %(message)s'
LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
logger.basicConfig(level=logger.INFO, format=LOG_FORMAT, datefmt=LOG_TIME_FORMAT)
app = Flask(__name__)
def ac_current_user_has_permission(*permissions):
"""
Return True if current user has permission
"""
for permission in permissions:
if session['permissions'] & permission.value == permission.value:
return True
return False
def ac_current_user_has_manage_perms():
if session['permissions'] != 1 and session['permissions'] & 0x1FFFFF0 != 0:
return True
return False
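# Example (hypothetical Permissions enum member; the real enum is defined elsewhere in the app):
#   if ac_current_user_has_permission(Permissions.standard_user):
#       ...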
app.jinja_env.filters['unquote'] = lambda u: urllib.parse.unquote(u)
app.jinja_env.filters['tojsonsafe'] = lambda u: json.dumps(u, indent=4, ensure_ascii=False)
app.jinja_env.filters['tojsonindent'] = lambda u: json.dumps(u, indent=4)
app.jinja_env.filters['escape_dots'] = lambda u: u.replace('.', '[.]')
app.jinja_env.globals.update(user_has_perm=ac_current_user_has_permission)
app.jinja_env.globals.update(user_has_manage_perms=ac_current_user_has_manage_perms)
app.config.from_object('app.configuration.Config')
cache = Cache(app)
SQLALCHEMY_ENGINE_OPTIONS = {
"json_deserializer": partial(json.loads, object_pairs_hook=collections.OrderedDict)
}
db = SQLAlchemy(app, engine_options=SQLALCHEMY_ENGINE_OPTIONS) # flask-sqlalchemy
bc = Bcrypt(app) # flask-bcrypt
lm = LoginManager() # flask-loginmanager
lm.init_app(app) # init the login manager
ma = Marshmallow(app) # Init marshmallow
dropzone = Dropzone(app)
celery = make_celery(app)
store = HttpExposedFileSystemStore(
path='images',
prefix='/static/assets/images/'
)
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1)
app.wsgi_app = store.wsgi_middleware(app.wsgi_app)
socket_io = SocketIO(app, cors_allowed_origins="*")
alerts_namespace = AlertsNamespace('/alerts')
socket_io.on_namespace(alerts_namespace)
@app.teardown_appcontext
def shutdown_session(exception=None):
db.session.remove()
from app import views
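# views is imported at the bottom of the module, after app and its extensions exist,
# presumably to avoid a circular import.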

View File

@ -0,0 +1,103 @@
[alembic]
# path to migration scripts
script_location = app/alembic
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python-dateutil library that can be
# installed by adding `alembic[tz]` to the pip requirements
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator"
# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
# version path separator; As mentioned above, this is the character used to split
# version_locations. Valid values are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
version_path_separator = os # default: use os.pathsep
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
#sqlalchemy.url = driver://user:pass@localhost/dbname
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,app,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[logger_app]
level = INFO
handlers =
qualname = app
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(asctime)s :: %(levelname)s :: %(module)s :: %(funcName)s :: %(message)s
datefmt = %Y-%m-%d %H:%M:%S
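# Apply migrations with the standard Alembic CLI, e.g.:
#   alembic upgrade head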

View File

@ -0,0 +1 @@
Generic single-database configuration.

View File

@ -0,0 +1,27 @@
from alembic import op
from sqlalchemy import engine_from_config
from sqlalchemy.engine import reflection
def _table_has_column(table, column):
config = op.get_context().config
engine = engine_from_config(
config.get_section(config.config_ini_section), prefix='sqlalchemy.')
insp = reflection.Inspector.from_engine(engine)
has_column = False
for col in insp.get_columns(table):
if column != col['name']:
continue
has_column = True
return has_column
def _has_table(table_name):
config = op.get_context().config
engine = engine_from_config(
config.get_section(config.config_ini_section), prefix="sqlalchemy."
)
inspector = reflection.Inspector.from_engine(engine)
tables = inspector.get_table_names()
return table_name in tables
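# Example usage inside a migration (see the revision files below):
#   if not _table_has_column('server_settings', 'prevent_post_objects_repush'):
#       op.add_column('server_settings', sa.Column('prevent_post_objects_repush', sa.Boolean(), default=False))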

View File

@ -0,0 +1,82 @@
from alembic import context
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
import os
os.environ["ALEMBIC"] = "1"
from app.configuration import SQLALCHEMY_BASE_ADMIN_URI, PG_DB_
config.set_main_option('sqlalchemy.url', SQLALCHEMY_BASE_ADMIN_URI + PG_DB_)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
# with context.begin_transaction():  # disabled: fixes a stuck transaction; needs more investigation
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()

View File

@ -0,0 +1,24 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}

View File

@ -0,0 +1,31 @@
"""Add prevent post-init to register case objects again during boot
Revision ID: 00b43bc4e8ac
Revises: 2604f6962838
Create Date: 2023-05-05 18:43:07.236041
"""
from alembic import op
import sqlalchemy as sa
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '00b43bc4e8ac'
down_revision = '2604f6962838'
branch_labels = None
depends_on = None
def upgrade():
if not _table_has_column('server_settings', 'prevent_post_objects_repush'):
op.add_column('server_settings',
sa.Column('prevent_post_objects_repush', sa.Boolean(), default=False)
)
pass
def downgrade():
if _table_has_column('server_settings', 'prevent_post_objects_repush'):
op.drop_column('server_settings', 'prevent_post_objects_repush')
pass

View File

@ -0,0 +1,63 @@
"""Add tags to assets
Revision ID: 0db700644a4f
Revises: 6a3b3b627d45
Create Date: 2022-01-06 13:47:12.648707
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
from app.alembic.alembic_utils import _table_has_column
revision = '0db700644a4f'
down_revision = '6a3b3b627d45'
branch_labels = None
depends_on = None
def upgrade():
# Now issue changes on existing tables and migrate Asset tags
# Add column asset_tags to CaseAssets if not existing
if not _table_has_column('case_assets', 'asset_tags'):
op.add_column('case_assets',
sa.Column('asset_tags', sa.Text)
)
if _table_has_column('case_assets', 'asset_tags'):
# Set schema and make migration of data
t_case_assets = sa.Table(
'case_assets',
sa.MetaData(),
sa.Column('asset_id', sa.Integer, primary_key=True),
sa.Column('asset_name', sa.Text),
sa.Column('asset_description', sa.Text),
sa.Column('asset_domain', sa.Text),
sa.Column('asset_ip', sa.Text),
sa.Column('asset_info', sa.Text),
sa.Column('asset_compromised', sa.Boolean),
sa.Column('asset_type_id', sa.ForeignKey('asset_type.asset_id')),
sa.Column('asset_tags', sa.Text),
sa.Column('case_id', sa.ForeignKey('cases.case_id')),
sa.Column('date_added', sa.DateTime),
sa.Column('date_update', sa.DateTime),
sa.Column('user_id', sa.ForeignKey('user.id')),
sa.Column('analysis_status_id', sa.ForeignKey('analysis_status.id'))
)
# Migrate existing Assets
conn = op.get_bind()
res = conn.execute("SELECT asset_id from case_assets WHERE asset_tags IS NULL;")
results = res.fetchall()
if results:
for res in results:
conn.execute(t_case_assets.update().where(t_case_assets.c.asset_id == res[0]).values(
asset_tags=''
))
def downgrade():
pass

View File

@ -0,0 +1,45 @@
"""Add module types
Revision ID: 10a7616f3cc7
Revises: 874ba5e5da44
Create Date: 2022-02-04 07:46:32.382640
"""
import sqlalchemy as sa
from alembic import op
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '10a7616f3cc7'
down_revision = '874ba5e5da44'
branch_labels = None
depends_on = None
def upgrade():
# Issue changes on the existing iris_module table and migrate existing rows
# Add column module_type to iris_module if not existing and set existing modules to 'pipeline'
if not _table_has_column('iris_module', 'module_type'):
op.add_column('iris_module',
sa.Column('module_type', sa.Text)
)
t_ua = sa.Table(
'iris_module',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('module_type', sa.Text)
)
conn = op.get_bind()
conn.execute(t_ua.update().values(
module_type='pipeline'
))
pass
def downgrade():
pass

Some files were not shown because too many files have changed in this diff.