first sync
Some checks failed
Deployment Verification / deploy-and-test (push) Failing after 29s

This commit is contained in:
2025-03-04 07:59:21 +01:00
parent 9cdcf486b6
commit 506716e703
1450 changed files with 577316 additions and 62 deletions

30
iris-web/.bumpversion.cfg Normal file
View File

@ -0,0 +1,30 @@
[bumpversion]
current_version = 2.3.3
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(-(?P<release>.*)-(?P<build>\d+))?
serialize =
{major}.{minor}.{patch}-{release}-{build}
{major}.{minor}.{patch}
[bumpversion:part:release_name]
first_value = regular
optional_value = regular
values =
alpha
beta
rc
test
regular
[bumpversion:file:source/app/configuration.py]
search = IRIS_VERSION = "v{current_version}"
replace = IRIS_VERSION = "v{new_version}"
[bumpversion:file:README.md]
search = v{current_version}
replace = v{new_version}
[bumpversion:file:docker-compose.yml]
search = :v{current_version}
replace = :v{new_version}

15
iris-web/.deepsource.toml Normal file
View File

@ -0,0 +1,15 @@
version = 1
test_patterns = ["source/tests/**"]
exclude_patterns = [
"source/dependencies/**",
"source/app/templates/**",
"source/static/assets/**"
]
[[analyzers]]
name = "python"
[analyzers.meta]
runtime_version = "3.x.x"

61
iris-web/.env Normal file
View File

@ -0,0 +1,61 @@
# -- NGINX
SERVER_NAME=iris.app.dev
KEY_FILENAME=iris_dev_key.pem
CERT_FILENAME=iris_dev_cert.pem
# -- DATABASE
POSTGRES_USER=postgres
POSTGRES_PASSWORD=__MUST_BE_CHANGED__
POSTGRES_ADMIN_USER=raptor
POSTGRES_ADMIN_PASSWORD=__MUST_BE_CHANGED__
POSTGRES_DB=iris_db
POSTGRES_SERVER=db
POSTGRES_PORT=5432
# -- IRIS
DOCKERIZED=1
IRIS_SECRET_KEY=AVerySuperSecretKey-SoNotThisOne
IRIS_SECURITY_PASSWORD_SALT=ARandomSalt-NotThisOneEither
IRIS_UPSTREAM_SERVER=app
IRIS_UPSTREAM_PORT=8000
# -- WORKER
CELERY_BROKER=amqp://rabbitmq
# -- AUTH
IRIS_AUTHENTICATION_TYPE=local
## optional
IRIS_ADM_PASSWORD=MySuperAdminPassword!
#IRIS_ADM_API_KEY=B8BA5D730210B50F41C06941582D7965D57319D5685440587F98DFDC45A01594
#IRIS_ADM_EMAIL=admin@localhost
IRIS_ADM_USERNAME=administrator
# requests the just-in-time creation of users with ldap authentication (see https://github.com/dfir-iris/iris-web/issues/203)
#IRIS_AUTHENTICATION_CREATE_USER_IF_NOT_EXIST=True
# the group to which newly created users are initially added, default value is Analysts
#IRIS_NEW_USERS_DEFAULT_GROUP=
# -- FOR LDAP AUTHENTICATION
#IRIS_AUTHENTICATION_TYPE=ldap
#LDAP_SERVER=127.0.0.1
#LDAP_AUTHENTICATION_TYPE=SIMPLE
#LDAP_PORT=3890
#LDAP_USER_PREFIX=uid=
#LDAP_USER_SUFFIX=ou=people,dc=example,dc=com
#LDAP_USE_SSL=False
# base DN in which to search for users
#LDAP_SEARCH_DN=ou=users,dc=example,dc=org
# unique identifier to search the user
#LDAP_ATTRIBUTE_IDENTIFIER=cn
# name of the attribute to retrieve the user's display name
#LDAP_ATTRIBUTE_DISPLAY_NAME=displayName
# name of the attribute to retrieve the user's email address
#LDAP_ATTRIBUTE_MAIL=mail
#LDAP_VALIDATE_CERTIFICATE=True
#LDAP_TLS_VERSION=1.2
#LDAP_SERVER_CERTIFICATE=
#LDAP_PRIVATE_KEY=
#LDAP_PRIVATE_KEY_PASSWORD=
# -- LISTENING PORT
INTERFACE_HTTPS_PORT=8443

2
iris-web/.github/FUNDING.yml vendored Normal file
View File

@ -0,0 +1,2 @@
github: [whikernel]
open_collective: dfir-iris

View File

@ -0,0 +1,38 @@
---
name: Bug report
about: Create a report to help us improve
title: "[BUG] "
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.

View File

@ -0,0 +1,22 @@
---
name: Feature request
about: Suggest an idea for this project
title: "[FR]"
labels: enhancement
assignees: ''
---
*Please ensure your feature request is not already on the roadmap or associated with an issue. This can be checked [here](https://github.com/orgs/dfir-iris/projects/1/views/4).*
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

25
iris-web/.gitignore vendored Normal file
View File

@ -0,0 +1,25 @@
flask/
*.pyc
dev
node_modules
source/app/database.db
source/app/build
yarn.lock
yarn-error.log
*.psd
test/
source/app/config.priv.ini
source/app/config.test.ini
.idea/
libesedb-*/
orcparser/
.DS_Store
.vscode/
*.code-workspace
nohup.out
celerybeat-schedule.db
.scannerwork/
source/app/static/assets/dist/
source/app/static/assets/img/graph/*
!source/app/static/assets/img/graph/*.png
run_nv_test.py

27
iris-web/CODESTYLE.md Normal file
View File

@ -0,0 +1,27 @@
# Coding style
If you wish to develop in DFIR-IRIS, please make sure to read the following tips.
## Commits
Try to follow the repository convention :
- If it's not linked to an issue, use the format `[action] Commit message`, with `action` being a 3-letter action related to the commit, e.g. `ADD` for additions, `DEL` for deletions, `IMP` for improvements, etc.
- If it's linked to an issue, prepend with the issue ID, i.e. `[#issue_id][action] Commit message`
## Code
The code should be pretty easy to apprehend. It's not perfect but it will improve over time.
Some documentation about development is available [here](https://dfir-iris.github.io/development/).
Here are the main takes :
- **Routes** : these are the things that describe how URIs should be handled. Routes are split by categories as in the UI menu.
They are defined in `source > app > blueprints`. A route providing a web page (i.e non API) relies on templates.
Each page template is present in the `templates` directory of the target route.
- **Database requests**: we are trying to split the DB code from the routes code. This is partially done and will improve over time. The DB code is provided in `source > app > datamgmt`.
- **HTML pages**: as specified above each page template is set in the `templates` directory of the corresponding route. These templates are based on layouts, which are defined in `source > app > templates`.
- **Static contents** : images, JS and CSS are defined in `source > app > static > assets`.
If your code implies database changes, please create an alembic migration script.
```
alembic -c app/alembic.ini revision -m <What's changed>
```
Then modify the script in `source > app > alembic` so that the migration can be done automatically.

47
iris-web/CONFIGURATION.md Normal file
View File

@ -0,0 +1,47 @@
# IRIS Configuration
In order to connect to the database and other systems certain configurations are needed. This document lists all available configurations.
## How to set configuration variables
There are 3 different options to set configuration variables
1. Azure Key Vault
2. Environment Variables
3. The config.ini file
### Azure Key Vault
The first option that is checked is the Azure Key Vault. In order to use this the `AZURE_KEY_VAULT_NAME` should be specified.
Since Azure Key Vault does not support underscores, you should replace them with dashes in the configuration name. For example: `POSTGRES_USER` becomes `POSTGRES-USER`.
### Environment Variables
The second option is using environment variables, which gives the most amount of flexibility.
### Config.ini
The last and fallback option is the config.ini. Within the project there is a `config.model.ini`, which is not used but gives an example of how the file should look. If the application is started with the environment variable `DOCKERIZED=1` then the `config.docker.ini` is loaded, otherwise the `config.priv.ini` is loaded.
## Environment variable only
A few configs are environment variables only:
- `IRIS_WORKER` - Specifies if the process is the worker
- `DOCKERIZED` - Should be set if running in docker, also loads the other config.ini
## Configuration options
## POSTGRES
The POSTGRES section has the following configurations:
- `POSTGRES_USER` - The user IRIS uses
- `POSTGRES_PASSWORD` - The password for the user IRIS uses
- `POSTGRES_ADMIN_USER` - The user IRIS uses for table migrations
- `POSTGRES_ADMIN_PASSWORD` - The password for the user IRIS uses for table migrations
- `POSTGRES_SERVER` - The server address
- `POSTGRES_PORT` - The server port
## CELERY
- `CELERY_BROKER` - The broker address used by [Celery](https://github.com/celery/celery)
## IRIS
- `IRIS_SECRET_KEY` - The secret key used by Flask.
- `IRIS_SECURITY_PASSWORD_SALT` - ??

20
iris-web/CONTRIBUTING.md Normal file
View File

@ -0,0 +1,20 @@
# Contributing to DFIR-IRIS
*This applies to any repository present in the DFIR-IRIS organisation.*
We are an open project, and we gladly accept contributions of any kinds. The two main ways to contribute are by
creating issues or submitting pull requests.
## Issues
Please try to follow the templates that are provided for feature requests and bugs. Also ensure that your feature or issue
is not already mentioned in the [roadmap](https://github.com/orgs/dfir-iris/projects/1/views/4). If an issue is similar
but does not fit perfectly with what you have in mind, you can add comments to it, and it will be taken into account.
If you want to report a security issue, please read the [security page](https://github.com/dfir-iris/iris-web/SECURITY.md).
## Pull requests
Please make sure to follow the [code guideline](https://github.com/dfir-iris/iris-web/CODESTYLE.md) when writing your code.
The pull requests must be submitted on the `develop` branch of the project. Ensure that before submitting you are
up-to-date with it.
## Others
If you have any ideas not directly linked to the code itself, you can directly contact us by [email](mailto:contact@dfir-iris.org).

165
iris-web/LICENSE.txt Normal file
View File

@ -0,0 +1,165 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

118
iris-web/README.md Normal file
View File

@ -0,0 +1,118 @@
<p align="center">
<img src="source/app/static/assets/img/logo.ico" />
</p>
<p align="center">
Incident Response Investigation System
<br>
<i>Current Version v2.3.3</i>
<br>
<a href="https://v200.beta.dfir-iris.org">Online Demonstration</a>
</p>
# IRIS
[![License: LGPL v3](https://img.shields.io/badge/License-LGPL_v3-blue.svg)](./LICENSE.txt)
Iris is a web collaborative platform that aims to help incident responders share technical details during investigations.
![demo_timeline](img/timeline_speed.gif)
## Table of contents
- [Getting Started](#getting-started)
- [Run IrisWeb](#run-irisweb)
- [Configuration](#configuration)
- [Versioning](#versioning)
- [Showcase](#showcase)
- [Documentation](#documentation)
- [Upgrades](#upgrades)
- [API](#api)
- [Help](#help)
- [Considerations](#considerations)
- [License](#license)
## Getting started
It is divided into two main parts, IrisWeb and IrisModules.
- IrisWeb is the web application which contains the core of
Iris (web interface, database management, etc).
- IrisModules are extensions of the core that allow third parties to process
data via Iris (eg enrich IOCs with MISP and VT, upload and injection of EVTX into Splunk).
IrisWeb can work without any modules though defaults ones are preinstalled. Head to ``Manage > Modules`` in the UI
to configure and enable them.
### Running Iris
To ease the installation and upgrades, Iris is shipped in Docker containers. Thanks to Docker compose,
it can be ready in a few minutes.
``` bash
# Clone the iris-web repository
git clone https://github.com/dfir-iris/iris-web.git
cd iris-web
# Checkout to the last tagged version
git checkout v2.3.3
# Copy the environment file
cp .env.model .env
# Build the dockers
docker-compose build
# Run IRIS
docker-compose up
```
Iris shall be available on the host interface, port 443, protocol HTTPS - ``https://<your_instance_ip>``.
By default, an ``administrator`` account is created. The password is printed in stdout the very first time Iris is started. It won't be printed anymore after that.
``WARNING :: post_init :: create_safe_admin :: >>>`` can be searched in the logs of the `webapp` docker to find the password.
The initial password can be set via the [configuration](https://docs.dfir-iris.org/operations/configuration/).
Iris is split on 5 Docker services, each with a different role.
- ``app``: The core, including web server, DB management, module management etc.
- ``db``: A PostgreSQL database
- ``RabbitMQ``: A RabbitMQ engine to handle jobs queuing and processing
- ``worker``: Jobs handler relying on RabbitMQ
- ``nginx``: A NGINX reverse proxy
### Configuration
There are three different options for configuring the settings and credentials: Azure Key Vault, Environment Variables and Configuration Files. This is also the order of priority; if a setting is not set it will fall back on the next option.
For all available configuration options see [configuration](https://docs.dfir-iris.org/operations/configuration/).
## Versioning
Starting from version 2.0.0, Iris is following the [Semantic Versioning 2.0](https://semver.org/) guidelines.
The code ready for production is always tagged with a version number.
``alpha`` and ``beta`` versions are **not** production-ready.
Do not use the ``master`` branch in production.
## Showcase
You can directly try Iris on our [demo instance](https://v200.beta.dfir-iris.org).
One can also head to [tutorials](https://docs.dfir-iris.org/operations/tutorials/), we've put some videos there.
## Documentation
A comprehensive documentation is available on [docs.dfir-iris.org](https://docs.dfir-iris.org).
### Upgrades
Please read the release notes when upgrading versions. Most of the time the migrations are handled automatically, but some
changes might require some manual labor depending on the version.
### API
The API reference is available in the [documentation](https://docs.dfir-iris.org/operations/api/#references) or [documentation repository](https://github.com/dfir-iris/iris-doc-src).
## Help
You can reach us on [Discord](https://discord.gg/76tM6QUJza) or by [mail](mailto:contact@dfir-iris.org) if you have any question, issue or idea!
We are also on [Twitter](https://twitter.com/dfir_iris) and [Matrix](https://matrix.to/#/#dfir-iris:matrix.org).
## Considerations
Iris is still in its early stage. It can already be used in production, but please set backups of the database and DO NOT expose the interface on the Internet. We highly recommend using a private dedicated and secured network.
## License
The contents of this repository is available under [LGPL3 license](LICENSE.txt).
## Sponsoring
Special thanks to Deutsche Telekom Security GmbH for sponsoring us!

6
iris-web/SECURITY.md Normal file
View File

@ -0,0 +1,6 @@
## Reporting security vulnerabilities
*This applies to any repository present in the DFIR-IRIS organisation.*
In case a security vulnerability is found, we kindly ask you to report it by [email](mailto:report@dfir-iris.org) instead of creating an issue.
This will give us time to patch and create a new release.
We are fully transparent and any raised security issue will be reported in the security advisory section once patched.

View File

@ -0,0 +1,32 @@
-----BEGIN CERTIFICATE-----
MIIFhTCCA22gAwIBAgIUVRU9xRSCLLph9QYUqpRs4DW3634wDQYJKoZIhvcNAQEL
BQAwUjELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxEjAQBgNVBAoM
CURGSVItSVJJUzEaMBgGA1UEAwwRREZJUi1JUklTLVJvb3QtQ0EwHhcNMjIwMTE4
MTAxNjM3WhcNMzIwMTE2MTAxNjM3WjBSMQswCQYDVQQGEwJGUjETMBEGA1UECAwK
U29tZS1TdGF0ZTESMBAGA1UECgwJREZJUi1JUklTMRowGAYDVQQDDBFERklSLUlS
SVMtUm9vdC1DQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANXa9X5Y
Glp4xy6ZFpgB8Db7t9a2qih+vxQ6J2RPHrVIH3LRJKb6Qt813toHG1V+T8a9msQ6
WutlR/hwcmJiP9kEqY1hAQ8fHYMimGt1aHXccTItKkoFRbhlOofwEkRLOk1GUUWC
9wjeYCzJ+ODaQljwC+uYg97lfeNILxXhmMJuhozXG+0LQch27CYUO5HZfJNYuVsh
lrGgrJu+o7aumonBcfyDHkWL9n4aZ4xPKIkzcIbqnfenRLsH8ZQF+PqcIUi9lDMo
B3zJg+31+L3G1/HEVkqO6OW1tM3Az66ihh6PG2+ETj07wMHP5TmbYerBDmk6L5Ft
By0tg+tDQ2oqd2iMXCegR2KeFgdJTjQqR+p+gccAJIYaM1vLZVgE+0phMKtm0ZiA
8E6E73HzmOLV3UO+YySF/E2esv/pB31TxE1Uwt35U11zRWS6tMqfdCE2T48wnHTu
5oUUgA2FO7gbGRUWz71LzdaF9/6tOAR3OY4d1iZYTiQBfeUSvcAQk4vPr+FyDDvM
JMQ5WwAG+VICQa1bn37RnJ3+5vUeYSFlEBRFABZYowzzUiQK1EFTUsA3Ou3i9AEp
jiYYxa4ctQA7zbTWYR8jmpMQ8DKxgZtHtio+3HKcGCndYmD8uV52weMXJNQXqV0c
v9Sr0e1nFMV2okYbhZ3KfI+2lsH1OL9hwuR1AgMBAAGjUzBRMB0GA1UdDgQWBBTF
rVzkgqJCIH/26xCCuQRdCA5L9zAfBgNVHSMEGDAWgBTFrVzkgqJCIH/26xCCuQRd
CA5L9zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBZs0BaFArd
3G42S5wKe/wM+llSLZHiAgCPeVRaGEe4M6mcG7hGOj0IrtoA4j1GvCMch6j65Dw3
wkQB2NSwEesSpnteSYFQ9UJmw/gzw+WSC/SVCRrEJ1/dKbdWQ9VjgwVRgekx2pyb
XXYBl9HTmNXJ55hnJ+3s9+iGJkiQu/zq9d5Yg0AP/4HjH4e6XgRZmN+7MsyCatt1
yJ0ekCJcg5MN/9mHKpYmfdDEfpP8b2/N5WmSqbKhEU43aNdQ68QiCwLDwj3piOwu
tPrj4CnTaVWjRd2AzzrhpZgvRwx7VZpCcwqk2pnNwiklyAsZQo6iEbzxChLJSnIj
Pw2a3wrPmdRgchbLYOr/oMcMmbxWlC7qQQiuzhMXncPJHAghe28GxKVMETArWkj6
L4+QsH7jaC45nTPRXFUmUbL7+Uz/8O17SswL4Thdp8/ZK5uqhx199whivrdhnzGh
MrSKiiDWst6hMo5HfJkPE4/UAXRkfbWAvj6WqEJX3Z1OgiHAvXtU1FPYhB7X9mla
vslWFT0W818GsP71a9TlwvSjVYto4U+kQXD0sk4ewmJuSz9oK2CwJh/gR5WJG4SC
YaVzHS7+8UtJ3m4H4+NQJIo1jHdzIy5nv3v/hDZXsWKE0O4ClDdt/bzqv8f21G9O
aVTSz0kMMqog8lQgdymRLxJ8BU1P9rszoA==
-----END CERTIFICATE-----

View File

@ -0,0 +1 @@
52366C4851D9245D91B07E288C197FE8DA976FA9

View File

@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEA1dr1flgaWnjHLpkWmAHwNvu31raqKH6/FDonZE8etUgfctEk
pvpC3zXe2gcbVX5Pxr2axDpa62VH+HByYmI/2QSpjWEBDx8dgyKYa3VoddxxMi0q
SgVFuGU6h/ASREs6TUZRRYL3CN5gLMn44NpCWPAL65iD3uV940gvFeGYwm6GjNcb
7QtByHbsJhQ7kdl8k1i5WyGWsaCsm76jtq6aicFx/IMeRYv2fhpnjE8oiTNwhuqd
96dEuwfxlAX4+pwhSL2UMygHfMmD7fX4vcbX8cRWSo7o5bW0zcDPrqKGHo8bb4RO
PTvAwc/lOZth6sEOaTovkW0HLS2D60NDaip3aIxcJ6BHYp4WB0lONCpH6n6BxwAk
hhozW8tlWAT7SmEwq2bRmIDwToTvcfOY4tXdQ75jJIX8TZ6y/+kHfVPETVTC3flT
XXNFZLq0yp90ITZPjzCcdO7mhRSADYU7uBsZFRbPvUvN1oX3/q04BHc5jh3WJlhO
JAF95RK9wBCTi8+v4XIMO8wkxDlbAAb5UgJBrVufftGcnf7m9R5hIWUQFEUAFlij
DPNSJArUQVNSwDc67eL0ASmOJhjFrhy1ADvNtNZhHyOakxDwMrGBm0e2Kj7ccpwY
Kd1iYPy5XnbB4xck1BepXRy/1KvR7WcUxXaiRhuFncp8j7aWwfU4v2HC5HUCAwEA
AQKCAgEAn7DTZLcRZsGNquQyFOxNniE1VCYuxfJvaQFL7QGP4rqqkShPgEicquUl
NhXceWjK1ZM8AI+62NBWf9Qn9gN7vehXW/U1vz7y4LtyqbuQd4JXHKrRS1jIiTs+
C8hfO5QZQx7hDVEQexTjKE7hg7Y3mQYXQKQwxL8F1DRQxLwjP/0ciAsRFV80jicP
jBfLq8uF1NmJ/90DFDzw55Ph2EZlq7xCC9c1QaWUOPIqpIFvuZQp0PVdZFMJZcg2
wtv64di4mgLGqbSYcrxfwc/NIJldI3IDJcW1b+LR0lrKOpOuJx+h0xIvAlaeR5ug
hfXbllr3EeibILMTis7UFVey/ZIcex0iq+LDNQCmE9kuxAnf8z9NFAxeXqUu1nt7
26ik/Ay1nBN0CosCIvp86EZipi4nnOLjw31nUCAxbUIyvidbg4+tfHFtAXLBc/Cb
8qljjzKqnKN3fbKkxIZGOeB/HI0Yxm18zhdiNbNONX/ZZ1RNJ1NHJYNEjuACDSWC
PdD9rE5chswA+3Zp+Glz/cEWZdDj8GaHn5Q7az9vVwYykxiRAhM2ew0kIUkG1SGD
z25n0zutV9bZ9evK7PonKDz3xHNodDiWI8uP55w0PAK+6P3CvqFxs4czofL3xhbR
40C9gq5icpN/gSe69HxfTAXFANPxGjotAlhftGxH/AX+mwJefYECggEBAPqDuUGj
4RTb3oAzECJCB25ZfTuelaW19MclvF/wW2SjqOpygablZV1gsWVVYD1hLkNk3YRA
KCcPr13ZCVbvwQZ3GHOJ/zjV1z3FOPdFko2gOgnX9N5rIhYXBxuneoD58rGQQAQs
Q7BMPPqAg+aotD94a+HWBhEHWF0CsbQwWTgdlDe/9u0na4TDrOKRSFbVO28IvJFO
uixVmg+mexPZEHet40F/migM+42lAwMxa1zGMaX59HzLpNMTxxV5om1u7aIjMbVt
gZxT987JBCMAGXmhOd7CUu/oPfnTJD/QdWTv2DeRpGY8aGBMnTRSD6NTWInYwqvA
+m951CWjG1fnqMUCggEBANqJvUyzY41VS7OtGmx12QX3KTZO3w8HrD5P3uoESVGk
ThHdjntQa/s1eV0w6nMqxB+k7ZSBVIoYe96jeJyETvsFjxqVSFexoN/LPM0neKUu
WPruIj+8xPo7Xfzkcu/1CXEVtJpgEF+S6kUxjwkWigol5djwcTHWLcHRWBpE2bR0
DKGXsaZe/xwqOppIjV8qcU3wkN7Kih1xXB3cDHw8KK4hXE2T4jXpMccTAur5k7Eo
mrH/aZ9abup2tIbG2gydnbyizVrSkFnmquZDT3E7tTRBMMM9ik9Hg9i0kBh+eQGe
qGBuqL27Uk90p8WdBbBlcRqTy9PYC/C5boG4rxeLJ/ECggEAHuidtOGZZc9y9nJW
bUOkUxMrhm2cnSOEHgYj5dpsDFC9CKA0Kvlmtky11oDgLPKOmLYbNrQLwDYJNxUO
N/HA6SFMnQTLvqFzuyVYT/n/iQXZ41kH12F0hTE2KU1SqhMsxIe9vkYP2/KsG2Q7
4fuysZoUBXs4qGU1m9Q3RLoqZ+gOX4qJ1tzkQS87Z4DxYfVRLfPwACWshsfRCFlM
GjjP9VQ5E93AkWx4pRNU+dHhI0M44PekGLmvjnOEPrwRNFtZaoXZHj9ynG0nyBW5
MvBSNFWWJEvpm7wV2XsZn94Cff+xt7l6hTqyzh7lzozJbSddZzSdYD+hJpvrdvFW
8FOe2QKCAQEAypJ1G30Job/X/URPQwx2UFZMKGAx2c/F9Li6q/evMvN3vo29/kaM
4X4u8pheKsUQqTiLVWYQxDVv1O945LSsNXlwrjaEqW0o72mIAa894Pe2WVuV+bj0
afPP6pSkihN8Xgu9rn+vjbg0WlFXAhiXelKo3U/7zTN4lLmFzkvV9bTA5KUlck+K
cEQgsFTiXr2L67A7yZi7MBGdTrxkAmENYGPiGLMlM83ma18PDFquccBurOJRuPnt
6H6CVpBLHPiZd3r9mdunHP55mhn4sMCk9jwbhE8uPtDOwXiWPW42oq676y+IUN8r
rCU5Qy+LT3iov/cSMFuKrehlK+/StaMzMQKCAQBK/jSm8RANTNXPemCDYuvitwvv
eGa3tmzUjo7HhNtaw2D6ff4eFSk8hdNzv/aQoGf7qqKjQWFGc2r9wF6GCmRLodDB
j7ob/e6VscYkWXM5DcRnrNQXwsPQK0szaDt6YBvUM/cmriT+xOOwqOJGYoW8lmAv
5q92QSDADLJPQRsLsKeABdXLWq5lnOcK+YCp/ABi5NItHkO9P7DEg5eEOX4c0BpG
LzbjkDNSXSx9VT6tIuCsFe4r/ucQwTqor4HVBmRKFiyrUJq9c1OeWqOYBdv6TLC+
uFBFC/M1QaLwklTBYgzyYHWM3nZYeiCl1+aP7Kc5xyJkR4R8F1SSeIGz5Mhq
-----END RSA PRIVATE KEY-----

View File

@ -0,0 +1,23 @@
-----BEGIN CERTIFICATE-----
MIID5zCCAs+gAwIBAgIUR1vu5qp+qpHjXhQ/6sUnFOtgW2kwDQYJKoZIhvcNAQEL
BQAwgYIxCzAJBgNVBAYTAkZSMRYwFAYDVQQIDA1JbGUgZGUgRnJhbmNlMQ4wDAYD
VQQHDAVQYXJpczEYMBYGA1UECgwPQ1NJUlQtRlIgQWlyYnVzMRowGAYDVQQLDBFJ
bmNpZGVudCBSZXNwb25zZTEVMBMGA1UEAwwMaXJpcy5hcHAuZGV2MB4XDTIxMTIw
OTE0MTUyMloXDTIyMTIwOTE0MTUyMlowgYIxCzAJBgNVBAYTAkZSMRYwFAYDVQQI
DA1JbGUgZGUgRnJhbmNlMQ4wDAYDVQQHDAVQYXJpczEYMBYGA1UECgwPQ1NJUlQt
RlIgQWlyYnVzMRowGAYDVQQLDBFJbmNpZGVudCBSZXNwb25zZTEVMBMGA1UEAwwM
aXJpcy5hcHAuZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlZWm
hypo/ZJMjmqHSBviR9pzYJYaiSlafeEUa/9LlBe4Ecov74XLVy+3TuG3w1YFzD1M
57j+EJrcZcl5E67uIVreAtJNLdgqDyCk6nCk3BdGgEnhcmQCevLXaCsBH+Z9lBRy
ruuTQAihq3QJztosTuI+so9AaZgSmOm17vL45S3QiFIPUB/Pgv60BfYkd0SV1V4Y
709IKvlCXSixryA0hkqT12D6fNFDPqwbn1o7Ifd7qVqVxD0QS8Wf56PUD8J+41A7
WLzSy/fNKAUOSoOyhWvdh7s5uciqJEXDMh1BvrpBSCmkmW8aprWVOr6yaugmBg58
g4oaM0xWOcFFeIcdrQIDAQABo1MwUTAdBgNVHQ4EFgQUNavtDZIB1hMxp7X0pytN
xACnEigwHwYDVR0jBBgwFoAUNavtDZIB1hMxp7X0pytNxACnEigwDwYDVR0TAQH/
BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAlA9xbWAfoGVOLUY/nuQ+0slryO/e
C2ChAHehtNKJa3DOGJXTOp1qJJdfuHAHZgplfSZp36N87eHfygFaR04RaHNNUVpg
1vnADd0QvDwYiEbRyjLN+EFdxDcbcsqljUUfPMx1zjlA1Ff2dbCkOVYYfm5xDzoE
weFx6inCtZ0pHqWdF5R77n4Rg3dmR/98dXM3nXhFevoAI7FqyauYFL0QFLXvIufg
3zywJrolNLZrrbpkSJ9kWzIZn0OK4Q+5dSnpBEimBZSrJKbZhgS/uzCL5flezKTF
LzHY0CRXC7nXO5dY2baBbIqRvYlCgbmaN4J505Fn6YSmwm3deCan2xyGHg==
-----END CERTIFICATE-----

View File

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCVlaaHKmj9kkyO
aodIG+JH2nNglhqJKVp94RRr/0uUF7gRyi/vhctXL7dO4bfDVgXMPUznuP4Qmtxl
yXkTru4hWt4C0k0t2CoPIKTqcKTcF0aASeFyZAJ68tdoKwEf5n2UFHKu65NACKGr
dAnO2ixO4j6yj0BpmBKY6bXu8vjlLdCIUg9QH8+C/rQF9iR3RJXVXhjvT0gq+UJd
KLGvIDSGSpPXYPp80UM+rBufWjsh93upWpXEPRBLxZ/no9QPwn7jUDtYvNLL980o
BQ5Kg7KFa92Huzm5yKokRcMyHUG+ukFIKaSZbxqmtZU6vrJq6CYGDnyDihozTFY5
wUV4hx2tAgMBAAECggEAV+BbvYpvtZAOA5iXswgWjknKgFKOckfmDo99NNj9KJoq
m+Dg+mDqjWTN1ryJ/Wp663qTxIoMT+r6UZ3j0GlzIgtE4/lyN92HD+4IlGXqpBXU
aCd/F3mjb2FcpKim93usCKNeoF5q2jJ378aywF+xqgIF/VZk6+PYARdDt4XsLI4w
vfJSbjRuynnSHl3kD2atcivAxYDu6AggQPsSPmF66z754eKA3BJIAWRUCdx/llTk
ARizLI4DFHKSYZq9pcKNtCrPIOrUkflG9QPZKn9dI0W+AaSroyqOQQvMY1NT3uEo
TsXYoyxHGH7+tkoaSHX6JteDe4YbZFbQ7z1s6ZHMIQKBgQDHYoS5wpCxOpCUCOxU
4s05n88tOJE7PnUQ1j1AosFes6qY1IFnsUss9ikF0aIL+gYvuEIU1z/ITDmBgWao
bJq6ySCHhyqOMZxMK+nuwIJQEmmIImfxEs8Hf891Cej5NO964VWIsBtNln10yLrj
Rc9J8J643O6YLyGuXDyXdxNcqQKBgQDADxnzPcou1wmlMK6zc7UeZ/bhi6hdYsWU
X3znd5jQZ8576A38g1v742A2yfWIftnNiMrCdwnS8x9Rw1ps160E5MvzEUOckqig
zJXn3PvO7tnReu4/Z4HoTUcbRtbBNMaIFgbW62A4S9CyiFZf9dONHoqhpYvbNJPx
kjGp6Ol3ZQKBgEzz2yIO0+VzIwXfg8cnWenZog5j/LmO24PKDA38QwGX+knOCrvI
k6kgwKh8Rjy1HNoiFW8RvI5DzRYMqWBrujRJGAL2yhfjUd2cPUdmiWT6Fjzyeody
qPDOBXW4g3BbW+pjOa3tujvxzy3ZozfAY8a31aqnqnaWCjvPYZtb298xAoGAYbIM
2D+xLhxyqpXV+DC+jAYEfnylG0PYD353MeMTV8fGMB89phpH2xyxX41iGZm1Pyj7
Qup8k9LaNqQxxjX7rAaafD1m8ClmH82R34z4hi3XnQh0UspbOYi9x/FD4qnu52CV
ABRhMKHYOkjB7zRD9X/4svtb5hibvQFJxA1XXUUCgYBaeZ7tZb8lWkd8v9uZ99qX
wpm2bO+RQpOeNkP31VpO3jj9/0S+SJSRc9a3JnNRLKNtKhNZpTlaP0coBqZqwb+u
gWAvdeZinxFwRj6VXvS8+2SP7ImRL1HgOwDQxDWXQxf3e3Zg7QoZLTea9Lq9Zf2g
JLbJbOUpEOe5W4M8xLItlg==
-----END PRIVATE KEY-----

View File

@ -0,0 +1,41 @@
# Makefile driving deployment of the IRIS Kubernetes manifests (EKS).
# Targets use the "target: ## description" convention so `make help`
# can extract descriptions with awk.
.DEFAULT_GOAL := help

# Directory containing the kustomization folders (admin, rabbitmq, psql, app, worker).
# (Removed unused `file := $2` — `$2` expands to nothing at parse time.)
IN_DIR = $(PWD)

.PHONY: help
help: ## Print the help message
	@awk 'BEGIN {FS = ":.*?## "} /^[0-9a-zA-Z_-]+:.*?## / {printf "\033[36m%s\033[0m : %s\n", $$1, $$2}' $(MAKEFILE_LIST) | \
	sort | \
	column -s ':' -t

.PHONY: create
create: ## Create the iris app
	kubectl apply -k $(IN_DIR)/admin; \
	kubectl apply -k $(IN_DIR)/rabbitmq; \
	kubectl apply -k $(IN_DIR)/psql; \
	kubectl apply -k $(IN_DIR)/app; \
	kubectl apply -k $(IN_DIR)/worker

.PHONY: delete
delete: ## Delete the iris app
	kubectl delete -k $(IN_DIR)/worker ;\
	kubectl delete -k $(IN_DIR)/app ;\
	kubectl delete -k $(IN_DIR)/rabbitmq ;\
	kubectl delete -k $(IN_DIR)/psql ;\
	kubectl delete -k $(IN_DIR)/admin

.PHONY: deploy-specific-kustomization
deploy-specific-kustomization: ## Deploy specific kustomization (ex- make deploy-specific-kustomization ARGS="path of kustomization.yml dir")
	kubectl apply -k $(ARGS)

.PHONY: delete-specific-kustomization
delete-specific-kustomization: ## Delete specific kustomization (ex- make delete-specific-kustomization ARGS="path of kustomization.yml dir")
	kubectl delete -k $(ARGS)

.PHONY: deploy-specific-manifest
deploy-specific-manifest: ## deploy specific manifest (ex- make deploy-specific-manifest ARGS="path of manifest dir")
	kubectl apply -f $(ARGS)

# BUG FIX: this target previously ran `kubectl apply -f` — it must delete.
.PHONY: delete-specific-manifest
delete-specific-manifest: ## delete specific manifest (ex- make delete-specific-manifest ARGS="path of manifest dir")
	kubectl delete -f $(ARGS)

View File

@ -0,0 +1,80 @@
# The Iris EKS manifest to deploy Iris-web on AWS EKS.
Description:
- This manifest file will help to deploy the application on the AWS EKS.
## Prerequisites:
- Install AWS [CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html#getting-started-install-instructions)
- Setup AWS EKS cluster. (terraform example [here](https://github.com/s3lva-kumar/terraform-aws-eks))
- Install AWS ebs CSI driver add-on on EKS cluster. (terraform example [here](https://github.com/s3lva-kumar/terraform-eks-plugin/tree/master/terraform-amazon-ebs-csi-driver))
- Install AWS alb ingress controller add-on on EKS cluster. (terraform example [here](https://github.com/s3lva-kumar/terraform-eks-plugin/tree/master/terraform-amazon-alb-ingress))
## Build & push Docker Images
- To build the docker images follow the commands 👇
``` bash
# Clone the iris-web repository
$ git clone https://github.com/dfir-iris/iris-web.git
$ cd iris-web
# Build the dockers (Build webApp and db docker images, skip the nginx because we using AWS ALB instead of nginx)
# app & worker:
$ docker build -t webapp:latest -f docker/webApp/Dockerfile .
# DB:
$ docker build -t db:latest -f docker/db/Dockerfile .
```
- Once the docker images are built, push them to AWS ECR
## Deploy:
- Before we deploy the manifests, we need to update the Docker image references in our manifests.
*Note: Same docker image to the app and worker*
- ### update app image:
    - Navigate to the deploy/eks_manifest/app directory.
- open the *deployment.yml* file and update the image here, which we pushed on the ECR.
![App Screenshot](./images/app-image-update.png)
- ### update worker image:
    - Navigate to the deploy/eks_manifest/worker directory.
- open the *deployment.yml* file and update the image here, which we pushed on the ECR.
![App Screenshot](./images/worker-image-update.png)
- ### update db image:
    - Navigate to the deploy/eks_manifest/psql directory.
- open the *deployment.yml* file and update the image here, which we pushed on the ECR.
![App Screenshot](./images/db-image-update.png)
- ### update the SSL and domain name on app ingress YAML file
    - Navigate to the deploy/eks_manifest/app directory.
- open the *ingress.yml* file and update the SSL and host
![App Screenshot](./images/ingress.png)
- *Note:*
- SSL :
Give a ACM certificate ARN.
- HOST :
Give the host name whatever you want. In additionally, once the ingress created it will be provisioned the ALB on AWS with this name "iris-alb". Then, configure the DNS 'CNAME' record with hostname *(which you given on ingress file)* point to the AWS alb 'DNS'
![APP Screenshot](./images/alb-dns.png)
- ### Once you have updated everything mentioned above, run the **Makefile**
- Navigate to the *deploy/eks_manifest*, here you can see the 'Makefile'
- To deploy app, run
``` bash
$ make
$ make create
```
- To delete app, run
    *caution: it will delete everything except the DB*
``` bash
$ make
$ make delete
```
- ### Get Admin username and password
- Once everything created we can get administrator username and password from the app _pod_
``` bash
$ kubectl get pod -n iris-web
# Copy the pod name and give it on the below command (pod name looks like "pod/iris-app-deployment-🎲")
$ kubectl logs <pod_name> -n iris-web
# You can see the credential at the end of the logs
```

View File

@ -0,0 +1,14 @@
# PersistentVolumeClaim shared (via subPath) by the IRIS app, worker and
# PostgreSQL deployments in this manifest set.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: iris-web
  name: iris-psql-claim
  labels:
    site: iris
spec:
  accessModes:
    - ReadWriteOnce          # single-node attach — fine while every consumer runs 1 replica
  storageClassName: iris-sc  # EBS-backed StorageClass defined in storageclass.yml
  resources:
    requests:
      storage: 30Gi

View File

@ -0,0 +1,10 @@
# Kustomization for cluster-level prerequisites: namespace, storage class, PVC.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# NOTE(review): the Kustomization kind has no standard `metadata` field —
# recent kustomize versions may warn on or reject it; confirm before keeping.
metadata:
  name: admin-kustomize
  labels:
    site: iris
resources:
  - namespace.yml
  - storageclass.yml
  - claim.yml

View File

@ -0,0 +1,6 @@
# Namespace that holds every IRIS resource in this manifest set.
apiVersion: v1
kind: Namespace
metadata:
  name: iris-web
  labels:
    site: iris

View File

@ -0,0 +1,13 @@
# gp2 EBS StorageClass consumed by iris-psql-claim.
# Requires the AWS EBS CSI driver add-on (see README prerequisites).
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: iris-sc
  labels:
    site: iris
parameters:
  fsType: ext4
  type: gp2
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer  # delay volume creation until a pod is scheduled
allowVolumeExpansion: true
reclaimPolicy: Retain                    # keep the EBS volume after the PVC is deleted

View File

@ -0,0 +1,7 @@
# Non-secret configuration for the IRIS app pod.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: iris-web
  name: app-data
data:
  POSTGRES_SERVER: iris-psql-service  # Service name of the PostgreSQL deployment

View File

@ -0,0 +1,86 @@
# Deployment for the IRIS web application (EKS manifest set).
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: iris-web
  name: iris-app-deployment
  labels:
    site: iris
    app: iris-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: iris-app
  template:
    metadata:
      labels:
        app: iris-app
    spec:
      containers:
        - name: iris-app
          # Custom image built from docker/webApp/Dockerfile; push to ECR and
          # update this reference (see README).
          image: iriswebapp_app:v2.2.2
          ports:
            - containerPort: 8000
          command: ['nohup', './iris-entrypoint.sh', 'iriswebapp']
          env:
            # Database credentials and app secrets come from iris-app-secrets.
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: iris-app-secrets
                  key: POSTGRES_USER
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: iris-app-secrets
                  key: POSTGRES_PASSWORD
            - name: POSTGRES_ADMIN_USER
              valueFrom:
                secretKeyRef:
                  name: iris-app-secrets
                  key: POSTGRES_ADMIN_USER
            - name: POSTGRES_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: iris-app-secrets
                  key: POSTGRES_ADMIN_PASSWORD
            - name: POSTGRES_PORT
              valueFrom:
                secretKeyRef:
                  name: iris-app-secrets
                  key: POSTGRES_PORT
            - name: DOCKERIZED
              valueFrom:
                secretKeyRef:
                  name: iris-app-secrets
                  key: DOCKERIZED
            - name: IRIS_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: iris-app-secrets
                  key: IRIS_SECRET_KEY
            - name: IRIS_SECURITY_PASSWORD_SALT
              valueFrom:
                secretKeyRef:
                  name: iris-app-secrets
                  key: IRIS_SECURITY_PASSWORD_SALT
            # Database hostname comes from the app-data ConfigMap.
            - name: POSTGRES_SERVER
              valueFrom:
                configMapKeyRef:
                  name: app-data
                  key: POSTGRES_SERVER
          volumeMounts:
            # The three data directories share a single PVC through subPath.
            - name: iris-pcv
              mountPath: /home/iris/downloads
              subPath: downloads
            - name: iris-pcv
              mountPath: /home/iris/user_templates
              subPath: user_templates
            - name: iris-pcv
              mountPath: /home/iris/server_data
              subPath: server_data
      volumes:
        - name: iris-pcv
          persistentVolumeClaim:
            claimName: iris-psql-claim

View File

@ -0,0 +1,29 @@
# ALB Ingress exposing the IRIS app over HTTPS (requires the AWS ALB
# ingress controller add-on; see README).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: "iris-ingress"
  namespace: "iris-web"
  annotations:
    alb.ingress.kubernetes.io/scheme: 'internet-facing'
    alb.ingress.kubernetes.io/target-type: 'ip'
    alb.ingress.kubernetes.io/group.name: 'iris-alb-group'
    alb.ingress.kubernetes.io/load-balancer-name: 'iris-alb'
    alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]'
    # Replace with your own ACM certificate ARN (see README "update the SSL").
    alb.ingress.kubernetes.io/certificate-arn: 'arn:aws:acm:us-east-1:650601597349:certificate/4915ba65-ec07-44c7-8f42-897cfe1574bb'
    alb.ingress.kubernetes.io/ssl-policy: 'ELBSecurityPolicy-TLS13-1-2-2021-06'
    alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}'
  labels:
    app: 'iris'
spec:
  ingressClassName: 'alb'
  rules:
    # Replace with your own hostname and point its CNAME at the ALB DNS name.
    - host: 'test.cmcloudlab1727.info'
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: "iris-app-service"
                port:
                  number: 80

View File

@ -0,0 +1,11 @@
# Kustomization for the IRIS web application resources.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# NOTE(review): the Kustomization kind has no standard `metadata` field —
# confirm the kustomize version in use tolerates it.
metadata:
  labels:
    site: iris
resources:
  - secrets.yml
  - configmap.yml
  - deployment.yml
  - service.yml
  - ingress.yml

View File

@ -0,0 +1,17 @@
# Secrets consumed by the app and worker deployments.
# WARNING: values are only base64-encoded (not encrypted) and are committed
# to the repository — change every credential before any real deployment.
apiVersion: v1
kind: Secret
metadata:
  name: iris-app-secrets
  namespace: iris-web
  labels:
    site: iris
type: Opaque
data:
  POSTGRES_USER: cmFwdG9y
  POSTGRES_PASSWORD: YWRtaW4=
  POSTGRES_ADMIN_USER: cmFwdG9y
  POSTGRES_ADMIN_PASSWORD: YWRtaW4=
  POSTGRES_PORT: NTQzMg==
  DOCKERIZED: MQ==
  IRIS_SECRET_KEY: QVZlcnlTdXBlclNlY3JldEtleS1Tb05vdFRoaXNPbmU=
  IRIS_SECURITY_PASSWORD_SALT: QVJhbmRvbVNhbHQtTm90VGhpc09uZUVpdGhlcg==

View File

@ -0,0 +1,17 @@
# ClusterIP Service fronting the IRIS app pods; the ALB Ingress targets it.
apiVersion: v1
kind: Service
metadata:
  namespace: iris-web
  name: iris-app-service
  labels:
    site: iris
  annotations:
    # Path the ALB uses for target health checks.
    alb.ingress.kubernetes.io/healthcheck-path: '/login'
spec:
  selector:
    app: iris-app
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8000  # gunicorn/app listens on 8000 inside the pod
  type: ClusterIP

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 64 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 62 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 64 KiB

View File

@ -0,0 +1,58 @@
# Deployment for the IRIS PostgreSQL database (EKS manifest set).
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: iris-web
  name: iris-psql-db-deployment
  labels:
    app: iris-psql
    site: iris
spec:
  replicas: 1  # single instance — backed by a ReadWriteOnce PVC
  selector:
    matchLabels:
      app: iris-psql
  template:
    metadata:
      labels:
        app: iris-psql
    spec:
      containers:
        - name: iris-psql-db
          # Custom image built from docker/db/Dockerfile; push to ECR and
          # update this reference (see README).
          image: iriswebapp_db:v2.2.2
          ports:
            - containerPort: 5432
          env:
            # Credentials come from the iris-psql-secrets Secret.
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: iris-psql-secrets
                  key: POSTGRES_USER
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: iris-psql-secrets
                  key: POSTGRES_PASSWORD
            - name: POSTGRES_ADMIN_USER
              valueFrom:
                secretKeyRef:
                  name: iris-psql-secrets
                  key: POSTGRES_ADMIN_USER
            - name: POSTGRES_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: iris-psql-secrets
                  key: POSTGRES_ADMIN_PASSWORD
            - name: POSTGRES_DB
              valueFrom:
                secretKeyRef:
                  name: iris-psql-secrets
                  key: POSTGRES_DB
          volumeMounts:
            - name: persistent-storage
              mountPath: /var/lib/postgresql/data
              subPath: psqldata  # database files live in a subdir of the shared PVC
      volumes:
        - name: persistent-storage
          persistentVolumeClaim:
            claimName: iris-psql-claim

View File

@ -0,0 +1,9 @@
# Kustomization for the PostgreSQL resources.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# NOTE(review): the Kustomization kind has no standard `metadata` field —
# confirm the kustomize version in use tolerates it.
metadata:
  labels:
    site: iris
resources:
  - secrets.yml
  - deployment.yml
  - service.yml

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: Secret
metadata:
name: iris-psql-secrets
namespace: iris-web
labels:
site: iris
type: Opaque
data:
POSTGRES_USER: cG9zdGdyZXM=
POSTGRES_PASSWORD: YWRtaW4=
POSTGRES_ADMIN_USER: cmFwdG9y
POSTGRES_ADMIN_PASSWORD: YWRtaW4=
POSTGRES_DB: aXJpc19kYg==

View File

@ -0,0 +1,15 @@
# ClusterIP Service giving the app/worker pods a stable DNS name
# (iris-psql-service, referenced by the app-data and worker-data ConfigMaps).
apiVersion: v1
kind: Service
metadata:
  namespace: iris-web
  name: iris-psql-service
  labels:
    site: iris
spec:
  selector:
    app: iris-psql
  ports:
    - protocol: TCP
      port: 5432
      targetPort: 5432
  type: ClusterIP

View File

@ -0,0 +1,25 @@
# Deployment for RabbitMQ, the Celery broker used by the IRIS worker.
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: iris-web
  name: iris-rabbitmq-deployment
  labels:
    app: iris-rabbitmq
    site: iris
spec:
  replicas: 1
  selector:
    matchLabels:
      app: iris-rabbitmq
  template:
    metadata:
      labels:
        app: iris-rabbitmq
    spec:
      containers:
        - name: iris-rabbitmq
          image: rabbitmq:3-management-alpine  # upstream image, no customization
          ports:
            - containerPort: 5672  # AMQP

View File

@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
labels:
site: iris
resources:
- deployment.yml
- service.yml

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
namespace: iris-web
name: iris-rabbitmq-service
labels:
site: iris
spec:
selector:
app: iris-rabbitmq
ports:
- protocol: TCP
port: 5672
targetPort: 5672
type: ClusterIP

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: iris-web
name: worker-data
data:
POSTGRES_SERVER: iris-psql-service
CELERY_BROKER: amqp://iris-rabbitmq-service
IRIS_WORKER: iris-worker-service

View File

@ -0,0 +1,94 @@
# deployment
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: iris-web
name: iris-worker-deployment
labels:
app: iris-worker
site: iris
spec:
replicas: 1
selector:
matchLabels:
app: iris-worker
template:
metadata:
labels:
app: iris-worker
spec:
containers:
- name: iris-worker
image: iriswebapp_app:v2.2.2
command: ['./wait-for-iriswebapp.sh', 'iris-app-service:8000', './iris-entrypoint.sh', 'iris-worker']
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_USER
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_PASSWORD
- name: POSTGRES_ADMIN_USER
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_ADMIN_USER
- name: POSTGRES_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_ADMIN_PASSWORD
- name: POSTGRES_PORT
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: POSTGRES_PORT
- name: DOCKERIZED
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: DOCKERIZED
- name: IRIS_SECRET_KEY
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: IRIS_SECRET_KEY
- name: IRIS_SECURITY_PASSWORD_SALT
valueFrom:
secretKeyRef:
name: iris-app-secrets
key: IRIS_SECURITY_PASSWORD_SALT
- name: POSTGRES_SERVER
valueFrom:
configMapKeyRef:
name: worker-data
key: POSTGRES_SERVER
- name: CELERY_BROKER
valueFrom:
configMapKeyRef:
name: worker-data
key: CELERY_BROKER
- name: IRIS_WORKER
valueFrom:
configMapKeyRef:
name: worker-data
key: IRIS_WORKER
volumeMounts:
- name: iris-pcv
mountPath: /home/iris/downloads
subPath: downloads
- name: iris-pcv
mountPath: /home/iris/user_templates
subPath: user_templates
- name: iris-pcv
mountPath: /home/iris/server_data
subPath: server_data
volumes:
- name: iris-pcv
persistentVolumeClaim:
claimName: iris-psql-claim

View File

@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
metadata:
labels:
site: iris
resources:
- secrets.yml
- configmap.yml
- deployment.yml
- service.yml

View File

@ -0,0 +1,17 @@
apiVersion: v1
kind: Secret
metadata:
name: iris-worker-secrets
namespace: iris-web
labels:
site: iris
type: Opaque
data:
POSTGRES_USER: cmFwdG9y
POSTGRES_PASSWORD: YWRtaW4=
POSTGRES_ADMIN_USER: cmFwdG9y
POSTGRES_ADMIN_PASSWORD: YWRtaW4=
POSTGRES_PORT: NTQzMg==
DOCKERIZED: MQ==
IRIS_SECRET_KEY: QVZlcnlTdXBlclNlY3JldEtleS1Tb05vdFRoaXNPbmU=
IRIS_SECURITY_PASSWORD_SALT: QVJhbmRvbVNhbHQtTm90VGhpc09uZUVpdGhlcg==

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
namespace: iris-web
name: iris-worker-service
labels:
site: iris
spec:
selector:
app: iris-worker
ports:
- protocol: TCP
port: 80
type: ClusterIP

View File

@ -0,0 +1,26 @@
# Makefile for installing the IRIS Helm chart and its tooling dependencies.
SHELL := /bin/bash

# Verify helm is available, installing it on failure.
check-helm:
	@helm version || $(MAKE) install-helm

# Verify kubectl is available, installing it on failure.
check-kubectl:
	@kubectl version || $(MAKE) install-kubectl

install-helm:
	@curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
	@chmod 700 get_helm.sh
	@./get_helm.sh
	@rm get_helm.sh

# BUG FIX: the stable-version lookup was written as `$(curl ...)` inside single
# quotes — make expands `$(...)` itself (to an empty string) before the shell
# runs, so the download URL was broken. It must be escaped as `$$(...)` and the
# URL double-quoted so the shell performs the command substitution.
install-kubectl:
	@curl -LO "https://dl.k8s.io/release/$$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
	@sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
	@rm kubectl

# Replace <name_space> with the target namespace before use.
install-iris:
	@helm upgrade --install iris charts/ --values charts/values.yaml -n <name_space>

delete-iris:
	@helm delete iris -n <name_space>

check-dependencies: check-helm check-kubectl

View File

@ -0,0 +1,125 @@
# Prerequisites
- Kubernetes cluster must be on the running stage (Kubernetes 1.26+)
- Helm 3.1.0
# Installing the Charts
## Installing Nginx Ingress Controller
The Ingress is a Kubernetes resource that lets you configure an HTTP load balancer for applications running on Kubernetes, represented by one or more Services. Such a load balancer is necessary to deliver those applications to clients outside of the Kubernetes cluster
The Ingress resource supports the following features:
⦿ Content-based routing:
- `Host-based routing:` For example, routing requests with the host header foo.example.com to one group of services and the host header bar.example.com to another group.
- `Path-based routing:` For example, routing requests with the URI that starts with /serviceA to service A and requests with the URI that starts with /serviceB to service B.
⦿ **TLS/SSL** termination for each hostname, such as foo.example.com.
Before installing Iris-web install the Nginx ingress controller
```
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm install my-release ingress-nginx/ingress-nginx -n <Name_Space>
```
> **Info**: `my-release` is the name that you choose
## Installing Iris Web
Clone this Repository
```bash
$ git clone https://github.com/dfir-iris/iris-web.git
```
To install the chart with the release name `my-release`:
```bash
$ helm install my-release charts/ --values charts/values.yaml -n <Name_Space>
```
The command deploys **iris-web** on the Kubernetes cluster in the default configuration.
## Checking Dependencies
To check if Helm and kubectl are installed, run the following command:
```
make check-dependencies
```
If any of the dependencies are missing, the corresponding installation command will be executed automatically.
## Installing Iris
To install Iris, run the following command:
```
make install-iris
```
This will upgrade or install the Iris application using Helm. The installation uses the provided charts/values.yaml file and installs it in the specified namespace.
Replace `<name_space>` with the desired namespace for the Iris application.
## Deleting Iris
To delete the Iris application, run the following command:
```
make delete-iris
```
This will delete the Iris application using Helm. The application will be removed from the specified namespace.
Replace `<name_space>` with the namespace where the Iris application is installed.
> **Tip**: List all releases using `helm list`
# Uninstalling the Charts
To uninstall/delete the `my-release` deployment:
The command removes all the Kubernetes components associated with the chart and deletes the release.
```bash
$ helm delete my-release -n <Name_Space>
```
# Parameters
The [Parameters](#parameters) section lists the parameters that can be configured during installation.
### Common parameters
| Name | Description | Value |
| --| -- | -- |
| `replicaCount` | Number of Iris replicas to deploy | `1` |
### Label parameters
| Name | Description | Value |
| --| -- | -- |
| `app` | Define metadata app name | `string` |
| `name` | Define labels name | `string` |
### Image parameters
Using Dockerfile or Docker compose create images for Iris and apply image to their respective yaml file.
> **Note**: For kubernetes use modified Dockerfile.k8s file to create an images
| Name | Description | Value |
| --| -- | -- |
| `image.repository` | Iris image repository | `string` |
| `image.tag` | Iris image tag | `latest` |
| `image.pullPolicy` | Iris image pull policy | `string` |
### Service parameters
| Name | Description | Value |
| --| -- | -- |
| `service.type` | Iris service type | `LoadBalancer`|
| `service.port` | Iris service port | `80` |
## Ingress parameters
| Name | Description | Value |
| --| -- | -- |
| `host_name` | Hostname for Iris app | `string`|
## How to expose the application?
List the Ingress resource on the Kubernetes cluster
```
kubectl get ingress -n <Name_Space>
```
Expose the application with your Hostname

View File

@ -0,0 +1,11 @@
### Todo
- [ ] ArtifactHub configuration
### In Progress
- [ ] ArtifactHub configuration
### Done ✓
- [ ] ArtifactHub configuration

View File

@ -0,0 +1,24 @@
apiVersion: v2
name: iris-web
description: A Helm chart for Iris Web
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
# NOTE(review): still the helm-create default — confirm it should track the IRIS release.
appVersion: "1.16.0"

View File

@ -0,0 +1,28 @@
{{/*
Post-install notes printed by `helm install`/`helm upgrade`.
Summarizes the release and, when ingress is enabled, how to reach the app.
*/}}
Release Name: {{ .Release.Name }}
Chart Name: {{ .Chart.Name }}
Chart Version: {{ .Chart.Version }}
Chart Description: {{ .Chart.Description }}
The following Kubernetes resources have been deployed:
{{- if .Values.ingress.enabled }}
Ingress:
  - Name: {{ .Release.Name }}-ingress
    Host: {{ index .Values.ingress.hosts 0 "host" }}
    Path: {{ index .Values.ingress.hosts 0 "paths" 0 "path" }}
    Service Name: {{ index .Values.ingress.hosts 0 "paths" 0 "serviceName" }}
    Service Port: {{ index .Values.ingress.hosts 0 "paths" 0 "servicePort" }}
{{- end }}
{{- if eq .Values.ingress.enabled true }}
To access your application, ensure that the necessary configurations are set up in your cluster.
- If you have DNS set up:
  - Access your application using the configured domain: http://{{ index .Values.ingress.hosts 0 "host" }}
{{- else }}
No Ingress resources deployed.
{{- end }}
Ensure that your application service ({{ index .Values.ingress.hosts 0 "paths" 0 "serviceName" }}) is up and running on port {{ index .Values.ingress.hosts 0 "paths" 0 "servicePort" }}.
Happy exploring!

View File

@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "iris-web.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "iris-web.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
"+" is not valid in label values, so it is replaced with "_".
*/}}
{{- define "iris-web.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "iris-web.labels" -}}
helm.sh/chart: {{ include "iris-web.chart" . }}
{{ include "iris-web.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "iris-web.selectorLabels" -}}
app.kubernetes.io/name: {{ include "iris-web.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "iris-web.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "iris-web.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,85 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.irisapp.name }}
spec:
replicas: {{ .Values.irisapp.replicaCount }}
selector:
matchLabels:
app: {{ .Values.irisapp.app }}
template:
metadata:
labels:
app: {{ .Values.irisapp.app }}
spec:
containers:
- name: {{ .Values.irisapp.name }}
image: "{{ .Values.irisapp.image}}:{{ .Values.irisapp.tag }}"
imagePullPolicy: "{{ .Values.irisapp.imagePullPolicy }}"
command: ['nohup', './iris-entrypoint.sh', 'iriswebapp']
env:
- name: DOCKERIZED # Setting Database name
value: {{ .Values.irisapp.DOCKERIZED | quote }}
- name: POSTGRES_USER # Setting Database username
value: {{ .Values.irisapp.POSTGRES_USER| quote }}
- name: POSTGRES_PASSWORDD # Setting Database password
value: {{ .Values.irisapp.POSTGRES_PASSWORD | quote }}
- name: POSTGRES_ADMIN_USER # Setting Database admin user
value: {{ .Values.irisapp.POSTGRES_ADMIN_USER | quote }}
- name: POSTGRES_ADMIN_PASSWORD # Setting Database admin password
value: {{ .Values.irisapp.POSTGRES_ADMIN_PASSWORD | quote }}
- name: POSTGRES_PORT # Setting Database port
value: {{ .Values.irisapp.POSTGRES_PORT | quote }}
- name: POSTGRES_SERVER # Setting Database server
value: {{ .Values.irisapp.POSTGRES_SERVER | quote }}
- name: IRIS_SECRET_KEY
value: {{ .Values.irisapp.IRIS_SECRET_KEY | quote }}
- name: IRIS_SECURITY_PASSWORD_SALT
value: {{ .Values.irisapp.IRIS_SECURITY_PASSWORD_SALT | quote }}
ports:
- containerPort: 8000
volumeMounts:
- mountPath: /home/iris/downloads
name: iris-downloads
- mountPath: /home/iris/user_templates
name: user-templates
- mountPath: /home/iris/server_data
name: server-data
volumes:
- name: iris-downloads
emptyDir: {}
- name: user-templates
emptyDir: {}
- name: server-data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.irisapp.name }}
labels:
app: {{ .Values.irisapp.app }}
spec:
type: {{ .Values.irisapp.type }}
ports:
- port: {{ .Values.irisapp.service.port }}
targetPort: {{ .Values.irisapp.service.targetPort }}
selector:
app: {{ .Values.irisapp.app }}
---

View File

@ -0,0 +1,69 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.irisworker.name }}
spec:
replicas: {{ .Values.irisworker.replicaCount }}
selector:
matchLabels:
app: {{ .Values.irisworker.app }}
template:
metadata:
labels:
app: {{ .Values.irisworker.app }}
spec:
containers:
- name: {{ .Values.irisworker.name }}
image: "{{ .Values.irisworker.image}}:{{ .Values.irisworker.tag }}"
imagePullPolicy: "{{ .Values.irisworker.imagePullPolicy }}"
command: ['./wait-for-iriswebapp.sh', 'iriswebapp-app.test.svc.cluster.local:8000', './iris-entrypoint.sh', 'iris-worker']
env:
- name: DOCKERIZED
value: {{ .Values.irisworker.DOCKERIZED | quote }}
- name: POSTGRES_USER
value: {{ .Values.irisworker.POSTGRES_USER | quote }}
- name: POSTGRES_PASSWORDD
value: {{ .Values.irisworker.POSTGRES_PASSWORD | quote }}
- name: POSTGRES_ADMIN_USER
value: {{ .Values.irisworker.POSTGRES_ADMIN_USER | quote }}
- name: POSTGRES_ADMIN_PASSWORD
value: {{ .Values.irisworker.POSTGRES_ADMIN_PASSWORD | quote }}
- name: POSTGRES_PORT
value: {{ .Values.irisworker.POSTGRES_PORT | quote }}
- name: POSTGRES_SERVER
value: {{ .Values.irisworker.POSTGRES_SERVER | quote }}
- name: IRIS_SECRET_KEY
value: {{ .Values.irisworker.IRIS_SECRET_KEY | quote }}
- name: IRIS_SECURITY_PASSWORD_SALT
value: {{ .Values.irisworker.IRIS_SECURITY_PASSWORD_SALT | quote }}
ports:
- containerPort: 80
volumeMounts:
- mountPath: /home/iris/downloads
name: iris-downloads
- mountPath: /home/iris/user_templates
name: user-templates
- mountPath: /home/iris/server_data
name: server-data
volumes:
- name: iris-downloads
emptyDir: {}
- name: user-templates
emptyDir: {}
- name: server-data
emptyDir: {}
---

View File

@ -0,0 +1,32 @@
# Ingress template: renders one rule per entry in .Values.ingress.hosts,
# plus optional TLS blocks from .Values.ingress.tls.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ .Values.ingress.name }}
  annotations:
    {{- toYaml .Values.ingress.annotations | nindent 4 }}
spec:
  rules:
    {{- range $host := .Values.ingress.hosts }}
    - host: {{ $host.host }}
      http:
        paths:
          {{- range $path := $host.paths }}
          - path: {{ $path.path }}
            pathType: Prefix
            backend:
              service:
                name: {{ $path.serviceName }}
                port:
                  number: {{ $path.servicePort }}
          {{- end }}
    {{- end }}
  {{- with .Values.ingress.tls }}
  tls:
    {{- range . }}
    - hosts:
        {{- range .hosts }}
        - {{ . }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}

View File

@ -0,0 +1,104 @@
---
# PostgreSQL for IRIS (Helm template): hostPath PV + PVC + Deployment + Service.
# A hostPath volume is used here; local volumes can only be statically
# provisioned — for dynamic provisioning use AWS EBS or EFS instead.
kind: PersistentVolume
apiVersion: v1
metadata:
  name: postgres-pv-volume
  labels:
    app: {{ .Values.postgres.app }}
spec:
  storageClassName: pv
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
  hostPath:
    path: /var/lib/data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: postgres-pv-claim
  labels:
    app: {{ .Values.postgres.app }}
spec:
  storageClassName: pv
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Values.postgres.name }}
spec:
  replicas: {{ .Values.postgres.replicaCount }}
  selector:
    matchLabels:
      app: {{ .Values.postgres.app }}
  template:
    metadata:
      labels:
        app: {{ .Values.postgres.app }}
    spec:
      containers:
        - name: {{ .Values.postgres.name }}
          image: "{{ .Values.postgres.image }}:{{ .Values.postgres.tag }}"
          imagePullPolicy: "{{ .Values.postgres.imagePullPolicy }}"
          env:
            - name: POSTGRES_DB                  # database name
              value: {{ .Values.postgres.POSTGRES_DB | quote }}
            # NOTE(review): POSTGRES_USER is sourced from the ADMIN_USER values
            # key, not postgres.POSTGRES_USER — confirm this is intentional.
            - name: POSTGRES_USER                # database username
              value: {{ .Values.postgres.POSTGRES_ADMIN_USER | quote }}
            # BUG FIX: env name was POSTGRES_PASSWORDD (typo) — the container
            # never received the password under the expected variable.
            - name: POSTGRES_PASSWORD            # database password
              value: {{ .Values.postgres.POSTGRES_PASSWORD | quote }}
            - name: POSTGRES_ADMIN_USER          # database admin user
              value: {{ .Values.postgres.POSTGRES_ADMIN_USER | quote }}
            - name: POSTGRES_ADMIN_PASSWORD      # database admin password
              value: {{ .Values.postgres.POSTGRES_ADMIN_PASSWORD | quote }}
            - name: POSTGRES_PORT                # database port
              value: {{ .Values.postgres.POSTGRES_PORT | quote }}
            # WARNING: trust auth disables password checks for all connections.
            - name: POSTGRES_HOST_AUTH_METHOD
              value: trust
          ports:
            - containerPort: 5432
          volumeMounts:
            - mountPath: /var/lib/postgresql/data
              name: postgredb
      volumes:
        - name: postgredb
          persistentVolumeClaim:
            claimName: postgres-pv-claim
---
apiVersion: v1
kind: Service
metadata:
  name: {{ .Values.postgres.name }}
  labels:
    app: {{ .Values.postgres.app }}
spec:
  type: ClusterIP
  ports:
    - port: {{ .Values.postgres.service.port }}
  selector:
    app: {{ .Values.postgres.app }}
---

View File

@ -0,0 +1,36 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Values.rabbitmq.name }}
spec:
selector:
matchLabels:
app: {{ .Values.rabbitmq.app }}
replicas: {{ .Values.rabbitmq.replicaCount }}
template:
metadata:
labels:
app: {{ .Values.rabbitmq.app }}
spec:
containers:
- image: "{{ .Values.rabbitmq.image}}:{{ .Values.rabbitmq.tag}}"
imagePullPolicy: {{ .Values.rabbitmq.imagePullPolicy}}
name: {{ .Values.rabbitmq.name }}
ports:
- containerPort: 5672
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Values.rabbitmq.name }}
spec:
ports:
- port: 5672
targetPort: 5672
protocol: TCP
type: ClusterIP
selector:
app: {{ .Values.rabbitmq.app }}
---

View File

@ -0,0 +1,159 @@
## @section rabbitmq Configuration
##
rabbitmq:
## @param rabbitmq.app App name for rabbitmq
##
app: rabbitmq
## @param rabbitmq.name Name for rabbitmq
##
name: rabbitmq
## @param rabbitmq.image Image rabbitmq deployment
##
image: rabbitmq
## @param rabbitmq.tag Tag for rabbitmq
##
tag: 3-management-alpine
## @param rabbitmq.imagePullPolicy Policy for rabbitmq
##
imagePullPolicy: "IfNotPresent"
## @param rabbitmq.replicaCount ReplicaCount for rabbitmq
##
replicaCount: 1
## @section PostgreSQL Configuration
##
postgres:
## @param postgres.app PostgreSQL App
##
app: postgres
## @param postgres.name PostgreSQL Name
##
name: postgres
## @param postgres.image PostgreSQL Image
##
image: <postgres_image>
## @param postgres.tag PostgreSQL Tag
tag: <tag>
## @param postgres.imagePullPolicy PostgreSQL PullPolicy
##
imagePullPolicy: "IfNotPresent"
## @param postgres.replicaCount PostgreSQL ReplicaCount
##
replicaCount: 1
## @param postgres.service PostgreSQL Service
##
service:
port: 5432
## @param PostgreSQL Environments
##
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_ADMIN_USER: raptor
POSTGRES_ADMIN_PASSWORD: postgres
POSTGRES_DB: iris_db
POSTGRES_PORT: 5432
## @section Iris Frontend Configuration
##
irisapp:
## @param irisapp.app Iris Frontend App
##
app: iriswebapp-app
## @param irisapp.name Iris Frontend Name
##
name: iriswebapp-app
## @param irisapp.image Iris Frontend Image
##
image: <irisapp_image>
## @param irisapp.tag Iris Frontend Tag
##
tag: <tag>
## @param irisapp.imagePullPolicy Iris Frontend imagePullPolicy
##
imagePullPolicy: "IfNotPresent"
## @param irisapp.replicaCount Iris Frontend replicaCount
##
replicaCount: 1
## @param irisapp.service Iris Frontend Service
##
service:
port: 80
targetPort: 8000
## @param irisapp.type Iris Frontend Service type
##
type: ClusterIP
## @param Iris Frontend Environments
##
POSTGRES_USER: raptor
POSTGRES_PASSWORD: postgres
POSTGRES_ADMIN_USER: raptor
POSTGRES_ADMIN_PASSWORD: postgres
POSTGRES_PORT: 5432
POSTGRES_SERVER: postgres.<name_space>.svc.cluster.local
DOCKERIZED: 1
IRIS_SECRET_KEY: AVerySuperSecretKey-SoNotThisOne
IRIS_SECURITY_PASSWORD_SALT: ARandomSalt-NotThisOneEither
## @section Iris Backend Configuration
##
irisworker:
## @param irisworker.app Iris Backend App
##
app: iriswebapp-worker
## @param irisworker.name Iris Backend Name
##
name: iriswebapp-worker
## @param irisworker.image Iris Backend Image
##
image: <irisworker_image>
## @param irisworker.tag Iris Backend Tag
##
tag: <tag>
## @param irisworker.imagePullPolicy Iris Backend imagePullPolicy
##
imagePullPolicy: "IfNotPresent"
## @param irisworker.replicaCount Iris Backend replicaCount
##
replicaCount: 1
## @param Iris Backend Environments
##
POSTGRES_USER: raptor
POSTGRES_PASSWORD: postgres
POSTGRES_ADMIN_USER: raptor
POSTGRES_ADMIN_PASSWORD: postgres
POSTGRES_PORT: 5432
POSTGRES_SERVER: postgres.<name_space>.svc.cluster.local
DOCKERIZED: 1
IRIS_SECRET_KEY: AVerySuperSecretKey-SoNotThisOne
IRIS_SECURITY_PASSWORD_SALT: ARandomSalt-NotThisOneEither
## @section Nginx Ingress Configuration
##
ingress:
enabled: true
name: iris-ingress
className: nginx
annotations:
# Add any annotations specific to your Ingress controller
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/ssl-redirect: "false"
hosts:
- host: <host_name>
paths:
- path: /
pathType: Prefix
serviceName: iriswebapp-app
servicePort: 80
tls:
- secretName: iris-ingress-tls-secret
hosts:
- <host_name>

158
iris-web/docker-compose.yml Normal file
View File

@ -0,0 +1,158 @@
# IRIS Source Code
# contact@dfir-iris.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
version: "3.5"
services:
rabbitmq:
image: rabbitmq:3-management-alpine
container_name: iriswebapp_rabbitmq
networks:
- iris_backend
db:
build:
context: docker/db
container_name: iriswebapp_db
image: iriswebapp_db:v2.3.3
restart: always
# Used for debugging purposes, should be deleted for production
ports:
- "127.0.0.1:5432:5432"
environment:
- POSTGRES_USER
- POSTGRES_PASSWORD
- POSTGRES_ADMIN_USER
- POSTGRES_ADMIN_PASSWORD
- POSTGRES_DB
networks:
- iris_backend
volumes:
- db_data:/var/lib/postgresql/data
app:
build:
context: .
dockerfile: docker/webApp/Dockerfile
image: iriswebapp_app:v2.3.3
container_name: iriswebapp_app
command: ['nohup', './iris-entrypoint.sh', 'iriswebapp']
volumes:
# RootCA necessary when dealing with an auth server without a trusted CA signed certificate
- ./certificates/rootCA/irisRootCACert.pem:/etc/irisRootCACert.pem:ro
- ./certificates/:/home/iris/certificates/:ro
- ./certificates/ldap/:/iriswebapp/certificates/ldap/:ro
- iris-downloads:/home/iris/downloads
- user_templates:/home/iris/user_templates
- server_data:/home/iris/server_data
restart: always
depends_on:
- "rabbitmq"
- "db"
# Used for debugging purposes, should be deleted for production
ports:
- "127.0.0.1:8000:8000"
env_file:
- .env
environment:
- POSTGRES_USER
- POSTGRES_PASSWORD
- POSTGRES_ADMIN_USER
- POSTGRES_ADMIN_PASSWORD
- POSTGRES_SERVER
- POSTGRES_PORT
- DOCKERIZED
- IRIS_SECRET_KEY
- IRIS_SECURITY_PASSWORD_SALT
networks:
- iris_backend
- iris_frontend
- shared-network
worker:
build:
context: .
dockerfile: docker/webApp/Dockerfile
image: iriswebapp_app:v2.3.3
container_name: iriswebapp_worker
command: ['./wait-for-iriswebapp.sh', 'app:8000', './iris-entrypoint.sh', 'iris-worker']
volumes:
- ./certificates/rootCA/irisRootCACert.pem:/etc/irisRootCACert.pem:ro
- ./certificates/:/home/iris/certificates/:ro
- ./certificates/ldap/:/iriswebapp/certificates/ldap/:ro
- iris-downloads:/home/iris/downloads
- user_templates:/home/iris/user_templates
- server_data:/home/iris/server_data
depends_on:
- "rabbitmq"
- "db"
- "app"
env_file:
- .env
environment:
- POSTGRES_USER
- POSTGRES_PASSWORD
- POSTGRES_ADMIN_USER
- POSTGRES_ADMIN_PASSWORD
- POSTGRES_SERVER
- POSTGRES_PORT
- DOCKERIZED
- IRIS_SECRET_KEY
- IRIS_SECURITY_PASSWORD_SALT
- IRIS_WORKER
networks:
- iris_backend
- shared-network
nginx:
build:
context: ./docker/nginx
args:
NGINX_CONF_GID: 1234
NGINX_CONF_FILE: nginx.conf
image: iriswebapp_nginx:v2.3.3
container_name: iriswebapp_nginx
environment:
- IRIS_UPSTREAM_SERVER
- IRIS_UPSTREAM_PORT
- INTERFACE_HTTPS_PORT
- SERVER_NAME
- CERT_FILENAME
- KEY_FILENAME
- IRIS_AUTHENTICATION_TYPE
networks:
- iris_frontend
- shared-network
ports:
- "${INTERFACE_HTTPS_PORT:-8443}:${INTERFACE_HTTPS_PORT:-8443}"
volumes:
- "./certificates/web_certificates/:/www/certs/:ro"
restart: on-failure:5
depends_on:
- "app"
volumes:
iris-downloads:
user_templates:
server_data:
db_data:
networks:
iris_backend:
name: iris_backend
iris_frontend:
name: iris_frontend
shared-network:
external: true

View File

@ -0,0 +1,22 @@
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
FROM postgres:12-alpine
COPY create_user.sh /docker-entrypoint-initdb.d/10-create_user.sh

View File

@ -0,0 +1,10 @@
#!/bin/bash
# Runs once at first database initialization (placed in
# /docker-entrypoint-initdb.d by the Dockerfile) to create the IRIS
# admin role with CREATEDB and SUPERUSER rights.
set -e
POSTGRES="psql --username ${POSTGRES_USER}"
echo "Creating database role: ${POSTGRES_ADMIN_USER}"
# NOTE(review): the password is interpolated unquoted into the SQL; this
# assumes it contains no single quotes — confirm upstream constraints.
$POSTGRES <<-EOSQL
CREATE USER ${POSTGRES_ADMIN_USER} WITH CREATEDB SUPERUSER PASSWORD '${POSTGRES_ADMIN_PASSWORD}';
EOSQL

View File

@ -0,0 +1,49 @@
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
FROM nginx:1.21.3
RUN apt-get update && apt-get install -y curl
# Used to pass protected files to the container through volumes
ARG NGINX_CONF_GID
ARG NGINX_CONF_FILE
RUN groupadd -g ${NGINX_CONF_GID} az-app-nginx-conf && usermod -a -G az-app-nginx-conf www-data
COPY entrypoint.sh /entrypoint.sh
RUN chmod 700 /entrypoint.sh
RUN chown www-data:www-data /entrypoint.sh
COPY ${NGINX_CONF_FILE} /etc/nginx/nginx.conf
# log
RUN touch /var/log/nginx/audit_platform_error.log && chown -R www-data:www-data /var/log/nginx/audit_platform_error.log
RUN touch /var/log/nginx/audit_platform_access.log && chown -R www-data:www-data /var/log/nginx/audit_platform_access.log
# Security
RUN touch /var/run/nginx.pid && chown -R www-data:www-data /var/run/nginx.pid /var/cache/nginx /etc/nginx/nginx.conf
RUN mkdir -p /www/certs/
USER www-data
HEALTHCHECK --interval=5s --timeout=3s CMD curl --fail -k https://127.0.0.1:${INTERFACE_HTTPS_PORT:-8443} || exit 1
ENTRYPOINT ["/entrypoint.sh"]

View File

@ -0,0 +1,29 @@
#!/usr/bin/env bash
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
set -e
# envsubst will make a substitution on every $variable in a file, since the nginx file contains nginx variable like $host, we have to limit the substitution to this set
# otherwise, each nginx variable will be replaced by an empty string
envsubst '${INTERFACE_HTTPS_PORT} ${IRIS_UPSTREAM_SERVER} ${IRIS_UPSTREAM_PORT} ${SERVER_NAME} ${KEY_FILENAME} ${CERT_FILENAME}' < /etc/nginx/nginx.conf > /tmp/nginx.conf
cp /tmp/nginx.conf /etc/nginx/nginx.conf
rm /tmp/nginx.conf
exec nginx -g "daemon off;"

View File

@ -0,0 +1,161 @@
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
worker_processes auto;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
map $request_uri $csp_header {
default "default-src 'self' https://analytics.dfir-iris.org; script-src 'self' 'unsafe-inline' https://analytics.dfir-iris.org; style-src 'self' 'unsafe-inline';";
}
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
error_log /var/log/nginx/error.log debug;
server_tokens off;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
types_hash_max_size 2048;
types_hash_bucket_size 128;
proxy_headers_hash_max_size 2048;
proxy_headers_hash_bucket_size 128;
proxy_buffering on;
proxy_buffers 8 16k;
proxy_buffer_size 4k;
client_header_buffer_size 2k;
large_client_header_buffers 8 64k;
client_body_buffer_size 64k;
client_max_body_size 100M;
reset_timedout_connection on;
keepalive_timeout 90s;
client_body_timeout 90s;
send_timeout 90s;
client_header_timeout 90s;
fastcgi_read_timeout 90s;
# WORKING TIMEOUT FOR PROXY CONF
proxy_read_timeout 90s;
uwsgi_read_timeout 90s;
gzip off;
gzip_disable "MSIE [1-6]\.";
# FORWARD CLIENT IDENTITY TO SERVER
proxy_set_header HOST $http_host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# FULLY DISABLE SERVER CACHE
add_header Last-Modified $date_gmt;
add_header 'Cache-Control' 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0';
if_modified_since off;
expires off;
etag off;
proxy_no_cache 1;
proxy_cache_bypass 1;
# SSL CONF, STRONG CIPHERS ONLY
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_certificate /www/certs/${CERT_FILENAME};
ssl_certificate_key /www/certs/${KEY_FILENAME};
ssl_ecdh_curve secp521r1:secp384r1:prime256v1;
ssl_buffer_size 4k;
# DISABLE SSL SESSION CACHE
ssl_session_tickets off;
ssl_session_cache none;
access_log /var/log/nginx/audit_platform_access.log main;
error_log /var/log/nginx/audit_platform_error.log debug;
server {
listen ${INTERFACE_HTTPS_PORT} ssl;
server_name ${SERVER_NAME};
root /www/data;
index index.html;
error_page 500 502 503 504 /50x.html;
add_header Content-Security-Policy $csp_header;
# SECURITY HEADERS
add_header X-XSS-Protection "1; mode=block";
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
# max-age = 31536000s = 1 year
add_header Strict-Transport-Security "max-age=31536000: includeSubDomains" always;
add_header Front-End-Https on;
location / {
proxy_pass http://${IRIS_UPSTREAM_SERVER}:${IRIS_UPSTREAM_PORT};
location ~ ^/(manage/templates/add|manage/cases/upload_files) {
keepalive_timeout 10m;
client_body_timeout 10m;
send_timeout 10m;
proxy_read_timeout 10m;
client_max_body_size 0M;
proxy_request_buffering off;
proxy_pass http://${IRIS_UPSTREAM_SERVER}:${IRIS_UPSTREAM_PORT};
}
location ~ ^/(datastore/file/add|datastore/file/add-interactive) {
keepalive_timeout 10m;
client_body_timeout 10m;
send_timeout 10m;
proxy_read_timeout 10m;
client_max_body_size 0M;
proxy_request_buffering off;
proxy_pass http://${IRIS_UPSTREAM_SERVER}:${IRIS_UPSTREAM_PORT};
}
}
location /socket.io {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_pass http://${IRIS_UPSTREAM_SERVER}:${IRIS_UPSTREAM_PORT}/socket.io;
}
location = /50x.html {
root /usr/share/nginx/html;
}
}
}

View File

@ -0,0 +1,74 @@
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#################
# COMPILE IMAGE #
#################
FROM python:3.9 AS compile-image
RUN apt-get update
RUN python -m venv /opt/venv
# Make sure we use the virtualenv:
ENV PATH="/opt/venv/bin:$PATH"
COPY source/dependencies /dependencies
COPY source/requirements.txt /
RUN pip3 install -r requirements.txt
###############
# BUILD IMAGE #
###############
FROM python:3.9 as iriswebapp
ENV PYTHONUNBUFFERED=1
COPY --from=compile-image /opt/venv /opt/venv
# Make sure we use the virtualenv:
ENV PATH="/opt/venv/bin:$PATH"
# Define specific admin password at creation
#ENV IRIS_ADM_PASSWORD="MySuperFirstPasswordIWant"
RUN apt update
RUN apt install -y p7zip-full pgp rsync postgresql-client
RUN mkdir /iriswebapp/
RUN mkdir -p /home/iris/certificates
RUN mkdir -p /home/iris/user_templates
RUN mkdir -p /home/iris/server_data
RUN mkdir -p /home/iris/server_data/backup
RUN mkdir -p /home/iris/server_data/updates
RUN mkdir -p /home/iris/server_data/custom_assets
RUN mkdir -p /home/iris/server_data/datastore
WORKDIR /iriswebapp
COPY docker/webApp/iris-entrypoint.sh .
COPY docker/webApp/wait-for-iriswebapp.sh .
COPY ./source .
# Add execution right to binaries needed by evtx2splunk for iris_evtx module
RUN chmod +x /iriswebapp/dependencies/evtxdump_binaries/linux/x64/fd
RUN chmod +x /iriswebapp/dependencies/evtxdump_binaries/linux/x64/evtx_dump
RUN chmod +x iris-entrypoint.sh
RUN chmod +x wait-for-iriswebapp.sh
#ENTRYPOINT [ "./iris-entrypoint.sh" ]

View File

@ -0,0 +1,77 @@
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#################
# COMPILE IMAGE #
#################
FROM python:3.9 AS compile-image
RUN apt-get update
RUN python -m venv /opt/venv
# Make sure we use the virtualenv:
ENV PATH="/opt/venv/bin:$PATH"
COPY source/dependencies /dependencies
COPY source/requirements.txt /
RUN pip3 install -r requirements.txt
###############
# BUILD IMAGE #
###############
FROM python:3.9 as iriswebapp
ENV PYTHONUNBUFFERED=1
COPY --from=compile-image /opt/venv /opt/venv
# Make sure we use the virtualenv:
ENV PATH="/opt/venv/bin:$PATH"
# Define specific admin password at creation
#ENV IRIS_ADM_PASSWORD="MySuperFirstPasswordIWant"
RUN apt update
RUN apt install -y p7zip-full pgp rsync postgresql-client
RUN mkdir /iriswebapp/
RUN mkdir -p /home/iris/certificates
RUN mkdir -p /home/iris/user_templates
RUN mkdir -p /home/iris/server_data
RUN mkdir -p /home/iris/server_data/backup
RUN mkdir -p /home/iris/server_data/updates
RUN mkdir -p /home/iris/server_data/custom_assets
RUN mkdir -p /home/iris/server_data/datastore
WORKDIR /iriswebapp
COPY docker/webApp/iris-entrypoint.sh .
COPY docker/webApp/wait-for-iriswebapp.sh .
COPY ../../certificates /home/iris/certificates/
COPY ../../certificates/rootCA/irisRootCACert.pem /etc/irisRootCACert.pem
COPY ../../certificates/ldap/ /iriswebapp/certificates/ldap/
COPY ./source .
# Add execution right to binaries needed by evtx2splunk for iris_evtx module
RUN chmod +x /iriswebapp/dependencies/evtxdump_binaries/linux/x64/fd
RUN chmod +x /iriswebapp/dependencies/evtxdump_binaries/linux/x64/evtx_dump
RUN chmod +x iris-entrypoint.sh
RUN chmod +x wait-for-iriswebapp.sh
#ENTRYPOINT [ "./iris-entrypoint.sh" ]

View File

@ -0,0 +1,35 @@
#!/bin/bash
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Select the service to start; defaults to the web app.
# FIX: the original used ${1-:app}, which expands to ":app" when $1 is
# unset — the ':' was part of the default value, not the ':-' operator.
target=${1:-app}
# Use a fixed format string so a '%' in the target cannot break printf.
printf 'Running %s ...\n' "${target}"
if [[ "${target}" == iris-worker ]] ; then
  # Celery worker with events (-E) and the beat scheduler (-B).
  celery -A app.celery worker -E -B -l INFO &
else
  gunicorn app:app --worker-class eventlet --bind 0.0.0.0:8000 --timeout 180 --worker-connections 1000 --log-level=info &
fi
# Keep PID 1 alive so the container keeps running while the backgrounded
# service does its work.
while true; do sleep 2; done

View File

@ -0,0 +1,34 @@
#!/bin/sh
# wait-for-iriswebapp.sh
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
set -e
# First argument: host[:port] of the IRIS web app to probe.
host="$1"
# Remaining arguments: the command to exec once the app answers.
shift
sleep 1
# Poll with curl until any HTTP response comes back from the web app.
until curl "$host" >/dev/null 2>&1; do
  >&2 echo "IRISwebapp is unavailable - sleeping"
  sleep 1
done
>&2 echo "IRISwebapp is up - executing command"
# Replace this shell with the target command so signals reach it directly.
exec "$@"

Binary file not shown.

After

Width:  |  Height:  |  Size: 242 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.5 MiB

View File

@ -0,0 +1,32 @@
#!/usr/bin/env python3
#
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
# Gunicorn configuration for the IRIS web app.
bind = 'unix:sock'  # serve over a unix socket in the working directory
workers = 4
accesslog = '-'  # '-' sends the access log to stdout
loglevel = 'warning'
errorlog = '/var/log/iris/errors.log'
timeout = 3000  # seconds; generous to accommodate long-running requests
def worker_exit(server, worker):
    # Exit the whole process with a non-zero status when any worker dies,
    # so the container supervisor restarts the service cleanly.
    sys.exit(4)

View File

@ -0,0 +1,136 @@
#!/usr/bin/env python3
#
# IRIS Source Code
# Copyright (C) 2021 - Airbus CyberSecurity (SAS)
# ir@cyberactionlab.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import collections
import json
import logging as logger
import os
import urllib.parse
from flask import Flask
from flask import session
from flask_bcrypt import Bcrypt
from flask_caching import Cache
from flask_login import LoginManager
from flask_marshmallow import Marshmallow
from flask_socketio import SocketIO, Namespace
from flask_sqlalchemy import SQLAlchemy
from functools import partial
from sqlalchemy_imageattach.stores.fs import HttpExposedFileSystemStore
from werkzeug.middleware.proxy_fix import ProxyFix
from app.flask_dropzone import Dropzone
from app.iris_engine.tasker.celery import make_celery
class ReverseProxied(object):
    """WSGI middleware propagating the proxy's original URL scheme.

    When IRIS runs behind nginx doing TLS termination, the proxy sends the
    client-facing scheme in the X-Forwarded-Proto header; copying it into
    ``wsgi.url_scheme`` lets the app generate correct external URLs.
    """

    def __init__(self, flask_app):
        self._app = flask_app

    def __call__(self, environ, start_response):
        forwarded_scheme = environ.get('HTTP_X_FORWARDED_PROTO')
        if forwarded_scheme is not None:
            environ['wsgi.url_scheme'] = forwarded_scheme
        return self._app(environ, start_response)
class AlertsNamespace(Namespace):
    # Socket.IO namespace registered on '/alerts' for real-time alert
    # notifications; no custom event handlers — default Namespace behavior.
    pass
APP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_PATH = os.path.join(APP_PATH, 'templates/')
# Grabs the folder where the script runs.
basedir = os.path.abspath(os.path.dirname(__file__))
LOG_FORMAT = '%(asctime)s :: %(levelname)s :: %(module)s :: %(funcName)s :: %(message)s'
LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
logger.basicConfig(level=logger.INFO, format=LOG_FORMAT, datefmt=LOG_TIME_FORMAT)
app = Flask(__name__)
def ac_current_user_has_permission(*permissions):
    """Return True if the current user holds at least one given permission.

    Each argument is an enum member whose ``value`` is a bitmask; a
    permission is considered held when all of its bits are present in the
    session's ``permissions`` bitfield.
    """
    granted = session['permissions']
    return any(granted & perm.value == perm.value for perm in permissions)
def ac_current_user_has_manage_perms():
    """Return True if the current user holds any management permission.

    A permissions value of exactly 1 never qualifies; otherwise any bit
    within the 0x1FFFFF0 mask does.
    """
    granted = session['permissions']
    return granted != 1 and (granted & 0x1FFFFF0) != 0
app.jinja_env.filters['unquote'] = lambda u: urllib.parse.unquote(u)
app.jinja_env.filters['tojsonsafe'] = lambda u: json.dumps(u, indent=4, ensure_ascii=False)
app.jinja_env.filters['tojsonindent'] = lambda u: json.dumps(u, indent=4)
app.jinja_env.filters['escape_dots'] = lambda u: u.replace('.', '[.]')
app.jinja_env.globals.update(user_has_perm=ac_current_user_has_permission)
app.jinja_env.globals.update(user_has_manage_perms=ac_current_user_has_manage_perms)
app.config.from_object('app.configuration.Config')
cache = Cache(app)
SQLALCHEMY_ENGINE_OPTIONS = {
"json_deserializer": partial(json.loads, object_pairs_hook=collections.OrderedDict)
}
db = SQLAlchemy(app, engine_options=SQLALCHEMY_ENGINE_OPTIONS) # flask-sqlalchemy
bc = Bcrypt(app) # flask-bcrypt
lm = LoginManager() # flask-loginmanager
lm.init_app(app) # init the login manager
ma = Marshmallow(app) # Init marshmallow
dropzone = Dropzone(app)
celery = make_celery(app)
store = HttpExposedFileSystemStore(
path='images',
prefix='/static/assets/images/'
)
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1)
app.wsgi_app = store.wsgi_middleware(app.wsgi_app)
socket_io = SocketIO(app, cors_allowed_origins="*")
alerts_namespace = AlertsNamespace('/alerts')
socket_io.on_namespace(alerts_namespace)
@app.teardown_appcontext
def shutdown_session(exception=None):
    # Return the scoped SQLAlchemy session at the end of every app context
    # so database connections are released back to the pool.
    db.session.remove()
from app import views

View File

@ -0,0 +1,103 @@
[alembic]
# path to migration scripts
script_location = app/alembic
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python-dateutil library that can be
# installed by adding `alembic[tz]` to the pip requirements
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator"
# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
# version path separator; As mentioned above, this is the character used to split
# version_locations. Valid values are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
version_path_separator = os # default: use os.pathsep
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
#sqlalchemy.url = driver://user:pass@localhost/dbname
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,app,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[logger_app]
level = INFO
handlers =
qualname = app
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(asctime)s :: %(levelname)s :: %(module)s :: %(funcName)s :: %(message)s
datefmt = %Y-%m-%d %H:%M:%S

View File

@ -0,0 +1 @@
Generic single-database configuration.

View File

@ -0,0 +1,27 @@
from alembic import op
from sqlalchemy import engine_from_config
from sqlalchemy.engine import reflection
def _table_has_column(table, column):
    """Return True if *table* currently has a column named *column*.

    Used by migration scripts to keep ALTER statements idempotent. Builds
    an engine from the active Alembic configuration and reflects the live
    schema.
    """
    config = op.get_context().config
    engine = engine_from_config(
        config.get_section(config.config_ini_section), prefix='sqlalchemy.')
    insp = reflection.Inspector.from_engine(engine)
    # any() short-circuits on the first match; the original loop kept
    # scanning every column even after finding the name.
    return any(col['name'] == column for col in insp.get_columns(table))
def _has_table(table_name):
    """Return True if *table_name* exists in the connected database."""
    config = op.get_context().config
    engine = engine_from_config(
        config.get_section(config.config_ini_section), prefix="sqlalchemy."
    )
    inspector = reflection.Inspector.from_engine(engine)
    return table_name in inspector.get_table_names()

View File

@ -0,0 +1,82 @@
from alembic import context
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

import os

# Signal to the application code that it is being imported from an Alembic
# run; presumably app.configuration adjusts its behaviour when this is set —
# TODO confirm against app/configuration.py.
os.environ["ALEMBIC"] = "1"

from app.configuration import SQLALCHEMY_BASE_ADMIN_URI, PG_DB_

# Point Alembic at the admin connection string built by the application,
# overriding the (commented-out) sqlalchemy.url from alembic.ini.
config.set_main_option('sqlalchemy.url', SQLALCHEMY_BASE_ADMIN_URI + PG_DB_)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# No metadata is wired in: migrations are hand-written, autogenerate unused.
target_metadata = None

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the context with just a database URL instead of an Engine,
    so no DBAPI has to be installed: calls to context.execute() simply
    emit the generated SQL to the script output.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # NullPool: each migration run opens a fresh connection and never keeps
    # a pooled one alive afterwards.
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    with connectable.connect() as connection:
        context.configure(
            connection=connection, target_metadata=target_metadata
        )

        # NOTE(review): migrations are deliberately run WITHOUT the standard
        # `with context.begin_transaction():` wrapper — the original author
        # noted it "fixes stuck transaction". Confirm the root cause before
        # restoring the transactional pattern.
        #with context.begin_transaction(): -- Fixes stuck transaction. Need more info on that
        context.run_migrations()
# Entry point: Alembic selects offline mode when invoked with --sql,
# otherwise migrations run against a live connection.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()

View File

@ -0,0 +1,24 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}

View File

@ -0,0 +1,31 @@
"""Add prevent post-init to register case objects again during boot
Revision ID: 00b43bc4e8ac
Revises: 2604f6962838
Create Date: 2023-05-05 18:43:07.236041
"""
from alembic import op
import sqlalchemy as sa
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '00b43bc4e8ac'
down_revision = '2604f6962838'
branch_labels = None
depends_on = None
def upgrade():
    """Add server_settings.prevent_post_objects_repush when it is missing."""
    if _table_has_column('server_settings', 'prevent_post_objects_repush'):
        return
    op.add_column(
        'server_settings',
        sa.Column('prevent_post_objects_repush', sa.Boolean(), default=False)
    )


def downgrade():
    """Drop prevent_post_objects_repush again when it is present."""
    if _table_has_column('server_settings', 'prevent_post_objects_repush'):
        op.drop_column('server_settings', 'prevent_post_objects_repush')

View File

@ -0,0 +1,63 @@
"""Add tags to assets
Revision ID: 0db700644a4f
Revises: 6a3b3b627d45
Create Date: 2022-01-06 13:47:12.648707
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
from app.alembic.alembic_utils import _table_has_column
revision = '0db700644a4f'
down_revision = '6a3b3b627d45'
branch_labels = None
depends_on = None
def upgrade():
    """Add case_assets.asset_tags and backfill NULL tags with ''.

    The original implementation fetched every asset id with a NULL tag and
    issued one UPDATE statement per row; a single bulk UPDATE over the NULL
    rows is equivalent and avoids N database round trips.
    """
    # Add column asset_tags to CaseAssets if not existing
    if not _table_has_column('case_assets', 'asset_tags'):
        op.add_column('case_assets',
                      sa.Column('asset_tags', sa.Text)
                      )

    if _table_has_column('case_assets', 'asset_tags'):
        # Minimal table definition: only the columns this migration touches.
        t_case_assets = sa.Table(
            'case_assets',
            sa.MetaData(),
            sa.Column('asset_id', sa.Integer, primary_key=True),
            sa.Column('asset_tags', sa.Text)
        )

        # Normalise existing rows: empty string instead of NULL tags.
        conn = op.get_bind()
        conn.execute(
            t_case_assets.update()
            .where(t_case_assets.c.asset_tags.is_(None))
            .values(asset_tags='')
        )


def downgrade():
    """Irreversible: the asset_tags column is kept."""
    pass

View File

@ -0,0 +1,45 @@
"""Add module types
Revision ID: 10a7616f3cc7
Revises: 874ba5e5da44
Create Date: 2022-02-04 07:46:32.382640
"""
import sqlalchemy as sa
from alembic import op
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '10a7616f3cc7'
down_revision = '874ba5e5da44'
branch_labels = None
depends_on = None
def upgrade():
    """Add iris_module.module_type and default every existing module to 'pipeline'."""
    # Add column module_type to iris_module if not existing, then backfill.
    # (The previous comment here was a copy-paste leftover from the
    # user_activity/is_from_api migration.)
    if not _table_has_column('iris_module', 'module_type'):
        op.add_column('iris_module',
                      sa.Column('module_type', sa.Text)
                      )
        # Minimal table definition used only for the backfill UPDATE below.
        t_ua = sa.Table(
            'iris_module',
            sa.MetaData(),
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('module_type', sa.Text)
        )
        conn = op.get_bind()
        # Every module that existed before this revision was a pipeline module.
        conn.execute(t_ua.update().values(
            module_type='pipeline'
        ))

    pass


def downgrade():
    # Irreversible: the module_type column is kept.
    pass

View File

@ -0,0 +1,32 @@
"""Add deletion confirmation option
Revision ID: 11e066542a88
Revises: 20447ecb2245
Create Date: 2022-09-25 08:51:13.383431
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from sqlalchemy import Boolean
from app.alembic.alembic_utils import _table_has_column
revision = '11e066542a88'
down_revision = '20447ecb2245'
branch_labels = None
depends_on = None
def upgrade():
    """Add the per-user 'confirm before delete' preference flag."""
    if _table_has_column('user', 'has_deletion_confirmation'):
        return
    op.add_column(
        'user',
        sa.Column('has_deletion_confirmation', Boolean(), nullable=False, server_default='false')
    )


def downgrade():
    """Irreversible: the preference flag is kept."""
    pass

View File

@ -0,0 +1,51 @@
"""Add customer extended fields
Revision ID: 1df4adfa3160
Revises: a3eb60654ec4
Create Date: 2022-11-11 19:23:30.355618
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from app.alembic.alembic_utils import _table_has_column
revision = '1df4adfa3160'
down_revision = 'a3eb60654ec4'
branch_labels = None
depends_on = None
def upgrade():
    """Add the extended customer fields to the client table.

    Each column is added only when it does not already exist.
    """
    new_columns = (
        sa.Column('description', sa.Text()),
        sa.Column('sla', sa.Text()),
        sa.Column('creation_date', sa.DateTime()),
        sa.Column('last_update_date', sa.DateTime()),
        sa.Column('created_by', sa.BigInteger(), sa.ForeignKey('user.id'), nullable=True),
    )
    for column in new_columns:
        if not _table_has_column('client', column.name):
            op.add_column('client', column)


def downgrade():
    """Irreversible: the extended client fields are kept."""
    pass

View File

@ -0,0 +1,183 @@
"""Objects UUID field
Revision ID: 20447ecb2245
Revises: ad4e0cd17597
Create Date: 2022-09-23 21:07:20.007874
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from sqlalchemy import text
from sqlalchemy.dialects.postgresql import UUID
from app.alembic.alembic_utils import _table_has_column
revision = '20447ecb2245'
down_revision = 'ad4e0cd17597'
branch_labels = None
depends_on = None
def _to_bigint(table, column):
    """Widen an INTEGER id column to BIGINT, keeping it non-nullable."""
    op.alter_column(table, column,
                    existing_type=sa.INTEGER(),
                    type_=sa.BigInteger(),
                    existing_nullable=False)


def _add_uuid_column(table, column):
    """Add a mandatory, database-generated UUID column if it is missing."""
    if not _table_has_column(table, column):
        op.add_column(table,
                      sa.Column(column, UUID(as_uuid=True),
                                server_default=text("gen_random_uuid()"),
                                nullable=False)
                      )


def upgrade():
    """Widen object ids to BIGINT and give case objects a stable UUID.

    Each entry lists the table, its integer id column to widen, and
    (optionally) the UUID column to add. This replaces sixteen copy-pasted
    alter/add blocks with one data-driven loop; the operations are issued
    in the same order as before.
    """
    targets = [
        # (table, id column, uuid column or None)
        ('cases', 'case_id', 'case_uuid'),
        ('cases_events', 'event_id', 'event_uuid'),
        ('client', 'client_id', 'client_uuid'),
        ('case_assets', 'asset_id', 'asset_uuid'),
        ('object_state', 'object_id', None),
        ('case_events_ioc', 'id', None),
        ('case_events_assets', 'id', None),
        ('ioc', 'ioc_id', 'ioc_uuid'),
        ('notes', 'note_id', 'note_uuid'),
        ('notes_group', 'group_id', 'group_uuid'),
        ('notes_group_link', 'link_id', None),
        ('case_received_file', 'id', 'file_uuid'),
        ('case_tasks', 'id', 'task_uuid'),
        ('global_tasks', 'id', 'task_uuid'),
        ('user_activity', 'id', None),
        ('iris_module_hooks', 'id', None),
    ]
    for table, id_column, uuid_column in targets:
        _to_bigint(table, id_column)
        if uuid_column is not None:
            _add_uuid_column(table, uuid_column)


def downgrade():
    """Irreversible: BIGINT widening and UUID columns are kept."""
    pass

View File

@ -0,0 +1,54 @@
"""Add case state
Revision ID: 2604f6962838
Revises: db93d5c4c0aa
Create Date: 2023-05-05 11:16:19.997383
"""
from alembic import op
import sqlalchemy as sa
from app.models.cases import CaseState
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '2604f6962838'
down_revision = 'db93d5c4c0aa'
branch_labels = None
depends_on = None
def upgrade():
    """Introduce case states: add cases.state_id, seed the default
    'Unspecified' state, and link it to case_state with a foreign key."""
    # Add the state_id column to the cases table
    if not _table_has_column('cases', 'state_id'):
        state_id = 1
        # NOTE(review): using the ORM (CaseState.query) inside a migration
        # couples it to the application's session — confirm a session is
        # available when alembic runs standalone.
        state = CaseState.query.filter_by(state_id=state_id).first()
        if state is None:
            state = CaseState()
            # NOTE(review): this assigns `id` although the lookup above
            # filters on `state_id`, and the __dict__ handed to bulk_insert
            # also carries SQLAlchemy's _sa_instance_state — verify the
            # seeded row really ends up with state_id = 1.
            state.id=state_id
            state.state_name='Unspecified'
            state.state_description='Unspecified'
            state.protected=True
            op.bulk_insert(CaseState.__table__, [state.__dict__])

        # New cases default to the seeded state (server_default "1").
        op.add_column(
            'cases',
            sa.Column('state_id', sa.Integer, sa.ForeignKey('case_state.state_id'), nullable=True,
                      server_default=sa.text("1"))
        )

        # Set the default value for the state_id column
        op.execute("UPDATE cases SET state_id = 1")

        # Create a foreign key constraint between cases.state_id and case_state.state_id
        op.create_foreign_key(
            None, 'cases', 'case_state', ['state_id'], ['state_id']
        )


def downgrade():
    # Irreversible: the state_id column and seeded state are kept.
    pass

View File

@ -0,0 +1,38 @@
"""Adding IOC and assets enrichments
Revision ID: 2a4a8330b908
Revises: f727badcc4e1
Create Date: 2023-04-26 08:42:19.397146
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '2a4a8330b908'
down_revision = 'f727badcc4e1'
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable JSONB enrichment columns to case_assets and ioc."""
    for table, column in (('case_assets', 'asset_enrichment'),
                          ('ioc', 'ioc_enrichment')):
        if not _table_has_column(table, column):
            op.add_column(table, sa.Column(column, JSONB, nullable=True))
def downgrade():
    """Remove the enrichment columns added by this revision.

    Bug fix: the asset guard was inverted (`if not _table_has_column(...)`),
    so the drop only ran when the column did NOT exist — it could never
    succeed. The ioc branch below already used the correct condition.
    """
    if _table_has_column('case_assets', 'asset_enrichment'):
        # Remove asset_enrichment column from case_assets
        op.drop_column('case_assets', 'asset_enrichment')

    if _table_has_column('ioc', 'ioc_enrichment'):
        # Remove ioc_enrichment column from ioc
        op.drop_column('ioc', 'ioc_enrichment')

View File

@ -0,0 +1,43 @@
"""Add objects attributes
Revision ID: 2df770a4989c
Revises: 10a7616f3cc7
Create Date: 2022-02-11 20:13:14.365469
"""
import sqlalchemy as sa
from alembic import op
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '2df770a4989c'
down_revision = '10a7616f3cc7'
branch_labels = None
depends_on = None
def upgrade():
    """Add a custom_attributes JSON column to every customizable object table."""
    tables = ['ioc', 'case_assets', 'case_received_file', 'case_tasks', 'notes', 'cases_events', 'cases', 'client']
    for table in tables:
        if not _table_has_column(table, 'custom_attributes'):
            op.add_column(table,
                          sa.Column('custom_attributes', sa.JSON)
                          )
            # Backfill existing rows with an empty attribute set so the
            # application never reads NULL from this column.
            t_ua = sa.Table(
                table,
                sa.MetaData(),
                sa.Column('custom_attributes', sa.JSON)
            )
            conn = op.get_bind()
            conn.execute(t_ua.update().values(
                custom_attributes={}
            ))
    pass


def downgrade():
    # Irreversible: the custom_attributes columns are kept.
    pass

View File

@ -0,0 +1,31 @@
"""Add event flag
Revision ID: 3204e9116233
Revises: 11e066542a88
Create Date: 2022-10-02 13:44:36.996070
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from app.alembic.alembic_utils import _table_has_column
revision = '3204e9116233'
down_revision = '11e066542a88'
branch_labels = None
depends_on = None
def upgrade():
    """Let analysts flag timeline events: add cases_events.event_is_flagged."""
    if _table_has_column('cases_events', 'event_is_flagged'):
        return
    op.add_column('cases_events',
                  sa.Column('event_is_flagged', sa.Boolean, default=False))


def downgrade():
    """Irreversible: the flag column is kept."""
    pass

View File

@ -0,0 +1,28 @@
"""Rename opened to open
Revision ID: 3a4d4f15bd69
Revises: 65168cb6cc90
Create Date: 2023-10-05 11:36:45.246779
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3a4d4f15bd69'
down_revision = '65168cb6cc90'
branch_labels = None
depends_on = None
def upgrade():
    """Rename the built-in 'Opened' case state to 'Open'."""
    op.execute("UPDATE case_state SET state_name='Open' WHERE state_name='Opened'")


def downgrade():
    """Revert the rename: 'Open' back to 'Opened'."""
    op.execute("UPDATE case_state SET state_name='Opened' WHERE state_name='Open'")

View File

@ -0,0 +1,51 @@
"""Add compromise status to assets
Revision ID: 4ecdfcb34f7c
Revises: a929ef458490
Create Date: 2022-11-26 17:06:33.061363
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
from app.alembic.alembic_utils import _table_has_column
from app.models import CompromiseStatus
revision = '4ecdfcb34f7c'
down_revision = 'a929ef458490'
branch_labels = None
depends_on = None
def upgrade():
    """Replace the boolean case_assets.asset_compromised with a status id.

    Adds asset_compromise_status_id, maps True/False onto the
    CompromiseStatus enum values, then drops the old boolean column.
    """
    if not _table_has_column('case_assets', 'asset_compromise_status_id'):
        op.add_column('case_assets',
                      sa.Column('asset_compromise_status_id',
                                sa.Integer(),
                                nullable=True))

        # Set schema and make migration of data
        t_assets = sa.Table(
            'case_assets',
            sa.MetaData(),
            sa.Column('asset_id', sa.BigInteger, primary_key=True),
            sa.Column('asset_compromise_status_id', sa.Integer, nullable=True),
            sa.Column('asset_compromised', sa.Boolean, nullable=True)
        )
        conn = op.get_bind()
        # `== True` / `== False` build SQLAlchemy column expressions rendered
        # as SQL — do not "fix" them to Python `is` comparisons.
        conn.execute(t_assets.update().values(
            asset_compromise_status_id=CompromiseStatus.compromised.value
        ).where(t_assets.c.asset_compromised == True))

        conn.execute(t_assets.update().values(
            asset_compromise_status_id=CompromiseStatus.not_compromised.value
        ).where(t_assets.c.asset_compromised == False))

        # NOTE(review): rows where asset_compromised IS NULL keep a NULL
        # status id — confirm that is the intended behaviour.
        op.drop_column('case_assets', 'asset_compromised')

    pass


def downgrade():
    # Irreversible: the old boolean column is gone.
    pass

View File

@ -0,0 +1,75 @@
"""Add uniqueness to Tags table
Revision ID: 50f28953a485
Revises: c959c298ca00
Create Date: 2023-04-06 16:17:40.043545
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
# revision identifiers, used by Alembic.
revision = '50f28953a485'
down_revision = 'c959c298ca00'
branch_labels = None
depends_on = None
def upgrade():
    """Deduplicate tags by title and enforce uniqueness on tags.tag_title.

    1. Re-point case_tags rows at the lowest-id tag of each duplicated title.
    2. Delete the now-orphaned duplicate tag rows.
    3. Add the unique constraint.
    """
    conn = op.get_bind()

    # Update the CaseTags table to point to the first tag with the same title
    conn.execute(text("""
        WITH duplicates AS (
            SELECT
                MIN(id) as min_id,
                tag_title
            FROM
                tags
            GROUP BY
                tag_title
            HAVING
                COUNT(*) > 1
        ),
        duplicate_tags AS (
            SELECT
                id,
                tag_title
            FROM
                tags
            WHERE
                tag_title IN (SELECT tag_title FROM duplicates)
        )
        UPDATE
            case_tags
        SET
            tag_id = duplicates.min_id
        FROM
            duplicates,
            duplicate_tags
        WHERE
            case_tags.tag_id = duplicate_tags.id
            AND duplicate_tags.tag_title = duplicates.tag_title
            AND duplicate_tags.id <> duplicates.min_id;
    """))

    # Remove duplicates in the tags table (keep the lowest id per title)
    conn.execute(text("""
        DELETE FROM tags
        WHERE id IN (
            SELECT id FROM (
                SELECT id, ROW_NUMBER()
                OVER (PARTITION BY tag_title ORDER BY id) AS rnum
                FROM tags) t
            WHERE t.rnum > 1);
    """))

    # Add the unique constraint to the tag_title column
    op.create_unique_constraint(None, 'tags', ['tag_title'])

    pass


def downgrade():
    # Irreversible: merged duplicate tags cannot be restored.
    pass

View File

@ -0,0 +1,39 @@
"""Reviewer in case
Revision ID: 65168cb6cc90
Revises: e33dd011fb87
Create Date: 2023-07-09 09:01:39.243870
"""
from alembic import op
import sqlalchemy as sa
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '65168cb6cc90'
down_revision = 'e33dd011fb87'
branch_labels = None
depends_on = None
def upgrade():
    """Add reviewer and review-status references to cases."""
    fk_specs = (
        ('reviewer_id', 'fkey_cases_reviewer_id', 'user'),
        ('review_status_id', 'fkey_cases_review_status_id', 'review_status'),
    )
    for column_name, fk_name, referenced_table in fk_specs:
        if not _table_has_column('cases', column_name):
            op.add_column('cases',
                          sa.Column(column_name, sa.Integer(), nullable=True))
            op.create_foreign_key(fk_name, 'cases', referenced_table,
                                  [column_name], ['id'])


def downgrade():
    """Irreversible: the reviewer columns are kept."""
    pass

View File

@ -0,0 +1,79 @@
"""Add IOC type
Revision ID: 6a3b3b627d45
Revises:
Create Date: 2022-01-01 23:40:35.283005
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
from app.alembic.alembic_utils import _table_has_column
revision = '6a3b3b627d45'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Convert ioc.ioc_type (free text) into ioc.ioc_type_id (FK to ioc_type)."""
    # IOC types is created by post init if not existing
    # Now issue changes on existing tables and migrate IOC types
    # Add column ioc_type_id to IOC if not existing
    if not _table_has_column('ioc', 'ioc_type_id'):
        op.add_column('ioc',
                      sa.Column('ioc_type_id', sa.Integer, sa.ForeignKey('ioc_type.type_id'))
                      )
        # Add the foreign key of ioc_type to ioc
        op.create_foreign_key(
            constraint_name='ioc_ioc_type_id',
            source_table="ioc",
            referent_table="ioc_type",
            local_cols=["ioc_type_id"],
            remote_cols=["type_id"])

    if _table_has_column('ioc', 'ioc_type'):
        # Set schema and make migration of data
        t_ioc = sa.Table(
            'ioc',
            sa.MetaData(),
            sa.Column('ioc_id', sa.Integer, primary_key=True),
            sa.Column('ioc_value', sa.Text),
            sa.Column('ioc_type', sa.Unicode(length=50)),
            sa.Column('ioc_type_id', sa.ForeignKey('ioc_type.type_id')),
            sa.Column('ioc_tags', sa.Text),
            sa.Column('user_id', sa.ForeignKey('user.id')),
            sa.Column('ioc_misp', sa.Text),
            sa.Column('ioc_tlp_id', sa.ForeignKey('tlp.tlp_id'))
        )

        # Map legacy free-text type names onto the seeded ioc_type rows.
        to_update = [('Domain', 'domain'), ('IP', 'ip-any'), ('Hash', 'other'), ('File', 'filename'),
                     ('Path', 'file-path'), ('Account', 'account'), ("Other", 'other')]

        # Migrate existing IOCs
        for src_up, dst_up in to_update:
            conn = op.get_bind()
            # The interpolated values are the hard-coded constants above, so
            # these f-string queries are not an injection risk here.
            res = conn.execute(f"select ioc_id from ioc where ioc_type = '{src_up}';")
            results = res.fetchall()

            res = conn.execute(f"select type_id from ioc_type where type_name = '{dst_up}';")
            e_info = res.fetchall()
            # NOTE(review): if the target type row is missing (e_info empty),
            # the IOCs of that legacy type are left unmapped — confirm the
            # post-init seeding guarantees all target types exist.
            if e_info:
                domain_id = e_info[0][0]
                for res in results:
                    conn.execute(t_ioc.update().where(t_ioc.c.ioc_id == res[0]).values(
                        ioc_type_id=domain_id
                    ))

        op.drop_column(
            table_name='ioc',
            column_name='ioc_type'
        )
    pass


def downgrade():
    # Irreversible: the free-text ioc_type column is gone.
    pass

View File

@ -0,0 +1,58 @@
"""Add server settings updates info
Revision ID: 79a9a54e8f9d
Revises: ff917e2ab02e
Create Date: 2022-05-05 18:39:19.027828
"""
import sqlalchemy as sa
from alembic import op
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '79a9a54e8f9d'
down_revision = 'ff917e2ab02e'
branch_labels = None
depends_on = None
def _ensure_bool_setting(column_name, initial_value):
    """Add a boolean column to server_settings (if missing) and backfill
    every existing row with *initial_value*."""
    if _table_has_column('server_settings', column_name):
        return
    op.add_column('server_settings',
                  sa.Column(column_name, sa.Boolean)
                  )
    # Minimal table definition used only for the backfill UPDATE.
    t_settings = sa.Table(
        'server_settings',
        sa.MetaData(),
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column(column_name, sa.Boolean)
    )
    conn = op.get_bind()
    conn.execute(t_settings.update().values(**{column_name: initial_value}))


def upgrade():
    """Add update-tracking flags to server_settings.

    Factors the two copy-pasted add-column-and-backfill blocks into one
    helper; the issued DDL/DML is unchanged.
    """
    _ensure_bool_setting('has_updates_available', False)
    _ensure_bool_setting('enable_updates_check', True)


def downgrade():
    """Irreversible: the settings columns are kept."""
    pass

View File

@ -0,0 +1,28 @@
"""Migrate user int to big int
Revision ID: 7cc588444b79
Revises: 92ecbf0f6d10
Create Date: 2022-06-14 08:28:59.027411
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7cc588444b79'
down_revision = '92ecbf0f6d10'
branch_labels = None
depends_on = None
def upgrade():
    """Widen user.id from INTEGER to BIGINT."""
    op.alter_column(
        'user', 'id',
        existing_type=sa.INTEGER(),
        type_=sa.BigInteger(),
        existing_nullable=False,
    )


def downgrade():
    """Irreversible: the BIGINT widening is kept."""
    pass

View File

@ -0,0 +1,44 @@
"""Add task log api field
Revision ID: 874ba5e5da44
Revises: c773a35c280f
Create Date: 2022-02-03 16:22:37.506019
"""
import sqlalchemy as sa
from alembic import op
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '874ba5e5da44'
down_revision = 'c773a35c280f'
branch_labels = None
depends_on = None
def upgrade():
    """Add user_activity.is_from_api and mark all existing rows as non-API."""
    if not _table_has_column('user_activity', 'is_from_api'):
        op.add_column('user_activity',
                      sa.Column('is_from_api', sa.Boolean))
        # Minimal table definition used only for the backfill UPDATE.
        activity_table = sa.Table(
            'user_activity',
            sa.MetaData(),
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('is_from_api', sa.Boolean)
        )
        bind = op.get_bind()
        bind.execute(activity_table.update().values(is_from_api=False))


def downgrade():
    """Irreversible: the is_from_api column is kept."""
    pass

View File

@ -0,0 +1,46 @@
"""Modifying case tasks to remove assignee id for instead, adding a table named task_assignee
Revision ID: 875edc4adb40
Revises: fcc375ed37d1
Create Date: 2022-07-17 14:57:22.809977
"""
from alembic import op
from app.alembic.alembic_utils import _has_table
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '875edc4adb40'
down_revision = 'fcc375ed37d1'
branch_labels = None
depends_on = None
def upgrade():
    """Move per-task assignees from case_tasks.task_assignee_id into the
    task_assignee association table, then drop the old column.

    Fix: the SELECT literal carried a spurious f-string prefix with no
    placeholders (flake8 F541); it is now a plain string.
    """
    conn = op.get_bind()

    if _has_table("case_tasks"):
        if _table_has_column("case_tasks", "task_assignee_id"):
            res = conn.execute("select id, task_assignee_id from case_tasks")
            results_tasks = res.fetchall()
            for task_id, user_id in results_tasks:
                # Unassigned tasks fall back to the initial administrator (id 1).
                if not user_id:
                    user_id = 1

                # Migrate assignees to task_assignee. Both ids come straight
                # from the database as integers, so interpolating them here
                # is not an injection vector.
                conn.execute(f"insert into task_assignee (user_id, task_id) values ({user_id}, {task_id}) "
                             f"on conflict do nothing;")

            op.drop_column(
                table_name='case_tasks',
                column_name='task_assignee_id'
            )


def downgrade():
    """Irreversible: assignees stay in the task_assignee table."""
    pass

View File

@ -0,0 +1,42 @@
"""Add user external ID
Revision ID: 92ecbf0f6d10
Revises: cd519d2d24df
Create Date: 2022-06-13 08:59:04.860887
"""
import sqlalchemy as sa
from alembic import op
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = '92ecbf0f6d10'
down_revision = 'cd519d2d24df'
branch_labels = None
depends_on = None
def upgrade():
    """Add user.external_id (free-text id mapping users to external systems).

    The original migration followed the ADD COLUMN with an UPDATE that set
    external_id = NULL on every row; a freshly added nullable column is
    already NULL, so that full-table rewrite was a no-op and was removed.
    """
    if not _table_has_column('user', 'external_id'):
        op.add_column('user',
                      sa.Column('external_id', sa.Text)
                      )


def downgrade():
    """Irreversible: the external_id column is kept."""
    pass

View File

@ -0,0 +1,34 @@
"""Add cases status
Revision ID: a3eb60654ec4
Revises: 3204e9116233
Create Date: 2022-11-10 07:52:22.502834
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Integer
from sqlalchemy import text
from app.alembic.alembic_utils import _table_has_column
# revision identifiers, used by Alembic.
revision = 'a3eb60654ec4'
down_revision = '3204e9116233'
branch_labels = None
depends_on = None
def upgrade():
    """Add cases.status_id with a server-side default of 0."""
    if _table_has_column('cases', 'status_id'):
        return
    op.add_column(
        'cases',
        sa.Column('status_id', Integer, server_default=text("0"), nullable=False)
    )


def downgrade():
    """Irreversible: the status_id column is kept."""
    pass

View File

@ -0,0 +1,42 @@
"""Add activity no display field
Revision ID: a929ef458490
Revises: 1df4adfa3160
Create Date: 2022-11-21 15:26:49.088050
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from app.alembic.alembic_utils import _table_has_column
revision = 'a929ef458490'
down_revision = '1df4adfa3160'
branch_labels = None
depends_on = None
def upgrade():
    """Add user_activity.display_in_ui and show all existing activities."""
    if not _table_has_column('user_activity', 'display_in_ui'):
        op.add_column('user_activity',
                      sa.Column('display_in_ui', sa.Boolean, default=True))
        # Minimal table definition used only for the backfill UPDATE.
        activities = sa.Table(
            'user_activity',
            sa.MetaData(),
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('display_in_ui', sa.Boolean)
        )
        bind = op.get_bind()
        bind.execute(activities.update().values(display_in_ui=True))


def downgrade():
    """Irreversible: the display_in_ui column is kept."""
    pass

Some files were not shown because too many files have changed in this diff Show More