Skip to content
This repository has been archived by the owner on Aug 1, 2022. It is now read-only.

PMM-8306 migrate ami ova to docker #343

Open
wants to merge 32 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 26 commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ pmm-az:
packer build -only azure-arm packer/pmm.json

pmm2-ovf: fetch
packer build -only virtualbox-ovf packer/pmm2.json
packer build -only pmm2.virtualbox-ovf.image packer/pmm2.pkr.hcl

pmm2-digitalocean:
packer build -only digitalocean -var 'single_disk=true' packer/pmm2.json
Expand Down
6 changes: 6 additions & 0 deletions packer/ansible/files/pmm/daemon.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"experimental": true,
"max-concurrent-downloads": 9,
"max-concurrent-uploads": 15,
"storage-driver": "overlay2"
}
206 changes: 206 additions & 0 deletions packer/ansible/pmm2.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,206 @@
---
# Build the PMM2 Server machine image:
#   * install Docker and supporting packages,
#   * tune kernel/sysctl parameters for a database-monitoring workload,
#   * pull the PMM Server container image and seed /srv from its filesystem,
#   * install systemd units so the container (and a login banner) start on boot.
# Expects `pmm_server_image_name` to be supplied via --extra-vars (see packer/pmm2.pkr.hcl).
- name: Create PMM2 image
  hosts: default
  become: true
  roles:
    - lvm-init
  tasks:
    - name: Install packages
      package:
        name:
          - docker
          - python3-pip
          - python3
          - lvm2
          - jq
        state: present

    - name: Install docker Python API
      pip:
        name: docker

    - name: Enable Docker service
      systemd:
        name: docker.service
        state: started
        enabled: true

    - name: Add admin user
      user:
        name: admin
        comment: PMM User
        groups: wheel,adm,systemd-journal,docker
        shell: /bin/bash

    - name: Add sudo for admin user
      copy:
        content: |
          admin ALL=(ALL) NOPASSWD: ALL
        dest: /etc/sudoers.d/90-admin-user
        mode: "0440"

    - name: Change cloud user
      replace:
        dest: /etc/cloud/cloud.cfg
        regexp: 'name: ec2-user'
        replace: 'name: admin'

    - name: Turn off swap
      sysctl:
        name: vm.swappiness
        value: "0"
        state: present

    - name: Increase dirty ratio
      sysctl:
        name: vm.dirty_ratio
        value: "80"
        state: present

    - name: Decrease dirty_background_ratio
      sysctl:
        name: vm.dirty_background_ratio
        value: "5"

    - name: Increase dirty_expire_centisecs
      sysctl:
        name: vm.dirty_expire_centisecs
        value: "12000"  # 120 seconds

    - name: Increase net.core.somaxconn
      sysctl:
        name: net.core.somaxconn
        value: "1024"

    - name: Increase net.core.netdev_max_backlog
      sysctl:
        name: net.core.netdev_max_backlog
        value: "5000"

    - name: Increase net.core.rmem_max
      sysctl:
        name: net.core.rmem_max
        value: "16777216"

    - name: Increase net.core.wmem_max
      sysctl:
        name: net.core.wmem_max
        value: "16777216"

    - name: Increase net.ipv4.tcp_wmem
      sysctl:
        name: net.ipv4.tcp_wmem
        value: "4096 12582912 16777216"

    - name: Increase net.ipv4.tcp_rmem
      sysctl:
        name: net.ipv4.tcp_rmem
        value: "4096 12582912 16777216"

    - name: Increase net.ipv4.tcp_max_syn_backlog
      sysctl:
        name: net.ipv4.tcp_max_syn_backlog
        value: "8192"

    - name: Disable net.ipv4.tcp_slow_start_after_idle
      sysctl:
        name: net.ipv4.tcp_slow_start_after_idle
        value: "0"

    - name: Enable net.ipv4.tcp_tw_reuse
      sysctl:
        name: net.ipv4.tcp_tw_reuse
        value: "1"

    - name: Change net.ipv4.ip_local_port_range
      sysctl:
        name: net.ipv4.ip_local_port_range
        value: "10240 65535"

    - name: Change TCP Congestion Control Algorithm (net.ipv4.tcp_congestion_control)
      sysctl:
        name: net.ipv4.tcp_congestion_control
        value: bbr

    - name: Change net.ipv4.tcp_syn_retries
      sysctl:
        name: net.ipv4.tcp_syn_retries
        value: "2"

    - name: Change BBR algorithm
      sysctl:
        name: net.core.default_qdisc
        value: fq

    - name: Increase kernel.perf_event_max_stack
      sysctl:
        name: kernel.perf_event_max_stack
        value: "1023"

    - name: Increase fs.xfs.xfssyncd_centisecs
      sysctl:
        name: fs.xfs.xfssyncd_centisecs
        value: "9000"

    - name: Increase fs.inotify.max_user_watches
      sysctl:
        name: fs.inotify.max_user_watches
        value: "1048576"

    - name: Pull the PMM image
      docker_image:
        name: "{{ pmm_server_image_name }}"
        source: pull

    # Resolve the image's upper overlay directory so its /srv content can be
    # copied out without starting a container.
    - name: Get srv directory path
      shell: docker image inspect {{ pmm_server_image_name }} | jq -r .[0].GraphDriver.Data.UpperDir
      register: image_path

    # NOTE(review): a second, unnamed `copy` task previously duplicated this
    # step (copying the same tree twice); the duplicate was removed and only
    # the rsync-based synchronize kept.
    - name: Copy content of 'srv' directory from image
      synchronize:
        src: "{{ image_path.stdout }}/srv/"
        dest: /srv/
        recursive: true
      delegate_to: "{{ inventory_hostname }}"

    - name: Set AMI distribution
      copy:
        content: |
          ami
        dest: /srv/pmm-distribution

    - name: Add script which shows PMM URL
      copy:
        src: pmm/show-url
        dest: /opt/show-url
        mode: "0755"

    - name: Add service for the PMM URL banner script
      copy:
        src: banner.service
        dest: /etc/systemd/system/banner.service
        mode: "0644"  # unit files are read, not executed

    - name: Enable PMM URL service
      systemd:
        name: banner
        state: started
        enabled: true
        daemon_reload: true  # unit file was just installed above

    - name: Copy systemd service file to image
      template:
        src: pmm2.service
        dest: /etc/systemd/system
        owner: root
        group: root
        mode: "0644"

    - name: Enable PMM2 container
      systemd:
        name: pmm2
        enabled: true
        daemon_reload: true  # unit file was just installed above
52 changes: 52 additions & 0 deletions packer/ansible/roles/lvm-init/tasks/main.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
---
# lvm-init role: turn /dev/sdb into an LVM thin pool backing an XFS
# filesystem mounted at /srv, and install a per-boot auto-resize script
# (plus a cron fallback) so the filesystem grows with the disk.
- name: Wait until disk is ready
  wait_for:
    path: /dev/sdb

- name: Create Volume Group
  lvg:
    vg: DataVG
    pvs: /dev/sdb

# lvol refuses to shrink an existing volume; on repeat runs it fails with
# "Sorry, no shrinking of DataLV to 0 permitted", which we treat as success
# (the pool already exists).
- name: Create Thin Pool
  register: thin_pool
  failed_when: "thin_pool is failed and 'Sorry, no shrinking of DataLV to 0 permitted' not in thin_pool.msg"
  lvol:
    lv: DataLV
    vg: DataVG
    size: 100%FREE
    opts: --thinpool ThinPool -V 1G

- name: Format LVM
  filesystem:
    fstype: xfs
    dev: /dev/DataVG/DataLV
    opts: -L DATA

- name: Mount
  mount:
    name: "/srv"
    src: LABEL=DATA
    fstype: xfs
    opts: defaults
    state: mounted

- name: Create dirs
  file:
    path: "{{ item }}"
    state: directory
  loop:
    - /var/lib/cloud/scripts/per-boot

# cloud-init runs everything in per-boot on each boot, so a resized disk is
# picked up automatically after a reboot.
- name: Data partition | Auto resize LVM
  template:
    src: resize-xfs-lvm
    dest: /var/lib/cloud/scripts/per-boot/resize-xfs
    mode: "0755"

# Cron fallback: also grow the filesystem on a running instance without a
# reboot (every 5 minutes).
- name: Cron tasks | Add resize task to cron
  cron:
    name: "resize data partition"
    minute: "*/5"
    user: root
    job: "/var/lib/cloud/scripts/per-boot/resize-xfs"
    cron_file: resizeXfs

7 changes: 7 additions & 0 deletions packer/ansible/roles/lvm-init/templates/resize-xfs-lvm
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/sh
# Grow the LVM-backed /srv (XFS) filesystem after the data disk is enlarged.
# Installed to /var/lib/cloud/scripts/per-boot/resize-xfs and also run from cron.

# Resize the data-disk PV: from the candidate disks (/dev/sda, /dev/sdb,
# /dev/xvdb) filter out (1) the disk backing VolGroup00, (2) the disk holding
# the root filesystem, and (3) the disk mounted at /mnt/resource (presumably
# the Azure temporary disk -- confirm). Each `sed 's/[0-9]$//'` strips a
# trailing partition digit so a partition device maps back to its parent disk.
/usr/sbin/pvresize $(/usr/bin/ls /dev/sda /dev/sdb /dev/xvdb 2>/dev/null | /usr/bin/grep -v ^$(/usr/sbin/pvdisplay -c | /usr/bin/grep ':VolGroup00:' | /usr/bin/cut -d ':' -f 1 | /usr/bin/tr -d '[:space:]' | /usr/bin/sed 's/[0-9]$//')$ | /usr/bin/grep -v ^$(/usr/bin/findmnt -f -n -o SOURCE / | /usr/bin/sed 's/[0-9]$//')$ | /usr/bin/grep -v ^$(/usr/bin/findmnt -f -n -o SOURCE /mnt/resource | /usr/bin/sed 's/[0-9]$//')$)
# Grow thin-pool metadata to 1% of the VG, the thin pool to the whole VG,
# the thin data volume to 80% of the PVs, then expand XFS to fill it.
/usr/sbin/lvextend -l '1%VG' /dev/DataVG/ThinPool_tmeta
/usr/sbin/lvextend -l '100%VG' /dev/DataVG/ThinPool
/usr/sbin/lvextend -l '80%PVS' /dev/DataVG/DataLV
/usr/sbin/xfs_growfs -d /srv
16 changes: 16 additions & 0 deletions packer/ansible/templates/pmm2.service
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Runs the PMM2 Server Docker container as a systemd service.
# Rendered by Ansible (pmm_server_image_name is templated in).
[Unit]
Description=PMM2 Docker container
After=docker.service
Requires=docker.service

[Service]
TimeoutStartSec=0
Restart=always
# The "-" prefix ignores failures when no previous container exists.
# Fixed per review: was `docker exec pmm-server stop`, which would try to run
# a `stop` binary *inside* the container; `docker stop` is the correct command.
ExecStartPre=-/usr/bin/docker stop pmm-server
ExecStartPre=-/usr/bin/docker rm pmm-server
ExecStart=/usr/bin/docker run --volume /srv/:/srv/ --rm --name pmm-server \
    --net host \
    {{ pmm_server_image_name }}

[Install]
WantedBy=default.target
91 changes: 91 additions & 0 deletions packer/pmm2.pkr.hcl
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
# Packer template for building the PMM2 Server AMI from the PMM Server
# Docker image (PMM-8306: migrate AMI/OVA builds to Docker-based images).
packer {
  required_plugins {
    amazon = {
      version = "=1.0.8"
      source  = "github.com/hashicorp/amazon"
    }
  }
}

# Container image to seed the AMI from; passed through to the Ansible
# provisioner as pmm_server_image_name.
variable "pmm_server_image_name" {
  type    = string
  default = "perconalab/pmm-server:dev-latest"
}

variable "single_disk" {
  type    = string
  default = "false"
}

variable "pmm2_server_repo" {
  type    = string
  default = "testing"
}

variable "pmm_client_repos" {
  type    = string
  default = "original testing"
}

variable "pmm_client_repo_name" {
  type    = string
  default = "percona-testing-x86_64"
}

source "amazon-ebs" "image" {
  ami_name          = "PMM2 Server [${formatdate("YYYY-MM-DD hhmm", timestamp())}]"
  instance_type     = "c4.xlarge"
  ena_support       = true
  region            = "us-east-1"
  subnet_id         = "subnet-ee06e8e1"
  security_group_id = "sg-688c2b1c"
  ssh_username      = "ec2-user"

  # Root disk (OS).
  launch_block_device_mappings {
    delete_on_termination = true
    device_name           = "/dev/xvda"
    volume_size           = 8
    volume_type           = "gp3"
  }

  # Data disk, turned into the /srv LVM volume by the lvm-init role;
  # kept on termination so monitoring data survives.
  launch_block_device_mappings {
    delete_on_termination = false
    device_name           = "/dev/xvdb"
    volume_size           = 50
    volume_type           = "gp3"
  }

  # Latest Amazon Linux 2 x86_64 HVM EBS-backed base AMI.
  source_ami_filter {
    filters = {
      name                = "*amzn2-ami-hvm-*"
      root-device-type    = "ebs"
      virtualization-type = "hvm"
      architecture        = "x86_64"
    }
    most_recent = true
    owners      = ["amazon"]
  }
  tags = {
    iit-billing-tag = "pmm-worker"
  }
  run_tags = {
    iit-billing-tag = "pmm-ami"
  }
  run_volume_tags = {
    iit-billing-tag = "pmm-ami"
  }
}

build {
  name = "pmm2"
  sources = [
    "source.amazon-ebs.image"
  ]
  provisioner "ansible" {
    extra_arguments = [
      "--extra-vars",
      "pmm_server_image_name=${var.pmm_server_image_name}"
    ]
    playbook_file = "./packer/ansible/pmm2.yml"
  }
}