Add grafana, block Chinese IPs

This commit is contained in:
Felix Ableitner 2020-10-05 15:41:43 +02:00
parent 81747da1da
commit d193e06657
12 changed files with 569 additions and 21 deletions

3
.gitignore vendored
View file

@@ -1,2 +1,3 @@
inventory
prod
passwords/
vault_pass

View file

@@ -1,5 +1,5 @@
[defaults]
inventory=inventory
[privilege_escalation]
become = True
[ssh_connection]
pipelining = True

View file

@@ -13,12 +13,13 @@ services:
environment:
- GITEA_HOSTNAME=${GITEA_HOSTNAME}
depends_on:
- postgres
- redis
- postfix
mem_limit: 500m
weblate:
image: weblate/weblate:4.1-2
image: weblate/weblate:4.2.2-1
restart: always
ports:
- 127.0.0.1:3001:8080
@@ -31,6 +32,8 @@ services:
- WEBLATE_ADMIN_EMAIL=noreply@${WEBLATE_HOSTNAME}
- WEBLATE_SERVER_EMAIL=noreply@${WEBLATE_HOSTNAME}
- WEBLATE_DEFAULT_FROM_EMAIL=noreply@${WEBLATE_HOSTNAME}
- WEBLATE_SITE_DOMAIN=${WEBLATE_HOSTNAME}
- WEBLATE_DEBUG=false
- REDIS_HOST=redis
- REDIS_DB=2
- POSTGRES_PASSWORD=${WEBLATE_POSTGRES_PASSWORD}
@@ -43,6 +46,7 @@ services:
depends_on:
- postgres
- redis
- postfix
grafana:
image: grafana/grafana:7.0.4

7
files/nginx_status.conf Normal file
View file

@@ -0,0 +1,7 @@
server {
listen 8090;
location /nginx_status {
stub_status;
access_log off;
}
}

36
group_vars/prod.yml Normal file
View file

@@ -0,0 +1,36 @@
domain: yerbamate.dev
letsencrypt_contact_email: !vault |
$ANSIBLE_VAULT;1.1;AES256
61393837323736363138343338353563313337383033366232343836633337333033636362616437
3132396335636233386330643861613064353232383230380a303564646262613665373665633734
36633466366138323334386337383262353934323337343932633837663136616437326331366431
3261333962353964300a363535383130336164623862326165626466393334666638323964663834
3635
influxdb_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
61616638303232373339653737303566613430663763313864633365313166383163323033636364
3163313737323662343934323866633734633530646638610a666662663761363533373539316631
38613837646436323535303533373132353734353132333837323638326265366163623262373933
6464653632346366650a393535653230613961646233383862663266623435326230613933636166
66346536363164646539643164626231373461353533646434626161343334356264
weblate_admin_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
38366130306138306562663035303332353630326663303264643333363639383631656262346632
3938373461616365366137626165373036316465343939610a643630333166306132366338376438
66643335316162306333396430336132383564356339353361373537633865393263643661376139
3965313334616465620a333331386161613265643038663066313062383334656332323965653333
33656433633136303130653031346264653563643466653934303033646632356338
weblate_postgres_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
65613463303836396234653830353632313330316439623337653831653237646534383532656263
3136326236653437373137646665373737343332343334380a363564653566663664633936316339
66353238623038366230323239303634613963643635626632353739636564396430386565623466
6562383763396235340a313463643239333662393430613465363965666466303461663066386533
61323161323732396533373062663762383031336330653336376533633633393035
telegraf_influxdb_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
61343966363633306163646530646361613833663831623139376135396436623835333363663236
3235613761363138313236636164646131383234313532370a626234643530373339646133313332
36623563623434323336663262323939326534643834666465333863386231616439636132316436
3833303337393633320a313766336236303264376333373535353832646262666634383062303935
62393230366331396435313162636136333832623939666663623131343761633031

View file

@@ -1,5 +0,0 @@
[peertube]
user@host domain=your_domain letsencrypt_contact_email=user@example.com
[all:vars]
ansible_connection=ssh

View file

@@ -1,6 +1,5 @@
---
- hosts: all
become: yes
# Install python if required
# https://www.josharcher.uk/code/ansible-python-connection-failure-ubuntu-server-1604/
@@ -30,20 +29,15 @@
- name: add all templates
template: src={{item.src}} dest={{item.dest}} mode={{item.mode}}
with_items:
- { src: 'templates/gitea.conf', dest: '/etc/nginx/sites-enabled/gitea.conf', mode: '0600' }
- { src: 'templates/weblate.conf', dest: '/etc/nginx/sites-enabled/weblate.conf', mode: '0600' }
- { src: 'templates/grafana.conf', dest: '/etc/nginx/sites-enabled/grafana.conf', mode: '0600' }
- { src: 'templates/env', dest: '/gitea/.env', mode: '0600' }
vars:
weblate_admin_password: "{{ lookup('password', 'passwords/{{ inventory_hostname }}/weblate_admin_password chars=ascii_letters,digits') }}"
weblate_postgres_password: "{{ lookup('password', 'passwords/{{ inventory_hostname }}/weblate_postgres_password chars=ascii_letters,digits') }}"
influxdb_password: "{{ lookup('password', 'passwords/{{ inventory_hostname }}/influxdb_password chars=ascii_letters,digits') }}"
- { src: '../templates/gitea.conf', dest: '/etc/nginx/sites-enabled/gitea.conf', mode: '0600' }
- { src: '../templates/weblate.conf', dest: '/etc/nginx/sites-enabled/weblate.conf', mode: '0600' }
- { src: '../templates/grafana.conf', dest: '/etc/nginx/sites-enabled/grafana.conf', mode: '0600' }
- { src: '../templates/env', dest: '/gitea/.env', mode: '0600' }
- name: copy all files
copy: src={{item.src}} dest={{item.dest}} mode={{item.mode}}
with_items:
- { src: 'files/docker-compose.yml', dest: '/gitea/docker-compose.yml', mode: '0755' }
- { src: 'files/influxdb.conf', dest: '/gitea/influxdb.conf', mode: '0755' }
- { src: '../files/docker-compose.yml', dest: '/gitea/docker-compose.yml', mode: '0755' }
- { src: '../files/influxdb.conf', dest: '/gitea/influxdb.conf', mode: '0755' }
- name: install dependencies
apt:

3
playbooks/site.yml Normal file
View file

@@ -0,0 +1,3 @@
---
- import_playbook: gitea.yml
- import_playbook: telegraf.yml

44
playbooks/telegraf.yml Normal file
View file

@@ -0,0 +1,44 @@
---
- hosts: all
tasks:
- name: copy nginx files
copy:
src: '../files/nginx_status.conf'
dest: '/etc/nginx/sites-enabled/nginx_status.conf'
- name: add telegraf apt key
apt_key:
keyserver: https://repos.influxdata.com/influxdb.key
id: 684A14CF2582E0C5
state: present
- name: add telegraf apt repository
apt_repository:
# Note: we need to adjust this manually for different ubuntu versions
repo: 'deb https://repos.influxdata.com/ubuntu bionic stable'
state: present
filename: influxdb
update_cache: yes
- name: add telegraf to docker group
action: user name=telegraf groups="docker" append=yes
- name: install telegraf
apt:
name: telegraf
state: present
- name: add telegraf config
template:
src: '../templates/telegraf.conf.j2'
dest: '/etc/telegraf/telegraf.conf'
owner: telegraf
group: telegraf
mode: '0600'
- name: start and enable telegraf service
systemd:
state: reloaded
name: telegraf
enabled: true

View file

@@ -1,5 +1,11 @@
limit_req_zone $binary_remote_addr zone=gitea_ratelimit:10m rate=1r/s;
geoip_country /usr/share/GeoIP/GeoIP.dat;
map $geoip_country_code $allowed_country {
default yes;
CN no;
}
server {
listen 80;
server_name {{ domain }};
@@ -45,6 +51,10 @@ server {
gzip_proxied any;
gzip_vary on;
if ($allowed_country = no) {
return 444;
}
location / {
limit_req zone=gitea_ratelimit burst=30 nodelay;
proxy_pass http://127.0.0.1:3000;

445
templates/telegraf.conf.j2 Normal file
View file

@@ -0,0 +1,445 @@
# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log target controls the destination for logs and can be one of "file",
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
## is determined by the "logfile" setting.
# logtarget = "file"
## Name of the file to be logged to when using the "file" logtarget. If set to
## the empty string then logs are written to stderr.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0d"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do no set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
# urls = ["http://127.0.0.1:8086"]
urls = ["https://grafana.yerbamate.dev/telegraf-input"]
## The target database for metrics; will be created as needed.
## For UDP url endpoint database needs to be configured on server side.
database = "yerbamate-dev"
## The value of this tag will be used to determine the database. If this
## tag is not set the 'database' option is used as the default.
# database_tag = ""
## If true, the 'database_tag' will not be included in the written metric.
# exclude_database_tag = false
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
# skip_database_creation = false
## Name of existing retention policy to write to. Empty string writes to
## the default retention policy. Only takes effect when using HTTP.
# retention_policy = ""
## The value of this tag will be used to determine the retention policy. If this
## tag is not set the 'retention_policy' option is used as the default.
# retention_policy_tag = ""
## If true, the 'retention_policy_tag' will not be included in the written metric.
# exclude_retention_policy_tag = false
## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
## Only takes effect when using HTTP.
# write_consistency = "any"
## Timeout for HTTP messages.
# timeout = "5s"
## HTTP Basic Auth
username = "telegraf"
password = "{{ telegraf_influxdb_password }}"
## HTTP User-Agent
# user_agent = "telegraf"
## UDP payload size is the maximum packet size to send.
# udp_payload = "512B"
## Optional TLS Config for use on HTTP connections.
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy override, if unset values the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
# influx_uint_support = false
###############################################################################
# INPUT PLUGINS #
###############################################################################
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default stats will be gathered for all mount points.
## Set mount_points will restrict the stats to only the specified mount points.
# mount_points = ["/"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
# Read metrics about disk IO by device
[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb", "vd*"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
#
## On systems which support it, device metadata can be added in the form of
## tags.
## Currently only Linux is supported via udev properties. You can view
## available properties for a device by running:
## 'udevadm info -q property -n /dev/sda'
## Note: Most, but not all, udev properties can be accessed this way. Properties
## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
#
## Using the same metadata source as device_tags, you can also customize the
## name of the device via templates.
## The 'name_templates' parameter is a list of templates to try and apply to
## the device. The template may contain variables in the form of '$PROPERTY' or
## '${PROPERTY}'. The first template which does not contain any variables not
## present for the device is used as the device name tag.
## The typical use case is for LVM volumes, to get the VG/LV name instead of
## the near-meaningless DM-0 name.
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[inputs.system]]
## Uncomment to remove deprecated metrics.
fielddrop = ["uptime_format"]
[[inputs.net]]
interfaces = ["eth0"]
# Read metrics about docker containers
[[inputs.docker]]
## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
gather_services = false
## Only collect metrics for these containers, collect all if empty
container_names = []
## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
source_tag = false
## Containers to include and exclude. Globs accepted.
## Note that an empty array for both will include all containers
container_name_include = []
container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# container_state_include = []
# container_state_exclude = []
## Timeout for docker list, info, and stats commands
timeout = "5s"
## Whether to report for each container per-device blkio (8:0, 8:1...) and
## network (eth0, eth1, ...) stats or not
perdevice = true
## Whether to report for each container total blkio and network stats or not
total = false
## Which environment variables should we use as a tag
##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
docker_label_include = []
docker_label_exclude = []
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
# An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost:8090/nginx_status"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.cer"
# tls_key = "/etc/telegraf/key.key"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
# HTTP response timeout (default: 5s)
# response_timeout = "5s"
# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
# [[inputs.nginx_upstream_check]]
# ## An URL where Nginx Upstream check module is enabled
# ## It should be set to return a JSON formatted response
# url = "http://127.0.0.1/status?format=json"
#
# ## HTTP method
# # method = "GET"
#
# ## Optional HTTP headers
# # headers = {"X-Special-Header" = "Special-Value"}
#
# ## Override HTTP "Host" header
# # host_header = "check.example.com"
#
# ## Timeout for HTTP requests
# timeout = "5s"
#
# ## Optional HTTP Basic Auth credentials
# # username = "username"
# # password = "pa$$word"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
###############################################################################
# SERVICE INPUT PLUGINS #
###############################################################################
# Read logging output from the Docker engine
[[inputs.docker_log]]
# Docker Endpoint
# To use TCP, set endpoint = "tcp://[ip]:[port]"
# To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "unix:///var/run/docker.sock"
# When true, container logs are read from the beginning; otherwise
# reading begins at the end of the log.
from_beginning = false
## Timeout for Docker API calls.
# timeout = "5s"
## Containers to include and exclude. Globs accepted.
## Note that an empty array for both will include all containers
# container_name_include = []
# container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
# container_state_include = []
# container_state_exclude = []
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
# docker_label_include = []
# docker_label_exclude = []
## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
source_tag = false
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# ##
# ## All connection parameters are optional.
# ##
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# ##
# address = "host=localhost user=postgres sslmode=disable"
# ## A custom name for the database that will be used as the "server" tag in the
# ## measurement output. If not specified, a default one generated from
# ## the connection address is used.
# # outputaddress = "db01"
#
# ## connection configuration.
# ## maxlifetime - specify the maximum lifetime of a connection.
# ## default is forever (0s)
# max_lifetime = "0s"
#
# ## A list of databases to explicitly ignore. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'databases' option.
# # ignored_databases = ["postgres", "template0", "template1"]
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
# # databases = ["app_production", "testing"]

View file

@@ -1,3 +1,8 @@
map $geoip_country_code $allowed_country {
default yes;
CN no;
}
server {
listen 80;
server_name weblate.{{ domain }};
@@ -43,6 +48,10 @@ server {
gzip_proxied any;
gzip_vary on;
if ($allowed_country = no) {
return 444;
}
location / {
limit_req zone=gitea_ratelimit burst=30 nodelay;
proxy_pass http://127.0.0.1:3001;