Compare commits

...

10 Commits

Author SHA1 Message Date
John Lancaster  015725d2cc  added build/run task  2025-11-30 13:14:10 -06:00
John Lancaster  85b3c5ee3f  added docker socket proxy container  2025-11-30 13:06:26 -06:00
John Lancaster  ad6e33162c  changed watchtower container  2025-11-27 13:11:14 -06:00
John Lancaster  ffc472e6b0  removed custom DNS  2025-11-04 02:02:16 -06:00
John Lancaster  36ce983f0e  Updated based on example from Grafana  2025-08-05 23:01:56 -05:00
John Lancaster  9946d1bff9  added explicit dns to some services  2025-08-04 22:47:44 -05:00
John Lancaster  6efb480f33  started grafana alloy implementation  2025-07-20 22:00:52 -05:00
John Lancaster  4145c55522  using tls for telegraf  2025-06-19 12:33:36 -05:00
John Lancaster  38d69731fe  cleaned up promtail  2025-06-19 12:08:52 -05:00
John Lancaster  f5dfdaba55  commented out appdaemon section  2025-02-15 01:30:50 -06:00
5 changed files with 102 additions and 117 deletions

.vscode/tasks.json (vendored, new file, +18)

@@ -0,0 +1,18 @@
{
    // See https://go.microsoft.com/fwlink/?LinkId=733558
    // for the documentation about the tasks.json format
    "version": "2.0.0",
    "tasks": [
        {
            "label": "Docker Compose Up",
            "type": "shell",
            "problemMatcher": [],
            "command": "docker compose up -d --build --force-recreate",
            "presentation": {
                "focus": true,
                "panel": "dedicated"
            }
        }
    ]
}

Docker Compose file (modified)

@@ -8,36 +8,21 @@ services:
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock
       - /var/lib/docker/volumes:/var/lib/docker/volumes
-  telegraf-docker:
-    image: telegraf
-    container_name: telegraf-docker
-    restart: always
-    uts: host
-    user: telegraf:${DOCKER_GUID}
-    env_file:
-      - .env
-    volumes:
-      - ./telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro
-      - /var/run/docker.sock:/var/run/docker.sock
-
-  promtail:
-    image: grafana/promtail
-    container_name: promtail
-    restart: always
-    uts: host
-    env_file:
-      - .env
-    volumes:
-      - ./promtail:/etc/promtail
-      - /var/run/docker.sock:/var/run/docker.sock
-      - /var/log:/var/log:ro
-    command:
-      - "-config.file=/etc/promtail/promtail-config.yml"
-      - "-config.expand-env=true"
+  alloy:
+    image: grafana/alloy
+    container_name: grafana-alloy
+    restart: unless-stopped
+    volumes:
+      - ./grafana-alloy/config.alloy:/etc/alloy/config.alloy
+      - /var/run/docker.sock:/var/run/docker.sock
+    command:
+      - run
+      - --server.http.listen-addr=0.0.0.0:12345
+      - --storage.path=/var/lib/alloy/data
+      - /etc/alloy/config.alloy

   watchtower:
-    image: containrrr/watchtower
+    image: nickfedor/watchtower
     container_name: watchtower
     restart: always
     volumes:
@@ -46,3 +31,16 @@ services:
       - /etc/timezone:/etc/timezone:ro
     environment:
       WATCHTOWER_SCHEDULE: "0 0 3 * * *"
+
+  docker-proxy:
+    image: ghcr.io/tecnativa/docker-socket-proxy:latest
+    container_name: docker-proxy
+    restart: unless-stopped
+    ports:
+      - "127.0.0.1:2375:2375"
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+    environment:
+      - CONTAINERS=1
+      - INFO=1
+      - PING=1
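
A note on the new docker-proxy service: ghcr.io/tecnativa/docker-socket-proxy only answers the Docker API endpoints that are explicitly enabled (here CONTAINERS, INFO and PING, all read-only), and the port is published on the host loopback only. Below is a minimal sketch of how a consumer might use it instead of bind-mounting the socket directly; the some-monitor service and its image are hypothetical placeholders and are not part of this change.

# Hypothetical consumer of docker-proxy (illustration only, not in this diff).
# Anything that honors DOCKER_HOST can query the proxy over the Compose
# network instead of mounting /var/run/docker.sock itself.
services:
  some-monitor:
    image: example/some-monitor:latest   # placeholder image
    restart: unless-stopped
    environment:
      # Only the endpoints enabled on docker-proxy (CONTAINERS, INFO, PING)
      # will respond; everything else is rejected by the proxy.
      DOCKER_HOST: tcp://docker-proxy:2375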

grafana-alloy/config.alloy (new file, +49)

@@ -0,0 +1,49 @@
// https://grafana.com/docs/alloy/latest/monitor/monitor-docker-containers/
prometheus.exporter.cadvisor "docker_metrics" {
  docker_host = "unix:///var/run/docker.sock"
  storage_duration = "5m"
}

prometheus.scrape "scraper" {
  targets = prometheus.exporter.cadvisor.docker_metrics.targets
  forward_to = [prometheus.remote_write.default.receiver]
  scrape_interval = "10s"
}

prometheus.remote_write "default" {
  endpoint {
    url = "http://prometheus:9090/api/v1/write"
  }
}

discovery.docker "linux" {
  host = "unix:///var/run/docker.sock"
}

discovery.relabel "logs_integrations_docker" {
  targets = []

  rule {
    source_labels = ["__meta_docker_container_name"]
    regex = "/(.*)"
    target_label = "service_name"
  }
}

loki.source.docker "default" {
  host = "unix:///var/run/docker.sock"
  targets = discovery.docker.linux.targets
  labels = {"platform" = "docker"}
  relabel_rules = discovery.relabel.logs_integrations_docker.rules
  forward_to = [loki.write.default.receiver]
}

loki.write "default" {
  endpoint {
    url = "http://loki:3100/loki/api/v1/push"
  }
  external_labels = {}
}
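
For context on the metrics half of this config: prometheus.remote_write pushes samples to Prometheus's /api/v1/write endpoint, which Prometheus only serves when its remote-write receiver is enabled. A minimal sketch of the corresponding Prometheus service follows, assuming it lives in the same Compose stack; the service definition below is illustrative and not part of this compare view.

# Illustrative Prometheus service (assumption, not shown in this diff).
# The --web.enable-remote-write-receiver flag is what lets Alloy's
# prometheus.remote_write component push to /api/v1/write.
services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    restart: unless-stopped
    command:
      - --config.file=/etc/prometheus/prometheus.yml
      - --web.enable-remote-write-receiver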

promtail/promtail-config.yml (deleted file, -71)

@@ -1,71 +0,0 @@
server:
  http_listen_port: 9080
  grpc_listen_port: 0

positions:
  filename: /tmp/positions.yaml

clients:
  - url: http://${LOKI_HOST:-loki}:3100/loki/api/v1/push

scrape_configs:
  - job_name: flog_scrape
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        refresh_interval: 5s
    relabel_configs:
      - source_labels: ['__meta_docker_container_name']
        regex: '/(.*)'
        target_label: 'container'
      - source_labels: ['__meta_docker_container_label_com_docker_compose_oneoff']
        target_label: oneoff
      - source_labels: ['__meta_docker_container_label_com_docker_compose_project_config_files']
        target_label: compose_file
      - source_labels: ['__meta_docker_container_label_com_docker_compose_project']
        target_label: project_name
      - source_labels: ['__meta_docker_container_label_com_docker_compose_service']
        target_label: service
      - target_label: 'host'
        replacement: '${HOSTNAME}'
    pipeline_stages:
      # - match:
      #     selector: '{container="appdaemon"}'
      #     stages:
      #       - regex:
      #           expression: "(?P<date>\\\\d{4}-\\\\d{2}-\\\\d{2}) (?P<time>\\\\d{2}:\\\\d{2}:\\\\d{2} [AP)]M) (?P<level>\\\\w+) (?P<app>\\\\w+): (?P<msg>.*?)$"
      #       - labels:
      #           level:
      #           app:
      #           msg:
      - match:
          selector: '{container="zigbee2mqtt"} |= "MQTT publish"'
          stages:
            - regex:
                expression: "topic '(?P<topic>zigbee2mqtt\/(?P<device>[^\/]+).*?)', payload '(?P<payload>.*?)'$"
            - labels:
                topic:
                device:
                payload:
      - match:
          selector: '{topic=~".*action$"}'
          stages:
            - labels:
                action: "payload"
      - match:
          selector: '{container="zigbee2mqtt"} |= "occupancy"'
          stages:
            - json:
                expressions:
                  occupancy: occupancy
                source: payload
            - labels:
                occupancy:
      - match:
          selector: '{container="zigbee2mqtt"} |= "contact"'
          stages:
            - json:
                expressions:
                  contact: contact
                source: payload
            - labels:
                contact:

telegraf/telegraf.conf (modified)

@@ -80,7 +80,9 @@
   ## Multiple URLs can be specified for a single cluster, only ONE of the
   ## urls will be written to each interval.
   ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
-  urls = ["http://${INFLUX_HOST:-influxdb}:8086"]
+  urls = ["https://influxdb.john-stream.com"]
+  tls_enable = true
+  tls_server_name = "influxdb.john-stream.com"

   ## Token for authentication.
   token = "$INFLUX_WRITE_TOKEN"
@@ -103,7 +105,6 @@
   ## Additional HTTP headers
   # http_headers = {"X-Special-Header" = "Special-Value"}
-
   ## HTTP Proxy override, if unset values the standard proxy environment
   ## variables are consulted to determine which proxy, if any, should be used.
   # http_proxy = "http://corporate.proxy:3128"
@@ -136,11 +137,6 @@
   ## configuring in multiple Swarm managers results in duplication of metrics.
   gather_services = false

-  ## Only collect metrics for these containers. Values will be appended to
-  ## container_name_include.
-  ## Deprecated (1.4.0), use container_name_include
-  # container_names = []
-
   ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
   source_tag = false
@@ -155,27 +151,19 @@
   # container_state_include = []
   # container_state_exclude = []

+  ## Objects to include for disk usage query
+  ## Allowed values are "container", "image", "volume"
+  ## When empty disk usage is excluded
+  storage_objects = ["container"]
+
   ## Timeout for docker list, info, and stats commands
   timeout = "5s"

-  ## Whether to report for each container per-device blkio (8:0, 8:1...),
-  ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
-  ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
-  ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
-  ## is honored.
-  # perdevice = true
-
   ## Specifies for which classes a per-device metric should be issued
   ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
   ## Please note that this setting has no effect if 'perdevice' is set to 'true'
   perdevice_include = ["cpu", "blkio", "network"]

-  ## Whether to report for each container total blkio and network stats or not.
-  ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
-  ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
-  ## is honored.
-  total = false
-
   ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values.
   ## Possible values are 'cpu', 'blkio' and 'network'
   ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
@@ -188,7 +176,7 @@
   docker_label_exclude = []

   ## Which environment variables should we use as a tag
-  tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+  tag_env = []

   ## Optional TLS Config
   # tls_ca = "/etc/telegraf/ca.pem"
@@ -196,3 +184,6 @@
   # tls_key = "/etc/telegraf/key.pem"
   ## Use TLS but skip chain & host verification
   # insecure_skip_verify = false
+
+  # Needed to silence a warning, but it's irrelevant for me
+  # skip_processors_after_aggregators = true