I’m trying to configure Grafana Alloy to resolve my Angular application’s stack traces using source maps, but the mapping isn’t working as expected. Even though the source maps are present locally, the logs Alloy ships still contain only minified stack traces.
Here’s the relevant sourcemaps block from my Alloy configuration:
sourcemaps {
  download              = true
  download_from_origins = ["http://control-center.dev.svc.cluster.local:8081"]

  location {
    path                 = "/usr/share/nginx/sourcemaps"
    minified_path_prefix = "/"
  }
}
All the source maps are available in the container under /usr/share/nginx/sourcemaps.
However, when an error is logged, Grafana still shows minified stack traces like this:
timestamp="2025-10-20 16:23:51.46 +0000 UTC" kind=exception type=Error value="Test Sentry"
stacktrace="Error: Test Sentry
at c.ngOnInit (https://cpc-tnag-wbpr7v/cc/199.29d616a1968be30c.js:1:10140)
at At (https://cpc-tnag-wbpr7v/cc/main.56a2acdf792d4034.js:129:306468)
at xn (https://cpc-tnag-wbpr7v/cc/main.56a2acdf792d4034.js:129:306631)
at Vh (https://cpc-tnag-wbpr7v/cc/main.56a2acdf792d4034.js:129:306368)
...
I was expecting Alloy to automatically map these minified lines back to the corresponding TypeScript/Angular source lines.
What I’ve verified so far:
- All source map files exist inside /usr/share/nginx/sourcemaps.
- The download_from_origins URL (http://control-center.dev.svc.cluster.local:8081) is accessible and returns the .js.map files.
- minified_path_prefix is set to /, on the assumption that it matches URLs like https://cpc-tnag-wbpr7v/cc/main.56a2acdf792d4034.js (see the sketch below).
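For reference, here is how I currently assume the location block resolves one of the frames above to a file on disk. This is only my mental model (I'm not sure whether minified_path_prefix is matched against just the URL path or against the full URL including scheme and host), and it may be exactly where I'm going wrong:

// Frame from the stack trace:
//   https://cpc-tnag-wbpr7v/cc/main.56a2acdf792d4034.js
//
// With minified_path_prefix = "/" I expect the prefix to be stripped and the
// remainder of the path joined onto "path", so Alloy would look for:
//   /usr/share/nginx/sourcemaps/cc/main.56a2acdf792d4034.js.map
location {
  path                 = "/usr/share/nginx/sourcemaps"
  minified_path_prefix = "/"
}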
Question
Is there something wrong or missing in my sourcemaps configuration?
Do I need to adjust minified_path_prefix or path to ensure Alloy correctly matches the minified JS URLs (e.g. https://cpc-tnag-wbpr7v/cc/main.56a2acdf792d4034.js) with the corresponding sourcemap files?
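In case it matters, one variant I considered (purely a guess, since I'm not sure whether the prefix needs to include the scheme and host of the minified URLs) would be to use the full URL prefix instead of /:

location {
  path                 = "/usr/share/nginx/sourcemaps"
  minified_path_prefix = "https://cpc-tnag-wbpr7v/cc/"
}

Would something along these lines be required, or should / be enough?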
Any help or examples of a working setup would be greatly appreciated.
For completeness, here is the full Alloy configuration I'm running, deployed as a Helm-templated ConfigMap:
apiVersion: v1
kind: ConfigMap
metadata:
  name: alloy-config
  labels:
    {{- include "cai-observability.labels" . | nindent 4 }}
data:
  config.alloy: |-
    // For development only!
    //livedebugging {
    //  enabled = true
    //}
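
    // Receive telemetry (logs, exceptions, traces) sent by the Grafana Faro Web SDK
    // running in the Angular app.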
    faro.receiver "faro" {
      server {
        listen_address = "0.0.0.0"
      }

      sourcemaps {
        download              = true
        download_from_origins = ["http://control-center.dev.svc.cluster.local:8081"]

        location {
          path                 = "/usr/share/nginx/sourcemaps"
          minified_path_prefix = "/"
        }
      }

      output {
        logs = [loki.process.client_logs.receiver]
        {{- if eq .Values.tempo.enabled true}}
        traces = [otelcol.exporter.otlp.traces.input]
        {{ end }}
      }
    }
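
    // Alloy's own internal logging, set to debug while troubleshooting the
    // source map resolution.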
    logging {
      level  = "debug"
      format = "logfmt"
    }

    loki.write "default" {
      endpoint {
        url = "http://cai-observability-loki-gateway/loki/api/v1/push"
      }
    }
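
    // Export the traces received from the Faro SDK to Tempo over OTLP.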
    otelcol.exporter.otlp "traces" {
      client {
        endpoint = "cai-observability-tempo"

        tls {
          insecure             = true
          insecure_skip_verify = true
        }
      }
    }

    // discovery.kubernetes allows you to find scrape targets from Kubernetes resources.
    // It watches cluster state and ensures targets are continually synced with what is currently running in your cluster.
    discovery.kubernetes "pod" {
      role = "pod"

      namespaces {
        names = [{{- range $index, $ns := .Values.mes.namespaces }}{{ $ns | quote }}{{ if ne $index (sub (len $.Values.mes.namespaces) 1)}}, {{ end }}{{- end}}]
      }
    }

    // discovery.relabel rewrites the label set of the input targets by applying one or more relabeling rules.
    // If no rules are defined, then the input targets are exported as-is.
    discovery.relabel "pod_logs" {
      targets = discovery.kubernetes.pod.targets

      // Label creation - "namespace" field from "__meta_kubernetes_namespace"
      rule {
        source_labels = ["__meta_kubernetes_namespace"]
        action        = "replace"
        target_label  = "namespace"
      }

      // Label creation - "pod" field from "__meta_kubernetes_pod_name"
      rule {
        source_labels = ["__meta_kubernetes_pod_name"]
        action        = "replace"
        target_label  = "pod"
      }

      // Label creation - "container" field from "__meta_kubernetes_pod_container_name"
      rule {
        source_labels = ["__meta_kubernetes_pod_container_name"]
        action        = "replace"
        target_label  = "container"
      }

      // Label creation - "app" field from "__meta_kubernetes_pod_label_app_kubernetes_io_name"
      rule {
        source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_name"]
        action        = "replace"
        target_label  = "app"
      }

      // Label creation - "job" field from "__meta_kubernetes_namespace" and "__meta_kubernetes_pod_container_name"
      // Concatenate values __meta_kubernetes_namespace/__meta_kubernetes_pod_container_name
      rule {
        source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"]
        action        = "replace"
        target_label  = "job"
        separator     = "/"
        replacement   = "$1"
      }

      // Label creation - "container" field from "__meta_kubernetes_pod_uid" and "__meta_kubernetes_pod_container_name"
      // Concatenate values __meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name.log
      rule {
        source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"]
        action        = "replace"
        target_label  = "__path__"
        separator     = "/"
        replacement   = "/var/log/pods/*$1/*.log"
      }

      // Note: We are not using the mes_service label at the moment.
      // Label creation - "mes-service" field from "__meta_kubernetes_pod_labelpresent_caisoft_com_mes_service"
      //rule {
      //  source_labels = ["__meta_kubernetes_pod_labelpresent_caisoft_com_mes_service"]
      //  action        = "replace"
      //  target_label  = "mes_service"
      //}
    }

    // loki.source.kubernetes tails logs from Kubernetes containers using the Kubernetes API.
    loki.source.kubernetes "pod_logs" {
      targets    = discovery.relabel.pod_logs.output
      forward_to = [loki.process.pod_logs.receiver]
    }

    loki.process "pod_logs" {
      // Current approach
      // 1. Make sure all entries have a level.
      // 2. Don't drop any level=info coming from mes-services.
      // 3. Drop web server access log entries (Nginx, Gin).
      // 4. Drop all Novu entries.
      // 5. Drop all MongoDB info entries. Can be parsed as JSON. "s: I".
      // This will give us the following benefits:
      // 1. We still have info entries for our services. This will help us find context while troubleshooting.
      // 2. We will have the ability to filter entries using the "level" label. Grafana UI doesn't always work as expected.
      // 3. Services that have a significant amount of chatter will be dropped (Novu, Mongo info).

      // Health checks to batch scheduler
      stage.drop {
        expression = "GET /api/app/ping"
      }

      // Health checks to Nginx
      stage.drop {
        expression = "kube-probe/"
      }

      // Health and metrics checks to services using golang's Gin framework
      stage.drop {
        expression = `\[GIN\].+"(?:\/healthz|\/metrics)\S*"`
      }

      // Try to extract the log level from JSON
      stage.json {
        expressions = {
          level = "not_null(\"@l\", level, s)",
        }
      }

      // Default level to "info" if it wasn't found
      // Lower case all levels
      stage.template {
        source   = "level"
        template = `{{ "{{" }} default "info" .Value | ToLower {{ "}}" }}`
      }

      stage.labels {
        values = {
          "level" = "",
        }
      }

      // Note: This is currently not used, but leaving it here for reference.
      // This is for dropping informational messages from pods containing the mes_service label.
      //stage.match {
      //  selector = "{ mes_service=\"true\" }"
      //  stage.drop {
      //    source     = "level"
      //    expression = `(?i)^(?:info|information|debug|trace|i|d[1-5])$`
      //  }
      //}

      // Drop all entries from Novu
      stage.drop {
        source     = "app"
        expression = `^novu-\S+$`
      }

      // Drop all info entries from MongoDB where "s: I"
      stage.match {
        selector = "{ app=\"mongodb\" }"

        stage.drop {
          source     = "level"
          expression = `^i$`
        }
      }

      // Note: Not using mes_service label at this time.
      //stage.label_drop {
      //  values = [ "mes_service" ]
      //}

      forward_to = [loki.relabel.pod_logs.receiver]
    }

    loki.relabel "pod_logs" {
      // Normalize levels
      // i to info
      rule {
        source_labels = ["level"]
        regex         = "i"
        replacement   = "info"
        action        = "replace"
        target_label  = "level"
      }

      // w or warn to warning
      rule {
        source_labels = ["level"]
        regex         = "(w|warn)"
        replacement   = "warning"
        action        = "replace"
        target_label  = "level"
      }

      // e or f to error
      rule {
        source_labels = ["level"]
        regex         = "(e|f)"
        replacement   = "error"
        action        = "replace"
        target_label  = "level"
      }

      // d to debug
      rule {
        source_labels = ["level"]
        regex         = "d"
        replacement   = "debug"
        action        = "replace"
        target_label  = "level"
      }

      forward_to = [loki.write.default.receiver]
    }
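
    // Parse the logfmt-encoded browser logs coming from faro.receiver and promote
    // kind, app_name, and app_namespace to Loki labels.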
    loki.process "client_logs" {
      stage.logfmt {
        mapping = { "kind" = "", "app_name" = "", "app_namespace" = "" }
      }

      stage.labels {
        values = { "kind" = "kind", "app" = "app_name", "namespace" = "app_namespace", "client" = "app_name" }
      }

      forward_to = [loki.write.default.receiver]
    }