Unable to see application logs from AKS in Splunk #861

@newmoon88

Description

Hi,
I deployed my application on Azure AKS; it's a simple Python hello-world that logs to stdout. I used Splunk Connect for Kubernetes to ship the logs to Splunk.
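
The script does little more than the following (a simplified sketch; the file name and loop interval are placeholders rather than the real values):

```python
# hello.py -- simplified stand-in for the deployed app; it only writes to stdout
import time

while True:
    print("hello world", flush=True)  # flushed so each line reaches the container log immediately
    time.sleep(5)
```

This is the values.yaml I used when installing the chart: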

```yaml
global:
  logLevel: info
  splunk:
    hec:
      host:
      port:
      token: mytoken
      protocol: https
      endpoint: /endpoint/
      fullUrl: https://fullurl
      indexName: kube-logs
      insecureSSL:
      clientCert:
      clientKey:
      caFile:
      indexRouting:
      consume_chunk_on_4xx_errors:
  kubernetes:
    clusterName: "cluster_name"
  prometheus_enabled:
  monitoring_agent_enabled:
  monitoring_agent_index_name:
  monitoring_agent_bind_address:
  metrics:
    service:
      enabled: true
      headless: true
  serviceMonitor:
    enabled: false

    metricsPort: 24231
    interval: ""
    scrapeTimeout: "10s"

    additionalLabels: { }

splunk-kubernetes-logging:
  enabled: true
  logLevel:

  namespace:

  fluentd:
    path: /var/log/containers/*.log
    exclude_path:

  containers:
    path: /var/log
    pathDest: /var/lib/docker/containers
    logFormatType: cri
    logFormat:
    refreshInterval:
    removeBlankEvents: true
    localTime: false
    enableStatWatcher: true

  k8sMetadata:
    podLabels:
      - helloworld5-5dc446c649-zhvq5
      - app
      - k8s-app
      - release
    watch: true
    cache_ttl: 3600
    propagate_namespace_labels: false

  sourcetypePrefix: "kube"

  rbac:
    create: true
    openshiftPrivilegedSccBinding: false

  serviceAccount:
    create: true
    name:

  podSecurityPolicy:
    create: false
    apparmor_security: true
    apiGroup: policy

  splunk:
    hec:
      host:
      port:
      token:
      protocol:
      endpoint:
      fullUrl:
      indexName:
      insecureSSL:
      clientCert:
      clientKey:
      caFile:
      consume_chunk_on_4xx_errors:
      gzip_compression:
    ingest_api:
      serviceClientIdentifier:
      serviceClientSecretKey:
      tokenEndpoint:
      ingestAuthHost:
      ingestAPIHost:
      tenant:
      eventsEndpoint:
      debugIngestAPI:

  secret:
    create: true
    name:

  journalLogPath: /run/log/journal

  charEncodingUtf8: false

  customLogs:
    default:
      from:
        pod: helloworld5-5dc446c649-zhvq5
      multiline:
        firstline: /^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d+\s+\w+\s+[/
      flushInterval: 30

  logs:
    docker:
      from:
        journald:
          unit: docker.service
      sourcetype: kube:docker
    kubelet: &glog
      from:
        journald:
          unit: kubelet.service
      multiline:
        firstline: /^\w[0-1]\d[0-3]\d/
      sourcetype: kube:kubelet
    default:
      from:
        pod: helloworld5-5dc446c649-zhvq5
    etcd:
      from:
        pod: etcd-server
        container: etcd-container
    etcd-minikube:
      from:
        pod: etcd-minikube
        container: etcd
    etcd-events:
      from:
        pod: etcd-server-events
        container: etcd-container
    kube-apiserver:
      <<: *glog
      from:
        pod: kube-apiserver
      sourcetype: kube:kube-apiserver
    kube-scheduler:
      <<: *glog
      from:
        pod: kube-scheduler
      sourcetype: kube:kube-scheduler
    kube-controller-manager:
      <<: *glog
      from:
        pod: kube-controller-manager
      sourcetype: kube:kube-controller-manager
    kube-proxy:
      <<: *glog
      from:
        pod: kube-proxy
      sourcetype: kube:kube-proxy
    kubedns:
      <<: *glog
      from:
        pod: kube-dns
      sourcetype: kube:kubedns
    dnsmasq:
      <<: *glog
      from:
        pod: kube-dns
      sourcetype: kube:dnsmasq
    dns-sidecar:
      <<: *glog
      from:
        pod: kube-dns
        container: sidecar
      sourcetype: kube:kubedns-sidecar
    dns-controller:
      <<: *glog
      from:
        pod: dns-controller
      sourcetype: kube:dns-controller
    kube-dns-autoscaler:
      <<: *glog
      from:
        pod: kube-dns-autoscaler
        container: autoscaler
      sourcetype: kube:kube-dns-autoscaler
    kube-audit:
      from:
        file:
          path: /var/log/kube-apiserver-audit.log
      timestampExtraction:
        format: "%Y-%m-%dT%H:%M:%SZ"
      sourcetype: kube:apiserver-audit

  image:
    registry: docker.io
    name: splunk/fluentd-hec
    tag: 1.3.2
    pullPolicy: IfNotPresent
    usePullSecret: false
    pullSecretName:

  environmentVar:

  podAnnotations:

  extraLabels:

  resources:
    requests:
      cpu: 100m
      memory: 200Mi

  bufferChunkKeys:
    - index

  buffer:
    "@type": memory
    total_limit_size: 600m
    chunk_limit_size: 20m
    chunk_limit_records: 100000
    flush_interval: 5s
    flush_thread_count: 1
    overflow_action: block
    retry_max_times: 10
    retry_type: periodic
    retry_wait: 30

  sendAllMetadata: false

  tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule

  nodeSelector:
    kubernetes.io/os: linux

  affinity: {}

  extraVolumes: []
  extraVolumeMounts: []

  priorityClassName:

  kubernetes:
    clusterName:
    securityContext: false

  customMetadata:

  customMetadataAnnotations:

  customFilters: {}

  indexFields: []

  rollingUpdate:

splunk-kubernetes-objects:
  enabled: true
  logLevel:

  namespace:

  rbac:
    create: true

  serviceAccount:
    create: true
    name:
    usePullSecrets: false

  podSecurityPolicy:
    create: false
    apparmor_security: true
    apiGroup: policy

  priorityClassName:

  kubernetes:
    url:
    insecureSSL: false
    clientCert:
    clientKey:
    caFile:
    bearerTokenFile:
    secretDir:
    clusterName:

  objects:
    core:
      v1:
        - name: pods
        - name: namespaces
        - name: nodes
        - name: events
          mode: watch

  checkpointFile:
    name: kubernetes-objects.pos
    volume:

  splunk:
    hec:
      host:
      token:
      protocol:
      port:
      fullUrl:
      indexName:
      insecureSSL:
      clientCert:
      clientKey:
      caFile:
      caPath:
      indexRouting:
      consume_chunk_on_4xx_errors:

  secret:
    create: true
    name:

  image:
    registry: docker.io
    name: splunk/kube-objects
    tag: 1.2.2
    pullPolicy: IfNotPresent
    usePullSecret: false
    pullSecretName:

  environmentVar:

  podAnnotations:

  extraLabels:

  resources:
    requests:
      cpu: 100m
      memory: 200Mi

  buffer:
    "@type": memory
    total_limit_size: 600m
    chunk_limit_size: 20m
    chunk_limit_records: 10000
    flush_interval: 5s
    flush_thread_count: 1
    overflow_action: block
    retry_max_times: 10
    retry_type: periodic
    retry_wait: 30

  nodeSelector:
    kubernetes.io/os: linux

  tolerations: []

  affinity: {}

  customFilters: {}
  indexFields: []

splunk-kubernetes-metrics:
  enabled: true
  logLevel:

  namespace:

  rbac:
    create: true

  serviceAccount:
    create: true
    name:
    usePullSecrets: false

  podSecurityPolicy:
    create: false
    apparmor_security: true
    apiGroup: policy

  splunk:
    hec:
      host:
      port:
      token:
      protocol:
      fullUrl:
      indexName:
      insecureSSL:
      clientCert:
      clientKey:
      caFile:
      consume_chunk_on_4xx_errors:

  secret:
    create: true
    name:

  image:
    registry: docker.io
    name: splunk/k8s-metrics
    tag: 1.2.2
    pullPolicy: IfNotPresent
    usePullSecret: false
    pullsecretName:

  imageAgg:
    registry: docker.io
    name: splunk/k8s-metrics-aggr
    tag: 1.2.2
    pullPolicy: IfNotPresent
    usePullSecret: false
    pullsecretName:

  environmentVar:

  environmentVarAgg:

  podAnnotations:

  podAnnotationsAgg:

  extraLabels:

  extraLabelsAgg:

  resources:
    fluent:
      limits:
        cpu: 200m
        memory: 300Mi
      requests:
        cpu: 200m
        memory: 300Mi

  buffer:
    "@type": memory
    total_limit_size: 400m
    chunk_limit_size: 10m
    chunk_limit_records: 10000
    flush_interval: 5s
    flush_thread_count: 1
    overflow_action: block
    retry_max_times: 10
    retry_type: periodic
    retry_wait: 30

  aggregatorBuffer:
    "@type": memory
    total_limit_size: 400m
    chunk_limit_size: 10m
    chunk_limit_records: 10000
    flush_interval: 5s
    flush_thread_count: 1
    overflow_action: block
    retry_max_times: 10
    retry_type: periodic
    retry_wait: 30

  metricsInterval: 15s

  nodeSelector:
    kubernetes.io/os: linux

  aggregatorNodeSelector:
    kubernetes.io/os: linux

  tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule

  aggregatorTolerations: {}

  priorityClassName:

  priorityClassNameAgg:

  affinity: {}

  kubernetes:
    kubeletAddress:
    kubeletPort:
    kubeletPortAggregator:
    useRestClientSSL: true
    insecureSSL: false
    caFile:
    bearerTokenFile:
    secretDir:
    clusterName:

  customFilters: {}

  customFiltersAggr: {}

  rollingUpdate:
```
Unfortunately, I am not able to see my Python script's logs in Splunk. Any hint?

Thank you
Best Regards
Marta
