Unable to get minio pod working #160

@localops-root

Description

I'm trying to install Lago on my minikube setup, which is given 10 cores and 12 GB of RAM from my local Docker engine on my Mac (ARM).
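
For completeness, the cluster was created with roughly the following command (the exact flags are approximate, but the driver and sizing match the setup above):

minikube start --driver=docker --cpus=10 --memory=12g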

I'm installing the chart like this:

helm upgrade lago lago/lago --install -f values.yaml --version 1.20.4 -n lago --create-namespace

with values:

version: 1.20.2

# Required: Set the URLs for your API and Frontend services
# Replace these placeholders with the actual domain names.
apiUrl: "billing-api.appenv.example"   # Example: https://api.mydomain.dev
frontUrl: "billing.appenv.example"   # Example: https://app.mydomain.dev

# Only for Development, Staging or PoC, you should use a managed Redis instance for Production
redis:
  enabled: true
  image:
    tag: 6.2.14
  replica:
    replicaCount: 0
  auth:
    enabled: false
  master:
    service:
      ports:
        redis: 6379
# Only for Development, Staging or PoC, you should use a managed PostgreSQL instance for Production
postgresql:
  enabled: true

global:
  # Define your Lago Premium License
  # license:
  # If you use a managed PG instance
  # Should respect this format postgresql://USER:PASSWORD@HOST:PORT/DATABASE_NAME
  # databaseUrl:
  # Should respect standard redis URL format redis://..., redis+sentinel://...
  # redisUrl:

  # If you wish to provide an existing secret with credentials in, then you can do so here.
  # The following fields are required:
  #  - databaseUrl: <SEE FORMAT ABOVE>
  #  - redisUrl:
  #  - awsS3AccessKeyId:
  #  - awsS3SecretAccessKey:
  #  - smtpUsername:
  #  - smtpPassword:
  # existingSecret: "lago-credentials"
  # Not required if using existingSecret
  postgresql:
    auth:
      username: lago
      password: lago
      database: lago
    service:
      ports:
        postgresql: 5432

  # You can disable segment tracking for Lago's internal purpose
  segment:
    enabled: true
  s3:
    enabled: false
    # accessKeyId and secretAccessKey are not required here if using existingSecret
    # accessKeyId: "<your-aws-access-key>"
    # secretAccessKey: "<your-aws-secret-key>"
    # bucket: "<your-aws-bucket>"
    # region: "<your-aws-region>"
    # endpoint: "https://s3.<region>.amazonaws.com" # Leave empty for default AWS S3 endpoint

  smtp:
    # username and password are not required here if using existingSecret
    enabled: false
    # address:
    # username:
    # password:
    # port:
    # fromEmail:
  newRelic:
    enabled: false
    # key:

  # You can disable Lago's signup
  signup:
    enabled: false

  ingress:
    enabled: false
    frontHostname: billing.appenv.example
    apiHostname: billing-api.appenv.example
    className:
    annotations: {}

  networkPolicy:
    enabled: false
    egress: []
    ingress: []

  # serviceAccountName:

  # If kubectlVersion is not supplied it will be inferred using the version of the cluster that Lago is deployed on.
  kubectlVersion: "1.30"

front:
  replicas: 1
  service:
    port: 80
  resources:
    requests:
      memory: 512Mi
      cpu: "200m"
  podAnnotations: {}
  podLabels: {}
  extraEnv: {}
  tolerations: []
  nodeSelector: {}
  affinity: {}

api:
  replicas: 1
  service:
    port: 3000
  rails:
    maxThreads: 10
    webConcurrency: 4
    env: "production"
    logStdout: true
    logLevel: error
  sidekiqWeb:
    enabled: true
  resources:
    requests:
      memory: 1Gi
      cpu: "1000m"
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 3
    targetCPUUtilizationPercentage: 80
  volumes:
    accessModes: ReadWriteOnce
    storage: "10Gi"
  extraEnv: {}
  podAnnotations: {}
  podLabels: {}
  tolerations: []
  nodeSelector: {}
  affinity: {}

worker:
  replicas: 1
  rails:
    sidekiqConcurrency: 20
    env: "production"
    logStdout: true
    logLevel: error
  resources:
    requests:
      memory: 1Gi
      cpu: "500m"
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 3
    targetCPUUtilizationPercentage: 80
  podAnnotations: {}
  podLabels: {}
  extraEnv: {}
  livenessProbe:
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 3
  tolerations: []
  nodeSelector: {}
  affinity: {}

eventsWorker:
  enabled: true
  replicas: 1
  rails:
    sidekiqConcurrency: 20
    env: "production"
    logStdout: true
    logLevel: error
  resources:
    requests:
      memory: 1Gi
      cpu: "500m"
  podAnnotations: {}
  podLabels: {}
  extraEnv: {}
  livenessProbe:
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 3
  tolerations: []
  nodeSelector: {}
  affinity: {}

clockWorker:
  enabled: true
  replicas: 1
  rails:
    sidekiqConcurrency: 20
    env: "production"
    logStdout: true
    logLevel: error
  resources:
    requests:
      memory: 1Gi
      cpu: "500m"
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 3
    targetCPUUtilizationPercentage: 80
  podAnnotations: {}
  podLabels: {}
  extraEnv: {}
  livenessProbe:
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 3
  tolerations: []
  nodeSelector: {}
  affinity: {}

clock:
  replicas: 1
  rails:
    env: "production"
    logStdout: true
    logLevel: info
  resources:
    requests:
      memory: 256Mi
      cpu: "100m"
  podAnnotations: {}
  podLabels: {}
  extraEnv: {}
  tolerations: []
  nodeSelector: {}
  affinity: {}

pdf:
  replicas: 1
  service:
    port: 3001
  resources:
    requests:
      memory: 2Gi
      cpu: "1000m"
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 3
    targetCPUUtilizationPercentage: 80
  podAnnotations: {}
  podLabels: {}
  extraEnv: {}
  tolerations: []
  nodeSelector: {}
  affinity: {}

billingWorker:
  enabled: true
  replicas: 1
  rails:
    sidekiqConcurrency: 10
    env: "production"
    logStdout: true
    logLevel: info
  resources:
    requests:
      memory: 1Gi
      cpu: "500m"
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 3
    targetCPUUtilizationPercentage: 100
  podAnnotations: {}
  podLabels: {}
  extraEnv: {}
  livenessProbe:
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 3
  tolerations: []
  nodeSelector: {}
  affinity: {}

pdfWorker:
  enabled: true
  replicas: 1
  rails:
    sidekiqConcurrency: 10
    env: "production"
    logStdout: true
    logLevel: info
  resources:
    requests:
      memory: 1Gi
      cpu: "500m"
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 3
    targetCPUUtilizationPercentage: 80
  podAnnotations: {}
  podLabels: {}
  extraEnv: {}
  livenessProbe:
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 3
  tolerations: []
  nodeSelector: {}
  affinity: {}

webhookWorker:
  enabled: true
  replicas: 1
  rails:
    sidekiqConcurrency: 20
    env: "production"
    logStdout: true
    logLevel: info
  resources:
    requests:
      memory: 1Gi
      cpu: "500m"
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 3
    targetCPUUtilizationPercentage: 80
  podAnnotations: {}
  podLabels: {}
  extraEnv: {}
  livenessProbe:
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 3
  tolerations: []
  nodeSelector: {}
  affinity: {}

job:
  migrate:
    nameOverride: null
    podAnnotations: {}
    podLabels: {}
    resources: {}
    loadSchema: false

minio:
  enabled: true
  replicas: 1
  fullnameOverride: "lago-minio"
  endpoint: "http://minio.lago.localops.run"
  resources:
    requests:
      memory: "512Mi"
      cpu: "500m"
    limits:
      memory: "1Gi"
      cpu: "1"
  persistence:
    size: 10Gi
  ingress:
    enabled: false
    ingressClassName: nginx
    labels: {}
    annotations: {}
    path: /
    hosts:
      - minio.lago.localops.run
    tls: []
  buckets:
    - name: lago-minio
      policy: none
      purge: false
      versioning: false
      objectlocking: false

MinIO and all of the workers are in CrashLoopBackOff.

Checking the MinIO logs gives this:

❯ kubectl logs pod/lago-minio-0 -n lago
ERROR Unable to prepare the list of endpoints: Incorrect number of endpoints provided [http://lago-minio-{0...0}.lago-minio-svc.lago.svc.cluster.local/export]
      > Please provide correct combination of local/remote paths
      HINT:
        For more information, please refer to https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html

I followed the MinIO configuration notes in the README; see the MinIO values above.

How do I make MinIO work? Am I missing anything here?
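
If I'm reading the error correctly, the chart renders a distributed-style endpoint list even with a single replica, and the resulting {0...0} expansion gives MinIO fewer drives than its erasure-coded (distributed) mode expects. My guess, assuming the chart bundles the upstream minio/minio chart and passes these values through (which I haven't verified), is that forcing standalone mode would avoid this, e.g.:

minio:
  enabled: true
  fullnameOverride: "lago-minio"
  # Assumption: "mode" is forwarded to the bundled MinIO chart;
  # standalone mode runs a single server with a single drive,
  # so no ellipsis endpoint list is generated.
  mode: standalone
  persistence:
    size: 10Gi

If distributed mode is required, I suppose replicas would need to be raised so the ellipsis expands to enough endpoints for erasure coding, but that seems heavy for a local minikube.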
