People Attributes Installation

How to Set Up for Deployment?

Create a dedicated folder for this installation to help organize your deployment.
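
For example, a minimal sketch (the folder name here is only an illustration):

mkdir -p people-attributes
cd people-attributes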

1. Create and set up the docker-compose.yml configuration

version: '3.3'
services:
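  # node1: analytic worker; serves the HTTP API and the /healthcheck endpoint.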
  node1:
    image: "${ANALYTIC_IMAGE}"
    runtime: nvidia
    pid: host
    network_mode: host
    cap_add:
      - SYS_PTRACE
    command: [
      httpserver,
      --listen-port, "${ANALYTIC_LISTEN_PORT}",
      --listen-port-monitoring, "${ANALYTIC_LISTEN_PORT_MONITORING}",
      --verbose,
    ]
    healthcheck:
      test: ["CMD", "curl", "-f", "http://0.0.0.0:${ANALYTIC_LISTEN_PORT}/healthcheck"]
      interval: 10s
      timeout: 3s
      retries: 30
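  # coordinator: manages the analytic nodes listed in the mounted config.yml.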
  coordinator:
    image: "${ANALYTIC_IMAGE}"
    runtime: nvidia
    pid: host
    network_mode: host
    cap_add:
      - SYS_PTRACE
    command: [
      coordinator,
      --listen-port, "${COORDINATOR_LISTEN_PORT}",
      --listen-port-monitoring, "${COORDINATOR_LISTEN_PORT_MONITORING}",
      --config-path, "/etc/nodeflux/config.yml",
      --verbose,
    ]
    volumes:
      - ${PWD}/config.yml:/etc/nodeflux/config.yml
    depends_on:
      node1:
        condition: service_healthy
  prometheus: # optional
    image: prom/prometheus
    network_mode: host
    pid: host
    volumes:
      - ${PWD}/prometheus.yml:/etc/prometheus/prometheus.yml
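
The node1 and coordinator services rely on the NVIDIA container runtime (runtime: nvidia). Before deploying, you may want to confirm the runtime is installed; a quick check, assuming the NVIDIA Container Toolkit is set up and using the public nvidia/cuda image (the tag is only an example):

docker info | grep -i runtime
docker run --rm --runtime=nvidia nvidia/cuda:11.8.0-base-ubuntu22.04 nvidia-smi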

2. Create and set up config.yml

version: "v1"
nodes:
- address: "0.0.0.0:4021"
  analytic_id: "NFFS-PA"
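
The address must match the analytic node's listen address (ANALYTIC_LISTEN_PORT=4021 in the .env file below). If the coordinator supports multiple analytic nodes, as the node1 service name suggests, the list can presumably be extended with one entry per node; a hedged sketch, assuming a second node listening on port 4022:

version: "v1"
nodes:
- address: "0.0.0.0:4021"
  analytic_id: "NFFS-PA"
- address: "0.0.0.0:4022" # hypothetical second node
  analytic_id: "NFFS-PA"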

3. Set up prometheus.yml (optional)

# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      # These targets should match the monitoring ports exported in your .env file
      # (e.g. 5021 for the analytic node and 5004 for the coordinator).
      - targets:
          - "localhost:9090"
          - "localhost:5021"
          - "localhost:5022"
          - "localhost:5023"
          - "localhost:5024"
          - "localhost:5001"
          - "localhost:5005"
          - "localhost:5006"

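You can validate the Prometheus configuration before deploying with promtool, which ships inside the prom/prometheus image; a sketch:

docker run --rm -v ${PWD}/prometheus.yml:/etc/prometheus/prometheus.yml \
  --entrypoint promtool prom/prometheus check config /etc/prometheus/prometheus.yml
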
4. Create a .env file to store the analytic image and port addresses

If you use an NVIDIA GPU with the Ampere or Ada Lovelace architecture, use the image below:

export ANALYTIC_IMAGE=registry.gitlab.com/nodefluxio/cloud/analytics/pipelines/people-attribute-pipeline:1.5.0-cuda11.8

export COORDINATOR_LISTEN_PORT=4004
export COORDINATOR_LISTEN_PORT_MONITORING=5004

export ANALYTIC_LISTEN_PORT=4021
export ANALYTIC_LISTEN_PORT_MONITORING=5021

If you use an older GPU architecture, use the image below:

export ANALYTIC_IMAGE=registry.gitlab.com/nodefluxio/cloud/analytics/pipelines/people-attribute-pipeline:1.5.0-cuda10.2

export COORDINATOR_LISTEN_PORT=4004
export COORDINATOR_LISTEN_PORT_MONITORING=5004

export ANALYTIC_LISTEN_PORT=4021
export ANALYTIC_LISTEN_PORT_MONITORING=5021
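
Note that docker-compose substitutes the ${...} variables from the shell environment, and automatic .env handling of export-prefixed lines varies between Compose versions. If the variables are not picked up, source the file first and verify the rendered configuration:

source .env
docker-compose config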

5. Run the images

docker-compose up -d --build
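
Once the containers are up, you can check their status and probe the analytic node (ports assume the .env values above):

docker-compose ps
curl -f http://0.0.0.0:4021/healthcheck
curl http://localhost:5021/metrics # metrics endpoint scraped by Prometheus (path assumed from the default metrics_path)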
