Face Detection

License

NFFS-FD

This analytic is GPU-based; please ensure you meet the minimum requirements.

Setup for Deployment

docker-compose.yml config

version: '3.3'
services:
  # Analytic worker node: serves the face-detection HTTP API on a GPU.
  node1:
    image: "${ANALYTIC_IMAGE}"
    runtime: nvidia  # requires the NVIDIA container runtime (GPU-based analytic)
    pid: host
    network_mode: host
    cap_add:
      - SYS_PTRACE
    command:
      - httpserver
      - --listen-port
      - "${ANALYTIC_LISTEN_PORT}"
      - --listen-port-monitoring
      - "${ANALYTIC_LISTEN_PORT_MONITORING}"
      - --verbose
    healthcheck:
      # Probe via localhost: 0.0.0.0 is a bind address, not a destination,
      # and resolving it to loopback is platform-dependent.
      test: ["CMD", "curl", "-f", "http://localhost:${ANALYTIC_LISTEN_PORT}/healthcheck"]
      interval: 5s
      timeout: 3s
      retries: 20
  # Coordinator: authenticates with Nodeflux and dispatches work to the
  # nodes listed in config.yml. Starts only after node1 is healthy.
  coordinator:
    image: "${ANALYTIC_IMAGE}"
    runtime: nvidia
    pid: host
    network_mode: host
    cap_add:
      - SYS_PTRACE
    command:
      - coordinator
      - --access-key
      - "${NODEFLUX_ACCESS_KEY}"
      - --secret-key
      - "${NODEFLUX_SECRET_KEY}"
      - --deployment-key
      - "${NODEFLUX_DEPLOYMENT_KEY}"
      - --listen-port
      - "${COORDINATOR_LISTEN_PORT}"
      - --listen-port-monitoring
      - "${COORDINATOR_LISTEN_PORT_MONITORING}"
      - --config-path
      - "/etc/nodeflux/config.yml"
      - --verbose
    volumes:
      - ${PWD}/config.yml:/etc/nodeflux/config.yml
    depends_on:
      node1:
        condition: service_healthy
  # Metrics collection; scrape targets are defined in prometheus.yml.
  prometheus:
    image: prom/prometheus
    network_mode: host
    pid: host
    volumes:
      - ${PWD}/prometheus.yml:/etc/prometheus/prometheus.yml

config.yml

# Coordinator node registry: one entry per analytic worker.
version: "v1"
nodes:
  # Worker address; the port must match ANALYTIC_LISTEN_PORT (4021).
  - address: "0.0.0.0:4021"
    analytic_id: "NFFS-FD"

prometheus.yml

# Prometheus configuration for scraping the analytic stack's monitoring ports.
global:
  scrape_interval: 5s # Scrape targets every 5 seconds. Default is every 1 minute.
  evaluation_interval: 5s # Evaluate rules every 5 seconds. Default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration (disabled; uncomment a target to enable).
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets:
          - "localhost:9090"  # Prometheus itself
          - "localhost:5021"  # node1 monitoring (ANALYTIC_LISTEN_PORT_MONITORING)
          # NOTE(review): 5022-5024 and 5001/5005/5006 appear to anticipate
          # additional nodes/services not defined in this compose file, and the
          # coordinator monitoring port (5004) is not scraped — confirm intended.
          - "localhost:5022"
          - "localhost:5023"
          - "localhost:5024"
          - "localhost:5001"
          - "localhost:5005"
          - "localhost:5006"

Run the stack with Docker Compose by following these steps:

# Analytic image and Nodeflux credentials (replace the <...> placeholders
# with the keys issued for your deployment).
export ANALYTIC_IMAGE=registry.gitlab.com/nodefluxio/cloud/analytics/pipelines/face-detection-etle:0.2.1
export NODEFLUX_ACCESS_KEY=<nodeflux access key>
export NODEFLUX_SECRET_KEY=<nodeflux secret key>
export NODEFLUX_DEPLOYMENT_KEY=<nodeflux deployment key>

# Coordinator API and monitoring ports.
export COORDINATOR_LISTEN_PORT=4004
export COORDINATOR_LISTEN_PORT_MONITORING=5004

# Analytic node ports; 4021 must match the node address in config.yml and
# 5021 must match the first scrape target in prometheus.yml.
export ANALYTIC_LISTEN_PORT=4021
export ANALYTIC_LISTEN_PORT_MONITORING=5021

# Start the stack detached.
docker compose up -d --build

Last updated