Uniting Go, Redis Cluster Mode, Postgres, and Docker into Something Meh

Recently I was tasked with implementing Redis with Cluster Mode enabled for my company’s production application. One of the nice things about working in the modern software age is that cloud providers abstract a lot of complexity away from you. In our case, we’re using AWS ElastiCache, which supports Redis Cluster Mode natively. However, that setup isn’t so easy to replicate locally.

When I set about doing this, I assumed it would be a fairly standard Google search: I figured there’d be a readily available docker-compose file supporting both Postgres and Redis Cluster Mode, but I wasn’t able to find one. As a result, my goal with this post is to create a working proof of concept that uses the tools I work with daily and will hopefully be of use to other developers.

Baseline Requirements

To be useful, this post has to demonstrate a few things. It needs to lay out the basic framework of a repository that provides:

  1. An application binary written in Go that accepts HTTP connections
  2. A Redis instance with cluster mode enabled
  3. A Postgres database
  4. Ease of use: it must be easy to spin up and down on demand, without extra dependencies or excessive overhead on a developer’s laptop

If you want to jump straight to the code, it is available here.

Pre-Development

My first tool is one I work with constantly and that should be familiar to most modern developers: docker-compose. It’s a supremely easy way to mock out infrastructure dependencies such as database and cache servers, and I’ll be using it extensively. You should too! I’ll start with just Postgres:

version: "3.5"

services:
  postgres:
    image: postgres
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: password
      POSTGRES_DB: demo
    ports:
      - "6000:5432"
    volumes:
      - pg:/var/lib/postgresql/data

volumes:
  pg:

This initial file is very simple. Note that I’m exposing Postgres on port 6000 so it doesn’t conflict with any other instances of Postgres running on my laptop.

Developing the Application

The application code starts simple as well. The first thing I’ll do is the boilerplate: initialize a Go HTTP server that connects to Postgres, mimicking the connections my production application makes.

package main

import (
	"database/sql"
	"fmt"
	"net/http"

	_ "github.com/lib/pq"
	log "github.com/sirupsen/logrus"
)

const (
	pgHost     = "localhost"
	pgPort     = 6000
	pgUser     = "postgres"
	pgPassword = "password"
	pgDBname   = "demo"
)

func main() {
	db, err := sql.Open(
		"postgres",
		fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable", pgHost, pgPort, pgUser, pgPassword, pgDBname),
	)
	if err != nil {
		log.Fatal(err)
	}
	err = db.Ping()
	if err != nil {
		log.Fatal(err)
	}
	log.Info("ping successful")
	defer db.Close()

	log.Fatal(http.ListenAndServe(":8080", nil))
}
$ go run main.go 
INFO[0000] ping successful

That was the easy part! Now for the harder part: how do we run Redis in cluster mode?

Redis Cluster Mode

I initially went down a long path of bootstrapping a cluster myself before realizing that a ready-made configuration already existed. There’s an outstanding project with a prebuilt Redis cluster, all just a Docker container away. If you’ve got the time, it’s worth checking out Grokzen’s docker-redis-cluster repository, because it made my life here a LOT easier. It allows quick spin-up and experimentation. It isn’t suitable for production, but I’m simply trying to get something working on my own laptop. Let’s try it out.

version: "3.5"

services:
  postgres:
    container_name: pg
    image: postgres
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: password
      POSTGRES_DB: demo
    ports:
      - "6000:5432"
    volumes:
      - pg:/var/lib/postgresql/data

  redis-cluster:
    container_name: redis_cluster
    image: grokzen/redis-cluster:latest
    ports:
      - "7000:7000"
      - "7001:7001"
      - "7002:7002"
      - "7003:7003"
      - "7004:7004"
      - "7005:7005"
      - "7006:7006"
      - "7007:7007"
    environment:
      - IP=0.0.0.0
    volumes:
      - redis_cluster:/data

volumes:
  pg:
  redis_cluster:
$ docker-compose up -d
Creating network "docker_redis_cluster_default" with the default driver
Creating redis ... done
Creating pg    ... done

$ docker ps
CONTAINER ID   IMAGE                          COMMAND                  CREATED         STATUS         PORTS                                                       NAMES
a8f665c6c74f   grokzen/redis-cluster:latest   "/docker-entrypoint.…"   6 seconds ago   Up 5 seconds   5000-5002/tcp, 6379/tcp, 0.0.0.0:7000-7007->7000-7007/tcp   redis
2c50b3777939   postgres                       "docker-entrypoint.s…"   6 seconds ago   Up 5 seconds   0.0.0.0:6000->5432/tcp                                      pg

Excellent! Now we just need to modify the code to connect to the Redis cluster and we should be good to go!

package main

import (
	"context"
	"database/sql"
	"fmt"
	"net/http"
	"time"

	redis "github.com/go-redis/redis/v8"
	_ "github.com/lib/pq"
	log "github.com/sirupsen/logrus"
)

const (
	pgHost     = "localhost"
	pgPort     = 6000
	pgUser     = "postgres"
	pgPassword = "password"
	pgDBname   = "demo"
)

var redisHostList []string = []string{"localhost:7000", "localhost:7001", "localhost:7002", "localhost:7003", "localhost:7004", "localhost:7005"}

var rdb *redis.ClusterClient
var db *sql.DB
var ctx context.Context

func main() {
	var err error

	db, err = sql.Open(
		"postgres",
		fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable", pgHost, pgPort, pgUser, pgPassword, pgDBname),
	)
	if err != nil {
		log.Fatal(err)
	}
	err = db.Ping()
	if err != nil {
		log.Fatal(err)
	}
	log.Info("Postgres ping successful")
	defer db.Close()

	rdb = redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: redisHostList,
	})

	ctx = context.Background()

	err = rdb.Ping(ctx).Err()
	if err != nil {
		log.Fatal(err)
	}
	log.Info("Redis Cluster Ping successful")

	http.HandleFunc("/insert", handleInsert)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
$ go run main.go
INFO[0000] Postgres ping successful                     
INFO[0000] Redis Cluster Ping successful                

That’s really all there is to it! I added a quick insert handler to validate that everything is shipshape. To begin, I created a tiny relation in Postgres with two simple columns, id and value:

CREATE TABLE t(
	id INT,
	value INT
);
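If you’d rather not create the table by hand with psql, the same thing can be done from the application at startup. The helper below is a minimal sketch and not part of the repository; it assumes the package-level db handle from main.go:

// ensureSchema creates the demo table if it doesn't already exist.
// Hypothetical helper; in the repository the table is created manually instead.
func ensureSchema(db *sql.DB) error {
	_, err := db.Exec(`CREATE TABLE IF NOT EXISTS t(
		id INT,
		value INT
	)`)
	return err
}

Calling ensureSchema(db) right after the Postgres ping would keep the demo fully self-contained.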

From there, I created a simple handler that inserts a random integer pair into both postgres and redis:

func handleInsert(w http.ResponseWriter, r *http.Request) {
	log.Info("insert")

	k := fmt.Sprintf("%d", rand.Intn(10000000))
	v := rand.Intn(10000000)

	c := rdb.Set(ctx, k, v, 0)
	if c.Err() != nil {
		fmt.Fprintf(w, "failed to insert to redis: %s", c.Err().Error())
		log.Fatal(c.Err())
	}
	log.Infof("Successfully inserted %s -> %d into redis", k, v)

	_, err := db.Exec("INSERT INTO t(id, value) VALUES ($1, $2)", k, v)
	if err != nil {
		fmt.Fprintf(w, "failed to insert to postgres: %s", err.Error())
		log.Fatal(err)
	}
	log.Infof("Successfully inserted %s -> %d into postgres", k, v)

	fmt.Fprintf(w, "successfully inserted %s -> %d into redis and postgres", k, v)
}

And we’re off to the races. After firing up the main server with go run main.go, I hit it from another terminal window:

$ for i in {1..5} ; do curl localhost:8080/insert; done
$ go run main.go 
INFO[0000] Postgres ping successful                     
INFO[0000] Redis Cluster Ping successful                
INFO[0002] Inserting into postgres and redis            
INFO[0002] Successfully inserted 7815407 -> 6256350 into redis 
INFO[0002] Successfully inserted 7815407 -> 6256350 into postgres 
INFO[0002] Inserting into postgres and redis            
INFO[0002] Successfully inserted 3255910 -> 7027863 into redis 
INFO[0002] Successfully inserted 3255910 -> 7027863 into postgres 
INFO[0002] Inserting into postgres and redis            
INFO[0002] Successfully inserted 9811666 -> 1385891 into redis 
INFO[0002] Successfully inserted 9811666 -> 1385891 into postgres 
INFO[0002] Inserting into postgres and redis            
INFO[0002] Successfully inserted 6989908 -> 6140818 into redis 
INFO[0002] Successfully inserted 6989908 -> 6140818 into postgres 
INFO[0002] Inserting into postgres and redis            
INFO[0002] Successfully inserted 2280893 -> 3153447 into redis 
INFO[0002] Successfully inserted 2280893 -> 3153447 into postgres
$ redis-cli --cluster call localhost:7000 KEYS "*"
>>> Calling KEYS *
localhost:7000: 3255910
7815407
127.0.0.1:7001: 9811666
6989908
127.0.0.1:7003: 6989908
9811666
127.0.0.1:7002: 2280893
127.0.0.1:7005: 3255910
7815407
127.0.0.1:7004: 2280893
$ PGPASSWORD=password psql -h localhost -p 6000 -U postgres -d demo -c "select * from t"
   id    |  value  
---------+---------
 7815407 | 6256350
 3255910 | 7027863
 9811666 | 1385891
 6989908 | 6140818
 2280893 | 3153447
(5 rows)

Looks good to me! Note that the redis-cli command above runs the KEYS * command on all 6 nodes of the Redis cluster. By running it on every node, we can see that the keys are distributed across the 3 master nodes and replicated across the 3 replica nodes. Awesome!
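The same inspection can be done from Go if you prefer. This is just a sketch, assuming the rdb cluster client from above; go-redis exposes ForEachMaster for per-node calls like this:

// listKeysPerMaster logs every key held by each master node in the cluster.
// Sketch for local inspection only; KEYS "*" is not something to run in production.
func listKeysPerMaster(ctx context.Context, rdb *redis.ClusterClient) error {
	return rdb.ForEachMaster(ctx, func(ctx context.Context, node *redis.Client) error {
		keys, err := node.Keys(ctx, "*").Result()
		if err != nil {
			return err
		}
		log.Infof("%s holds keys: %v", node.Options().Addr, keys)
		return nil
	})
}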

Refactor

At this point I decided to clean it up slightly to make it more presentable to the world.

package main

import (
	"context"
	"database/sql"
	"fmt"
	"math/rand"
	"net/http"
	"time"

	redis "github.com/go-redis/redis/v8"
	_ "github.com/lib/pq"
	log "github.com/sirupsen/logrus"
)

const (
	pgHost     = "localhost"
	pgPort     = 6000
	pgUser     = "postgres"
	pgPassword = "password"
	pgDBname   = "demo"
)

var redisHostList []string = []string{"localhost:7000", "localhost:7001", "localhost:7002", "localhost:7003", "localhost:7004", "localhost:7005", "localhost:7006", "localhost:7007"}

var rdb *redis.ClusterClient
var db *sql.DB
var ctx context.Context

func insertRedis(k string, v int, w http.ResponseWriter) error {
	c := rdb.Set(ctx, k, v, 0)
	if c.Err() != nil {
		fmt.Fprintf(w, "failed to insert to redis: %s", c.Err().Error())
		return c.Err()
	}

	log.Infof("Successfully inserted %s -> %d into redis", k, v)
	return nil
}

func insertPostgres(k string, v int, w http.ResponseWriter) error {
	_, err := db.Exec("INSERT INTO t(id, value) VALUES ($1, $2)", k, v)
	if err != nil {
		fmt.Fprintf(w, "failed to insert to postgres: %s", err.Error())
		return err
	}

	log.Infof("Successfully inserted %s -> %d into postgres", k, v)
	return nil
}

func handleInsert(w http.ResponseWriter, r *http.Request) {
	log.Info("Inserting into postgres and redis")
	k := fmt.Sprintf("%d", rand.Intn(10000000))
	v := rand.Intn(10000000)

	err := insertRedis(k, v, w)
	if err != nil {
		log.WithError(err).Error("failed to insert to redis")
		return
	}

	err = insertPostgres(k, v, w)
	if err != nil {
		log.WithError(err).Error("failed to insert to postgres")
		return
	}

	fmt.Fprintf(w, "successfully inserted %s -> %d into redis and postgres", k, v)
}

func initPg() {
	var err error

	db, err = sql.Open(
		"postgres",
		fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable", pgHost, pgPort, pgUser, pgPassword, pgDBname),
	)
	if err != nil {
		log.Fatal(err)
	}

	err = db.Ping()
	if err != nil {
		log.Fatal(err)
	}

	log.Info("Postgres ping successful")
}

func initRedis() {
	rdb = redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: redisHostList,
	})

	err := rdb.Ping(ctx).Err()
	if err != nil {
		log.Fatal(err)
	}

	log.Info("Redis Cluster ping successful")
}

func init() {
	rand.Seed(time.Now().UnixNano())
	ctx = context.Background()

	initPg()
	initRedis()
}

func main() {
	log.Info("app initialized")
	defer db.Close()

	http.HandleFunc("/insert", handleInsert)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
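The demo only writes data, but reading it back is just as easy and rounds out the picture. The handler below is a hypothetical addition that isn’t in the repository: it takes an id query parameter, checks the Redis cluster first, and falls back to Postgres on a miss.

// handleGet is a hypothetical read path: try Redis first, fall back to Postgres.
func handleGet(w http.ResponseWriter, r *http.Request) {
	k := r.URL.Query().Get("id")

	// Cache hit: serve the value straight from the cluster.
	if v, err := rdb.Get(ctx, k).Result(); err == nil {
		fmt.Fprintf(w, "redis: %s -> %s", k, v)
		return
	}

	// Cache miss (or error): fall back to Postgres.
	var v int
	if err := db.QueryRow("SELECT value FROM t WHERE id = $1", k).Scan(&v); err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	fmt.Fprintf(w, "postgres: %s -> %d", k, v)
}

Wiring it up would just be one more http.HandleFunc("/get", handleGet) line in main.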

Conclusion

Hopefully this post has demonstrated a quick and easy way to spin up a backend API using Go, Postgres, and Redis with Cluster Mode enabled. The code is linked here if you’d like to try it out for yourself!

Helpful Resources

  1. Check out Grokzen’s docker-redis-cluster project. This post wouldn’t have been possible without it.
  2. Redis Cluster Mode
  3. PostgreSQL
  4. Docker-Compose documentation