chore: add support for multiple read replicas
cameri committed Apr 21, 2023
1 parent 830a782 commit 769a3f6
Showing 3 changed files with 35 additions and 18 deletions.
27 changes: 19 additions & 8 deletions docker-compose.yml
@@ -15,15 +15,26 @@ services:
       DB_MAX_POOL_SIZE: 64
       DB_ACQUIRE_CONNECTION_TIMEOUT: 60000
       # Read Replica
+      READ_REPLICAS: 1
       READ_REPLICA_ENABLED: 'false'
-      RR_DB_HOST: db
-      RR_DB_PORT: 5432
-      RR_DB_USER: nostr_ts_relay
-      RR_DB_PASSWORD: nostr_ts_relay
-      RR_DB_NAME: nostr_ts_relay
-      RR_DB_MIN_POOL_SIZE: 16
-      RR_DB_MAX_POOL_SIZE: 64
-      RR_DB_ACQUIRE_CONNECTION_TIMEOUT: 10000
+      # Read Replica No. 1
+      RR0_DB_HOST: db
+      RR0_DB_PORT: 5432
+      RR0_DB_USER: nostr_ts_relay
+      RR0_DB_PASSWORD: nostr_ts_relay
+      RR0_DB_NAME: nostr_ts_relay
+      RR0_DB_MIN_POOL_SIZE: 16
+      RR0_DB_MAX_POOL_SIZE: 64
+      RR0_DB_ACQUIRE_CONNECTION_TIMEOUT: 10000
+      # Read Replica No. 2
+      RR1_DB_HOST: db
+      RR1_DB_PORT: 5432
+      RR1_DB_USER: nostr_ts_relay
+      RR1_DB_PASSWORD: nostr_ts_relay
+      RR1_DB_NAME: nostr_ts_relay
+      RR1_DB_MIN_POOL_SIZE: 16
+      RR1_DB_MAX_POOL_SIZE: 64
+      RR1_DB_ACQUIRE_CONNECTION_TIMEOUT: 10000
       # Redis
       REDIS_HOST: nostream-cache
       REDIS_PORT: 6379
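
Each replica n covered by READ_REPLICAS is expected to come with a matching RR<n>_DB_* block, so a third replica would mean READ_REPLICAS: 3 plus RR2_* variables. A minimal TypeScript sketch of that naming convention (illustrative only, not part of the commit; replicaPrefixes is a hypothetical helper):

// Hypothetical helper: lists the env-var prefixes implied by a replica count.
// With a count of 2 it yields ['RR0_', 'RR1_'], matching the two blocks above.
const replicaPrefixes = (count: number): string[] =>
  Array.from({ length: count }, (_, i) => `RR${i}_`)

console.log(replicaPrefixes(2)) // [ 'RR0_', 'RR1_' ]
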
1 change: 1 addition & 0 deletions src/app/app.ts
@@ -83,6 +83,7 @@ export class App implements IRunnable {
       debug('starting worker')
       createWorker({
         WORKER_TYPE: 'worker',
+        WORKER_INDEX: i.toString(),
       })
     }
     logCentered(`${workerCount} client workers started`, width)
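
For context (the enclosing loop is cut off by the hunk above), createWorker appears to be called once per worker, so each worker process receives its loop index through the WORKER_INDEX environment variable. A hedged sketch of that shape, with createWorker stubbed out for illustration:

// Stub standing in for the app's own createWorker, which forks a worker
// process with these environment overrides merged into its env.
const createWorker = (env: Record<string, string>): void => {
  console.log('would spawn worker with env', env)
}

const workerCount = 2 // illustrative value
for (let i = 0; i < workerCount; i++) {
  createWorker({
    WORKER_TYPE: 'worker',
    WORKER_INDEX: i.toString(), // read back as process.env.WORKER_INDEX in the child
  })
}
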
25 changes: 15 additions & 10 deletions src/database/client.ts
@@ -46,27 +46,32 @@ const getMasterConfig = (): Knex.Config => ({
       : 60000,
 } as any)
 
-const getReadReplicaConfig = (): Knex.Config => ({
+const getReadReplicaConfigByIndex = (index: number): Knex.Config => ({
   tag: 'read-replica',
   client: 'pg',
   connection: {
-    host: process.env.RR_DB_HOST,
-    port: Number(process.env.RR_DB_PORT),
-    user: process.env.RR_DB_USER,
-    password: process.env.RR_DB_PASSWORD,
-    database: process.env.RR_DB_NAME,
+    host: process.env[`RR${index}_DB_HOST`],
+    port: Number(process.env[`RR${index}_DB_PORT`]),
+    user: process.env[`RR${index}_DB_USER`],
+    password: process.env[`RR${index}_DB_PASSWORD`],
+    database: process.env[`RR${index}_DB_NAME`],
   },
   pool: {
-    min: process.env.RR_DB_MIN_POOL_SIZE ? Number(process.env.RR_DB_MIN_POOL_SIZE) : 0,
-    max: process.env.RR_DB_MAX_POOL_SIZE ? Number(process.env.RR_DB_MAX_POOL_SIZE) : 3,
+    min: process.env[`RR${index}_DB_MIN_POOL_SIZE`] ? Number(process.env[`RR${index}_DB_MIN_POOL_SIZE`]) : 0,
+    max: process.env[`RR${index}_DB_MAX_POOL_SIZE`] ? Number(process.env[`RR${index}_DB_MAX_POOL_SIZE`]) : 3,
     idleTimeoutMillis: 60000,
     propagateCreateError: false,
-    acquireTimeoutMillis: process.env.RR_DB_ACQUIRE_CONNECTION_TIMEOUT
-      ? Number(process.env.RR_DB_ACQUIRE_CONNECTION_TIMEOUT)
+    acquireTimeoutMillis: process.env[`RR${index}_DB_ACQUIRE_CONNECTION_TIMEOUT`]
+      ? Number(process.env[`RR${index}_DB_ACQUIRE_CONNECTION_TIMEOUT`])
       : 60000,
   },
 } as any)
 
+const getReadReplicaConfig = (): Knex.Config => {
+  const readReplicaIndex = Number(process.env.WORKER_INDEX) % Number(process.env.READ_REPLICAS)
+  return getReadReplicaConfigByIndex(readReplicaIndex)
+}
+
 let writeClient: Knex
 
 export const getMasterDbClient = () => {
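
To make the selection rule concrete: a worker picks its replica by taking WORKER_INDEX modulo READ_REPLICAS, so workers wrap around the configured replicas (assuming both variables are set to numbers). A small self-contained illustration, not part of the commit:

// Same arithmetic as getReadReplicaConfig above, isolated for illustration.
const pickReplicaIndex = (workerIndex: number, readReplicas: number): number =>
  workerIndex % readReplicas

// With two replicas (RR0_* and RR1_*), four workers spread as 0, 1, 0, 1:
// workers 0 and 2 read from replica 0, workers 1 and 3 from replica 1.
console.log([0, 1, 2, 3].map((i) => pickReplicaIndex(i, 2))) // [ 0, 1, 0, 1 ]
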
