diff --git a/packages/cli-common/src/commands/base-command.ts b/packages/cli-common/src/commands/base-command.ts
index 29e05f0d..453c8c1a 100644
--- a/packages/cli-common/src/commands/base-command.ts
+++ b/packages/cli-common/src/commands/base-command.ts
@@ -3,7 +3,8 @@ import {
   LogLevel, Logger, logLevels, ComposeModel, ProcessError, telemetryEmitter,
 } from '@preevy/core'
 import { asyncReduce } from 'iter-tools-es'
-import { ParsingToken } from '@oclif/core/lib/interfaces/parser.js'
+import { ArgOutput, FlagOutput, Input, ParserOutput, ParsingToken } from '@oclif/core/lib/interfaces/parser.js'
+import { mergeWith } from 'lodash-es'
 import { commandLogger } from '../lib/log.js'
 import { composeFlags, pluginFlags } from '../lib/common-flags/index.js'
 import { PreevyConfig } from '../../../core/src/config.js'
@@ -90,9 +91,14 @@ abstract class BaseCommand<T extends typeof Command=typeof Command> extends Comm
     return result
   }
 
-  public async init(): Promise<void> {
-    await super.init()
-    const { args, flags, raw } = await this.parse({
+  protected async reparse<
+    F extends FlagOutput,
+    B extends FlagOutput,
+    A extends ArgOutput>(
+    options?: Input<F, B, A>,
+    argv?: string[],
+  ): Promise<ParserOutput<F, B, A>> {
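+    // re-run oclif's parser, deep-merging any given flag/arg definitions over this command's own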
+    return await this.parse(mergeWith({
       flags: this.ctor.flags,
       baseFlags: {
         ...this.ctor.baseFlags,
@@ -100,7 +106,12 @@ abstract class BaseCommand<T extends typeof Command=typeof Command> extends Comm
       },
       args: this.ctor.args,
       strict: false,
-    })
+    }, options, argv))
+  }
+
+  public async init(): Promise<void> {
+    await super.init()
+    const { args, flags, raw } = await this.reparse()
     this.args = args as Args<T>
     this.flags = flags as Flags<T>
     if (this.flags.debug) {
diff --git a/packages/cli/.eslintignore b/packages/cli/.eslintignore
index cb8f0ccc..1857e62b 100644
--- a/packages/cli/.eslintignore
+++ b/packages/cli/.eslintignore
@@ -1,3 +1,4 @@
 /dist
 node_modules
 /scripts
+/tmp
\ No newline at end of file
diff --git a/packages/cli/src/commands/down.ts b/packages/cli/src/commands/down.ts
index 1cc32caa..e8e703b8 100644
--- a/packages/cli/src/commands/down.ts
+++ b/packages/cli/src/commands/down.ts
@@ -1,10 +1,10 @@
 import { Flags } from '@oclif/core'
 import { findEnvId, machineResourceType, withSpinner } from '@preevy/core'
-import DriverCommand from '../driver-command.js'
+import MachineCreationDriverCommand from '../machine-creation-driver-command.js'
 import { envIdFlags } from '../common-flags.js'
 
 // eslint-disable-next-line no-use-before-define
-export default class Down extends DriverCommand<typeof Down> {
+export default class Down extends MachineCreationDriverCommand<typeof Down> {
   static description = 'Delete preview environments'
 
   static flags = {
@@ -28,6 +28,7 @@ export default class Down extends DriverCommand<typeof Down> {
     const log = this.logger
     const { flags } = this
     const driver = await this.driver()
+    const machineCreationDriver = await this.machineCreationDriver()
 
     const envId = await findEnvId({
       log,
@@ -45,7 +46,10 @@ export default class Down extends DriverCommand<typeof Down> {
     }
 
     await withSpinner(async () => {
-      await driver.deleteResources(flags.wait, { type: machineResourceType, providerId: machine.providerId })
+      await machineCreationDriver.deleteResources(
+        flags.wait,
+        { type: machineResourceType, providerId: machine.providerId },
+      )
     }, { opPrefix: `Deleting ${driver.friendlyName} machine ${machine.providerId} for environment ${envId}` })
 
     await Promise.all(
diff --git a/packages/cli/src/commands/init.ts b/packages/cli/src/commands/init.ts
index 934ff50e..f26e581d 100644
--- a/packages/cli/src/commands/init.ts
+++ b/packages/cli/src/commands/init.ts
@@ -55,7 +55,7 @@ export default class Init extends BaseCommand {
 
     const driver = await chooseDriver()
     const driverStatic = machineDrivers[driver]
-    const driverFlags = await driverStatic.inquireFlags()
+    const driverFlags = await driverStatic.inquireFlags({ log: this.logger })
 
     ux.info(text.recommendation('To use Preevy in a CI flow, select a remote storage for your profile.'))
     const locationType = await chooseFsType(({ driver }))
diff --git a/packages/cli/src/commands/purge.ts b/packages/cli/src/commands/purge.ts
index e7fad27e..3cfbe119 100644
--- a/packages/cli/src/commands/purge.ts
+++ b/packages/cli/src/commands/purge.ts
@@ -3,7 +3,7 @@ import { Flags, ux } from '@oclif/core'
 import { asyncFilter, asyncToArray } from 'iter-tools-es'
 import { groupBy, partition } from 'lodash-es'
 import { MachineResource, isPartialMachine, machineResourceType } from '@preevy/core'
-import DriverCommand from '../driver-command.js'
+import MachineCreationDriverCommand from '../machine-creation-driver-command.js'
 import { carefulBooleanPrompt } from '../prompt.js'
 
 const isMachineResource = (r: { type: string }): r is MachineResource => r.type === machineResourceType
@@ -35,7 +35,7 @@ const confirmPurge = async (
 }
 
 // eslint-disable-next-line no-use-before-define
-export default class Purge extends DriverCommand<typeof Purge> {
+export default class Purge extends MachineCreationDriverCommand<typeof Purge> {
   static description = 'Delete all cloud provider machines and potentially other resources'
 
   static flags = {
@@ -68,6 +68,7 @@ export default class Purge extends DriverCommand<typeof Purge> {
     const { flags } = this
 
     const driver = await this.driver()
+    const creationDriver = await this.machineCreationDriver()
     const resourcePlurals: Record<string, string> = { [machineResourceType]: 'machines', ...driver.resourcePlurals }
     const driverResourceTypes = new Set(Object.keys(resourcePlurals))
 
@@ -83,7 +84,7 @@ export default class Purge extends DriverCommand<typeof Purge> {
     const allResources = await asyncToArray(
       asyncFilter(
         ({ type }) => flags.all || flags.type.includes(type),
-        driver.listDeletableResources(),
+        creationDriver.listDeletableResources(),
       ),
     )
 
@@ -106,7 +107,7 @@ export default class Purge extends DriverCommand<typeof Purge> {
       return undefined
     }
 
-    await driver.deleteResources(flags.wait, ...allResources)
+    await creationDriver.deleteResources(flags.wait, ...allResources)
 
     if (flags.json) {
       return allResources
diff --git a/packages/cli/src/driver-command.ts b/packages/cli/src/driver-command.ts
index 06c6d236..28bedcda 100644
--- a/packages/cli/src/driver-command.ts
+++ b/packages/cli/src/driver-command.ts
@@ -1,7 +1,8 @@
 import { Command, Flags, Interfaces } from '@oclif/core'
 import { MachineConnection, MachineDriver, isPartialMachine, profileStore } from '@preevy/core'
-import { pickBy } from 'lodash-es'
-import { DriverFlags, DriverName, FlagType, flagsForAllDrivers, machineDrivers, removeDriverPrefix } from './drivers.js'
+import { mapValues, pickBy } from 'lodash-es'
+import { Flag } from '@oclif/core/lib/interfaces'
+import { DriverFlags, DriverName, FlagType, addDriverPrefix, flagsForAllDrivers, machineDrivers, removeDriverPrefix } from './drivers.js'
 import ProfileCommand from './profile-command.js'
 
 // eslint-disable-next-line no-use-before-define
@@ -40,18 +41,25 @@ abstract class DriverCommand<T extends typeof Command> extends ProfileCommand<T>
     driver: Name,
     type: Type
   ): Promise<DriverFlags<DriverName, Type>> {
-    const driverFlagNames = Object.keys(machineDrivers[driver][type])
-    const flagDefaults = pickBy(
-      {
-        ...await profileStore(this.store).ref.defaultDriverFlags(driver),
-        ...this.preevyConfig?.drivers?.[driver] ?? {},
-      },
-      (_v, k) => driverFlagNames.includes(k),
-    ) as DriverFlags<DriverName, Type>
-    return {
-      ...flagDefaults,
-      ...removeDriverPrefix<DriverFlags<DriverName, Type>>(driver, this.flags),
+    const driverFlags = machineDrivers[driver][type]
+    const flagDefaults = {
+      ...await profileStore(this.store).ref.defaultDriverFlags(driver),
+      ...this.preevyConfig?.drivers?.[driver] ?? {},
     }
+
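+    // seed the driver flag definitions with the stored defaults, then re-parse so values passed on the command line still take precedence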
+    const flagDefsWithDefaults = addDriverPrefix(driver, mapValues(
+      driverFlags,
+      (v: Flag<unknown>, k) => Object.assign(v, { default: flagDefaults[k] ?? v.default }),
+    )) as Record<string, Flag<unknown>>
+
+    const { flags: parsedFlags } = await this.reparse({ flags: flagDefsWithDefaults })
+
+    const driverFlagNamesWithPrefix = new Set(Object.keys(driverFlags).map(k => `${driver}-${k}`))
+
+    const parsedDriverFlags = pickBy(parsedFlags, (_v, k) => driverFlagNamesWithPrefix.has(k))
+
+    const result = removeDriverPrefix(driver, parsedDriverFlags) as DriverFlags<DriverName, Type>
+    return result
   }
 
   #driver: MachineDriver | undefined
diff --git a/packages/cli/src/hooks/init/load-plugins.ts b/packages/cli/src/hooks/init/load-plugins.ts
index 3c332ad4..4e12bd96 100644
--- a/packages/cli/src/hooks/init/load-plugins.ts
+++ b/packages/cli/src/hooks/init/load-plugins.ts
@@ -7,7 +7,7 @@ const wrappedHook: OclifHook<'init'> = async function wrappedHook(...args) {
     await initHook.call(this, ...args)
   } catch (e) {
     // eslint-disable-next-line no-console
-    console.warn(`warning: failed to init context: ${e}`)
+    console.warn(`warning: failed to init context: ${(e as Error).stack || e}`)
     telemetryEmitter().capture('plugin-init-error', { error: e })
     await telemetryEmitter().flush()
     process.exit(1)
diff --git a/packages/core/src/driver/driver.ts b/packages/core/src/driver/driver.ts
index 5cd7d2dc..bd94e838 100644
--- a/packages/core/src/driver/driver.ts
+++ b/packages/core/src/driver/driver.ts
@@ -17,7 +17,6 @@ export type MachineConnection = Disposable & {
 
 export type MachineDriver<
   Machine extends MachineBase = MachineBase,
-  ResourceType extends string = string
 > = {
   customizationScripts?: string[]
   friendlyName: string
@@ -33,8 +32,6 @@ export type MachineDriver<
   ) => Promise<{ code: number } | { signal: string }>
 
   listMachines: () => AsyncIterableIterator<Machine | PartialMachine>
-  listDeletableResources: () => AsyncIterableIterator<Resource<ResourceType>>
-  deleteResources: (wait: boolean, ...resource: Resource<string>[]) => Promise<void>
   machineStatusCommand: (machine: MachineBase) => Promise<MachineStatusCommand | undefined>
 }
 
@@ -43,7 +40,10 @@ export type MachineCreationResult<Machine extends MachineBase = MachineBase> = {
   result: Promise<{ machine: Machine; connection: MachineConnection }>
 }
 
-export type MachineCreationDriver<Machine extends MachineBase = MachineBase> = {
+export type MachineCreationDriver<
+  Machine extends MachineBase = MachineBase,
+  ResourceType extends string = string,
+> = {
   metadata: Record<string, unknown>
 
   createMachine: (args: {
@@ -54,24 +54,30 @@ export type MachineCreationDriver<Machine extends MachineBase = MachineBase> = {
   getMachineAndSpecDiff: (
     args: { envId: string },
   ) => Promise<(Machine & { specDiff: SpecDiffItem[] }) | PartialMachine | undefined>
+
+  listDeletableResources: () => AsyncIterableIterator<Resource<ResourceType>>
+  deleteResources: (wait: boolean, ...resource: Resource<string>[]) => Promise<void>
 }
 
 export type MachineDriverFactory<
   Flags,
   Machine extends MachineBase = MachineBase,
-  ResourceType extends string = string
 > = ({ flags, profile, store, log, debug }: {
   flags: Flags
   profile: Profile
   store: Store
   log: Logger
   debug: boolean
-}) => MachineDriver<Machine, ResourceType>
+}) => MachineDriver<Machine>
 
-export type MachineCreationDriverFactory<Flags, Machine extends MachineBase> = ({ flags, profile, store, log, debug }: {
+export type MachineCreationDriverFactory<
+  Flags,
+  Machine extends MachineBase,
+  ResourceType extends string = string,
+> = ({ flags, profile, store, log, debug }: {
   flags: Flags
   profile: Profile
   store: Store
   log: Logger
   debug: boolean
-}) => MachineCreationDriver<Machine>
+}) => MachineCreationDriver<Machine, ResourceType>
diff --git a/packages/core/src/driver/machine-operations.ts b/packages/core/src/driver/machine-operations.ts
index dad3ac29..14199731 100644
--- a/packages/core/src/driver/machine-operations.ts
+++ b/packages/core/src/driver/machine-operations.ts
@@ -53,7 +53,10 @@ const ensureBareMachine = async ({
   return await withSpinner(async spinner => {
     if (existingMachine && recreating) {
       spinner.text = 'Deleting machine'
-      await machineDriver.deleteResources(false, { type: machineResourceType, providerId: existingMachine.providerId })
+      await machineCreationDriver.deleteResources(
+        false,
+        { type: machineResourceType, providerId: existingMachine.providerId },
+      )
     }
     spinner.text = 'Checking for existing snapshot'
     const machineCreation = await machineCreationDriver.createMachine({ envId })
diff --git a/packages/driver-azure/src/driver/index.ts b/packages/driver-azure/src/driver/index.ts
index 07686c45..59153cfc 100644
--- a/packages/driver-azure/src/driver/index.ts
+++ b/packages/driver-azure/src/driver/index.ts
@@ -87,48 +87,32 @@ const machineFromVm = (
   }
 }
 
+const listMachines = ({ client: cl }: { client: Client }) => asyncMap(
+  rg => cl.getInstanceByRg(rg.name as string).then(vm => {
+    if (vm) {
+      return machineFromVm(vm)
+    }
+    return {
+      type: machineResourceType,
+      providerId: rg.name as string,
+      envId: rg.tags?.[AzureCustomTags.ENV_ID] as string,
+      error: 'VM creation is incomplete',
+    }
+  }),
+  cl.listResourceGroups()
+)
+
 const machineDriver = (
   { store, client: cl }: DriverContext,
-): MachineDriver<SshMachine, ResourceType> => {
-  const listMachines = () => asyncMap(
-    rg => cl.getInstanceByRg(rg.name as string).then(vm => {
-      if (vm) {
-        return machineFromVm(vm)
-      }
-      return {
-        type: machineResourceType,
-        providerId: rg.name as string,
-        envId: rg.tags?.[AzureCustomTags.ENV_ID] as string,
-        error: 'VM creation is incomplete',
-      }
-    }),
-    cl.listResourceGroups()
-  )
-
-  return ({
-    customizationScripts: CUSTOMIZE_BARE_MACHINE,
-    friendlyName: 'Microsoft Azure',
-    getMachine: async ({ envId }) => await cl.getInstance(envId).then(vm => machineFromVm(vm)),
-
-    listMachines,
-    listDeletableResources: listMachines,
-
-    deleteResources: async (wait, ...resources) => {
-      await Promise.all(resources.map(({ type, providerId }) => {
-        if (type === machineResourceType) {
-          return cl.deleteResourcesResourceGroup(providerId, wait)
-        }
-        throw new Error(`Unknown resource type "${type}"`)
-      }))
-    },
-
-    resourcePlurals: {},
-
-    ...sshDriver({ getSshKey: () => getStoredSshKey(store, SSH_KEYPAIR_ALIAS) }),
-
-    machineStatusCommand: async () => machineStatusNodeExporterCommand,
-  })
-}
+): MachineDriver<SshMachine> => ({
+  customizationScripts: CUSTOMIZE_BARE_MACHINE,
+  friendlyName: 'Microsoft Azure',
+  getMachine: async ({ envId }) => await cl.getInstance(envId).then(vm => machineFromVm(vm)),
+  listMachines: () => listMachines({ client: cl }),
+  resourcePlurals: {},
+  ...sshDriver({ getSshKey: () => getStoredSshKey(store, SSH_KEYPAIR_ALIAS) }),
+  machineStatusCommand: async () => machineStatusNodeExporterCommand,
+})
 
 const flags = {
   region: Flags.string({
@@ -143,7 +127,7 @@ const flags = {
 
 type FlagTypes = Omit<Interfaces.InferredFlags<typeof flags>, 'json'>
 
-const inquireFlags = async () => {
+const inquireFlags = async ({ log: _log }: { log: Logger }) => {
   const region = await inquirerAutoComplete<string>({
     message: flags.region.description as string,
     source: async input => REGIONS.filter(r => !input || r.includes(input.toLowerCase())).map(value => ({ value })),
@@ -191,7 +175,7 @@ type MachineCreationContext = DriverContext & {
 
 const machineCreationDriver = (
   { client: cl, vmSize, store, log, debug, metadata }: MachineCreationContext,
-): MachineCreationDriver<SshMachine> => {
+): MachineCreationDriver<SshMachine, ResourceType> => {
   const ssh = sshDriver({ getSshKey: () => getStoredSshKey(store, SSH_KEYPAIR_ALIAS) })
 
   return {
@@ -241,13 +225,21 @@ const machineCreationDriver = (
           : [],
       })
     },
+    listDeletableResources: () => listMachines({ client: cl }),
+    deleteResources: async (wait, ...resources) => {
+      await Promise.all(resources.map(({ type, providerId }) => {
+        if (type === machineResourceType) {
+          return cl.deleteResourcesResourceGroup(providerId, wait)
+        }
+        throw new Error(`Unknown resource type "${type}"`)
+      }))
+    },
   }
 }
 
 const factory: MachineDriverFactory<
   Interfaces.InferredFlags<typeof flags>,
-  SshMachine,
-  ResourceType
+  SshMachine
 > = ({ flags: f, profile: { id: profileId }, store, log, debug }) => machineDriver({
   client: createClient({
     ...contextFromFlags(f),
@@ -267,7 +259,8 @@ const machineCreationContextFromFlags = (
 
 const machineCreationFactory: MachineCreationDriverFactory<
   MachineCreationFlagTypes,
-  SshMachine
+  SshMachine,
+  ResourceType
 > = ({ flags: f, profile: { id: profileId }, store, log, debug }) => {
   const c = machineCreationContextFromFlags(f)
   return machineCreationDriver({
diff --git a/packages/driver-gce/src/driver/index.ts b/packages/driver-gce/src/driver/index.ts
index cb5f0520..d5747673 100644
--- a/packages/driver-gce/src/driver/index.ts
+++ b/packages/driver-gce/src/driver/index.ts
@@ -16,7 +16,7 @@ import {
   extractDefined,
   PartialMachine,
 } from '@preevy/core'
-import { memoize, pick } from 'lodash-es'
+import { pick } from 'lodash-es'
 import createClient, { Client, Instance, availableRegions, defaultProjectId, instanceError, shortResourceName } from './client.js'
 import { deserializeMetadata, metadataKey } from './metadata.js'
 import { LABELS } from './labels.js'
@@ -59,34 +59,22 @@ const machineFromInstance = (
   }
 }
 
-const machineDriver = ({ store, client }: DriverContext): MachineDriver<SshMachine, ResourceType> => {
-  const listMachines = () => asyncMap(machineFromInstance, client.listInstances())
+const listMachines = ({ client }: { client: Client }) => asyncMap(machineFromInstance, client.listInstances())
 
-  return ({
-    friendlyName: 'Google Cloud',
-
-    getMachine: async ({ envId }) => {
-      const instance = await client.findBestEnvInstance(envId)
-      return instance && machineFromInstance(instance)
-    },
+const machineDriver = ({ store, client }: DriverContext): MachineDriver<SshMachine> => ({
+  friendlyName: 'Google Cloud',
 
-    listMachines,
-    listDeletableResources: listMachines,
+  getMachine: async ({ envId }) => {
+    const instance = await client.findBestEnvInstance(envId)
+    return instance && machineFromInstance(instance)
+  },
 
-    deleteResources: async (wait, ...resources) => {
-      await Promise.all(resources.map(({ type, providerId }) => {
-        if (type === 'machine') {
-          return client.deleteInstance(providerId, wait)
-        }
-        throw new Error(`Unknown resource type: "${type}"`)
-      }))
-    },
+  listMachines: () => listMachines({ client }),
 
-    resourcePlurals: {},
-    ...sshDriver({ getSshKey: () => getStoredSshKey(store, SSH_KEYPAIR_ALIAS) }),
-    machineStatusCommand: async () => machineStatusNodeExporterCommand,
-  })
-}
+  resourcePlurals: {},
+  ...sshDriver({ getSshKey: () => getStoredSshKey(store, SSH_KEYPAIR_ALIAS) }),
+  machineStatusCommand: async () => machineStatusNodeExporterCommand,
+})
 
 const flags = {
   'project-id': Flags.string({
@@ -109,7 +97,7 @@ const contextFromFlags = ({
   zone,
 })
 
-const inquireFlags = async () => {
+const inquireFlags = async ({ log: _log }: { log: Logger }) => {
   const project = await inquirer.input({
     default: await defaultProjectId(),
     message: flags['project-id'].description as string,
@@ -161,7 +149,7 @@ type MachineCreationDriverContext = DriverContext & {
 
 const machineCreationDriver = (
   { machineType: specifiedMachineType, store, client, log, debug, metadata }: MachineCreationDriverContext,
-): MachineCreationDriver<SshMachine> => {
+): MachineCreationDriver<SshMachine, ResourceType> => {
   const machineType = specifiedMachineType || DEFAULT_MACHINE_TYPE
   const ssh = sshDriver({ getSshKey: () => getStoredSshKey(store, SSH_KEYPAIR_ALIAS) })
 
@@ -211,13 +199,23 @@ const machineCreationDriver = (
           : [],
       }
     },
+
+    listDeletableResources: () => listMachines({ client }),
+
+    deleteResources: async (wait, ...resources) => {
+      await Promise.all(resources.map(({ type, providerId }) => {
+        if (type === 'machine') {
+          return client.deleteInstance(providerId, wait)
+        }
+        throw new Error(`Unknown resource type: "${type}"`)
+      }))
+    },
   })
 }
 
 const factory: MachineDriverFactory<
   Interfaces.InferredFlags<typeof flags>,
-  SshMachine,
-  ResourceType
+  SshMachine
 > = ({ flags: f, profile: { id: profileId }, store, log, debug }) => machineDriver({
   log,
   debug,
@@ -228,7 +226,8 @@ machineDriver.factory = factory
 
 const machineCreationFactory: MachineCreationDriverFactory<
   Interfaces.InferredFlags<typeof machineCreationFlags>,
-  SshMachine
+  SshMachine,
+  ResourceType
 > = ({ flags: f, profile: { id: profileId }, store, log, debug }) => machineCreationDriver({
   metadata: pick(f, Object.keys(machineCreationFlags)) as MachineCreationFlagTypes, // filter out non-driver flags
   log,
diff --git a/packages/driver-kube-pod/src/driver/client/common.ts b/packages/driver-kube-pod/src/driver/client/common.ts
index 31459580..ab5602bd 100644
--- a/packages/driver-kube-pod/src/driver/client/common.ts
+++ b/packages/driver-kube-pod/src/driver/client/common.ts
@@ -13,4 +13,5 @@ export const bodyOrUndefined = async <T, Response extends { body: T } = { body:
 }
 
 export type HasMetadata = { metadata?: k8s.V1ObjectMeta }
+export type HasKind = { kind?: string }
 export type Package = { version: string; name: string }
diff --git a/packages/driver-kube-pod/src/driver/client/dynamic/index.ts b/packages/driver-kube-pod/src/driver/client/dynamic/index.ts
index b6eba25f..6d655833 100644
--- a/packages/driver-kube-pod/src/driver/client/dynamic/index.ts
+++ b/packages/driver-kube-pod/src/driver/client/dynamic/index.ts
@@ -1,5 +1,5 @@
 import * as k8s from '@kubernetes/client-node'
-import { HasRequired, ensureDefined } from '@preevy/core'
+import { ensureDefined } from '@preevy/core'
 import { asyncConcat, asyncMap } from 'iter-tools-es'
 import { defaults } from 'lodash-es'
 import { paginationIterator } from '../pagination.js'
@@ -7,11 +7,17 @@ import apply from './apply.js'
 import waiter from './wait.js'
 import { FuncWrapper } from '../log-error.js'
 
+export type KubernetesType = {
+  apiVersion: string
+  kind: string
+  namespace?: string
+}
+
 const dynamicApi = (
   { client, wrap }: { client: k8s.KubernetesObjectApi; wrap: FuncWrapper },
 ) => {
   const list = (
-    types: { apiVersion: string; kind: string; namespace?: string }[],
+    types: KubernetesType[],
     { fieldSelector, labelSelector }: {
       fieldSelector?: string
       labelSelector?: string
@@ -36,21 +42,19 @@ const dynamicApi = (
     ),
   )))
 
-  const gatherTypes = (...specs: k8s.KubernetesObject[]) => {
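+  // de-duplicate types by their apiVersion/kind/namespace key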
+  const uniqueTypes = (types: KubernetesType[]): KubernetesType[] => [
+    ...new Map(types.map(t => [[t.apiVersion, t.kind, t.namespace].join(':'), t])).values(),
+  ]
+
+  const gatherTypes = (...specs: k8s.KubernetesObject[]): KubernetesType[] => {
     const docs = specs.map(s => ensureDefined(s, 'apiVersion', 'kind', 'metadata'))
-    type Doc = HasRequired<k8s.KubernetesObject, 'apiVersion' | 'kind' | 'metadata'>
-    const key = ({ apiVersion, kind, metadata: { namespace } }: Doc) => [apiVersion, kind, namespace].join(':')
-    const uniques = new Map(docs.map(d => [key(d), {
-      apiVersion: d.apiVersion,
-      kind: d.kind,
-      namespace: d.metadata.namespace,
-    }])).values()
-    return [...uniques]
+    return uniqueTypes(docs.map(({ apiVersion, kind, metadata: { namespace } }) => ({ apiVersion, kind, namespace })))
   }
 
   return {
     list,
     gatherTypes,
+    uniqueTypes,
     apply: apply({ client, wrap }),
     waiter: (watcher: k8s.Watch) => waiter({ watcher, client }),
   }
diff --git a/packages/driver-kube-pod/src/driver/client/dynamic/wait.ts b/packages/driver-kube-pod/src/driver/client/dynamic/wait.ts
index 552c5859..36e502b5 100644
--- a/packages/driver-kube-pod/src/driver/client/dynamic/wait.ts
+++ b/packages/driver-kube-pod/src/driver/client/dynamic/wait.ts
@@ -10,11 +10,11 @@ const waiter = ({ watcher, client }: { watcher: k8s.Watch; client: k8s.Kubernete
     eventPredicate: (phase: string, apiObj: T) => boolean = () => true,
   ) => {
     const path = new URL(await urlGetter(o)).pathname
-    const { name, namespace } = ensureDefined(extractDefined(o, 'metadata'), 'name', 'namespace')
+    const { name, namespace } = ensureDefined(extractDefined(o, 'metadata'), 'name') // namespace can be undefined
     let abort: () => void
     return await new Promise<T>(resolve => {
       void watcher.watch(path, {}, (phase, apiObj: T) => {
-        const metadata = ensureDefined(extractDefined(apiObj, 'metadata'), 'name')
+        const metadata = ensureDefined(extractDefined(apiObj, 'metadata'), 'name') // namespace can be undefined
         if (
           metadata.name === name
           && metadata.namespace === namespace
diff --git a/packages/driver-kube-pod/src/driver/client/index.ts b/packages/driver-kube-pod/src/driver/client/index.ts
index 21b4ad18..1427d26a 100644
--- a/packages/driver-kube-pod/src/driver/client/index.ts
+++ b/packages/driver-kube-pod/src/driver/client/index.ts
@@ -9,9 +9,13 @@ import { maxBy } from 'lodash-es'
 import { inspect } from 'util'
 import { Logger } from '@preevy/core'
 import baseExec from './exec/index.js'
-import dynamicApi, { ApplyFilter, applyStrategies, compositeApplyFilter } from './dynamic/index.js'
+import dynamicApi, { ApplyFilter, KubernetesType, applyStrategies, compositeApplyFilter } from './dynamic/index.js'
 import basePortForward from './port-forward.js'
-import k8sHelpers from './k8s-helpers.js'
+import {
+  podHelpers as createPodHelpers,
+  appsV1ApiHelpers as createAppsV1ApiHelpers,
+  coreV1ApiHelpers as createCoreV1ApiHelpers,
+} from './k8s-helpers.js'
 import {
   LABELS,
   addEnvMetadata,
@@ -23,15 +27,16 @@ import {
   extractTemplateHash,
   extractCreatedAt,
   extractName,
-  isDockerHostDeployment,
-  ANNOTATIONS,
+  isDockerHostStatefulSet,
+  addAllTypesAnnotation,
+  readAllTypesAnnotation,
 } from './metadata.js'
 import { Package } from './common.js'
 import { logError } from './log-error.js'
 
 const stringify = stringifyModule.default
 
-export const loadKubeConfig = (kubeconfig?: string, context?: string) => {
+export const loadKubeConfig = ({ kubeconfig, context }: { kubeconfig?: string; context?: string }) => {
   const kc = new k8s.KubeConfig()
   if (kubeconfig) {
     kc.loadFromFile(kubeconfig)
@@ -44,86 +49,228 @@ export const loadKubeConfig = (kubeconfig?: string, context?: string) => {
   return kc
 }
 
-export class DuplicateDockerHostDeployment extends Error {
+export class DuplicateDockerHostStatefulSet extends Error {
   constructor(readonly dups: [k8s.KubernetesObject, k8s.KubernetesObject]) {
-    super(`Duplicate Docker host Deployments found: ${inspect(dups)}`)
+    super(`Duplicate Docker host StatefulSets found: ${inspect(dups)}`)
   }
 }
 
-const ensureSingleDockerHostDeployment = (): ApplyFilter => {
-  let deployment: k8s.KubernetesObject
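+// ApplyFilter that rejects a second docker-host StatefulSet and applies an optional transform to the single one found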
+const ensureSingleDockerHostStatefulSet = (
+  transform: (o: k8s.V1StatefulSet) => k8s.V1StatefulSet = o => o,
+): ApplyFilter => {
+  let statefulSet: k8s.KubernetesObject
   return s => {
-    if (isDockerHostDeployment(s)) {
-      if (deployment) {
-        throw new DuplicateDockerHostDeployment([deployment, s])
+    if (isDockerHostStatefulSet(s)) {
+      if (statefulSet) {
+        throw new DuplicateDockerHostStatefulSet([statefulSet, s])
       }
-      deployment = s
+      statefulSet = transform(s)
     }
     return s
   }
 }
 
-const kubeClient = ({ log, namespace, kc, profileId, template, package: packageDetails, kubeconfig }: {
+export const kubeClient = ({
+  log,
+  namespace,
+  kc,
+  profileId,
+  kubeconfig,
+}: {
   log: Logger
   kc: k8s.KubeConfig
   kubeconfig?: string
   namespace: string
   profileId: string
+}) => {
+  const wrap = logError(log)
+  const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
+  const k8sAppsApi = kc.makeApiClient(k8s.AppsV1Api)
+
+  const podHelpers = createPodHelpers({ k8sApi, k8sAppsApi, wrap })
+  const appsV1ApiHelpers = createAppsV1ApiHelpers(k8sAppsApi, { wrap })
+  const coreV1ApiHelpers = createCoreV1ApiHelpers(k8sApi, { wrap })
+
+  const listEnvStatefulSets = (
+    envId: string,
+    deleted?: boolean,
+  ) => appsV1ApiHelpers.listStatefulSets({
+    namespace,
+    ...envSelector({ profileId, envId, deleted, dockerHost: true }),
+  })
+
+  const listEnvDeployments = (
+    envId: string,
+    deleted?: boolean,
+  ) => appsV1ApiHelpers.listDeployments({
+    namespace,
+    ...envSelector({ profileId, envId, deleted, dockerHost: true }),
+  })
+
+  const findMostRecentDeployment = async ({ envId, deleted }: {
+    envId: string
+    deleted?: boolean
+  }): Promise<k8s.V1Deployment | undefined> => maxBy(
+    await asyncToArray(listEnvDeployments(envId, deleted)),
+    extractCreatedAt,
+  )
+
+  const findMostRecentStatefulSet = async ({ envId, deleted }: {
+    envId: string
+    deleted?: boolean
+  }): Promise<k8s.V1StatefulSet | undefined> => maxBy(
+    await asyncToArray(listEnvStatefulSets(envId, deleted)),
+    extractCreatedAt,
+  )
+
+  const findEnvObject = async (args: {
+    envId: string
+    deleted?: boolean
+  }): Promise<k8s.V1Deployment | k8s.V1StatefulSet | undefined> => await findMostRecentStatefulSet(args)
+    ?? await findMostRecentDeployment(args)
+
+  const portForward = async (
+    statefulSet: k8s.V1StatefulSet,
+    targetPort: number,
+    listenAddress: number | string | ListenOptions,
+  ) => {
+    const forward = new k8s.PortForward(kc)
+    const pod = await podHelpers.findReadyPodForStatefulSet(statefulSet)
+    const podName = extractName(pod)
+    return await basePortForward({ namespace, forward, log })(podName, targetPort, listenAddress)
+  }
+
+  const apiServiceClusterAddress = async (): Promise<[string, number] | undefined> => {
+    const service = await asyncFirst(coreV1ApiHelpers.listServices({
+      namespace: 'default',
+      fieldSelector: 'metadata.name=kubernetes',
+    }))
+    const [host, port] = [service?.spec?.clusterIP, service?.spec?.ports?.[0]?.port]
+    if (host === undefined || port === undefined) {
+      return undefined
+    }
+    return [host, port]
+  }
+
+  return {
+    findEnvObject,
+    findMostRecentDeployment,
+    findMostRecentStatefulSet,
+    listProfileStatefulSets: () => appsV1ApiHelpers.listStatefulSets({ namespace, ...profileSelector({ profileId }) }),
+    listProfileDeployments: () => appsV1ApiHelpers.listDeployments({ namespace, ...profileSelector({ profileId }) }),
+    exec: baseExec({ kubeConfig: kc, kubeconfigLocation: kubeconfig, namespace, log }),
+    findReadyPod: podHelpers.findReadyPod,
+    portForward,
+    apiServiceClusterAddress,
+  }
+}
+
+export type Client = ReturnType<typeof kubeClient>
+
+export const kubeCreationClient = ({
+  log,
+  namespace,
+  kc,
+  profileId,
+  template,
+  package: packageDetails,
+  storageClass,
+  storageSize,
+}: {
+  log: Logger
+  kc: k8s.KubeConfig
+  namespace: string
+  profileId: string
   template: Buffer | string | Promise<Buffer | string>
   package: Package | Promise<Package>
+  storageClass: string | undefined
+  storageSize: number
 }) => {
   const wrap = logError(log)
-  const k8sApi = kc.makeApiClient(k8s.CoreV1Api)
   const k8sAppsApi = kc.makeApiClient(k8s.AppsV1Api)
   const k8sObjApi = kc.makeApiClient(k8s.KubernetesObjectApi)
   const watcher = new k8s.Watch(kc)
 
-  const helpers = k8sHelpers({ k8sApi, k8sAppsApi, wrap })
+  const appsV1ApiHelpers = createAppsV1ApiHelpers(k8sAppsApi, { wrap })
 
-  const { apply, gatherTypes, list: dynamicList, waiter } = dynamicApi({ client: k8sObjApi, wrap })
+  const { apply, gatherTypes, uniqueTypes, list: dynamicList, waiter } = dynamicApi({ client: k8sObjApi, wrap })
 
   const renderTemplate = async ({ instance }: { instance: string }) => {
     const specsStr = nunjucks.renderString((await template).toString('utf-8'), {
       id: instance,
       namespace,
+      storageClass,
+      storageSize,
+      labels: {
+        [LABELS.PROFILE_ID]: profileId,
+        [LABELS.INSTANCE]: instance,
+      },
     })
     return yaml.parseAllDocuments(specsStr).map(d => d.toJS() as k8s.KubernetesObject)
   }
 
-  const calcTemplateHash = async ({ instance }: { instance: string }) => `sha1:${createHash('sha1').update(
-    stringify.stableStringify(await renderTemplate({ instance }))
+  const calcTemplateHash = async (
+    args: { instance: string } | { specs: k8s.KubernetesObject[] },
+  ) => `sha1:${createHash('sha1').update(
+    stringify.stableStringify('instance' in args ? await renderTemplate(args) : args.specs)
   ).digest('base64')}`
 
-  const listInstanceObjects = async (instance: string) => dynamicList(
-    gatherTypes(...await renderTemplate({ instance: '' })),
-    { ...instanceSelector({ instance }) },
-  )
+  const findInstanceStatefulSet = async (instance: string) => {
+    const statefulSet = await asyncFirst(appsV1ApiHelpers.listStatefulSets({
+      namespace,
+      ...instanceSelector({ instance }),
+    }))
 
-  const findInstanceDeployment = async (instance: string) => {
-    const deployment = await asyncFirst(helpers.listDeployments({ namespace, ...instanceSelector({ instance }) }))
-    if (!deployment) {
-      throw new Error(`Cannot find deployment with label "${LABELS.INSTANCE}": "${instance}" in namespace "${namespace}"`)
+    if (!statefulSet) {
+      throw new Error(`Cannot find StatefulSet with label "${LABELS.INSTANCE}": "${instance}" in namespace "${namespace}"`)
     }
-    return deployment
+    return statefulSet
   }
 
+  const findInstanceAllTypesFromMetadata = async (instance: string): Promise<KubernetesType[]> => {
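+    // types recorded on the env's StatefulSet annotation (if present), scoped to this namespace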
+    const statefulSet = await findInstanceStatefulSet(instance).catch(() => undefined)
+    const allTypes = statefulSet ? readAllTypesAnnotation(statefulSet) : undefined
+    return (allTypes ?? []).map(t => ({ ...t, namespace }))
+  }
+
+  const findInstanceAllTypes = async (instance: string): Promise<KubernetesType[]> => uniqueTypes([
+    ...await findInstanceAllTypesFromMetadata(instance),
+    ...gatherTypes(...await renderTemplate({ instance: '' })),
+    { kind: 'Deployment', apiVersion: 'apps/v1', namespace }, // backwards compatibility with Deployment-based envs
+  ])
+
+  const listInstanceObjects = async (
+    instance: string,
+  ) => dynamicList(
+    await findInstanceAllTypes(instance),
+    { ...instanceSelector({ instance }) },
+  )
+
   const createEnv = async (
     envId: string,
     { serverSideApply }: { serverSideApply: boolean },
   ) => {
     const instance = envRandomName({ envId, profileId })
     const specs = await renderTemplate({ instance })
+    const templateTypes = gatherTypes(...specs)
+      .map(({ kind, apiVersion, namespace: ns }) => ({ kind, apiVersion, namespaced: ns !== undefined }))
+
+    const allTypes = uniqueTypes([
+      { kind: 'PersistentVolumeClaim', apiVersion: 'v1', namespace }, // PVCs associated with the StatefulSet (they are not part of the template)
+      ...templateTypes,
+    ])
+
     log.debug('createEnv: apply', instance, inspect(specs, { depth: null }))
     await apply(specs, {
       filter: compositeApplyFilter(
-        ensureSingleDockerHostDeployment(),
+        ensureSingleDockerHostStatefulSet(s => addAllTypesAnnotation(s, allTypes)),
         addEnvMetadata({
           profileId,
           envId,
           createdAt: new Date(),
           instance,
           package: await packageDetails,
-          templateHash: await calcTemplateHash({ instance }),
+          templateHash: await calcTemplateHash({ specs }),
         })
       ),
       strategy: serverSideApply
@@ -131,17 +278,12 @@ const kubeClient = ({ log, namespace, kc, profileId, template, package: packageD
         : applyStrategies.clientSideApply,
     })
 
-    log.debug('createEnv: findInstanceDeployment', instance)
-    const deployment = await findInstanceDeployment(instance)
-
-    // objects returned by the list API missing 'kind' and 'apiVersion' props
-    // https://github.com/kubernetes/kubernetes/issues/3030
-    deployment.kind ??= deployment.metadata?.annotations?.[ANNOTATIONS.KUBERNETES_KIND] ?? 'Deployment'
-    deployment.apiVersion ??= deployment.metadata?.annotations?.[ANNOTATIONS.KUERBETES_API_VERSION] ?? 'apps/v1'
+    log.debug('createEnv: findInstanceStatefulSet', instance)
+    const statefulSet = await findInstanceStatefulSet(instance)
 
     return await waiter(watcher).waitForEvent(
-      deployment,
-      (_phase, d) => Boolean(d.status?.conditions?.some(({ type, status }) => type === 'Available' && status === 'True')),
+      statefulSet,
+      (_phase, ss) => ss.status?.readyReplicas === ss.spec?.replicas,
     )
   }
 
@@ -157,62 +299,15 @@ const kubeClient = ({ log, namespace, kc, profileId, template, package: packageD
     await apply(objects, { strategy: wait ? applyStrategies.deleteAndWait(watcher) : applyStrategies.delete })
   }
 
-  const listEnvDeployments = (
-    envId: string,
-    deleted?: boolean,
-  ) => helpers.listDeployments({
-    namespace,
-    ...envSelector({ profileId, envId, deleted, dockerHost: true }),
-  })
-
-  const findMostRecentDeployment = async ({ envId, deleted }: {
-    envId: string
-    deleted?: boolean
-  }): Promise<k8s.V1Deployment | undefined> => maxBy(
-    await asyncToArray(listEnvDeployments(envId, deleted)),
-    extractCreatedAt,
-  )
-
-  const portForward = async (
-    deployment: k8s.V1Deployment,
-    targetPort: number,
-    listenAddress: number | string | ListenOptions,
-  ) => {
-    const forward = new k8s.PortForward(kc)
-    const pod = await helpers.findReadyPodForDeployment(deployment)
-    const podName = extractName(pod)
-    return await basePortForward({ namespace, forward, log })(podName, targetPort, listenAddress)
-  }
-
-  const apiServiceClusterAddress = async (): Promise<[string, number] | undefined> => {
-    const service = await asyncFirst(helpers.listServices({
-      namespace: 'default',
-      fieldSelector: 'metadata.name=kubernetes',
-    }))
-    const [host, port] = [service?.spec?.clusterIP, service?.spec?.ports?.[0]?.port]
-    if (host === undefined || port === undefined) {
-      return undefined
-    }
-    return [host, port]
-  }
-
   return {
-    findMostRecentDeployment,
-    listProfileDeployments: () => helpers.listDeployments({ namespace, ...profileSelector({ profileId }) }),
-    exec: baseExec({ kubeConfig: kc, kubeconfigLocation: kubeconfig, namespace, log }),
-    findReadyPodForDeployment: helpers.findReadyPodForDeployment,
     createEnv,
     deleteEnv,
-    portForward,
     extractTemplateHash,
     calcTemplateHash,
-    apiServiceClusterAddress,
   }
 }
 
-export type Client = ReturnType<typeof kubeClient>
+export type CreationClient = ReturnType<typeof kubeCreationClient>
 
 export { extractInstance, extractEnvId, extractName, extractNamespace, extractTemplateHash } from './metadata.js'
 export { DeploymentNotReadyError, DeploymentNotReadyErrorReason } from './k8s-helpers.js'
-
-export default kubeClient
diff --git a/packages/driver-kube-pod/src/driver/client/k8s-helpers.ts b/packages/driver-kube-pod/src/driver/client/k8s-helpers.ts
index f266dbce..1e032dd4 100644
--- a/packages/driver-kube-pod/src/driver/client/k8s-helpers.ts
+++ b/packages/driver-kube-pod/src/driver/client/k8s-helpers.ts
@@ -1,8 +1,9 @@
 import * as k8s from '@kubernetes/client-node'
 import { ensureDefined, extractDefined } from '@preevy/core'
-import { asyncFilter, asyncFirst, asyncToArray } from 'iter-tools-es'
+import { asyncFilter, asyncFind, asyncFirst, asyncMap, asyncToArray } from 'iter-tools-es'
 import { paginationIterator } from './pagination.js'
 import { FuncWrapper } from './log-error.js'
+import { ANNOTATIONS, extractNameAndNamespace } from './metadata.js'
 
 export type DeploymentNotReadyErrorReason = 'NoRevision' | 'NoReplicaSet' | 'NoReadyPod'
 export class DeploymentNotReadyError extends Error {
@@ -11,24 +12,86 @@ export class DeploymentNotReadyError extends Error {
   }
 }
 
-export default (
-  { k8sAppsApi, k8sApi, wrap }: {
-    k8sAppsApi: k8s.AppsV1Api
-    k8sApi: k8s.CoreV1Api
-    wrap: FuncWrapper
+export class StatefulSetNotReadyError extends Error {
+  constructor(statefulSet: Pick<k8s.V1StatefulSet, 'metadata'>) {
+    super(`No ready pod found for StatefulSet "${statefulSet.metadata?.namespace}/${statefulSet.metadata?.name}"`)
   }
+}
+
+const readyPodPredicate = (p: k8s.V1Pod) => Boolean(p.status?.conditions?.some(c => c.type === 'Ready' && c.status === 'True'))
+
+// objects returned by the list API are missing 'kind' and 'apiVersion' props
+// https://github.com/kubernetes/kubernetes/issues/3030
+const completeMissingListFields = <T extends k8s.KubernetesObject>(
+  { kind, apiVersion }: { kind: string; apiVersion: string },
+) => (o: T) => {
+    o.kind ??= o.metadata?.annotations?.[ANNOTATIONS.KUBERNETES_KIND] ?? kind
+    o.apiVersion ??= o.metadata?.annotations?.[ANNOTATIONS.KUERBETES_API_VERSION] ?? apiVersion
+    return o
+  }
+
+const completeMissingListFieldsAsyncIter = <T extends k8s.KubernetesObject>(
+  fields: { kind: string; apiVersion: string },
+  asyncIterator: AsyncIterable<T>,
+) => asyncMap(
+    completeMissingListFields(fields),
+    asyncIterator,
+  )
+
+export const storageV1ApiHelpers = (
+  storageApi: k8s.StorageV1Api,
+  { wrap = f => f }: { wrap?: FuncWrapper } = {}
 ) => {
-  const listDeployments = (
+  const listStorageClasses = (
+    { fieldSelector, labelSelector, resourceVersion, timeoutSeconds, watch }: {
+      fieldSelector?: string
+      labelSelector?: string
+      resourceVersion?: string
+      timeoutSeconds?: number
+      watch?: boolean
+    },
+  ) => completeMissingListFieldsAsyncIter(
+    { kind: 'StorageClass', apiVersion: 'storage.k8s.io/v1' },
+    paginationIterator<k8s.V1StorageClass>(
+      wrap(continueToken => storageApi.listStorageClass(
+        undefined,
+        undefined,
+        continueToken,
+        fieldSelector,
+        labelSelector,
+        undefined,
+        resourceVersion,
+        undefined,
+        timeoutSeconds,
+        watch,
+      )),
+    )
+  )
+
+  const findDefaultStorageClass = (storageClasses: k8s.V1StorageClass[]) => {
+    const defaultStorageClass = storageClasses.find(sc => sc?.metadata?.annotations?.['storageclass.kubernetes.io/is-default-class'] === 'true')
+    return defaultStorageClass
+  }
+
+  return { listStorageClasses, findDefaultStorageClass }
+}
+
+export const coreV1ApiHelpers = (
+  k8sApi: k8s.CoreV1Api,
+  { wrap = f => f }: { wrap?: FuncWrapper } = {}
+) => {
+  const listPods = (
     { namespace, fieldSelector, labelSelector, resourceVersion, timeoutSeconds, watch }: {
       namespace: string
+      namespaceOverride?: string
       fieldSelector?: string
       labelSelector?: string
       resourceVersion?: string
       timeoutSeconds?: number
       watch?: boolean
     },
-  ) => paginationIterator<k8s.V1Deployment>(
-    wrap(continueToken => k8sAppsApi.listNamespacedDeployment(
+  ) => completeMissingListFieldsAsyncIter({ kind: 'Pod', apiVersion: 'v1' }, paginationIterator<k8s.V1Pod>(
+    wrap(continueToken => k8sApi.listNamespacedPod(
       namespace,
       undefined,
       undefined,
@@ -41,9 +104,9 @@ export default (
       timeoutSeconds,
       watch,
     )),
-  )
+  ))
 
-  const listReplicaSets = (
+  const listServices = (
     { namespace, fieldSelector, labelSelector, resourceVersion, timeoutSeconds, watch }: {
       namespace: string
       fieldSelector?: string
@@ -52,8 +115,8 @@ export default (
       timeoutSeconds?: number
       watch?: boolean
     },
-  ) => paginationIterator<k8s.V1ReplicaSet>(
-    wrap(continueToken => k8sAppsApi.listNamespacedReplicaSet(
+  ) => completeMissingListFieldsAsyncIter({ kind: 'Service', apiVersion: 'v1' }, paginationIterator<k8s.V1Service>(
+    wrap(continueToken => k8sApi.listNamespacedService(
       namespace,
       undefined,
       undefined,
@@ -66,20 +129,29 @@ export default (
       timeoutSeconds,
       watch,
     )),
-  )
+  ))
 
-  const listPods = (
+  return {
+    listPods,
+    listServices,
+  }
+}
+
+export const appsV1ApiHelpers = (
+  k8sAppsApi: k8s.AppsV1Api,
+  { wrap = f => f }: { wrap?: FuncWrapper } = {}
+) => {
+  const listStatefulSets = (
     { namespace, fieldSelector, labelSelector, resourceVersion, timeoutSeconds, watch }: {
       namespace: string
-      namespaceOverride?: string
       fieldSelector?: string
       labelSelector?: string
       resourceVersion?: string
       timeoutSeconds?: number
       watch?: boolean
     },
-  ) => paginationIterator<k8s.V1Pod>(
-    wrap(continueToken => k8sApi.listNamespacedPod(
+  ) => completeMissingListFieldsAsyncIter({ kind: 'StatefulSet', apiVersion: 'apps/v1' }, paginationIterator<k8s.V1StatefulSet>(
+    wrap(continueToken => k8sAppsApi.listNamespacedStatefulSet(
       namespace,
       undefined,
       undefined,
@@ -92,9 +164,9 @@ export default (
       timeoutSeconds,
       watch,
     )),
-  )
+  ))
 
-  const listServices = (
+  const listDeployments = (
     { namespace, fieldSelector, labelSelector, resourceVersion, timeoutSeconds, watch }: {
       namespace: string
       fieldSelector?: string
@@ -103,8 +175,8 @@ export default (
       timeoutSeconds?: number
       watch?: boolean
     },
-  ) => paginationIterator<k8s.V1Service>(
-    wrap(continueToken => k8sApi.listNamespacedService(
+  ) => completeMissingListFieldsAsyncIter({ kind: 'Deployment', apiVersion: 'apps/v1' }, paginationIterator<k8s.V1Deployment>(
+    wrap(continueToken => k8sAppsApi.listNamespacedDeployment(
       namespace,
       undefined,
       undefined,
@@ -117,10 +189,75 @@ export default (
       timeoutSeconds,
       watch,
     )),
-  )
+  ))
+
+  const listReplicaSets = (
+    { namespace, fieldSelector, labelSelector, resourceVersion, timeoutSeconds, watch }: {
+      namespace: string
+      fieldSelector?: string
+      labelSelector?: string
+      resourceVersion?: string
+      timeoutSeconds?: number
+      watch?: boolean
+    },
+  ) => completeMissingListFieldsAsyncIter({ kind: 'ReplicaSet', apiVersion: 'apps/v1' }, paginationIterator<k8s.V1ReplicaSet>(
+    wrap(continueToken => k8sAppsApi.listNamespacedReplicaSet(
+      namespace,
+      undefined,
+      undefined,
+      continueToken,
+      fieldSelector,
+      labelSelector,
+      undefined,
+      resourceVersion,
+      undefined,
+      timeoutSeconds,
+      watch,
+    )),
+  ))
+
+  return {
+    listStatefulSets,
+    listDeployments,
+    listReplicaSets,
+  }
+}
+
+export const podHelpers = (
+  { k8sAppsApi, k8sApi, wrap }: {
+    k8sAppsApi: k8s.AppsV1Api
+    k8sApi: k8s.CoreV1Api
+    wrap: FuncWrapper
+  }
+) => {
+  const { listPods } = coreV1ApiHelpers(k8sApi, { wrap })
+  const { listReplicaSets } = appsV1ApiHelpers(k8sAppsApi, { wrap })
+
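+  // pods matching the StatefulSet's label selector, filtered to those it owns via ownerReferences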
+  const listPodsForStatefulSet = (ss: k8s.V1StatefulSet) => {
+    const { name, namespace } = extractNameAndNamespace(ss)
+    const { matchLabels } = ensureDefined(extractDefined(extractDefined(ss, 'spec'), 'selector'), 'matchLabels')
+    const labelSelector = Object.entries(matchLabels).map(([k, v]) => `${k}=${v}`).join(',')
+    return asyncFilter<k8s.V1Pod>(
+      pod => Boolean(pod.metadata?.ownerReferences?.some(ref => ref.kind === 'StatefulSet' && ref.name === name)),
+      listPods({
+        namespace: namespace || '',
+        labelSelector,
+      }),
+    )
+  }
+
+  const findReadyPodForStatefulSet = async (
+    ss: k8s.V1StatefulSet,
+  ) => {
+    const pod = await asyncFind(readyPodPredicate, listPodsForStatefulSet(ss))
+    if (!pod) {
+      throw new StatefulSetNotReadyError(ss)
+    }
+    return pod
+  }
 
   const findReplicaSetForDeployment = async (deployment: Pick<k8s.V1Deployment, 'metadata'>) => {
-    const { name, namespace, annotations } = ensureDefined(extractDefined(deployment, 'metadata'), 'name', 'annotations', 'labels')
+    const { name, namespace, annotations } = ensureDefined(extractDefined(deployment, 'metadata'), 'name', 'namespace', 'annotations', 'labels')
     const revision = annotations['deployment.kubernetes.io/revision']
     if (!revision) {
       throw new DeploymentNotReadyError(deployment, 'NoRevision')
@@ -151,18 +288,23 @@ export default (
 
   const findReadyPodForDeployment = async (deployment: Pick<k8s.V1Deployment, 'metadata'>) => {
     const allPods = await asyncToArray(await listPodsForDeployment(deployment))
-    const pod = allPods.find(p => p.status?.conditions?.some(c => c.type === 'Ready' && c.status === 'True'))
+    const pod = allPods.find(readyPodPredicate)
     if (!pod) {
       throw new DeploymentNotReadyError(deployment, 'NoReadyPod')
     }
     return pod
   }
 
+  const findReadyPod = async (obj: Pick<k8s.V1Deployment, 'metadata' | 'kind'> | k8s.V1StatefulSet) => {
+    if (obj.kind === 'Deployment') {
+      return await findReadyPodForDeployment(obj as k8s.V1Deployment)
+    }
+    return await findReadyPodForStatefulSet(obj as k8s.V1StatefulSet)
+  }
+
   return {
-    listDeployments,
-    listReplicaSets,
-    listPods,
+    findReadyPodForStatefulSet,
     findReadyPodForDeployment,
-    listServices,
+    findReadyPod,
   }
 }
diff --git a/packages/driver-kube-pod/src/driver/client/metadata.test.ts b/packages/driver-kube-pod/src/driver/client/metadata.test.ts
index 5f68e0e2..969fdf7e 100644
--- a/packages/driver-kube-pod/src/driver/client/metadata.test.ts
+++ b/packages/driver-kube-pod/src/driver/client/metadata.test.ts
@@ -32,7 +32,7 @@ describe('metadata', () => {
       const createName = () => envRandomName({ envId: repeat('a', 100), profileId: '1myprofile' })
       it('should truncate it to the correct length', () => {
         const name = createName()
-        expect(name).toHaveLength(53) // max name length with 10 chars spare
+        expect(name).toHaveLength(48) // max name length with 15 chars spare
         expect(name).toMatch(/^a1myprofile-a+-[a-z0-9]{5}$/)
       })
 
diff --git a/packages/driver-kube-pod/src/driver/client/metadata.ts b/packages/driver-kube-pod/src/driver/client/metadata.ts
index aa019e36..b5ff3069 100644
--- a/packages/driver-kube-pod/src/driver/client/metadata.ts
+++ b/packages/driver-kube-pod/src/driver/client/metadata.ts
@@ -1,7 +1,10 @@
 import * as k8s from '@kubernetes/client-node'
-import { extractDefined, randomString, truncatePrefix } from '@preevy/core'
+import { ensureDefined, extractDefined, randomString, truncatePrefix } from '@preevy/core'
+import { pick } from 'lodash-es'
+import { tryParseJson } from '@preevy/common'
 import { sanitizeLabel, sanitizeLabels } from './labels.js'
 import { HasMetadata, Package } from './common.js'
+import { KubernetesType } from './dynamic/index.js'
 
 export const MAX_LABEL_LENGTH = 63
 
@@ -27,6 +30,7 @@ export const ANNOTATIONS = {
   KUBERNETES_KIND: `${PREEVY_PREFIX}/kubernetes-kind`,
   KUERBETES_API_VERSION: `${PREEVY_PREFIX}/kubernetes-api-version`,
   DEPLOYMENT_REVISION: 'deployment.kubernetes.io/revision',
+  ALL_TYPES: `${PREEVY_PREFIX}/all-types`,
 }
 
 export const markObjectAsDeleted = (
@@ -44,6 +48,27 @@ export const markObjectAsDeleted = (
   },
 })
 
+export type StoredType = Pick<KubernetesType, 'apiVersion' | 'kind'>
+
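+// the ALL_TYPES annotation persists which object types were created for an env, so all of its objects can be found later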
+export const readAllTypesAnnotation = (
+  o: k8s.KubernetesObject,
+) => {
+  const an = o?.metadata?.annotations?.[ANNOTATIONS.ALL_TYPES]
+  return an ? tryParseJson(an) as StoredType[] : undefined
+}
+
+export const addAllTypesAnnotation = (
+  o: k8s.KubernetesObject,
+  types: StoredType[]
+): k8s.KubernetesObject => {
+  o.metadata ??= {}
+  o.metadata.annotations ??= {}
+  o.metadata.annotations[ANNOTATIONS.ALL_TYPES] = JSON.stringify(
+    types.map(t => pick<StoredType>(t, 'apiVersion', 'kind'))
+  )
+  return o
+}
+
 export const addEnvMetadata = (
   { profileId, envId, createdAt, instance, package: { name, version }, templateHash }: {
     profileId: string
@@ -96,6 +121,7 @@ export const extractInstance = (o: HasMetadata) => extractLabel(o, 'INSTANCE')
 export const extractEnvId = (o: HasMetadata) => extractAnnotation(o, 'ENV_ID')
 export const extractName = (o: HasMetadata) => extractDefined(extractDefined(o, 'metadata'), 'name')
 export const extractNamespace = (o: HasMetadata) => extractDefined(extractDefined(o, 'metadata'), 'namespace')
+export const extractNameAndNamespace = (o: HasMetadata) => ensureDefined(extractDefined(o, 'metadata'), 'namespace', 'name')
 export const extractTemplateHash = (o: HasMetadata) => extractAnnotation(o, 'TEMPLATE_HASH')
 export const extractCreatedAt = (o: HasMetadata) => extractAnnotation(o, 'CREATED_AT')
 
@@ -126,7 +152,7 @@ export const profileSelector = ({ profileId }: { profileId: string }) => ({
   labelSelector: eqSelector(LABELS.PROFILE_ID, sanitizeLabel(profileId)),
 })
 
-export const isDockerHostDeployment = (s: k8s.KubernetesObject) => s.kind === 'Deployment'
+export const isDockerHostStatefulSet = (s: k8s.KubernetesObject) => s.kind === 'StatefulSet'
     && s.metadata?.labels?.[LABELS.COMPONENT] === DOCKER_HOST_VALUE
 
 // https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
@@ -136,7 +162,7 @@ const sanitizeName = (s: string) => s
   .replace(/^[^a-z]/, firstChar => `a${firstChar}`) // prefix with alphabetic if first char is not alphabetic
   .replace(/[^a-z0-9]$/, lastChar => `${lastChar}z`) // suffix with alphanumeric if last char is not alphanumeric
 
-const RANDOM_ID_SPARE_LENGTH = 10
+const RANDOM_ID_SPARE_LENGTH = 15 // give room for StatefulSet pod name suffix
 const MAX_NAME_LENGTH = 63
 
 export const envRandomName = (
diff --git a/packages/driver-kube-pod/src/driver/common.ts b/packages/driver-kube-pod/src/driver/common.ts
index df0d378f..131c446c 100644
--- a/packages/driver-kube-pod/src/driver/common.ts
+++ b/packages/driver-kube-pod/src/driver/common.ts
@@ -2,16 +2,21 @@ import * as k8s from '@kubernetes/client-node'
 import { MachineBase, machineResourceType } from '@preevy/core'
 import { extractEnvId, extractInstance, extractName, extractNamespace } from './client/index.js'
 
-export type DeploymentMachine = MachineBase & {
-  deployment: k8s.V1Deployment
+export type KubernetesMachine<T extends k8s.KubernetesObject> = MachineBase & {
+  kubernetesObject: T
 }
 
+export type StatefulSetMachine = KubernetesMachine<k8s.V1StatefulSet>
+export type DeploymentMachine = KubernetesMachine<k8s.V1Deployment>
+
 export type ResourceType = typeof machineResourceType
 
-export const machineFromDeployment = (deployment: k8s.V1Deployment): DeploymentMachine & { envId: string } => ({
-  type: machineResourceType,
-  providerId: extractInstance(deployment),
-  locationDescription: `deployment/${extractName(deployment)} of namespace ${extractNamespace(deployment)}`,
-  envId: extractEnvId(deployment),
-  deployment,
-})
+export const k8sObjectToMachine = <T extends k8s.KubernetesObject>(
+  kubernetesObject: T,
+): KubernetesMachine<T> => ({
+    type: machineResourceType,
+    providerId: extractInstance(kubernetesObject),
+    locationDescription: `${kubernetesObject.kind}/${extractName(kubernetesObject)} of namespace ${extractNamespace(kubernetesObject)}`,
+    envId: extractEnvId(kubernetesObject),
+    kubernetesObject,
+  })
diff --git a/packages/driver-kube-pod/src/driver/creation-driver.ts b/packages/driver-kube-pod/src/driver/creation-driver.ts
index 6f87af4f..b91fb17f 100644
--- a/packages/driver-kube-pod/src/driver/creation-driver.ts
+++ b/packages/driver-kube-pod/src/driver/creation-driver.ts
@@ -1,29 +1,48 @@
+import fs from 'fs'
+import * as k8s from '@kubernetes/client-node'
 import { Flags, Interfaces } from '@oclif/core'
-import { MachineCreationDriver, MachineCreationDriverFactory, telemetryEmitter } from '@preevy/core'
+import { Logger, MachineCreationDriver, MachineCreationDriverFactory, telemetryEmitter } from '@preevy/core'
 import { pick } from 'lodash-es'
-import { DeploymentMachine, machineFromDeployment } from './common.js'
-import { DriverContext, clientFromConfiguration, machineConnection, flags as machineDriverFlags } from './driver.js'
+import { DeploymentMachine, ResourceType, StatefulSetMachine, k8sObjectToMachine } from './common.js'
+import { clientFromConfiguration, listMachines, machineConnection, flags as machineDriverFlags } from './driver.js'
+import { Client, CreationClient, kubeCreationClient, loadKubeConfig } from './client/index.js'
+import { DEFAULT_TEMPLATE, packageJson } from '../static.js'
 
 export const flags = {
   ...machineDriverFlags,
+  template: Flags.string({
+    description: 'Path to custom resources template file (will use default template if not specified)',
+    required: false,
+  }),
   'server-side-apply': Flags.boolean({
     description: 'Use server side apply to create Kubernetes resources',
     default: true,
     required: false,
     allowNo: true,
   }),
+  'storage-class': Flags.string({
+    description: 'Storage class to use for Pod data volume',
+    required: false,
+  }),
+  'storage-size': Flags.custom<number>({
+    description: 'Size of Pod data volume in GiB',
+    required: false,
+    default: 5,
+    parse: async v => Number(v),
+  })(),
 } as const
 
 export type MachineCreationFlagTypes = Omit<Interfaces.InferredFlags<typeof flags>, 'json'>
 
-type MachineCreationDriverContext = DriverContext & {
-  serverSideApply: boolean
-  metadata: MachineCreationFlagTypes
-}
-
 const machineCreationDriver = (
-  { client, serverSideApply, log, metadata }: MachineCreationDriverContext,
-): MachineCreationDriver<DeploymentMachine> => ({
+  { client, creationClient, serverSideApply, log, metadata }: {
+    log: Logger
+    serverSideApply: boolean
+    metadata: MachineCreationFlagTypes
+    client: Client
+    creationClient: CreationClient
+  },
+): MachineCreationDriver<StatefulSetMachine | DeploymentMachine, ResourceType> => ({
   metadata,
   createMachine: async ({ envId }) => {
     const startTime = new Date().getTime()
@@ -33,8 +52,8 @@ const machineCreationDriver = (
       fromSnapshot: true,
       result: (async () => {
         log.debug('create machine', { envId, serverSideApply })
-        const deployment = await client.createEnv(envId, { serverSideApply })
-        const machine = machineFromDeployment(deployment)
+        const statefulSet = await creationClient.createEnv(envId, { serverSideApply })
+        const machine = k8sObjectToMachine(statefulSet)
         telemetryEmitter().capture('kube-pod create machine end', { elapsed_sec: (new Date().getTime() - startTime) / 1000 })
         const connection = await machineConnection(client, machine, log)
         return { machine, connection }
@@ -44,33 +63,66 @@ const machineCreationDriver = (
 
   ensureMachineSnapshot: async () => undefined,
   getMachineAndSpecDiff: async ({ envId }) => {
-    const deployment = await client.findMostRecentDeployment({ envId, deleted: false })
-    if (!deployment) {
+    const obj = await client.findEnvObject({ envId, deleted: false })
+    if (!obj) {
       return undefined
     }
 
-    const deploymentHash = client.extractTemplateHash(deployment)
-    const machine = machineFromDeployment(deployment)
-    const templateHash = await client.calcTemplateHash({ instance: machine.providerId })
+    const deployedHash = creationClient.extractTemplateHash(obj)
+    const machine = k8sObjectToMachine(obj)
+    const templateHash = await creationClient.calcTemplateHash({ instance: machine.providerId })
 
     return {
-      ...machineFromDeployment(deployment),
-      specDiff: deploymentHash !== templateHash
-        ? [{ name: 'template', old: deploymentHash, new: templateHash }]
+      ...machine,
+      specDiff: deployedHash !== templateHash
+        ? [{ name: 'template', old: deployedHash, new: templateHash }]
         : [],
     }
   },
+  listDeletableResources: () => listMachines({ client }),
+
+  deleteResources: async (wait, ...resources) => {
+    await Promise.all(resources.map(({ type, providerId }) => {
+      if (type === 'machine') {
+        return creationClient.deleteEnv(providerId, { wait })
+      }
+      throw new Error(`Unknown resource type: "${type}"`)
+    }))
+  },
+
 })
 
-export const factory: MachineCreationDriverFactory<
-  Interfaces.InferredFlags<typeof flags>,
-  DeploymentMachine
-> = ({ flags: f, profile: { id: profileId }, log, debug }) => machineCreationDriver({
-  metadata: pick(f, Object.keys(machineDriverFlags)) as MachineCreationFlagTypes, // filter out non-driver flags
+type FlagTypes = Omit<Interfaces.InferredFlags<typeof flags>, 'json'>
+
+const creationClientFromConfiguration = ({ flags: f, profileId, log, kc }: {
+  flags: FlagTypes
+  profileId: string
+  log: Logger
+  kc: k8s.KubeConfig
+}) => kubeCreationClient({
   log,
-  debug,
-  client: clientFromConfiguration({ log, flags: f, profileId }),
-  serverSideApply: f['server-side-apply'],
+  namespace: f.namespace,
+  kc,
+  profileId,
+  package: packageJson,
+  template: fs.readFileSync(f.template || DEFAULT_TEMPLATE, 'utf-8'),
+  storageClass: f['storage-class'],
+  storageSize: f['storage-size'],
 })
 
+export const factory: MachineCreationDriverFactory<
+  Interfaces.InferredFlags<typeof flags>,
+  StatefulSetMachine | DeploymentMachine,
+  ResourceType
+> = ({ flags: f, profile: { id: profileId }, log }) => {
+  const kc = loadKubeConfig(f)
+  return machineCreationDriver({
+    metadata: pick(f, Object.keys(machineDriverFlags)) as MachineCreationFlagTypes, // filter out non-driver flags
+    log,
+    client: clientFromConfiguration({ log, flags: f, profileId, kc }),
+    serverSideApply: f['server-side-apply'],
+    creationClient: creationClientFromConfiguration({ log, flags: f, profileId, kc }),
+  })
+}
+
 export default machineCreationDriver
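
The `storage-size` flag above uses oclif's `Flags.custom` to turn the raw string input into a typed number. A hedged sketch of that pattern in isolation; the `Up` command class and its wiring are hypothetical:

```ts
import { Command, Flags } from '@oclif/core'

// Custom numeric flag: oclif hands the raw string to parse, which converts it.
const storageSize = Flags.custom<number>({
  description: 'Size of Pod data volume in GiB',
  default: 5,
  parse: async v => Number(v),
})

export default class Up extends Command {
  static flags = {
    'storage-size': storageSize(),
  }

  async run(): Promise<void> {
    const { flags } = await this.parse(Up)
    // The custom parser plus the default make this a number rather than a string.
    this.log(`provisioning a ${flags['storage-size']}Gi data volume`)
  }
}
```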
diff --git a/packages/driver-kube-pod/src/driver/driver.ts b/packages/driver-kube-pod/src/driver/driver.ts
index bf7af51b..1640b084 100644
--- a/packages/driver-kube-pod/src/driver/driver.ts
+++ b/packages/driver-kube-pod/src/driver/driver.ts
@@ -1,4 +1,4 @@
-import fs from 'fs'
+import * as k8s from '@kubernetes/client-node'
 import { Flags, Interfaces } from '@oclif/core'
 import {
   MachineDriver,
@@ -10,13 +10,12 @@ import {
   Logger,
   MachineConnection,
 } from '@preevy/core'
-import { asyncMap } from 'iter-tools-es'
+import { asyncConcat, asyncMap } from 'iter-tools-es'
 import { AddressInfo } from 'net'
 import { Readable, Writable } from 'stream'
 import { orderedOutput } from '@preevy/common'
-import { DeploymentMachine, ResourceType, machineFromDeployment } from './common.js'
-import createClient, { Client, extractName, loadKubeConfig } from './client/index.js'
-import { DEFAULT_TEMPLATE, packageJson } from '../static.js'
+import { StatefulSetMachine, k8sObjectToMachine, DeploymentMachine } from './common.js'
+import { Client, extractName, loadKubeConfig, kubeClient as createClient } from './client/index.js'
 
 export type DriverContext = {
   log: Logger
@@ -28,12 +27,12 @@ const isTTY = (s: Readable | Writable) => (s as unknown as { isTTY: boolean }).i
 
 export const machineConnection = async (
   client: Client,
-  machine: DeploymentMachine,
+  machine: StatefulSetMachine,
   log: Logger,
 ): Promise<MachineConnection> => {
-  const { deployment } = machine as DeploymentMachine
-  log.debug(`Connecting to deployment "${deployment.metadata?.namespace}/${deployment.metadata?.name}"`)
-  const pod = await client.findReadyPodForDeployment(deployment)
+  const { kubernetesObject: statefulSet } = machine as StatefulSetMachine
+  log.debug(`Connecting to statefulset "${statefulSet.metadata?.namespace}/${statefulSet.metadata?.name}"`)
+  const pod = await client.findReadyPod(statefulSet)
   log.debug(`Found pod "${pod.metadata?.name}"`)
 
   return ({
@@ -59,7 +58,7 @@ export const machineConnection = async (
       const {
         localSocket,
         [Symbol.asyncDispose]: dispose,
-      } = await client.portForward(deployment, 2375, { host, port: 0 })
+      } = await client.portForward(statefulSet, 2375, { host, port: 0 })
 
       return {
         address: { host, port: (localSocket as AddressInfo).port },
@@ -69,79 +68,74 @@ export const machineConnection = async (
   })
 }
 
+export const listMachines = (
+  { client }: { client: Client },
+): AsyncIterableIterator<StatefulSetMachine | DeploymentMachine> => asyncMap(
+  k8sObjectToMachine,
+  asyncConcat<k8s.V1StatefulSet | k8s.V1Deployment>(
+    client.listProfileStatefulSets(),
+    client.listProfileDeployments(),
+  ),
+)
+
 const machineDriver = (
   { client, log }: DriverContext,
-): MachineDriver<DeploymentMachine, ResourceType> => {
-  const listMachines = () => asyncMap(machineFromDeployment, client.listProfileDeployments())
-
-  return ({
-    friendlyName: 'Kubernetes single Pod',
-
-    getMachine: async ({ envId }) => {
-      const deployment = await client.findMostRecentDeployment({ envId, deleted: false })
-      return deployment && machineFromDeployment(deployment)
-    },
-
-    listMachines,
-    listDeletableResources: listMachines,
-
-    deleteResources: async (wait, ...resources) => {
-      await Promise.all(resources.map(({ type, providerId }) => {
-        if (type === 'machine') {
-          return client.deleteEnv(providerId, { wait })
-        }
-        throw new Error(`Unknown resource type: "${type}"`)
-      }))
-    },
-
-    resourcePlurals: {},
-
-    spawnRemoteCommand: async (machine, command, stdio) => {
-      const pod = await client.findReadyPodForDeployment((machine as DeploymentMachine).deployment)
-      const { stdin, stdout, stderr } = expandStdioOptions(stdio)
-      const opts = {
-        pod: extractName(pod),
-        container: pod.spec?.containers[0]?.name as string,
-        command: command.length > 0 ? command : ['sh'],
-        tty: [stdin, stdout, stderr].every(isTTY),
-        stdin,
-        stdout,
-        stderr,
-      }
-      return await client.exec(opts)
-    },
-
-    connect: machine => machineConnection(client, machine as DeploymentMachine, log),
-
-    machineStatusCommand: async machine => {
-      const pod = await client.findReadyPodForDeployment((machine as DeploymentMachine).deployment)
-      const apiServiceAddress = await client.apiServiceClusterAddress()
-      if (!apiServiceAddress) {
-        log.warn('API service not found for cluster')
-        return undefined
-      }
-      const [apiServiceHost, apiServicePort] = apiServiceAddress
-
-      return ({
-        contentType: 'application/vnd.kubectl-top-pod-containers',
-        recipe: {
-          type: 'docker',
-          command: ['top', 'pod', '--containers', '--no-headers', extractName(pod)],
-          image: 'rancher/kubectl:v1.26.7',
-          network: 'host',
-          tty: false,
-          env: {
-            KUBERNETES_SERVICE_HOST: apiServiceHost,
-            KUBERNETES_SERVICE_PORT: apiServicePort.toString(),
-          },
-          bindMounts: [
-            '/var/run/secrets/kubernetes.io/serviceaccount:/var/run/secrets/kubernetes.io/serviceaccount',
-          ],
+): MachineDriver<StatefulSetMachine | DeploymentMachine> => ({
+  friendlyName: 'Kubernetes single Pod',
+
+  getMachine: async ({ envId }) => {
+    const obj = await client.findEnvObject({ envId, deleted: false })
+    return obj && k8sObjectToMachine(obj)
+  },
+
+  listMachines: () => listMachines({ client }),
+  resourcePlurals: {},
+
+  spawnRemoteCommand: async (machine, command, stdio) => {
+    const pod = await client.findReadyPod((machine as StatefulSetMachine | DeploymentMachine).kubernetesObject)
+    const { stdin, stdout, stderr } = expandStdioOptions(stdio)
+    const opts = {
+      pod: extractName(pod),
+      container: pod.spec?.containers[0]?.name as string,
+      command: command.length > 0 ? command : ['sh'],
+      tty: [stdin, stdout, stderr].every(isTTY),
+      stdin,
+      stdout,
+      stderr,
+    }
+    return await client.exec(opts)
+  },
+
+  connect: machine => machineConnection(client, machine as StatefulSetMachine, log),
+
+  machineStatusCommand: async machine => {
+    const pod = await client.findReadyPod((machine as StatefulSetMachine | DeploymentMachine).kubernetesObject)
+    const apiServiceAddress = await client.apiServiceClusterAddress()
+    if (!apiServiceAddress) {
+      log.warn('API service not found for cluster')
+      return undefined
+    }
+    const [apiServiceHost, apiServicePort] = apiServiceAddress
+
+    return ({
+      contentType: 'application/vnd.kubectl-top-pod-containers',
+      recipe: {
+        type: 'docker',
+        command: ['top', 'pod', '--containers', '--no-headers', extractName(pod)],
+        image: 'rancher/kubectl:v1.26.7',
+        network: 'host',
+        tty: false,
+        env: {
+          KUBERNETES_SERVICE_HOST: apiServiceHost,
+          KUBERNETES_SERVICE_PORT: apiServicePort.toString(),
         },
-      })
-    },
-  })
-}
+        bindMounts: [
+          '/var/run/secrets/kubernetes.io/serviceaccount:/var/run/secrets/kubernetes.io/serviceaccount',
+        ],
+      },
+    })
+  },
+})
 
 export const flags = {
   namespace: Flags.string({
@@ -159,34 +153,28 @@ export const flags = {
     required: false,
     env: 'KUBE_CONTEXT',
   }),
-  template: Flags.string({
-    description: 'Path to custom resources template file (will use default template if not specified)',
-    required: false,
-  }),
 } as const
 
 type FlagTypes = Omit<Interfaces.InferredFlags<typeof flags>, 'json'>
 
-export const clientFromConfiguration = ({ flags: f, profileId, log }: {
-  flags: Pick<FlagTypes, 'namespace' | 'kubeconfig' | 'template' | 'context'>
+export const clientFromConfiguration = ({ flags: f, profileId, log, kc }: {
+  flags: Pick<FlagTypes, 'namespace' | 'kubeconfig' | 'context'>
   profileId: string
   log: Logger
+  kc: k8s.KubeConfig
 }) => createClient({
   log,
   namespace: f.namespace,
-  kc: loadKubeConfig(f.kubeconfig, f.context),
+  kc,
   kubeconfig: f.kubeconfig,
   profileId,
-  package: packageJson,
-  template: fs.readFileSync(f.template || DEFAULT_TEMPLATE, 'utf-8'),
 })
 
 export const factory: MachineDriverFactory<
   FlagTypes,
-  DeploymentMachine,
-  ResourceType
+  StatefulSetMachine | DeploymentMachine
 > = ({ flags: f, profile: { id: profileId }, log, debug }) => machineDriver({
   log,
   debug,
-  client: clientFromConfiguration({ log, flags: f, profileId }),
+  client: clientFromConfiguration({ log, flags: f, profileId, kc: loadKubeConfig(f) }),
 })
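
The new `listMachines` merges StatefulSets and Deployments (presumably environments created before this change) lazily with iter-tools-es. A standalone sketch of that composition, with fake async generators standing in for the client calls:

```ts
import { asyncConcat, asyncMap, asyncToArray } from 'iter-tools-es'

// Stand-ins for client.listProfileStatefulSets() / client.listProfileDeployments().
async function* fakeList(kind: string, names: string[]) {
  for (const name of names) {
    yield { kind, metadata: { name } }
  }
}

// Both sources are concatenated and mapped lazily, without buffering either list.
const machines = asyncMap(
  o => `${o.kind}/${o.metadata.name}`,
  asyncConcat(
    fakeList('StatefulSet', ['env-a', 'env-b']),
    fakeList('Deployment', ['legacy-env']),
  ),
)

// Consumers can iterate with `for await ... of`, or collect everything:
void asyncToArray(machines).then(all => console.log(all))
// -> [ 'StatefulSet/env-a', 'StatefulSet/env-b', 'Deployment/legacy-env' ]
```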
diff --git a/packages/driver-kube-pod/src/driver/index.ts b/packages/driver-kube-pod/src/driver/index.ts
index 756f915a..f17cace5 100644
--- a/packages/driver-kube-pod/src/driver/index.ts
+++ b/packages/driver-kube-pod/src/driver/index.ts
@@ -2,7 +2,7 @@ import { flags, factory } from './driver.js'
 import { flags as machineCreationFlags, factory as machineCreationFactory } from './creation-driver.js'
 import { inquireFlags } from './questions.js'
 
-export type { DeploymentMachine, ResourceType } from './common.js'
+export type { StatefulSetMachine, DeploymentMachine, ResourceType } from './common.js'
 export type { MachineCreationFlagTypes } from './creation-driver.js'
 
 export default {
diff --git a/packages/driver-kube-pod/src/driver/questions.ts b/packages/driver-kube-pod/src/driver/questions.ts
index 3743fd0a..7a79892b 100644
--- a/packages/driver-kube-pod/src/driver/questions.ts
+++ b/packages/driver-kube-pod/src/driver/questions.ts
@@ -1,14 +1,38 @@
 import * as inquirer from '@inquirer/prompts'
+import * as k8s from '@kubernetes/client-node'
+import { Logger, withSpinner } from '@preevy/core'
+import { asyncToArray } from 'iter-tools-es'
 import { MachineCreationFlagTypes, flags } from './creation-driver.js'
-import { loadKubeConfig } from './client/index.js'
+import { extractName, loadKubeConfig } from './client/index.js'
+import { storageV1ApiHelpers as createStorageV1ApiHelpers } from './client/k8s-helpers.js'
+import { logError } from './client/log-error.js'
 
-export const inquireFlags = async (): Promise<Partial<MachineCreationFlagTypes>> => {
+export const inquireStorageClass = async (kc: k8s.KubeConfig, { log }: { log: Logger }) => {
+  const wrap = logError(log)
+  const storageHelpers = createStorageV1ApiHelpers(kc.makeApiClient(k8s.StorageV1Api), { wrap })
+  const availableStorageClasses = await withSpinner(
+    () => asyncToArray(storageHelpers.listStorageClasses({ })),
+    { text: 'Loading storage classes...' },
+  )
+
+  if (!availableStorageClasses.length) {
+    return undefined
+  }
+  const storageClassNames = availableStorageClasses.map(extractName)
+  return await inquirer.select({
+    message: flags['storage-class'].description as string,
+    choices: [{ name: '(default)', value: undefined }, new inquirer.Separator(), ...storageClassNames.map(c => ({ value: c }))],
+    default: undefined,
+  })
+}
+
+export const inquireFlags = async ({ log }: { log: Logger }): Promise<Partial<MachineCreationFlagTypes>> => {
   const namespace = await inquirer.input({
     default: flags.namespace.default as string,
     message: flags.namespace.description as string,
   })
 
-  const kc = loadKubeConfig() // will read from KUBECONFIG env var as well
+  const kc = loadKubeConfig({}) // will read from KUBECONFIG env var as well
   const contextChoices = [
     { name: 'Default: use default context at runtime', value: undefined },
     new inquirer.Separator(),
@@ -20,8 +44,18 @@ export const inquireFlags = async (): Promise<Partial<MachineCreationFlagTypes>>
     message: flags.context.description as string,
   })
 
+  const storageClass = await inquireStorageClass(kc, { log })
+
+  const storageSize = Number(await inquirer.input({
+    message: flags['storage-size'].description as string,
+    default: String(flags['storage-size'].default),
+    validate: value => (Number(value) > 0 ? true : 'Please enter a positive number'),
+  }))
+
   return {
     ...(namespace && namespace !== flags.namespace.default) ? { namespace } : undefined,
     ...(context && context !== flags.context.default) ? { context } : undefined,
+    ...(storageClass && storageClass !== flags['storage-class'].default) ? { 'storage-class': storageClass } : undefined,
+    ...(storageSize && storageSize !== flags['storage-size'].default) ? { 'storage-size': storageSize } : undefined,
   }
 }
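
The prompts above combine an `@inquirer/prompts` select whose first choice carries `value: undefined` (meaning "use the cluster default") with a validated numeric input. A runnable sketch of the same shapes, with a hypothetical storage-class list instead of a live cluster query:

```ts
import * as inquirer from '@inquirer/prompts'

const main = async () => {
  // Hypothetical list; the driver builds this from the cluster's StorageClasses.
  const storageClassNames = ['standard', 'gp2-csi']

  const storageClass = await inquirer.select({
    message: 'Storage class to use for Pod data volume',
    choices: [
      { name: '(default)', value: undefined }, // undefined -> omit the flag, use the cluster default
      new inquirer.Separator(),
      ...storageClassNames.map(c => ({ value: c })),
    ],
  })

  const storageSize = Number(await inquirer.input({
    message: 'Size of Pod data volume in GiB',
    default: '5',
    validate: value => (Number(value) > 0 ? true : 'Please enter a positive number'),
  }))

  console.log({ storageClass, storageSize })
}

void main()
```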
diff --git a/packages/driver-kube-pod/static/default-template.yaml.njk b/packages/driver-kube-pod/static/default-template.yaml.njk
index d074e812..361f1b00 100644
--- a/packages/driver-kube-pod/static/default-template.yaml.njk
+++ b/packages/driver-kube-pod/static/default-template.yaml.njk
@@ -14,26 +14,30 @@ metadata:
 data:
   daemon.json: |
     {
-      "tls": false
+      "tls": false,
+      "data-root": "/data/docker"
     }
 ---
 apiVersion: apps/v1
-kind: Deployment
+kind: StatefulSet
 metadata:
   name: {{ id }}
   namespace: {{ namespace }}
   labels:
-    app: preevy-{{ id }}
     app.kubernetes.io/component: docker-host
 spec:
   replicas: 1
   selector:
     matchLabels:
-      app: preevy-{{ id }}
+      {% for k, v in labels %}
+      {{ k }}: {{ v }}
+      {% endfor %}
   template:
     metadata:
       labels:
-        app: preevy-{{ id }}
+        {% for k, v in labels %}
+        {{ k }}: {{ v }}
+        {% endfor %}
     spec:
       serviceAccountName: {{ id }}-sa
       containers:
@@ -43,15 +47,29 @@ spec:
           privileged: true
         command: ["dockerd", "--host=tcp://0.0.0.0:2375", "--host=unix:///var/run/docker.sock"]
         volumeMounts:
-        - mountPath: /etc/docker
-          name: docker-config
-
-
+        - name: docker-config
+          mountPath: /etc/docker
+        - name: data
+          mountPath: /data
       volumes:
       - name: docker-config
         configMap:
           name: {{ id }}-dc
-
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+      labels:
+        {% for k, v in labels %}
+        {{ k }}: {{ v }}
+        {% endfor %}
+    spec:
+      accessModes: ["ReadWriteOnce"]
+{% if storageClass %}
+      storageClassName: "{{ storageClass }}"
+{% endif %}
+      resources:
+        requests:
+          storage: {{ storageSize }}Gi
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
diff --git a/packages/driver-lightsail/src/driver/index.ts b/packages/driver-lightsail/src/driver/index.ts
index c888814e..79277a5c 100644
--- a/packages/driver-lightsail/src/driver/index.ts
+++ b/packages/driver-lightsail/src/driver/index.ts
@@ -42,15 +42,15 @@ type DriverContext = {
   store: Store
 }
 
+const listMachines = ({ client }: { client: Client }) => asyncMap(machineFromInstance, client.listInstances())
+
 const machineDriver = ({
   client,
   region,
   store,
-}: DriverContext): MachineDriver<SshMachine, ResourceType> => {
+}: DriverContext): MachineDriver<SshMachine> => {
   const keyAlias = region
 
-  const listMachines = () => asyncMap(machineFromInstance, client.listInstances())
-
   return {
     friendlyName: 'AWS Lightsail',
     customizationScripts: CUSTOMIZE_BARE_MACHINE,
@@ -60,39 +60,7 @@ const machineDriver = ({
       return instance && machineFromInstance(instance)
     },
 
-    listMachines,
-    listDeletableResources: () => {
-      const machines = listMachines()
-
-      const snapshots = asyncMap(
-        ({ name }) => ({ type: 'snapshot' as ResourceType, providerId: name as string }),
-        client.listInstanceSnapshots(),
-      )
-      const keyPairs = asyncMap(
-        ({ name }) => ({ type: 'keypair' as ResourceType, providerId: name as string }),
-        client.listKeyPairsByAlias(keyAlias),
-      )
-
-      return asyncConcat(machines, snapshots, keyPairs)
-    },
-
-    deleteResources: async (wait, ...resources) => {
-      await Promise.all(resources.map(({ type, providerId }) => {
-        if (type === 'snapshot') {
-          return client.deleteInstanceSnapshot({ instanceSnapshotName: providerId, wait })
-        }
-        if (type === 'keypair') {
-          return Promise.all([
-            client.deleteKeyPair(providerId, wait),
-            sshKeysStore(store).deleteKey(keyAlias),
-          ])
-        }
-        if (type === 'machine') {
-          return client.deleteInstance(providerId, wait)
-        }
-        throw new Error(`Unknown resource type "${type}"`)
-      }))
-    },
+    listMachines: () => listMachines({ client }),
 
     resourcePlurals: {
       snapshot: 'snapshots',
@@ -119,7 +87,7 @@ const contextFromFlags = ({ region }: FlagTypes): { region: string } => ({
   region: region as string,
 })
 
-const inquireFlags = async () => {
+const inquireFlags = async ({ log: _log }: { log: Logger }) => {
   const region = await inquirerAutoComplete<string>({
     default: process.env.AWS_REGION ?? 'us-east-1',
     message: flags.region.description as string,
@@ -130,7 +98,7 @@ const inquireFlags = async () => {
   return { region }
 }
 
-const factory: MachineDriverFactory<FlagTypes, SshMachine, ResourceType> = ({
+const factory: MachineDriverFactory<FlagTypes, SshMachine> = ({
   flags: f,
   profile: { id: profileId },
   store,
@@ -173,7 +141,7 @@ type MachineCreationContext = DriverContext & {
 
 const machineCreationDriver = (
   { region, client, availabilityZone, bundleId: specifiedBundleId, store, log, debug, metadata }: MachineCreationContext
-): MachineCreationDriver<SshMachine> => {
+): MachineCreationDriver<SshMachine, ResourceType> => {
   const bundleId = specifiedBundleId ?? DEFAULT_BUNDLE_ID
   const keyAlias = region
 
@@ -252,6 +220,39 @@ const machineCreationDriver = (
       })
       return undefined
     },
+
+    listDeletableResources: () => {
+      const machines = listMachines({ client })
+
+      const snapshots = asyncMap(
+        ({ name }) => ({ type: 'snapshot' as ResourceType, providerId: name as string }),
+        client.listInstanceSnapshots(),
+      )
+      const keyPairs = asyncMap(
+        ({ name }) => ({ type: 'keypair' as ResourceType, providerId: name as string }),
+        client.listKeyPairsByAlias(keyAlias),
+      )
+
+      return asyncConcat(machines, snapshots, keyPairs)
+    },
+
+    deleteResources: async (wait, ...resources) => {
+      await Promise.all(resources.map(({ type, providerId }) => {
+        if (type === 'snapshot') {
+          return client.deleteInstanceSnapshot({ instanceSnapshotName: providerId, wait })
+        }
+        if (type === 'keypair') {
+          return Promise.all([
+            client.deleteKeyPair(providerId, wait),
+            sshKeysStore(store).deleteKey(keyAlias),
+          ])
+        }
+        if (type === 'machine') {
+          return client.deleteInstance(providerId, wait)
+        }
+        throw new Error(`Unknown resource type "${type}"`)
+      }))
+    },
   })
 }
 
diff --git a/site/docs/deploy-runtimes/kube-pod.md b/site/docs/deploy-runtimes/kube-pod.md
index c4e852de..de484545 100644
--- a/site/docs/deploy-runtimes/kube-pod.md
+++ b/site/docs/deploy-runtimes/kube-pod.md
@@ -30,6 +30,7 @@ Your services are still exposed using the Preevy Tunnel Service - there's no nee
 - When using [RBAC authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/), the default context needs permissions for running [exec](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/) and [port-forward](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) for specific pods in the configured namespace.
 - The [`kubectl`](https://kubernetes.io/docs/tasks/tools/#kubectl) tool needs to be installed and available in the PATH.
 - By default, the driver runs a Pod with [`privileged: true` security context](https://kubernetes.io/docs/concepts/security/pod-security-standards/#privileged). In some cases, this requirement may be lifted by customizing the deployment template, see [below](#configuring-rootless-unprivileged-docker-in-docker).
+- A StorageClass must be defined in the cluster to enable [dynamic volume provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/#enabling-dynamic-provisioning). This is usually pre-defined in your Kubernetes cluster.
 
 ## Supported options
 
@@ -39,6 +40,8 @@ Your services are still exposed using the Preevy Tunnel Service - there's no nee
 |`kubeconfig`|`--kube-pod-kubeconfig`|`$HOME/.kube`| `KUBECONFIG` | path to a [`kubeconfig`](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file|
 |`pod-template`|`--kube-pod-template`|[default template](https://github.com/livecycle/preevy/blob/main/packages/driver-kube-pod/static/default-template.yaml.njk)| |path to a [nunjucks template](https://mozilla.github.io/nunjucks/templating.html) used to provision Kubernetes resources per environment. See [below](#customizing-the-provisioned-kubernetes-resources) for details|
 |`server-side-apply`|`--[no-]kube-pod-server-side-apply`| true | | if true, provision resources using [server-side apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/), else using client-side apply (CREATE/PATCH). Applies to `preevy up` only|
+|`storage-class`|`--kube-pod-storage-class`| (undefined) | | The Kubernetes [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) used in the StatefulSet template to provision the data volume for the Docker server in the Preevy environment Pod |
+|`storage-size`|`--kube-pod-storage-size`| `5` | | Size in GiB of the volume allocated for the Docker server in the Preevy environment Pod. Adjust this according to the storage requirements of your environment. |
 
 ### Overriding options
 
@@ -54,12 +57,13 @@ x-preevy:
   drivers:
     kube-pod:
       namespace: other-namespace
+      storage-size: 12.5
 ```
 
 Options can also be overridden using a CLI flag per command execution:
 
 ```bash
-preevy up --kube-pod-namespace=other-namespace
+preevy up --kube-pod-namespace=other-namespace --kube-pod-storage-size=12.5
 ```
 
 ## Customizing the provisioned Kubernetes resources
@@ -80,11 +84,11 @@ Start by copying the [default template](https://github.com/livecycle/preevy/blob
 
 All resources need to be deployed in a single namespace, specified as a template argument (see below).
 
-While multiple [Kubernetes Deployment](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/deployment-v1/#Deployment) objects may be defined, exactly one Deployment must have the label `app.kubernetes.io/component: docker-host`:
-- The [status](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#deployment-status) of the Deployment is used to determine whether the Preevy environment is ready.
-- The first [container](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#containers) of the Deployment spec is used for copying files, so it [must have](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#copying-files-and-directories-to-and-from-containers) the `tar` and `find` commands available.
+While multiple [Kubernetes StatefulSet](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/stateful-set-v1/) objects may be defined, exactly one StatefulSet must have the label `app.kubernetes.io/component: docker-host`:
+- The [status](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/stateful-set-v1/#StatefulSetStatus) of the StatefulSet is used to determine whether the Preevy environment is ready.
+- The first [container](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#containers) of the StatefulSet spec is used for copying files, so it [must have](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#copying-files-and-directories-to-and-from-containers) the `tar` and `find` commands available.
 
-A Docker server must be listening on port 2375 of the Deployment's Pod. As Preevy uses the [port-forward API](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to connect to the Docker server, it does not need to be exposed as a service. For the same reason, TLS is not supported and needs to be disabled for this port.
+A Docker server must be listening on port 2375 of the StatefulSet's Pod. As Preevy uses the [port-forward API](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to connect to the Docker server, it does not need to be exposed as a service. For the same reason, TLS is not supported and needs to be disabled for this port.
 
 The Docker server must also be listening on the unix socket path `/var/run/docker.sock` - this is used by the Preevy agent service running alongside your services.
 
@@ -96,9 +100,11 @@ The following arguments are specified when rendering the template:
 
 - `namespace`: the Kubernetes namespace saved in the Preevy profile or specified in the `--kube-pod-namespace` flag. All resources must be defined in this namespace.
-- `id`: A generated ID for this environment, 53 characters or less, comprised of the Preevy environment ID and a random suffix. `id` can be used as part of a label value, with up to 10 additional characters so as to not exceed the [63-character limit for labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set)
+- `id`: A generated ID for this environment, 48 characters or less, comprising the Preevy environment ID and a random suffix. `id` can be used as part of a label value, with up to 15 additional characters so as not to exceed the [63-character limit for labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set)
+- `storageClass`: The [Kubernetes StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) used to [dynamically provision](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) a volume for the StatefulSet. Saved in the Preevy profile or specified in the `--kube-pod-storage-class` flag. Leaving this undefined will cause the default storage class to be used.
+- `storageSize`: The size of the allocated volume in GiB. Saved in the Preevy profile or specified in the `--kube-pod-storage-size` flag.
 
 ## Configuring rootless unprivileged Docker-in-Docker
 
-By default, the Kubernetes Docker-in-Docker driver creates a Deployment which runs the [`docker:dind` image](https://hub.docker.com/_/docker). Traditionally, running Docker inside a container requires the [`privileged: true` security context](https://kubernetes.io/docs/concepts/security/pod-security-standards/#privileged), which may be a security concern.
+By default, the Kubernetes Docker-in-Docker driver creates a [StatefulSet](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/stateful-set-v1/) which runs the [`docker:dind` image](https://hub.docker.com/_/docker). Traditionally, running Docker inside a container requires the [`privileged: true` security context](https://kubernetes.io/docs/concepts/security/pod-security-standards/#privileged), which may be a security concern.
 
 [Sysbox](https://github.com/nestybox/sysbox) is an OSS project (acquired by Docker) that allows running unprivileged containers in a Kubernetes cluster. It can be [installed](https://github.com/nestybox/sysbox/blob/master/docs/user-guide/install-k8s.md) on most of the popular Kubernetes distros including managed cloud platforms like Amazon EKS, Google GKE, and Azure AKS. Once installed, a custom template can be used to [provision Pods](https://github.com/nestybox/sysbox/blob/master/docs/user-guide/deploy.md#deploying-pods-with-kubernetes--sysbox) without the `privileged` security context.