Enable customisable file size policies for storage backends

commit 7a3355bff0 (parent afac8b405f)
6 changed files with 32 additions and 6 deletions
cli/flags.go (14 changes)

@@ -15,10 +15,11 @@
 package cli

 var flags = map[string]string{
-	"db": `Database configuration path used for storing data. Available backend stores are memory, file, s3, gcs, rixxdb, or dendrodb. (default "memory").`,
+	"db": `Database configuration path used for storing data. Available backend stores are memory, file, logr, s3, gcs, or dendrodb. (default "memory").`,
 	"key": `Encryption key to use for intra-cluster communications, and on-disk encryption. For AES-128 encryption use a 16 bit key, for AES-192 encryption use a 24 bit key, and for AES-256 encryption use a 32 bit key.`,
-	"sync": `A time duration to use when syncing data to persistent storage. To sync data with every write specify '0', otherwise the data will be persisted asynchronously after the specified duration.`,
-	"shrink": `A time duration to use when shrinking data on persistent storage. To shrink data asynchronously after a repeating period of time, specify a duration.`,
+	"size": `A size in MB which determines the minimum or maximum file size for streaming data file storage. This is used for specifying maximum cached data sizes when using remote streaming storage. (default "5")`,
+	"sync": `A time duration to use when syncing data to persistent storage. To sync data with every write specify '0', otherwise the data will be persisted asynchronously after the specified duration. (default "0s")`,
+	"shrink": `A time duration to use when shrinking data on persistent storage. To shrink data asynchronously after a repeating period of time, specify a duration. Disabled by default. (default "0s")`,
 	"join": `A comma-separated list of addresses to use when a new node is joining an existing cluster. For the first node in a cluster, --join should NOT be specified.`,
 }
@@ -36,11 +37,18 @@ var usage = map[string][]string{
 		"--key 1hg7dbrma8ghe5473kghvie6",
 		"--key 1hg7dbrma8ghe5473kghvie64jgi3ph4",
 	},
+	"size": {
+		"--db-size 30",
+	},
 	"sync": {
 		"--db-sync 0",
 		"--db-sync 5s",
 		"--db-sync 1m",
 	},
+	"shrink": {
+		"--db-shrink 30m",
+		"--db-shrink 24h",
+	},
 	"join": {
 		"--join 10.0.0.1",
 		"--join 10.0.0.1:33693",
@@ -61,6 +61,14 @@ func setup() {
 		}
 	}

+	if opts.DB.Proc.Size == 0 {
+		opts.DB.Proc.Size = 5
+	}
+
+	if opts.DB.Proc.Size < 0 {
+		log.Fatal("Specify a valid data file size policy. Valid sizes are greater than 0 and are specified in MB.")
+	}
+
 	if strings.HasPrefix(opts.DB.Cert.CA, "-----") {
 		var err error
 		var doc *os.File
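As an aside (not part of the commit): the defaulting and validation added to setup() above amount to "zero means use the 5 MB default, negative is rejected". A minimal standalone Go sketch of that rule follows; the helper name resolveSizePolicy is hypothetical.

package main

import (
	"errors"
	"fmt"
)

// resolveSizePolicy mirrors the logic added in setup(): a zero value
// falls back to the 5 MB default, and negative values are rejected.
// The function name is illustrative, not from the codebase.
func resolveSizePolicy(size int) (int, error) {
	if size == 0 {
		return 5, nil
	}
	if size < 0 {
		return 0, errors.New("specify a valid data file size policy greater than 0, in MB")
	}
	return size, nil
}

func main() {
	for _, in := range []int{0, 30, -1} {
		out, err := resolveSizePolicy(in)
		fmt.Println(in, "->", out, err)
	}
}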
@@ -94,6 +94,7 @@ func init() {
 	startCmd.PersistentFlags().StringVar(&opts.DB.Cert.Crt, "db-crt", "", "Path to the certificate file used to connect to the remote database.")
 	startCmd.PersistentFlags().StringVar(&opts.DB.Cert.Key, "db-key", "", "Path to the private key file used to connect to the remote database.")
 	startCmd.PersistentFlags().StringVar(&opts.DB.Path, "db-path", "", flag("db"))
+	startCmd.PersistentFlags().IntVar(&opts.DB.Proc.Size, "db-size", 0, flag("size"))
 	startCmd.PersistentFlags().DurationVar(&opts.DB.Proc.Sync, "db-sync", 0, flag("sync"))
 	startCmd.PersistentFlags().DurationVar(&opts.DB.Proc.Shrink, "db-shrink", 0, flag("shrink"))
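The startCmd.PersistentFlags().IntVar(...) call above looks like the spf13/cobra flag API; below is a minimal, self-contained sketch of how an integer flag such as --db-size is registered and read. The choice of cobra here is an assumption, not taken from the repository.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

var size int

func main() {
	// A throwaway command that registers a --db-size flag the same way
	// the diff does, with a zero default that setup() later replaces
	// with 5 MB.
	cmd := &cobra.Command{
		Use: "start",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("db-size:", size)
		},
	}
	cmd.PersistentFlags().IntVar(&size, "db-size", 0, "Minimum or maximum data file size in MB")

	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}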
@@ -84,6 +84,7 @@ type Options struct {
 		Port string // Surreal port to connect to
 		Base string // Base key to use in KV stores
 		Proc struct {
+			Size   int           // Policy for data file size
 			Sync   time.Duration // Timeframe for syncing data
 			Shrink time.Duration // Timeframe for shrinking data
 		}
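A trimmed-down sketch of the nested Proc configuration added above, showing why an unset --db-size arrives in setup() as zero. Only the fields touched by this diff are included; the rest of the Options type is omitted.

package main

import (
	"fmt"
	"time"
)

// Options is reduced to the Proc block touched by this commit;
// the real struct carries many more fields.
type Options struct {
	DB struct {
		Proc struct {
			Size   int           // Policy for data file size (MB)
			Sync   time.Duration // Timeframe for syncing data
			Shrink time.Duration // Timeframe for shrinking data
		}
	}
}

func main() {
	var opts Options
	// Every field starts at its zero value, which is why setup()
	// treats Size == 0 as "use the 5 MB default".
	fmt.Println(opts.DB.Proc.Size, opts.DB.Proc.Sync, opts.DB.Proc.Shrink)
}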
@@ -30,6 +30,7 @@ func setupDB() {
 	cnf.Settings = &cnf.Options{}
 	cnf.Settings.DB.Path = "memory"
 	cnf.Settings.DB.Base = "*"
+	cnf.Settings.DB.Proc.Size = 5

 	workerCount = 1
@@ -93,6 +94,7 @@ func TestYield(t *testing.T) {

 		res, err := Execute(setupKV(), txt, nil)
 		So(err, ShouldBeNil)
+		So(res, ShouldHaveLength, 7)
 		So(res[1].Result, ShouldHaveLength, 1)
 		So(data.Consume(res[1].Result[0]).Get("test").Data(), ShouldEqual, 1)
 		So(res[2].Result, ShouldHaveLength, 1)
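The So(..., ShouldHaveLength, ...) assertions above follow the GoConvey style; a minimal standalone test in that style is sketched below, assuming github.com/smartystreets/goconvey is the assertion package in use.

package example

import (
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

// TestResultLength shows the assertion pattern used in TestYield:
// check the number of results, then inspect individual entries.
func TestResultLength(t *testing.T) {
	Convey("A query returns the expected number of results", t, func() {
		res := []string{"info", "create", "select"}
		So(res, ShouldHaveLength, 3)
		So(res[1], ShouldEqual, "create")
	})
}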
@@ -31,10 +31,16 @@ func init() {
 		path := strings.TrimPrefix(opts.DB.Path, "rixxdb://")

 		pntr, err = rixxdb.Open(path, &rixxdb.Config{
+			// Set the encryption key
+			EncryptionKey: opts.DB.Key,
+			// Set the file size policy
+			SizePolicy: opts.DB.Proc.Size,
+			// Set the sync offset duration
+			SyncPolicy: opts.DB.Proc.Sync,
+			// Set the shrink offset duration
+			ShrinkPolicy: opts.DB.Proc.Shrink,
+			// Don't wait for syncing if shrinking
 			IgnoreSyncPolicyWhenShrinking: true,
-			SyncPolicy: opts.DB.Proc.Sync,
-			ShrinkPolicy: opts.DB.Proc.Shrink,
-			EncryptionKey: opts.DB.Key,
 		})

 		if err != nil {