diff --git a/Makefile b/Makefile index 8e23a43c7..a5e0c4495 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ SOURCE ?= file go_bindata github github_ee bitbucket aws_s3 google_cloud_storage godoc_vfs gitlab -DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb yugabytedb clickhouse mongodb sqlserver firebird neo4j pgx pgx5 rqlite +DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb yugabytedb clickhouse mongodb sqlserver firebird neo4j pgx pgx5 rqlite opensearch DATABASE_TEST ?= $(DATABASE) sqlite sqlite3 sqlcipher VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-) TEST_FLAGS ?= diff --git a/README.md b/README.md index a79cc7b76..18ff2e0b9 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,7 @@ Database drivers run migrations. [Add a new database?](database/driver.go) * [Firebird](database/firebird) * [MS SQL Server](database/sqlserver) * [rqlite](database/rqlite) +* [OpenSearch](database/opensearch) ### Database URLs diff --git a/database/opensearch/README.md b/database/opensearch/README.md new file mode 100644 index 000000000..6919761af --- /dev/null +++ b/database/opensearch/README.md @@ -0,0 +1,18 @@ +# OpenSearch + +* Driver works with OpenSearch through the [OpenSearch REST API](https://opensearch.org/docs/latest/getting-started/communicate/#opensearch-rest-api) +* Migrations are written in JSON format and support actions such as creating indices, updating mappings, modifying settings, etc. +* [Examples](./examples) + +# Usage + +`opensearch://user:password@host:port/index` + +| URL Query | Default value | Description | +|------------|---------------------|-------------| +| `index` | `.migrations` | Name of the migrations index | +| `timeout` | `60s` | The max time that an operation will wait before failing. | +| `user` | | The user to sign in as. Can be omitted | +| `password` | | The user's password. 
Can be omitted | +| `host` | | The host to connect to | +| `port` | | The port to connect to | \ No newline at end of file diff --git a/database/opensearch/examples/migrations/1_create_index.down.json b/database/opensearch/examples/migrations/1_create_index.down.json new file mode 100644 index 000000000..f915b1d8a --- /dev/null +++ b/database/opensearch/examples/migrations/1_create_index.down.json @@ -0,0 +1,4 @@ +{ + "action": "DELETE /test-index", + "body": {} +} diff --git a/database/opensearch/examples/migrations/1_create_index.up.json b/database/opensearch/examples/migrations/1_create_index.up.json new file mode 100644 index 000000000..0e87b6e1c --- /dev/null +++ b/database/opensearch/examples/migrations/1_create_index.up.json @@ -0,0 +1,16 @@ +{ + "action": "PUT /test-index", + "body": { + "settings": { + "number_of_shards": 1, + "number_of_replicas": 0 + }, + "mappings": { + "properties": { + "title": { + "type": "text" + } + } + } + } +} diff --git a/database/opensearch/opensearch.go b/database/opensearch/opensearch.go new file mode 100644 index 000000000..7d627f3e8 --- /dev/null +++ b/database/opensearch/opensearch.go @@ -0,0 +1,395 @@ +package opensearch + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/golang-migrate/migrate/v4/database" + "github.com/opensearch-project/opensearch-go" +) + +func init() { + database.Register("opensearch", &OpenSearch{}) +} + +const MigrationVersionDocId = "migration_version" // The id of the document that stores the current migration version +const DefaultTimeout = 1 * time.Minute // Default operations timeout +const DefaultIndex = ".migrations" // Default index to handle migrations + +var ( + ErrInvalidConnStr = errors.New("invalid connection string") + ErrMissingConfig = errors.New("missing config") + ErrInvalidTimeout = errors.New("invalid timeout value") + ErrInvalidSchema = errors.New("invalid schema") + ErrInvalidAction = errors.New("invalid action format in 
migration script") +) + +// OpenSearch migration driver. +type OpenSearch struct { + client *opensearch.Client + index string + lockKey string + isLocked bool + lockerMutex sync.Mutex + timeout time.Duration +} + +// OpenSearchCredentials holds credentials for OpenSearch. +type OpenSearchCredentials struct { + Addresses []string + Username string + Password string +} + +// Config holds the configuration for the OpenSearch driver. +type Config struct { + Index string + Timeout time.Duration +} + +type versionDoc struct { + Version int `json:"version"` + Dirty bool `json:"dirty"` +} + +// WithInstance allows the client to provide an instance and migrate using it. +func (d *OpenSearch) WithInstance(instance *opensearch.Client, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrMissingConfig + } + + if config.Index == "" { + config.Index = DefaultIndex + } + + if config.Timeout == 0 { + config.Timeout = DefaultTimeout + } + + driver := &OpenSearch{ + client: instance, + index: config.Index, + lockKey: fmt.Sprintf("%s_lock", config.Index), + timeout: config.Timeout, + } + + return driver, nil +} + +// Open initializes the driver with the provided connection string. +func (d *OpenSearch) Open(urlStr string) (database.Driver, error) { + cfg, creds, err := parseConfig(urlStr) + if err != nil { + return nil, err + } + + osCfg := opensearch.Config{ + Addresses: creds.Addresses, + Username: creds.Username, + Password: creds.Password, + } + + client, err := opensearch.NewClient(osCfg) + if err != nil { + return nil, err + } + + driver := &OpenSearch{ + client: client, + index: cfg.Index, + lockKey: fmt.Sprintf("%s_lock", cfg.Index), + timeout: cfg.Timeout, + } + + return driver, nil +} + +// parseConfig parses the connection string into a Config struct. 
+func parseConfig(connStr string) (*Config, *OpenSearchCredentials, error) { + parsedURL, err := url.Parse(connStr) + if err != nil { + return nil, nil, ErrInvalidConnStr + } + + creds := &OpenSearchCredentials{} + cfg := &Config{Timeout: DefaultTimeout} + + // Scheme validation + if parsedURL.Scheme != "opensearch" { + return nil, nil, ErrInvalidSchema + } + + // User Info + if parsedURL.User != nil { + creds.Username = parsedURL.User.Username() + creds.Password, _ = parsedURL.User.Password() + } + + // Hosts can be multiple, separated by commas + hosts := strings.Split(parsedURL.Host, ",") + for _, host := range hosts { + if !strings.Contains(host, ":") { + // Default port 9200 + host = fmt.Sprintf("%s:9200", host) + } + creds.Addresses = append(creds.Addresses, fmt.Sprintf("http://%s", host)) + } + + // Path is index name + if parsedURL.Path != "" && parsedURL.Path != "/" { + cfg.Index = strings.TrimPrefix(parsedURL.Path, "/") + } else { + cfg.Index = DefaultIndex + } + + // Query Parameters + q := parsedURL.Query() + if timeoutStr := q.Get("timeout"); timeoutStr != "" { + timeout, err := time.ParseDuration(timeoutStr) + if err != nil { + return nil, nil, ErrInvalidTimeout + } + cfg.Timeout = timeout + } + + return cfg, creds, nil +} + +// Close closes any open resources. +func (d *OpenSearch) Close() error { + return nil +} + +// Lock obtains a lock to prevent concurrent migrations. 
+func (d *OpenSearch) Lock() error { + d.lockerMutex.Lock() + defer d.lockerMutex.Unlock() + + if d.isLocked { + return database.ErrLocked + } + + ctx, cancel := context.WithTimeout(context.Background(), d.timeout) + defer cancel() + + // Try to create the lock document with op_type=create + lockDoc := map[string]interface{}{ + "timestamp": time.Now().UTC().Format(time.RFC3339), + } + + lockBody, err := json.Marshal(lockDoc) + if err != nil { + return &database.Error{OrigErr: err, Err: "failed to marshal lock document"} + } + + res, err := d.client.Index( + d.index, + bytes.NewReader(lockBody), + d.client.Index.WithContext(ctx), + d.client.Index.WithDocumentID(d.lockKey), + d.client.Index.WithOpType("create"), // Ensures the document is only created if it doesn't exist + ) + if err != nil { + return &database.Error{OrigErr: err, Err: "failed to acquire lock"} + } + defer func() { + if err := res.Body.Close(); err != nil { + fmt.Printf("failed to close response body: %v\n", err) + } + }() + + if res.StatusCode == http.StatusCreated { + // Lock acquired + d.isLocked = true + return nil + } else if res.StatusCode == http.StatusConflict { + // Lock is held by another process + return database.ErrLocked + } else { + return &database.Error{OrigErr: err, Err: fmt.Sprintf("failed to acquire lock, status: %d", res.StatusCode)} + } +} + +// Unlock releases the migration lock. 
+func (d *OpenSearch) Unlock() error { + d.lockerMutex.Lock() + defer d.lockerMutex.Unlock() + + if !d.isLocked { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), d.timeout) + defer cancel() + + res, err := d.client.Delete( + d.index, + d.lockKey, + d.client.Delete.WithContext(ctx), + ) + if err != nil { + return &database.Error{OrigErr: err, Err: "failed to release lock"} + } + defer func() { + if err := res.Body.Close(); err != nil { + fmt.Printf("failed to close response body: %v\n", err) + } + }() + + if res.StatusCode >= 400 && res.StatusCode != http.StatusNotFound { + return &database.Error{OrigErr: err, Err: fmt.Sprintf("failed to release lock, status: %d", res.StatusCode)} + } + + d.isLocked = false + return nil +} + +// Run executes a migration script. +func (d *OpenSearch) Run(migration io.Reader) error { + var migrationData struct { + Action string `json:"action"` + Body json.RawMessage `json:"body"` + } + + decoder := json.NewDecoder(migration) + if err := decoder.Decode(&migrationData); err != nil { + return &database.Error{OrigErr: err, Err: "failed to parse migration body"} + } + + // Parse the action into method and endpoint + actionParts := strings.Fields(migrationData.Action) + if len(actionParts) != 2 { + return ErrInvalidAction + } + method, endpoint := strings.ToUpper(actionParts[0]), actionParts[1] + + req, err := http.NewRequest(method, endpoint, bytes.NewReader(migrationData.Body)) + if err != nil { + return &database.Error{OrigErr: err, Err: "failed to create request"} + } + + req.Header.Set("Content-Type", "application/json") + + res, err := d.client.Transport.Perform(req) + if err != nil { + return &database.Error{OrigErr: err, Err: "failed to perform migration request"} + } + defer func() { + if err := res.Body.Close(); err != nil { + fmt.Printf("failed to close response body: %v\n", err) + } + }() + + if res.StatusCode >= 400 { + resBody, _ := io.ReadAll(res.Body) + return 
&database.Error{OrigErr: err, Err: fmt.Sprintf("migration failed with status %d: %s", res.StatusCode, resBody)} + } + + return nil +} + +// Version returns the current migration version. +func (d *OpenSearch) Version() (version int, dirty bool, err error) { + ctx, cancel := context.WithTimeout(context.Background(), d.timeout) + defer cancel() + + res, err := d.client.Get( + d.index, + MigrationVersionDocId, + d.client.Get.WithContext(ctx), + ) + if err != nil { + return database.NilVersion, false, &database.Error{OrigErr: err, Err: "failed to get migration version"} + } + defer func() { + if err := res.Body.Close(); err != nil { + fmt.Printf("failed to close response body: %v\n", err) + } + }() + + if res.StatusCode == http.StatusNotFound { + return database.NilVersion, false, nil + } + + if res.StatusCode >= 400 { + return database.NilVersion, false, &database.Error{OrigErr: err, Err: fmt.Sprintf("failed to get migration version, status: %d", res.StatusCode)} + } + + var vdoc struct { + Source *versionDoc `json:"_source"` + } + if err := json.NewDecoder(res.Body).Decode(&vdoc); err != nil { + return database.NilVersion, false, &database.Error{OrigErr: err, Err: "failed to parse migration version"} + } + + if vdoc.Source == nil { + return database.NilVersion, false, nil + } + + return vdoc.Source.Version, vdoc.Source.Dirty, nil +} + +// SetVersion sets the current migration version. 
+func (d *OpenSearch) SetVersion(version int, dirty bool) error { + ctx, cancel := context.WithTimeout(context.Background(), d.timeout) + defer cancel() + + vdoc := &versionDoc{ + Version: version, + Dirty: dirty, + } + + versionBody, err := json.Marshal(vdoc) + if err != nil { + return &database.Error{OrigErr: err, Err: "failed to marshal version document"} + } + + res, err := d.client.Index( + d.index, + bytes.NewReader(versionBody), + d.client.Index.WithDocumentID(MigrationVersionDocId), + d.client.Index.WithContext(ctx), + ) + if err != nil { + return &database.Error{OrigErr: err, Err: "failed to set migration version"} + } + defer func() { + if err := res.Body.Close(); err != nil { + fmt.Printf("failed to close response body: %v\n", err) + } + }() + + if res.StatusCode >= 400 { + return &database.Error{OrigErr: err, Err: fmt.Sprintf("failed to set migration version, status: %d", res.StatusCode)} + } + + return nil +} + +// Drop deletes the index related to migrations. +func (d *OpenSearch) Drop() error { + ctx, cancel := context.WithTimeout(context.Background(), d.timeout) + defer cancel() + + res, err := d.client.Indices.Delete([]string{d.index}, d.client.Indices.Delete.WithContext(ctx)) + if err != nil { + return &database.Error{OrigErr: err, Err: "failed to drop index"} + } + defer func() { + if err := res.Body.Close(); err != nil { + fmt.Printf("failed to close response body: %v\n", err) + } + }() + + if res.StatusCode >= 400 { + resBody, _ := io.ReadAll(res.Body) + return &database.Error{OrigErr: err, Err: fmt.Sprintf("failed to drop index, status %d: %s", res.StatusCode, resBody)} + } + + return nil +} diff --git a/database/opensearch/opensearch_test.go b/database/opensearch/opensearch_test.go new file mode 100644 index 000000000..52a41ec85 --- /dev/null +++ b/database/opensearch/opensearch_test.go @@ -0,0 +1,196 @@ +package opensearch + +import ( + "bytes" + "context" + "fmt" + "testing" + "time" + + "github.com/dhui/dktest" + 
"github.com/golang-migrate/migrate/v4" + dt "github.com/golang-migrate/migrate/v4/database/testing" + "github.com/golang-migrate/migrate/v4/dktesting" + _ "github.com/golang-migrate/migrate/v4/source/file" + "github.com/opensearch-project/opensearch-go" +) + +const DefaultPort = 9200 + +var ( + opts = dktest.Options{ + Env: map[string]string{ + "discovery.type": "single-node", + "OPENSEARCH_SECURITY_DISABLED": "true", + "DISABLE_INSTALL_DEMO_CONFIG": "true", + "plugins.security.disabled": "true", + }, + PortRequired: true, + ReadyFunc: isReady, + } + specs = []dktesting.ContainerSpec{ + {ImageName: "opensearchproject/opensearch:1.3.19", Options: opts}, + {ImageName: "opensearchproject/opensearch:2.17.0", Options: opts}, + } +) + +// getOpenSearchClient returns a new OpenSearch client. +func getOpenSearchClient(ip, port string) (*opensearch.Client, error) { + cfg := opensearch.Config{ + Addresses: []string{fmt.Sprintf("http://%s:%s", ip, port)}, + } + + client, err := opensearch.NewClient(cfg) + if err != nil { + return nil, err + } + + return client, nil +} + +// isReady checks if the OpenSearch container is ready. 
+func isReady(ctx context.Context, c dktest.ContainerInfo) bool { + ip, port, err := c.Port(DefaultPort) + if err != nil { + return false + } + + client, err := getOpenSearchClient(ip, port) + if err != nil { + return false + } + + res, err := client.Cluster.Health( + client.Cluster.Health.WithWaitForStatus("yellow"), + client.Cluster.Health.WithTimeout(1*time.Second), + ) + if err == nil && res.StatusCode == 200 { + defer func() { + if err := res.Body.Close(); err != nil { + fmt.Printf("failed to close response body: %v\n", err) + } + }() + return true + } + + return false +} + +func TestOpenSearch(t *testing.T) { + t.Run("testRun", testRun) + t.Run("testMigrate", testMigrate) + t.Run("testWithInstance", testWithInstance) + + t.Cleanup(func() { + for _, spec := range specs { + t.Log("cleaning up ", spec.ImageName) + if err := spec.Cleanup(); err != nil { + t.Error("error removing ", spec.ImageName, "error:", err) + } + } + }) +} + +func testRun(t *testing.T) { + dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) { + ip, port, err := c.Port(DefaultPort) + if err != nil { + t.Fatal("unable to get mapped port:", err) + } + + addr := fmt.Sprintf("opensearch://%s:%s/migrations", ip, port) + p := &OpenSearch{} + + d, err := p.Open(addr) + if err != nil { + t.Fatal("failed to open driver:", err) + } + defer func() { + if err := d.Close(); err != nil { + t.Error("failed to close driver:", err) + } + }() + + migrationScript := []byte(`{ + "action": "PUT /test", + "body": { + "settings": { + "number_of_shards": 1, + "number_of_replicas": 0 + } + } + }`) + if err := d.Run(bytes.NewReader(migrationScript)); err != nil { + t.Fatal("failed to run migration:", err) + } + + client, err := getOpenSearchClient(ip, port) + if err != nil { + t.Fatal("failed to create OpenSearch client:", err) + } + + res, err := client.Indices.Exists([]string{"test"}) + if err != nil { + t.Fatal("failed to check if index exists:", err) + } + defer func() { + if err := 
res.Body.Close(); err != nil { + fmt.Printf("failed to close response body: %v\n", err) + } + }() + + if res.StatusCode != 200 { + t.Fatalf("expected index to exists, but got status code: %d", res.StatusCode) + } + }) +} + +func testMigrate(t *testing.T) { + dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) { + ip, port, err := c.Port(9200) + if err != nil { + t.Fatal("unable to get mapped port:", err) + } + + addr := fmt.Sprintf("opensearch://%s:%s/migrations", ip, port) + + m, err := migrate.New("file://./examples/migrations", addr) + if err != nil { + t.Fatal(err) + } + + dt.TestMigrate(t, m) + }) +} + +func testWithInstance(t *testing.T) { + dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) { + ip, port, err := c.Port(9200) + if err != nil { + t.Fatal("unable to get mapped port:", err) + } + + client, err := getOpenSearchClient(ip, port) + if err != nil { + t.Fatal("failed to create OpenSearch client:", err) + } + + p := &OpenSearch{} + cfg := &Config{ + Index: "migrations", + Timeout: 1 * time.Minute, + } + + d, err := p.WithInstance(client, cfg) + if err != nil { + t.Fatal("failed to create driver with instance") + } + + m, err := migrate.NewWithDatabaseInstance("file://./examples/migrations", "migrations", d) + if err != nil { + t.Fatal(err) + } + + dt.TestMigrate(t, m) + }) +} diff --git a/go.mod b/go.mod index 851054ffd..65ee11de9 100644 --- a/go.mod +++ b/go.mod @@ -56,6 +56,7 @@ require ( github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/opensearch-project/opensearch-go v1.1.0 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/stretchr/objx v0.5.2 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect diff --git a/go.sum b/go.sum index 846e435b8..d15b061c9 100644 --- a/go.sum +++ 
b/go.sum @@ -75,6 +75,7 @@ github.com/apache/arrow/go/v10 v10.0.1 h1:n9dERvixoC/1JjDmBcs9FPaEryoANa2sCgVFo6 github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/aws/aws-sdk-go v1.42.27/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc= github.com/aws/aws-sdk-go v1.49.6 h1:yNldzF5kzLBRvKlKz1S0bkvc2+04R1kt13KfBWQBfFA= github.com/aws/aws-sdk-go v1.49.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk= @@ -513,6 +514,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opensearch-project/opensearch-go v1.1.0 h1:eG5sh3843bbU1itPRjA9QXbxcg8LaZ+DjEzQH9aLN3M= +github.com/opensearch-project/opensearch-go v1.1.0/go.mod h1:+6/XHCuTH+fwsMJikZEWsucZ4eZMma3zNSeLrTtVGbo= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.16 h1:kQPfno+wyx6C5572ABwV+Uo3pDFzQ7yhyGchSyRda0c= @@ -705,6 +708,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= diff --git a/internal/cli/build_opensearch.go b/internal/cli/build_opensearch.go new file mode 100644 index 000000000..ee72e2710 --- /dev/null +++ b/internal/cli/build_opensearch.go @@ -0,0 +1,8 @@ +//go:build opensearch +// +build opensearch + +package cli + +import ( + _ "github.com/golang-migrate/migrate/v4/database/opensearch" +)