diff --git a/internal/collector/glance/glance.go b/internal/collector/glance/glance.go new file mode 100644 index 0000000..f2a09bb --- /dev/null +++ b/internal/collector/glance/glance.go @@ -0,0 +1,5 @@ +package glance + +const ( + Subsystem = "glance" +) diff --git a/internal/collector/glance/images.go b/internal/collector/glance/images.go new file mode 100644 index 0000000..ceee35f --- /dev/null +++ b/internal/collector/glance/images.go @@ -0,0 +1,99 @@ +package glance + +import ( + "context" + "database/sql" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/vexxhost/openstack_database_exporter/internal/collector" + glancedb "github.com/vexxhost/openstack_database_exporter/internal/db/glance" +) + +var ( + imagesUpDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "up"), + "up", + nil, + nil, + ) + + imagesBytesDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "image_bytes"), + "image_bytes", + []string{ + "id", + "name", + "tenant_id", + }, + nil, + ) + + imagesDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "images"), + "images", + nil, + nil, + ) +) + +type ImagesCollector struct { + db *sql.DB + queries *glancedb.Queries + logger *slog.Logger +} + +func NewImagesCollector(db *sql.DB, logger *slog.Logger) *ImagesCollector { + return &ImagesCollector{ + db: db, + queries: glancedb.New(db), + logger: logger.With( + "namespace", collector.Namespace, + "subsystem", Subsystem, + "collector", "images", + ), + } +} + +func (c *ImagesCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- imagesUpDesc + ch <- imagesBytesDesc + ch <- imagesDesc +} + +func (c *ImagesCollector) Collect(ch chan<- prometheus.Metric) { + ctx := context.Background() + + images, err := c.queries.GetAllImages(ctx) + if err != nil { + ch <- prometheus.MustNewConstMetric(imagesUpDesc, prometheus.GaugeValue, 0) + 
+ c.logger.Error("failed to query", "error", err) + return + } + + for _, image := range images { + // Convert size from nullable int64 to float64, defaulting to 0 if null + sizeBytes := float64(0) + if image.Size.Valid { + sizeBytes = float64(image.Size.Int64) + } + + ch <- prometheus.MustNewConstMetric( + imagesBytesDesc, + prometheus.GaugeValue, + sizeBytes, + image.ID, + image.Name.String, + image.Owner.String, + ) + } + + ch <- prometheus.MustNewConstMetric( + imagesDesc, + prometheus.GaugeValue, + float64(len(images)), + ) + + ch <- prometheus.MustNewConstMetric(imagesUpDesc, prometheus.GaugeValue, 1) +} diff --git a/internal/collector/glance/images_test.go b/internal/collector/glance/images_test.go new file mode 100644 index 0000000..c9017a1 --- /dev/null +++ b/internal/collector/glance/images_test.go @@ -0,0 +1,113 @@ +package glance + +import ( + "database/sql" + "regexp" + "testing" + "time" + + "github.com/DATA-DOG/go-sqlmock" + glancedb "github.com/vexxhost/openstack_database_exporter/internal/db/glance" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func TestImagesCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection with images", + SetupMock: func(mock sqlmock.Sqlmock) { + createdAt := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + updatedAt := time.Date(2023, 1, 2, 0, 0, 0, 0, time.UTC) + + rows := sqlmock.NewRows([]string{ + "id", "name", "size", "status", "owner", "visibility", + "disk_format", "container_format", "checksum", "created_at", + "updated_at", "min_disk", "min_ram", "protected", "virtual_size", + "os_hidden", "os_hash_algo", "os_hash_value", + }).AddRow( + "781b3762-9469-4cec-b58d-3349e5de4e9c", "F17-x86_64-cfntools", 476704768, "active", "5ef70662f8b34079a6eddb8da9d75fe8", "public", + "qcow2", "bare", "1234567890abcdef", createdAt, updatedAt, 1, 512, false, nil, + false, nil, nil, + ).AddRow( + 
"1bea47ed-f6a9-463b-b423-14b9cca9ad27", "cirros-0.3.2-x86_64-disk", 13167616, "active", "5ef70662f8b34079a6eddb8da9d75fe8", "public", + "qcow2", "bare", "abcdef1234567890", createdAt, updatedAt, 0, 64, false, nil, + false, nil, nil, + ) + + mock.ExpectQuery(regexp.QuoteMeta(glancedb.GetAllImages)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_glance_image_bytes image_bytes +# TYPE openstack_glance_image_bytes gauge +openstack_glance_image_bytes{id="1bea47ed-f6a9-463b-b423-14b9cca9ad27",name="cirros-0.3.2-x86_64-disk",tenant_id="5ef70662f8b34079a6eddb8da9d75fe8"} 1.3167616e+07 +openstack_glance_image_bytes{id="781b3762-9469-4cec-b58d-3349e5de4e9c",name="F17-x86_64-cfntools",tenant_id="5ef70662f8b34079a6eddb8da9d75fe8"} 4.76704768e+08 +# HELP openstack_glance_images images +# TYPE openstack_glance_images gauge +openstack_glance_images 2 +# HELP openstack_glance_up up +# TYPE openstack_glance_up gauge +openstack_glance_up 1 +`, + }, + { + Name: "successful collection with no images", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", "name", "size", "status", "owner", "visibility", + "disk_format", "container_format", "checksum", "created_at", + "updated_at", "min_disk", "min_ram", "protected", "virtual_size", + "os_hidden", "os_hash_algo", "os_hash_value", + }) + + mock.ExpectQuery(regexp.QuoteMeta(glancedb.GetAllImages)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_glance_images images +# TYPE openstack_glance_images gauge +openstack_glance_images 0 +# HELP openstack_glance_up up +# TYPE openstack_glance_up gauge +openstack_glance_up 1 +`, + }, + { + Name: "handles null values gracefully", + SetupMock: func(mock sqlmock.Sqlmock) { + createdAt := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + + rows := sqlmock.NewRows([]string{ + "id", "name", "size", "status", "owner", "visibility", + "disk_format", "container_format", "checksum", "created_at", + "updated_at", "min_disk", "min_ram", "protected", 
"virtual_size", + "os_hidden", "os_hash_algo", "os_hash_value", + }).AddRow( + "image-with-nulls", nil, nil, "active", nil, "private", + nil, nil, nil, createdAt, nil, 0, 0, false, nil, + false, nil, nil, + ) + + mock.ExpectQuery(regexp.QuoteMeta(glancedb.GetAllImages)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_glance_image_bytes image_bytes +# TYPE openstack_glance_image_bytes gauge +openstack_glance_image_bytes{id="image-with-nulls",name="",tenant_id=""} 0 +# HELP openstack_glance_images images +# TYPE openstack_glance_images gauge +openstack_glance_images 1 +# HELP openstack_glance_up up +# TYPE openstack_glance_up gauge +openstack_glance_up 1 +`, + }, + { + Name: "query error", + SetupMock: func(mock sqlmock.Sqlmock) { + mock.ExpectQuery(regexp.QuoteMeta(glancedb.GetAllImages)).WillReturnError(sql.ErrConnDone) + }, + ExpectedMetrics: `# HELP openstack_glance_up up +# TYPE openstack_glance_up gauge +openstack_glance_up 0 +`, + }, + } + + testutil.RunCollectorTests(t, tests, NewImagesCollector) +} diff --git a/internal/collector/keystone/identity.go b/internal/collector/keystone/identity.go new file mode 100644 index 0000000..2fe3064 --- /dev/null +++ b/internal/collector/keystone/identity.go @@ -0,0 +1,262 @@ +package keystone + +import ( + "context" + "database/sql" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/vexxhost/openstack_database_exporter/internal/collector" + keystonedb "github.com/vexxhost/openstack_database_exporter/internal/db/keystone" +) + +const Subsystem = "identity" + +var ( + keystoneUpDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "up"), + "up", + nil, + nil, + ) + + domainsDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "domains"), + "domains", + nil, + nil, + ) + + domainInfoDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, 
"domain_info"), + "domain_info", + []string{ + "description", + "enabled", + "id", + "name", + }, + nil, + ) + + groupsDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "groups"), + "groups", + nil, + nil, + ) + + projectInfoDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "project_info"), + "project_info", + []string{ + "description", + "domain_id", + "enabled", + "id", + "is_domain", + "name", + "parent_id", + "tags", + }, + nil, + ) + + projectsDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "projects"), + "projects", + nil, + nil, + ) + + regionsDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "regions"), + "regions", + nil, + nil, + ) + + usersDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "users"), + "users", + nil, + nil, + ) +) + +type IdentityCollector struct { + db *sql.DB + logger *slog.Logger +} + +func NewIdentityCollector(db *sql.DB, logger *slog.Logger) *IdentityCollector { + return &IdentityCollector{ + db: db, + logger: logger, + } +} + +func (c *IdentityCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- keystoneUpDesc + ch <- domainsDesc + ch <- domainInfoDesc + ch <- groupsDesc + ch <- projectInfoDesc + ch <- projectsDesc + ch <- regionsDesc + ch <- usersDesc +} + +func (c *IdentityCollector) Collect(ch chan<- prometheus.Metric) { + ctx := context.Background() + queries := keystonedb.New(c.db) + + // Set up metric = 1 (service is up) + ch <- prometheus.MustNewConstMetric( + keystoneUpDesc, + prometheus.GaugeValue, + 1, + ) + + // Collect domain metrics + c.collectDomainMetrics(ctx, queries, ch) + + // Collect project metrics + c.collectProjectMetrics(ctx, queries, ch) + + // Collect group metrics + c.collectGroupMetrics(ctx, queries, ch) + + // Collect region metrics + c.collectRegionMetrics(ctx, queries, ch) + + // Collect user metrics + c.collectUserMetrics(ctx, 
queries, ch) +} + +func (c *IdentityCollector) collectDomainMetrics(ctx context.Context, queries *keystonedb.Queries, ch chan<- prometheus.Metric) { + domains, err := queries.GetDomainMetrics(ctx) + if err != nil { + c.logger.Error("Failed to get domain metrics", "error", err) + return + } + + // domains count + ch <- prometheus.MustNewConstMetric( + domainsDesc, + prometheus.GaugeValue, + float64(len(domains)), + ) + + // domain_info metrics + for _, domain := range domains { + enabled := "false" + if domain.Enabled.Valid && domain.Enabled.Bool { + enabled = "true" + } + + ch <- prometheus.MustNewConstMetric( + domainInfoDesc, + prometheus.GaugeValue, + 1, + domain.Description, + enabled, + domain.ID, + domain.Name, + ) + } +} + +func (c *IdentityCollector) collectProjectMetrics(ctx context.Context, queries *keystonedb.Queries, ch chan<- prometheus.Metric) { + projects, err := queries.GetProjectMetrics(ctx) + if err != nil { + c.logger.Error("Failed to get project metrics", "error", err) + return + } + + // projects count + ch <- prometheus.MustNewConstMetric( + projectsDesc, + prometheus.GaugeValue, + float64(len(projects)), + ) + + // project_info metrics + for _, project := range projects { + enabled := "false" + if project.Enabled.Valid && project.Enabled.Bool { + enabled = "true" + } + + isDomain := "false" + if project.IsDomain { + isDomain = "true" + } + + tags := "" + if project.Tags != nil { + if tagsStr, ok := project.Tags.(string); ok { + tags = tagsStr + } + } + + ch <- prometheus.MustNewConstMetric( + projectInfoDesc, + prometheus.GaugeValue, + 1, + project.Description, + project.DomainID, + enabled, + project.ID, + isDomain, + project.Name, + project.ParentID, + tags, + ) + } +} + +func (c *IdentityCollector) collectGroupMetrics(ctx context.Context, queries *keystonedb.Queries, ch chan<- prometheus.Metric) { + groups, err := queries.GetGroupMetrics(ctx) + if err != nil { + c.logger.Error("Failed to get group metrics", "error", err) + return + } + + 
// groups count + ch <- prometheus.MustNewConstMetric( + groupsDesc, + prometheus.GaugeValue, + float64(len(groups)), + ) +} + +func (c *IdentityCollector) collectRegionMetrics(ctx context.Context, queries *keystonedb.Queries, ch chan<- prometheus.Metric) { + regions, err := queries.GetRegionMetrics(ctx) + if err != nil { + c.logger.Error("Failed to get region metrics", "error", err) + return + } + + // regions count + ch <- prometheus.MustNewConstMetric( + regionsDesc, + prometheus.GaugeValue, + float64(len(regions)), + ) +} + +func (c *IdentityCollector) collectUserMetrics(ctx context.Context, queries *keystonedb.Queries, ch chan<- prometheus.Metric) { + users, err := queries.GetUserMetrics(ctx) + if err != nil { + c.logger.Error("Failed to get user metrics", "error", err) + return + } + + // users count + ch <- prometheus.MustNewConstMetric( + usersDesc, + prometheus.GaugeValue, + float64(len(users)), + ) +} diff --git a/internal/collector/keystone/identity_test.go b/internal/collector/keystone/identity_test.go new file mode 100644 index 0000000..8757132 --- /dev/null +++ b/internal/collector/keystone/identity_test.go @@ -0,0 +1,289 @@ +package keystone + +import ( + "database/sql" + "log/slog" + "regexp" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + keystonedb "github.com/vexxhost/openstack_database_exporter/internal/db/keystone" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func TestIdentityCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection with keystone data", + SetupMock: func(mock sqlmock.Sqlmock) { + // Setup domain metrics query + domainRows := sqlmock.NewRows([]string{ + "id", "name", "description", "enabled", + }).AddRow( + "default", "Default", "Owns users and tenants (i.e. 
projects) available on Identity API v2.", 1, + ) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetDomainMetrics)).WillReturnRows(domainRows) + + // Setup project metrics query + projectRows := sqlmock.NewRows([]string{ + "id", "name", "description", "enabled", "domain_id", "parent_id", "is_domain", "tags", + }).AddRow( + "4b1eb781a47440acb8af9850103e537f", "swifttenanttest4", "", 1, "1bc2169ca88e4cdaaba46d4c15390b65", "", 0, "", + ).AddRow( + "0c4e939acacf4376bdcd1129f1a054ad", "admin", "", 1, "default", "", 0, "", + ).AddRow( + "0cbd49cbf76d405d9c86562e1d579bd3", "demo", "Demo Project", 1, "default", "", 0, "", + ) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetProjectMetrics)).WillReturnRows(projectRows) + + // Setup group metrics query + groupRows := sqlmock.NewRows([]string{ + "id", "domain_id", "name", "description", + }).AddRow( + "group-1", "default", "test-group-1", "Test group 1", + ).AddRow( + "group-2", "default", "test-group-2", "Test group 2", + ) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetGroupMetrics)).WillReturnRows(groupRows) + + // Setup region metrics query + regionRows := sqlmock.NewRows([]string{ + "id", "description", "parent_region_id", + }).AddRow( + "RegionOne", "", "", + ) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetRegionMetrics)).WillReturnRows(regionRows) + + // Setup user metrics query + userRows := sqlmock.NewRows([]string{ + "id", "enabled", "domain_id", "default_project_id", "created_at", "last_active_at", + }).AddRow( + "user-1", 1, "default", "0c4e939acacf4376bdcd1129f1a054ad", nil, nil, + ).AddRow( + "user-2", 1, "default", "", nil, nil, + ) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetUserMetrics)).WillReturnRows(userRows) + }, + ExpectedMetrics: `# HELP openstack_identity_domain_info domain_info +# TYPE openstack_identity_domain_info gauge +openstack_identity_domain_info{description="Owns users and tenants (i.e. 
projects) available on Identity API v2.",enabled="true",id="default",name="Default"} 1 +# HELP openstack_identity_domains domains +# TYPE openstack_identity_domains gauge +openstack_identity_domains 1 +# HELP openstack_identity_groups groups +# TYPE openstack_identity_groups gauge +openstack_identity_groups 2 +# HELP openstack_identity_project_info project_info +# TYPE openstack_identity_project_info gauge +openstack_identity_project_info{description="",domain_id="1bc2169ca88e4cdaaba46d4c15390b65",enabled="true",id="4b1eb781a47440acb8af9850103e537f",is_domain="false",name="swifttenanttest4",parent_id="",tags=""} 1 +openstack_identity_project_info{description="",domain_id="default",enabled="true",id="0c4e939acacf4376bdcd1129f1a054ad",is_domain="false",name="admin",parent_id="",tags=""} 1 +openstack_identity_project_info{description="Demo Project",domain_id="default",enabled="true",id="0cbd49cbf76d405d9c86562e1d579bd3",is_domain="false",name="demo",parent_id="",tags=""} 1 +# HELP openstack_identity_projects projects +# TYPE openstack_identity_projects gauge +openstack_identity_projects 3 +# HELP openstack_identity_regions regions +# TYPE openstack_identity_regions gauge +openstack_identity_regions 1 +# HELP openstack_identity_up up +# TYPE openstack_identity_up gauge +openstack_identity_up 1 +# HELP openstack_identity_users users +# TYPE openstack_identity_users gauge +openstack_identity_users 2 +`, + }, + { + Name: "empty results", + SetupMock: func(mock sqlmock.Sqlmock) { + // Setup empty domain metrics query + domainRows := sqlmock.NewRows([]string{ + "id", "name", "description", "enabled", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetDomainMetrics)).WillReturnRows(domainRows) + + // Setup empty project metrics query + projectRows := sqlmock.NewRows([]string{ + "id", "name", "description", "enabled", "domain_id", "parent_id", "is_domain", "tags", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetProjectMetrics)).WillReturnRows(projectRows) + + // 
Setup empty group metrics query + groupRows := sqlmock.NewRows([]string{ + "id", "domain_id", "name", "description", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetGroupMetrics)).WillReturnRows(groupRows) + + // Setup empty region metrics query + regionRows := sqlmock.NewRows([]string{ + "id", "description", "parent_region_id", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetRegionMetrics)).WillReturnRows(regionRows) + + // Setup empty user metrics query + userRows := sqlmock.NewRows([]string{ + "id", "enabled", "domain_id", "default_project_id", "created_at", "last_active_at", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetUserMetrics)).WillReturnRows(userRows) + }, + ExpectedMetrics: `# HELP openstack_identity_domains domains +# TYPE openstack_identity_domains gauge +openstack_identity_domains 0 +# HELP openstack_identity_groups groups +# TYPE openstack_identity_groups gauge +openstack_identity_groups 0 +# HELP openstack_identity_projects projects +# TYPE openstack_identity_projects gauge +openstack_identity_projects 0 +# HELP openstack_identity_regions regions +# TYPE openstack_identity_regions gauge +openstack_identity_regions 0 +# HELP openstack_identity_up up +# TYPE openstack_identity_up gauge +openstack_identity_up 1 +# HELP openstack_identity_users users +# TYPE openstack_identity_users gauge +openstack_identity_users 0 +`, + }, + { + Name: "disabled domain and project", + SetupMock: func(mock sqlmock.Sqlmock) { + // Setup domain metrics query with disabled domain + domainRows := sqlmock.NewRows([]string{ + "id", "name", "description", "enabled", + }).AddRow( + "disabled-domain", "Disabled Domain", "A disabled domain", 0, + ) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetDomainMetrics)).WillReturnRows(domainRows) + + // Setup project metrics query with disabled project and is_domain=true + projectRows := sqlmock.NewRows([]string{ + "id", "name", "description", "enabled", "domain_id", "parent_id", "is_domain", "tags", + }).AddRow( 
+ "project-1", "project-1", "Disabled project", 0, "default", "parent-1", 1, "tag1,tag2", + ) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetProjectMetrics)).WillReturnRows(projectRows) + + // Setup empty group metrics query + groupRows := sqlmock.NewRows([]string{ + "id", "domain_id", "name", "description", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetGroupMetrics)).WillReturnRows(groupRows) + + // Setup empty region metrics query + regionRows := sqlmock.NewRows([]string{ + "id", "description", "parent_region_id", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetRegionMetrics)).WillReturnRows(regionRows) + + // Setup empty user metrics query + userRows := sqlmock.NewRows([]string{ + "id", "enabled", "domain_id", "default_project_id", "created_at", "last_active_at", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetUserMetrics)).WillReturnRows(userRows) + }, + ExpectedMetrics: `# HELP openstack_identity_domain_info domain_info +# TYPE openstack_identity_domain_info gauge +openstack_identity_domain_info{description="A disabled domain",enabled="false",id="disabled-domain",name="Disabled Domain"} 1 +# HELP openstack_identity_domains domains +# TYPE openstack_identity_domains gauge +openstack_identity_domains 1 +# HELP openstack_identity_groups groups +# TYPE openstack_identity_groups gauge +openstack_identity_groups 0 +# HELP openstack_identity_project_info project_info +# TYPE openstack_identity_project_info gauge +openstack_identity_project_info{description="Disabled project",domain_id="default",enabled="false",id="project-1",is_domain="true",name="project-1",parent_id="parent-1",tags="tag1,tag2"} 1 +# HELP openstack_identity_projects projects +# TYPE openstack_identity_projects gauge +openstack_identity_projects 1 +# HELP openstack_identity_regions regions +# TYPE openstack_identity_regions gauge +openstack_identity_regions 0 +# HELP openstack_identity_up up +# TYPE openstack_identity_up gauge +openstack_identity_up 1 +# HELP 
openstack_identity_users users +# TYPE openstack_identity_users gauge +openstack_identity_users 0 +`, + }, + { + Name: "null values handling", + SetupMock: func(mock sqlmock.Sqlmock) { + // Setup domain metrics query with null enabled + domainRows := sqlmock.NewRows([]string{ + "id", "name", "description", "enabled", + }).AddRow( + "domain-1", "Domain 1", "Domain description", nil, + ) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetDomainMetrics)).WillReturnRows(domainRows) + + // Setup project metrics query with null enabled and null tags + projectRows := sqlmock.NewRows([]string{ + "id", "name", "description", "enabled", "domain_id", "parent_id", "is_domain", "tags", + }).AddRow( + "project-1", "project-1", "Project description", nil, "default", "", 0, nil, + ) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetProjectMetrics)).WillReturnRows(projectRows) + + // Setup empty group metrics query + groupRows := sqlmock.NewRows([]string{ + "id", "domain_id", "name", "description", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetGroupMetrics)).WillReturnRows(groupRows) + + // Setup empty region metrics query + regionRows := sqlmock.NewRows([]string{ + "id", "description", "parent_region_id", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetRegionMetrics)).WillReturnRows(regionRows) + + // Setup empty user metrics query + userRows := sqlmock.NewRows([]string{ + "id", "enabled", "domain_id", "default_project_id", "created_at", "last_active_at", + }) + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetUserMetrics)).WillReturnRows(userRows) + }, + ExpectedMetrics: `# HELP openstack_identity_domain_info domain_info +# TYPE openstack_identity_domain_info gauge +openstack_identity_domain_info{description="Domain description",enabled="false",id="domain-1",name="Domain 1"} 1 +# HELP openstack_identity_domains domains +# TYPE openstack_identity_domains gauge +openstack_identity_domains 1 +# HELP openstack_identity_groups groups +# TYPE openstack_identity_groups 
gauge +openstack_identity_groups 0 +# HELP openstack_identity_project_info project_info +# TYPE openstack_identity_project_info gauge +openstack_identity_project_info{description="Project description",domain_id="default",enabled="false",id="project-1",is_domain="false",name="project-1",parent_id="",tags=""} 1 +# HELP openstack_identity_projects projects +# TYPE openstack_identity_projects gauge +openstack_identity_projects 1 +# HELP openstack_identity_regions regions +# TYPE openstack_identity_regions gauge +openstack_identity_regions 0 +# HELP openstack_identity_up up +# TYPE openstack_identity_up gauge +openstack_identity_up 1 +# HELP openstack_identity_users users +# TYPE openstack_identity_users gauge +openstack_identity_users 0 +`, + }, + { + Name: "database error on domain query", + SetupMock: func(mock sqlmock.Sqlmock) { + mock.ExpectQuery(regexp.QuoteMeta(keystonedb.GetDomainMetrics)).WillReturnError(sql.ErrConnDone) + }, + ExpectedMetrics: `# HELP openstack_identity_up up +# TYPE openstack_identity_up gauge +openstack_identity_up 1 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) *IdentityCollector { + return NewIdentityCollector(db, logger) + }) +} diff --git a/internal/collector/magnum/containerinfra.go b/internal/collector/magnum/containerinfra.go new file mode 100644 index 0000000..b72b9db --- /dev/null +++ b/internal/collector/magnum/containerinfra.go @@ -0,0 +1,231 @@ +package magnum + +import ( + "context" + "database/sql" + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/vexxhost/openstack_database_exporter/internal/collector" + magnumdb "github.com/vexxhost/openstack_database_exporter/internal/db/magnum" +) + +const Subsystem = "container_infra" + +var ( + // Known cluster statuses from the original openstack-exporter + clusterStatuses = []string{ + "CREATE_COMPLETE", + "CREATE_FAILED", + "CREATE_IN_PROGRESS", + 
"UPDATE_IN_PROGRESS", + "UPDATE_FAILED", + "UPDATE_COMPLETE", + "DELETE_IN_PROGRESS", + "DELETE_FAILED", + "DELETE_COMPLETE", + "RESUME_COMPLETE", + "RESUME_FAILED", + "RESTORE_COMPLETE", + "ROLLBACK_IN_PROGRESS", + "ROLLBACK_FAILED", + "ROLLBACK_COMPLETE", + "SNAPSHOT_COMPLETE", + "CHECK_COMPLETE", + "ADOPT_COMPLETE", + } + + magnumUpDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "up"), + "up", + nil, + nil, + ) + + clusterMastersDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "cluster_masters"), + "cluster_masters", + []string{ + "uuid", + "name", + "stack_id", + "status", + "node_count", + "project_id", + }, + nil, + ) + + clusterNodesDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "cluster_nodes"), + "cluster_nodes", + []string{ + "uuid", + "name", + "stack_id", + "status", + "master_count", + "project_id", + }, + nil, + ) + + clusterStatusDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "cluster_status"), + "cluster_status", + []string{ + "uuid", + "name", + "stack_id", + "status", + "node_count", + "master_count", + "project_id", + }, + nil, + ) + + totalClustersDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "total_clusters"), + "total_clusters", + nil, + nil, + ) +) + +type ContainerInfraCollector struct { + db *sql.DB + logger *slog.Logger +} + +func NewContainerInfraCollector(db *sql.DB, logger *slog.Logger) *ContainerInfraCollector { + return &ContainerInfraCollector{ + db: db, + logger: logger, + } +} + +func (c *ContainerInfraCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- magnumUpDesc + ch <- clusterMastersDesc + ch <- clusterNodesDesc + ch <- clusterStatusDesc + ch <- totalClustersDesc +} + +func (c *ContainerInfraCollector) Collect(ch chan<- prometheus.Metric) { + ctx := context.Background() + queries := magnumdb.New(c.db) + + // Set up metric = 1 (service is up) 
+ ch <- prometheus.MustNewConstMetric( + magnumUpDesc, + prometheus.GaugeValue, + 1, + ) + + // Collect cluster metrics + c.collectClusterMetrics(ctx, queries, ch) +} + +func (c *ContainerInfraCollector) collectClusterMetrics(ctx context.Context, queries *magnumdb.Queries, ch chan<- prometheus.Metric) { + clusters, err := queries.GetClusterMetrics(ctx) + if err != nil { + c.logger.Error("Failed to get cluster metrics", "error", err) + return + } + + // total_clusters count + ch <- prometheus.MustNewConstMetric( + totalClustersDesc, + prometheus.GaugeValue, + float64(len(clusters)), + ) + + // Individual cluster metrics + for _, cluster := range clusters { + uuid := "" + if cluster.Uuid.Valid { + uuid = cluster.Uuid.String + } + + name := "" + if cluster.Name.Valid { + name = cluster.Name.String + } + + projectID := "" + if cluster.ProjectID.Valid { + projectID = cluster.ProjectID.String + } + + // Convert interface{} to int for counts + masterCount := 0 + if cluster.MasterCount != nil { + if mc, ok := cluster.MasterCount.(int64); ok { + masterCount = int(mc) + } + } + + nodeCount := 0 + if cluster.NodeCount != nil { + if nc, ok := cluster.NodeCount.(int64); ok { + nodeCount = int(nc) + } + } + + masterCountStr := fmt.Sprintf("%d", masterCount) + nodeCountStr := fmt.Sprintf("%d", nodeCount) + + // cluster_masters metric + ch <- prometheus.MustNewConstMetric( + clusterMastersDesc, + prometheus.GaugeValue, + float64(masterCount), + uuid, + name, + cluster.StackID, + cluster.Status, + nodeCountStr, + projectID, + ) + + // cluster_nodes metric + ch <- prometheus.MustNewConstMetric( + clusterNodesDesc, + prometheus.GaugeValue, + float64(nodeCount), + uuid, + name, + cluster.StackID, + cluster.Status, + masterCountStr, + projectID, + ) + + // cluster_status metric + statusValue := mapClusterStatus(cluster.Status) + ch <- prometheus.MustNewConstMetric( + clusterStatusDesc, + prometheus.GaugeValue, + float64(statusValue), + uuid, + name, + cluster.StackID, + cluster.Status, 
+ nodeCountStr, + masterCountStr, + projectID, + ) + } +} + +func mapClusterStatus(status string) int { + for idx, s := range clusterStatuses { + if status == s { + return idx + } + } + return -1 +} diff --git a/internal/collector/magnum/containerinfra_test.go b/internal/collector/magnum/containerinfra_test.go new file mode 100644 index 0000000..3813c4e --- /dev/null +++ b/internal/collector/magnum/containerinfra_test.go @@ -0,0 +1,142 @@ +package magnum + +import ( + "database/sql" + "log/slog" + "regexp" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + magnumdb "github.com/vexxhost/openstack_database_exporter/internal/db/magnum" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func TestContainerInfraCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection with magnum cluster", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "uuid", "name", "stack_id", "status", "project_id", "master_count", "node_count", + }).AddRow( + "273c39d5-fa17-4372-b6b1-93a572de2cef", "k8s", "31c1ee6c-081e-4f39-9f0f-f1d87a7defa1", "CREATE_FAILED", "0cbd49cbf76d405d9c86562e1d579bd3", int64(1), int64(1), + ) + + mock.ExpectQuery(regexp.QuoteMeta(magnumdb.GetClusterMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_container_infra_cluster_masters cluster_masters +# TYPE openstack_container_infra_cluster_masters gauge +openstack_container_infra_cluster_masters{name="k8s",node_count="1",project_id="0cbd49cbf76d405d9c86562e1d579bd3",stack_id="31c1ee6c-081e-4f39-9f0f-f1d87a7defa1",status="CREATE_FAILED",uuid="273c39d5-fa17-4372-b6b1-93a572de2cef"} 1 +# HELP openstack_container_infra_cluster_nodes cluster_nodes +# TYPE openstack_container_infra_cluster_nodes gauge 
+openstack_container_infra_cluster_nodes{master_count="1",name="k8s",project_id="0cbd49cbf76d405d9c86562e1d579bd3",stack_id="31c1ee6c-081e-4f39-9f0f-f1d87a7defa1",status="CREATE_FAILED",uuid="273c39d5-fa17-4372-b6b1-93a572de2cef"} 1 +# HELP openstack_container_infra_cluster_status cluster_status +# TYPE openstack_container_infra_cluster_status gauge +openstack_container_infra_cluster_status{master_count="1",name="k8s",node_count="1",project_id="0cbd49cbf76d405d9c86562e1d579bd3",stack_id="31c1ee6c-081e-4f39-9f0f-f1d87a7defa1",status="CREATE_FAILED",uuid="273c39d5-fa17-4372-b6b1-93a572de2cef"} 1 +# HELP openstack_container_infra_total_clusters total_clusters +# TYPE openstack_container_infra_total_clusters gauge +openstack_container_infra_total_clusters 1 +# HELP openstack_container_infra_up up +# TYPE openstack_container_infra_up gauge +openstack_container_infra_up 1 +`, + }, + { + Name: "successful collection with multiple clusters", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "uuid", "name", "stack_id", "status", "project_id", "master_count", "node_count", + }).AddRow( + "cluster-1", "test-cluster-1", "stack-1", "CREATE_COMPLETE", "project-1", int64(3), int64(5), + ).AddRow( + "cluster-2", "test-cluster-2", "stack-2", "UPDATE_IN_PROGRESS", "project-2", int64(1), int64(2), + ).AddRow( + "cluster-3", "test-cluster-3", "stack-3", "DELETE_FAILED", "project-1", int64(2), int64(3), + ) + + mock.ExpectQuery(regexp.QuoteMeta(magnumdb.GetClusterMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_container_infra_cluster_masters cluster_masters +# TYPE openstack_container_infra_cluster_masters gauge +openstack_container_infra_cluster_masters{name="test-cluster-1",node_count="5",project_id="project-1",stack_id="stack-1",status="CREATE_COMPLETE",uuid="cluster-1"} 3 
+openstack_container_infra_cluster_masters{name="test-cluster-2",node_count="2",project_id="project-2",stack_id="stack-2",status="UPDATE_IN_PROGRESS",uuid="cluster-2"} 1 +openstack_container_infra_cluster_masters{name="test-cluster-3",node_count="3",project_id="project-1",stack_id="stack-3",status="DELETE_FAILED",uuid="cluster-3"} 2 +# HELP openstack_container_infra_cluster_nodes cluster_nodes +# TYPE openstack_container_infra_cluster_nodes gauge +openstack_container_infra_cluster_nodes{master_count="3",name="test-cluster-1",project_id="project-1",stack_id="stack-1",status="CREATE_COMPLETE",uuid="cluster-1"} 5 +openstack_container_infra_cluster_nodes{master_count="1",name="test-cluster-2",project_id="project-2",stack_id="stack-2",status="UPDATE_IN_PROGRESS",uuid="cluster-2"} 2 +openstack_container_infra_cluster_nodes{master_count="2",name="test-cluster-3",project_id="project-1",stack_id="stack-3",status="DELETE_FAILED",uuid="cluster-3"} 3 +# HELP openstack_container_infra_cluster_status cluster_status +# TYPE openstack_container_infra_cluster_status gauge +openstack_container_infra_cluster_status{master_count="3",name="test-cluster-1",node_count="5",project_id="project-1",stack_id="stack-1",status="CREATE_COMPLETE",uuid="cluster-1"} 0 +openstack_container_infra_cluster_status{master_count="1",name="test-cluster-2",node_count="2",project_id="project-2",stack_id="stack-2",status="UPDATE_IN_PROGRESS",uuid="cluster-2"} 3 +openstack_container_infra_cluster_status{master_count="2",name="test-cluster-3",node_count="3",project_id="project-1",stack_id="stack-3",status="DELETE_FAILED",uuid="cluster-3"} 7 +# HELP openstack_container_infra_total_clusters total_clusters +# TYPE openstack_container_infra_total_clusters gauge +openstack_container_infra_total_clusters 3 +# HELP openstack_container_infra_up up +# TYPE openstack_container_infra_up gauge +openstack_container_infra_up 1 +`, + }, + { + Name: "empty results", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := 
sqlmock.NewRows([]string{ + "uuid", "name", "stack_id", "status", "project_id", "master_count", "node_count", + }) + + mock.ExpectQuery(regexp.QuoteMeta(magnumdb.GetClusterMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_container_infra_total_clusters total_clusters +# TYPE openstack_container_infra_total_clusters gauge +openstack_container_infra_total_clusters 0 +# HELP openstack_container_infra_up up +# TYPE openstack_container_infra_up gauge +openstack_container_infra_up 1 +`, + }, + { + Name: "null values handling", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "uuid", "name", "stack_id", "status", "project_id", "master_count", "node_count", + }).AddRow( + nil, nil, "", "UNKNOWN_STATUS", nil, nil, nil, + ) + + mock.ExpectQuery(regexp.QuoteMeta(magnumdb.GetClusterMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_container_infra_cluster_masters cluster_masters +# TYPE openstack_container_infra_cluster_masters gauge +openstack_container_infra_cluster_masters{name="",node_count="0",project_id="",stack_id="",status="UNKNOWN_STATUS",uuid=""} 0 +# HELP openstack_container_infra_cluster_nodes cluster_nodes +# TYPE openstack_container_infra_cluster_nodes gauge +openstack_container_infra_cluster_nodes{master_count="0",name="",project_id="",stack_id="",status="UNKNOWN_STATUS",uuid=""} 0 +# HELP openstack_container_infra_cluster_status cluster_status +# TYPE openstack_container_infra_cluster_status gauge +openstack_container_infra_cluster_status{master_count="0",name="",node_count="0",project_id="",stack_id="",status="UNKNOWN_STATUS",uuid=""} -1 +# HELP openstack_container_infra_total_clusters total_clusters +# TYPE openstack_container_infra_total_clusters gauge +openstack_container_infra_total_clusters 1 +# HELP openstack_container_infra_up up +# TYPE openstack_container_infra_up gauge +openstack_container_infra_up 1 +`, + }, + { + Name: "database query error", + SetupMock: func(mock 
sqlmock.Sqlmock) { + mock.ExpectQuery(regexp.QuoteMeta(magnumdb.GetClusterMetrics)).WillReturnError(sql.ErrConnDone) + }, + ExpectedMetrics: `# HELP openstack_container_infra_up up +# TYPE openstack_container_infra_up gauge +openstack_container_infra_up 1 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) *ContainerInfraCollector { + return NewContainerInfraCollector(db, logger) + }) +} diff --git a/internal/collector/manila/shares.go b/internal/collector/manila/shares.go new file mode 100644 index 0000000..7756dc5 --- /dev/null +++ b/internal/collector/manila/shares.go @@ -0,0 +1,219 @@ +package manila + +import ( + "context" + "database/sql" + "fmt" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/vexxhost/openstack_database_exporter/internal/collector" + maniladb "github.com/vexxhost/openstack_database_exporter/internal/db/manila" +) + +const Subsystem = "sharev2" + +var ( + // Known share statuses from the original openstack-exporter + shareStatuses = []string{ + "available", "creating", "deleting", "error", "error_deleting", + "extending", "inactive", "managing", "migrating", "migration_error", + "restoring", "reverting", "reverting_error", "reverting_to_snapshot", + "shrinking", "shrinking_error", "soft_deleting", "unmanaging", "updating", + } + + manilaUpDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "up"), + "up", + nil, + nil, + ) + + shareGbDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "share_gb"), + "share_gb", + []string{ + "availability_zone", + "id", + "name", + "project_id", + "share_proto", + "share_type", + "share_type_name", + "status", + }, + nil, + ) + + shareStatusDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "share_status"), + "share_status", + []string{ + "id", + "name", + "project_id", + "share_proto", + 
"share_type", + "share_type_name", + "size", + "status", + }, + nil, + ) + + shareStatusCounterDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "share_status_counter"), + "share_status_counter", + []string{ + "status", + }, + nil, + ) + + sharesCounterDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "shares_counter"), + "shares_counter", + nil, + nil, + ) +) + +type SharesCollector struct { + db *sql.DB + queries *maniladb.Queries + logger *slog.Logger +} + +func NewSharesCollector(db *sql.DB, logger *slog.Logger) *SharesCollector { + return &SharesCollector{ + db: db, + queries: maniladb.New(db), + logger: logger.With( + "component", "manila_shares_collector", + ), + } +} + +func (c *SharesCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- manilaUpDesc + ch <- shareGbDesc + ch <- shareStatusDesc + ch <- shareStatusCounterDesc + ch <- sharesCounterDesc +} + +func (c *SharesCollector) Collect(ch chan<- prometheus.Metric) { + ctx := context.Background() + + shares, err := c.queries.GetShareMetrics(ctx) + if err != nil { + c.logger.Error("Failed to collect manila shares", "error", err) + ch <- prometheus.MustNewConstMetric(manilaUpDesc, prometheus.GaugeValue, 0) + return + } + + ch <- prometheus.MustNewConstMetric(manilaUpDesc, prometheus.GaugeValue, 1) + + // Count shares by status for status counter metrics + statusCounts := make(map[string]int) + for _, status := range shareStatuses { + statusCounts[status] = 0 + } + + totalShares := 0 + + for _, share := range shares { + // Extract values with proper null handling + shareID := share.ID + name := "" + if share.Name.Valid { + name = share.Name.String + } + projectID := "" + if share.ProjectID.Valid { + projectID = share.ProjectID.String + } + size := int32(0) + if share.Size.Valid { + size = share.Size.Int32 + } + shareProto := "" + if share.ShareProto.Valid { + shareProto = share.ShareProto.String + } + status := "" + if share.Status.Valid 
{ + status = share.Status.String + } + shareTypeName := share.ShareTypeName + availabilityZone := share.AvailabilityZone + + // For share_type label, use availability_zone if available, otherwise empty + shareType := availabilityZone + + totalShares++ + + // Count status for counter metrics + if status != "" { + statusCounts[status]++ + } + + // share_gb metric - size in GB per share + ch <- prometheus.MustNewConstMetric( + shareGbDesc, + prometheus.GaugeValue, + float64(size), + availabilityZone, + shareID, + name, + projectID, + shareProto, + shareType, + shareTypeName, + status, + ) + + // share_status metric - status indicator per share + statusValue := 0.0 + if status != "" { + statusValue = 1.0 + } + + // Convert size to string properly + sizeStr := "0" + if share.Size.Valid { + sizeStr = fmt.Sprintf("%d", share.Size.Int32) + } + + ch <- prometheus.MustNewConstMetric( + shareStatusDesc, + prometheus.GaugeValue, + statusValue, + shareID, + name, + projectID, + shareProto, + shareType, + shareTypeName, + sizeStr, + status, + ) + } + + // Emit status counter metrics for all known statuses + for _, status := range shareStatuses { + ch <- prometheus.MustNewConstMetric( + shareStatusCounterDesc, + prometheus.GaugeValue, + float64(statusCounts[status]), + status, + ) + } + + // Total shares counter + ch <- prometheus.MustNewConstMetric( + sharesCounterDesc, + prometheus.GaugeValue, + float64(totalShares), + ) +} diff --git a/internal/collector/manila/shares_test.go b/internal/collector/manila/shares_test.go new file mode 100644 index 0000000..429c7ba --- /dev/null +++ b/internal/collector/manila/shares_test.go @@ -0,0 +1,215 @@ +package manila + +import ( + "database/sql" + "log/slog" + "regexp" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + maniladb "github.com/vexxhost/openstack_database_exporter/internal/db/manila" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func 
TestSharesCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection with manila shares", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", "name", "project_id", "size", "share_proto", "status", "share_type_name", "availability_zone", + }).AddRow( + "4be93e2e-ffff-ffff-ffff-603e3ec2a5d6", "share-test", "ffff8fa0ca1a468db8ad00970c1effff", 1, "NFS", "available", "", "az1", + ) + + mock.ExpectQuery(regexp.QuoteMeta(maniladb.GetShareMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_sharev2_share_gb share_gb +# TYPE openstack_sharev2_share_gb gauge +openstack_sharev2_share_gb{availability_zone="az1",id="4be93e2e-ffff-ffff-ffff-603e3ec2a5d6",name="share-test",project_id="ffff8fa0ca1a468db8ad00970c1effff",share_proto="NFS",share_type="az1",share_type_name="",status="available"} 1 +# HELP openstack_sharev2_share_status share_status +# TYPE openstack_sharev2_share_status gauge +openstack_sharev2_share_status{id="4be93e2e-ffff-ffff-ffff-603e3ec2a5d6",name="share-test",project_id="ffff8fa0ca1a468db8ad00970c1effff",share_proto="NFS",share_type="az1",share_type_name="",size="1",status="available"} 1 +# HELP openstack_sharev2_share_status_counter share_status_counter +# TYPE openstack_sharev2_share_status_counter gauge +openstack_sharev2_share_status_counter{status="available"} 1 +openstack_sharev2_share_status_counter{status="creating"} 0 +openstack_sharev2_share_status_counter{status="deleting"} 0 +openstack_sharev2_share_status_counter{status="error"} 0 +openstack_sharev2_share_status_counter{status="error_deleting"} 0 +openstack_sharev2_share_status_counter{status="extending"} 0 +openstack_sharev2_share_status_counter{status="inactive"} 0 +openstack_sharev2_share_status_counter{status="managing"} 0 +openstack_sharev2_share_status_counter{status="migrating"} 0 +openstack_sharev2_share_status_counter{status="migration_error"} 0 
+openstack_sharev2_share_status_counter{status="restoring"} 0 +openstack_sharev2_share_status_counter{status="reverting"} 0 +openstack_sharev2_share_status_counter{status="reverting_error"} 0 +openstack_sharev2_share_status_counter{status="reverting_to_snapshot"} 0 +openstack_sharev2_share_status_counter{status="shrinking"} 0 +openstack_sharev2_share_status_counter{status="shrinking_error"} 0 +openstack_sharev2_share_status_counter{status="soft_deleting"} 0 +openstack_sharev2_share_status_counter{status="unmanaging"} 0 +openstack_sharev2_share_status_counter{status="updating"} 0 +# HELP openstack_sharev2_shares_counter shares_counter +# TYPE openstack_sharev2_shares_counter gauge +openstack_sharev2_shares_counter 1 +# HELP openstack_sharev2_up up +# TYPE openstack_sharev2_up gauge +openstack_sharev2_up 1 +`, + }, + { + Name: "successful collection with multiple shares", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", "name", "project_id", "size", "share_proto", "status", "share_type_name", "availability_zone", + }).AddRow( + "share-1", "test-share-1", "project-1", 10, "NFS", "available", "default", "nova", + ).AddRow( + "share-2", "test-share-2", "project-2", 20, "CIFS", "creating", "ssd", "nova", + ).AddRow( + "share-3", "test-share-3", "project-1", 5, "NFS", "error", "default", "nova", + ) + + mock.ExpectQuery(regexp.QuoteMeta(maniladb.GetShareMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_sharev2_share_gb share_gb +# TYPE openstack_sharev2_share_gb gauge +openstack_sharev2_share_gb{availability_zone="nova",id="share-1",name="test-share-1",project_id="project-1",share_proto="NFS",share_type="nova",share_type_name="default",status="available"} 10 +openstack_sharev2_share_gb{availability_zone="nova",id="share-2",name="test-share-2",project_id="project-2",share_proto="CIFS",share_type="nova",share_type_name="ssd",status="creating"} 20 
+openstack_sharev2_share_gb{availability_zone="nova",id="share-3",name="test-share-3",project_id="project-1",share_proto="NFS",share_type="nova",share_type_name="default",status="error"} 5 +# HELP openstack_sharev2_share_status share_status +# TYPE openstack_sharev2_share_status gauge +openstack_sharev2_share_status{id="share-1",name="test-share-1",project_id="project-1",share_proto="NFS",share_type="nova",share_type_name="default",size="10",status="available"} 1 +openstack_sharev2_share_status{id="share-2",name="test-share-2",project_id="project-2",share_proto="CIFS",share_type="nova",share_type_name="ssd",size="20",status="creating"} 1 +openstack_sharev2_share_status{id="share-3",name="test-share-3",project_id="project-1",share_proto="NFS",share_type="nova",share_type_name="default",size="5",status="error"} 1 +# HELP openstack_sharev2_share_status_counter share_status_counter +# TYPE openstack_sharev2_share_status_counter gauge +openstack_sharev2_share_status_counter{status="available"} 1 +openstack_sharev2_share_status_counter{status="creating"} 1 +openstack_sharev2_share_status_counter{status="deleting"} 0 +openstack_sharev2_share_status_counter{status="error"} 1 +openstack_sharev2_share_status_counter{status="error_deleting"} 0 +openstack_sharev2_share_status_counter{status="extending"} 0 +openstack_sharev2_share_status_counter{status="inactive"} 0 +openstack_sharev2_share_status_counter{status="managing"} 0 +openstack_sharev2_share_status_counter{status="migrating"} 0 +openstack_sharev2_share_status_counter{status="migration_error"} 0 +openstack_sharev2_share_status_counter{status="restoring"} 0 +openstack_sharev2_share_status_counter{status="reverting"} 0 +openstack_sharev2_share_status_counter{status="reverting_error"} 0 +openstack_sharev2_share_status_counter{status="reverting_to_snapshot"} 0 +openstack_sharev2_share_status_counter{status="shrinking"} 0 +openstack_sharev2_share_status_counter{status="shrinking_error"} 0 
+openstack_sharev2_share_status_counter{status="soft_deleting"} 0 +openstack_sharev2_share_status_counter{status="unmanaging"} 0 +openstack_sharev2_share_status_counter{status="updating"} 0 +# HELP openstack_sharev2_shares_counter shares_counter +# TYPE openstack_sharev2_shares_counter gauge +openstack_sharev2_shares_counter 3 +# HELP openstack_sharev2_up up +# TYPE openstack_sharev2_up gauge +openstack_sharev2_up 1 +`, + }, + { + Name: "successful collection with no shares", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", "name", "project_id", "size", "share_proto", "status", "share_type_name", "availability_zone", + }) + + mock.ExpectQuery(regexp.QuoteMeta(maniladb.GetShareMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_sharev2_share_status_counter share_status_counter +# TYPE openstack_sharev2_share_status_counter gauge +openstack_sharev2_share_status_counter{status="available"} 0 +openstack_sharev2_share_status_counter{status="creating"} 0 +openstack_sharev2_share_status_counter{status="deleting"} 0 +openstack_sharev2_share_status_counter{status="error"} 0 +openstack_sharev2_share_status_counter{status="error_deleting"} 0 +openstack_sharev2_share_status_counter{status="extending"} 0 +openstack_sharev2_share_status_counter{status="inactive"} 0 +openstack_sharev2_share_status_counter{status="managing"} 0 +openstack_sharev2_share_status_counter{status="migrating"} 0 +openstack_sharev2_share_status_counter{status="migration_error"} 0 +openstack_sharev2_share_status_counter{status="restoring"} 0 +openstack_sharev2_share_status_counter{status="reverting"} 0 +openstack_sharev2_share_status_counter{status="reverting_error"} 0 +openstack_sharev2_share_status_counter{status="reverting_to_snapshot"} 0 +openstack_sharev2_share_status_counter{status="shrinking"} 0 +openstack_sharev2_share_status_counter{status="shrinking_error"} 0 +openstack_sharev2_share_status_counter{status="soft_deleting"} 0 
+openstack_sharev2_share_status_counter{status="unmanaging"} 0 +openstack_sharev2_share_status_counter{status="updating"} 0 +# HELP openstack_sharev2_shares_counter shares_counter +# TYPE openstack_sharev2_shares_counter gauge +openstack_sharev2_shares_counter 0 +# HELP openstack_sharev2_up up +# TYPE openstack_sharev2_up gauge +openstack_sharev2_up 1 +`, + }, + { + Name: "handles null values gracefully", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", "name", "project_id", "size", "share_proto", "status", "share_type_name", "availability_zone", + }).AddRow( + "share-null", sql.NullString{Valid: false}, sql.NullString{Valid: false}, sql.NullInt32{Valid: false}, sql.NullString{Valid: false}, sql.NullString{Valid: false}, "", "", + ) + + mock.ExpectQuery(regexp.QuoteMeta(maniladb.GetShareMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_sharev2_share_gb share_gb +# TYPE openstack_sharev2_share_gb gauge +openstack_sharev2_share_gb{availability_zone="",id="share-null",name="",project_id="",share_proto="",share_type="",share_type_name="",status=""} 0 +# HELP openstack_sharev2_share_status share_status +# TYPE openstack_sharev2_share_status gauge +openstack_sharev2_share_status{id="share-null",name="",project_id="",share_proto="",share_type="",share_type_name="",size="0",status=""} 0 +# HELP openstack_sharev2_share_status_counter share_status_counter +# TYPE openstack_sharev2_share_status_counter gauge +openstack_sharev2_share_status_counter{status="available"} 0 +openstack_sharev2_share_status_counter{status="creating"} 0 +openstack_sharev2_share_status_counter{status="deleting"} 0 +openstack_sharev2_share_status_counter{status="error"} 0 +openstack_sharev2_share_status_counter{status="error_deleting"} 0 +openstack_sharev2_share_status_counter{status="extending"} 0 +openstack_sharev2_share_status_counter{status="inactive"} 0 +openstack_sharev2_share_status_counter{status="managing"} 0 
+openstack_sharev2_share_status_counter{status="migrating"} 0 +openstack_sharev2_share_status_counter{status="migration_error"} 0 +openstack_sharev2_share_status_counter{status="restoring"} 0 +openstack_sharev2_share_status_counter{status="reverting"} 0 +openstack_sharev2_share_status_counter{status="reverting_error"} 0 +openstack_sharev2_share_status_counter{status="reverting_to_snapshot"} 0 +openstack_sharev2_share_status_counter{status="shrinking"} 0 +openstack_sharev2_share_status_counter{status="shrinking_error"} 0 +openstack_sharev2_share_status_counter{status="soft_deleting"} 0 +openstack_sharev2_share_status_counter{status="unmanaging"} 0 +openstack_sharev2_share_status_counter{status="updating"} 0 +# HELP openstack_sharev2_shares_counter shares_counter +# TYPE openstack_sharev2_shares_counter gauge +openstack_sharev2_shares_counter 1 +# HELP openstack_sharev2_up up +# TYPE openstack_sharev2_up gauge +openstack_sharev2_up 1 +`, + }, + { + Name: "database query error", + SetupMock: func(mock sqlmock.Sqlmock) { + mock.ExpectQuery(regexp.QuoteMeta(maniladb.GetShareMetrics)).WillReturnError(sql.ErrConnDone) + }, + ExpectedMetrics: `# HELP openstack_sharev2_up up +# TYPE openstack_sharev2_up gauge +openstack_sharev2_up 0 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) *SharesCollector { + return NewSharesCollector(db, logger) + }) +} diff --git a/internal/collector/placement/resources.go b/internal/collector/placement/resources.go new file mode 100644 index 0000000..f039896 --- /dev/null +++ b/internal/collector/placement/resources.go @@ -0,0 +1,163 @@ +package placement + +import ( + "context" + "database/sql" + "log/slog" + "strconv" + + "github.com/prometheus/client_golang/prometheus" + "github.com/vexxhost/openstack_database_exporter/internal/collector" + placementdb "github.com/vexxhost/openstack_database_exporter/internal/db/placement" +) + +const Subsystem = 
"placement" + +var ( + placementUpDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "up"), + "up", + nil, + nil, + ) + + resourceTotalDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "resource_total"), + "resource_total", + []string{ + "hostname", + "resourcetype", + }, + nil, + ) + + resourceAllocationRatioDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "resource_allocation_ratio"), + "resource_allocation_ratio", + []string{ + "hostname", + "resourcetype", + }, + nil, + ) + + resourceReservedDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "resource_reserved"), + "resource_reserved", + []string{ + "hostname", + "resourcetype", + }, + nil, + ) + + resourceUsageDesc = prometheus.NewDesc( + prometheus.BuildFQName(collector.Namespace, Subsystem, "resource_usage"), + "resource_usage", + []string{ + "hostname", + "resourcetype", + }, + nil, + ) +) + +type ResourcesCollector struct { + db *sql.DB + queries *placementdb.Queries + logger *slog.Logger +} + +func NewResourcesCollector(db *sql.DB, logger *slog.Logger) *ResourcesCollector { + return &ResourcesCollector{ + db: db, + queries: placementdb.New(db), + logger: logger.With( + "component", "placement_resources_collector", + ), + } +} + +func (c *ResourcesCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- placementUpDesc + ch <- resourceTotalDesc + ch <- resourceAllocationRatioDesc + ch <- resourceReservedDesc + ch <- resourceUsageDesc +} + +func (c *ResourcesCollector) Collect(ch chan<- prometheus.Metric) { + ctx := context.Background() + + resources, err := c.queries.GetResourceMetrics(ctx) + if err != nil { + c.logger.Error("Failed to collect placement resources", "error", err) + ch <- prometheus.MustNewConstMetric(placementUpDesc, prometheus.GaugeValue, 0) + return + } + + ch <- prometheus.MustNewConstMetric(placementUpDesc, prometheus.GaugeValue, 1) + + for _, resource 
:= range resources { + hostname := "" + if resource.Hostname.Valid { + hostname = resource.Hostname.String + } + + resourceType := resource.ResourceType + + // Convert allocation_ratio from string to float64 + allocationRatio, err := strconv.ParseFloat(resource.AllocationRatio, 64) + if err != nil { + c.logger.Warn("Failed to parse allocation ratio", "value", resource.AllocationRatio, "error", err) + allocationRatio = 1.0 // default value + } + + // Convert used from interface{} to int64 (mysql returns it as []uint8) + used := int64(0) + if resource.Used != nil { + switch v := resource.Used.(type) { + case int64: + used = v + case []uint8: + // MySQL returns large numbers as []uint8 + usedStr := string(v) + if parsedUsed, err := strconv.ParseInt(usedStr, 10, 64); err == nil { + used = parsedUsed + } + } + } + + ch <- prometheus.MustNewConstMetric( + resourceTotalDesc, + prometheus.GaugeValue, + float64(resource.Total), + hostname, + resourceType, + ) + + ch <- prometheus.MustNewConstMetric( + resourceAllocationRatioDesc, + prometheus.GaugeValue, + allocationRatio, + hostname, + resourceType, + ) + + ch <- prometheus.MustNewConstMetric( + resourceReservedDesc, + prometheus.GaugeValue, + float64(resource.Reserved), + hostname, + resourceType, + ) + + ch <- prometheus.MustNewConstMetric( + resourceUsageDesc, + prometheus.GaugeValue, + float64(used), + hostname, + resourceType, + ) + } +} diff --git a/internal/collector/placement/resources_test.go b/internal/collector/placement/resources_test.go new file mode 100644 index 0000000..1aaf0ac --- /dev/null +++ b/internal/collector/placement/resources_test.go @@ -0,0 +1,159 @@ +package placement + +import ( + "database/sql" + "log/slog" + "regexp" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + placementdb "github.com/vexxhost/openstack_database_exporter/internal/db/placement" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func 
TestResourcesCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection with placement resources", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "hostname", "resource_type", "total", "allocation_ratio", "reserved", "used", + }).AddRow( + "cmp-1-svr8204.localdomain", "DISK_GB", 2047, "1.2000000476837158", 0, []uint8("6969"), + ).AddRow( + "cmp-1-svr8204.localdomain", "MEMORY_MB", 772447, "1.2999999523162842", 8192, []uint8("1945"), + ).AddRow( + "cmp-1-svr8204.localdomain", "VCPU", 96, "3.0000000000000000", 0, []uint8("10"), + ).AddRow( + "cmp-5-svr8208.localdomain", "DISK_GB", 2047, "1.2000000476837158", 0, []uint8("0"), + ).AddRow( + "cmp-5-svr8208.localdomain", "MEMORY_MB", 772447, "1.0000000000000000", 8192, []uint8("0"), + ).AddRow( + "cmp-5-svr8208.localdomain", "PCPU", 96, "1.0000000000000000", 0, []uint8("0"), + ) + + mock.ExpectQuery(regexp.QuoteMeta(placementdb.GetResourceMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_placement_resource_allocation_ratio resource_allocation_ratio +# TYPE openstack_placement_resource_allocation_ratio gauge +openstack_placement_resource_allocation_ratio{hostname="cmp-1-svr8204.localdomain",resourcetype="DISK_GB"} 1.2000000476837158 +openstack_placement_resource_allocation_ratio{hostname="cmp-1-svr8204.localdomain",resourcetype="MEMORY_MB"} 1.2999999523162842 +openstack_placement_resource_allocation_ratio{hostname="cmp-1-svr8204.localdomain",resourcetype="VCPU"} 3 +openstack_placement_resource_allocation_ratio{hostname="cmp-5-svr8208.localdomain",resourcetype="DISK_GB"} 1.2000000476837158 +openstack_placement_resource_allocation_ratio{hostname="cmp-5-svr8208.localdomain",resourcetype="MEMORY_MB"} 1 +openstack_placement_resource_allocation_ratio{hostname="cmp-5-svr8208.localdomain",resourcetype="PCPU"} 1 +# HELP openstack_placement_resource_reserved resource_reserved +# TYPE openstack_placement_resource_reserved gauge 
+openstack_placement_resource_reserved{hostname="cmp-1-svr8204.localdomain",resourcetype="DISK_GB"} 0 +openstack_placement_resource_reserved{hostname="cmp-1-svr8204.localdomain",resourcetype="MEMORY_MB"} 8192 +openstack_placement_resource_reserved{hostname="cmp-1-svr8204.localdomain",resourcetype="VCPU"} 0 +openstack_placement_resource_reserved{hostname="cmp-5-svr8208.localdomain",resourcetype="DISK_GB"} 0 +openstack_placement_resource_reserved{hostname="cmp-5-svr8208.localdomain",resourcetype="MEMORY_MB"} 8192 +openstack_placement_resource_reserved{hostname="cmp-5-svr8208.localdomain",resourcetype="PCPU"} 0 +# HELP openstack_placement_resource_total resource_total +# TYPE openstack_placement_resource_total gauge +openstack_placement_resource_total{hostname="cmp-1-svr8204.localdomain",resourcetype="DISK_GB"} 2047 +openstack_placement_resource_total{hostname="cmp-1-svr8204.localdomain",resourcetype="MEMORY_MB"} 772447 +openstack_placement_resource_total{hostname="cmp-1-svr8204.localdomain",resourcetype="VCPU"} 96 +openstack_placement_resource_total{hostname="cmp-5-svr8208.localdomain",resourcetype="DISK_GB"} 2047 +openstack_placement_resource_total{hostname="cmp-5-svr8208.localdomain",resourcetype="MEMORY_MB"} 772447 +openstack_placement_resource_total{hostname="cmp-5-svr8208.localdomain",resourcetype="PCPU"} 96 +# HELP openstack_placement_resource_usage resource_usage +# TYPE openstack_placement_resource_usage gauge +openstack_placement_resource_usage{hostname="cmp-1-svr8204.localdomain",resourcetype="DISK_GB"} 6969 +openstack_placement_resource_usage{hostname="cmp-1-svr8204.localdomain",resourcetype="MEMORY_MB"} 1945 +openstack_placement_resource_usage{hostname="cmp-1-svr8204.localdomain",resourcetype="VCPU"} 10 +openstack_placement_resource_usage{hostname="cmp-5-svr8208.localdomain",resourcetype="DISK_GB"} 0 +openstack_placement_resource_usage{hostname="cmp-5-svr8208.localdomain",resourcetype="MEMORY_MB"} 0 
+openstack_placement_resource_usage{hostname="cmp-5-svr8208.localdomain",resourcetype="PCPU"} 0 +# HELP openstack_placement_up up +# TYPE openstack_placement_up gauge +openstack_placement_up 1 +`, + }, + { + Name: "successful collection with no resources", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "hostname", "resource_type", "total", "allocation_ratio", "reserved", "used", + }) + + mock.ExpectQuery(regexp.QuoteMeta(placementdb.GetResourceMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_placement_up up +# TYPE openstack_placement_up gauge +openstack_placement_up 1 +`, + }, + { + Name: "handles null hostname gracefully", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "hostname", "resource_type", "total", "allocation_ratio", "reserved", "used", + }).AddRow( + sql.NullString{Valid: false}, "VCPU", 16, "2.0", 0, []uint8("4"), + ) + + mock.ExpectQuery(regexp.QuoteMeta(placementdb.GetResourceMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_placement_resource_allocation_ratio resource_allocation_ratio +# TYPE openstack_placement_resource_allocation_ratio gauge +openstack_placement_resource_allocation_ratio{hostname="",resourcetype="VCPU"} 2 +# HELP openstack_placement_resource_reserved resource_reserved +# TYPE openstack_placement_resource_reserved gauge +openstack_placement_resource_reserved{hostname="",resourcetype="VCPU"} 0 +# HELP openstack_placement_resource_total resource_total +# TYPE openstack_placement_resource_total gauge +openstack_placement_resource_total{hostname="",resourcetype="VCPU"} 16 +# HELP openstack_placement_resource_usage resource_usage +# TYPE openstack_placement_resource_usage gauge +openstack_placement_resource_usage{hostname="",resourcetype="VCPU"} 4 +# HELP openstack_placement_up up +# TYPE openstack_placement_up gauge +openstack_placement_up 1 +`, + }, + { + Name: "handles invalid allocation ratio gracefully", + SetupMock: 
func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "hostname", "resource_type", "total", "allocation_ratio", "reserved", "used", + }).AddRow( + "test-host", "MEMORY_MB", 1024, "invalid_ratio", 0, []uint8("256"), + ) + + mock.ExpectQuery(regexp.QuoteMeta(placementdb.GetResourceMetrics)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_placement_resource_allocation_ratio resource_allocation_ratio +# TYPE openstack_placement_resource_allocation_ratio gauge +openstack_placement_resource_allocation_ratio{hostname="test-host",resourcetype="MEMORY_MB"} 1 +# HELP openstack_placement_resource_reserved resource_reserved +# TYPE openstack_placement_resource_reserved gauge +openstack_placement_resource_reserved{hostname="test-host",resourcetype="MEMORY_MB"} 0 +# HELP openstack_placement_resource_total resource_total +# TYPE openstack_placement_resource_total gauge +openstack_placement_resource_total{hostname="test-host",resourcetype="MEMORY_MB"} 1024 +# HELP openstack_placement_resource_usage resource_usage +# TYPE openstack_placement_resource_usage gauge +openstack_placement_resource_usage{hostname="test-host",resourcetype="MEMORY_MB"} 256 +# HELP openstack_placement_up up +# TYPE openstack_placement_up gauge +openstack_placement_up 1 +`, + }, + { + Name: "database query error", + SetupMock: func(mock sqlmock.Sqlmock) { + mock.ExpectQuery(regexp.QuoteMeta(placementdb.GetResourceMetrics)).WillReturnError(sql.ErrConnDone) + }, + ExpectedMetrics: `# HELP openstack_placement_up up +# TYPE openstack_placement_up gauge +openstack_placement_up 0 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) *ResourcesCollector { + return NewResourcesCollector(db, logger) + }) +} diff --git a/internal/db/glance/db.go b/internal/db/glance/db.go new file mode 100644 index 0000000..e6fd5ee --- /dev/null +++ b/internal/db/glance/db.go @@ -0,0 +1,31 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.28.0 + +package glance + +import ( + "context" + "database/sql" +) + +type DBTX interface { + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) + PrepareContext(context.Context, string) (*sql.Stmt, error) + QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) + QueryRowContext(context.Context, string, ...interface{}) *sql.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx *sql.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/internal/db/glance/models.go b/internal/db/glance/models.go new file mode 100644 index 0000000..7998527 --- /dev/null +++ b/internal/db/glance/models.go @@ -0,0 +1,79 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.28.0 + +package glance + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "time" +) + +type ImagesVisibility string + +const ( + ImagesVisibilityPrivate ImagesVisibility = "private" + ImagesVisibilityPublic ImagesVisibility = "public" + ImagesVisibilityShared ImagesVisibility = "shared" + ImagesVisibilityCommunity ImagesVisibility = "community" +) + +func (e *ImagesVisibility) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = ImagesVisibility(s) + case string: + *e = ImagesVisibility(s) + default: + return fmt.Errorf("unsupported scan type for ImagesVisibility: %T", src) + } + return nil +} + +type NullImagesVisibility struct { + ImagesVisibility ImagesVisibility + Valid bool // Valid is true if ImagesVisibility is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullImagesVisibility) Scan(value interface{}) error { + if value == nil { + ns.ImagesVisibility, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.ImagesVisibility.Scan(value) +} + +// Value implements the driver Valuer interface. 
+func (ns NullImagesVisibility) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.ImagesVisibility), nil +} + +type Image struct { + ID string + Name sql.NullString + Size sql.NullInt64 + Status string + CreatedAt time.Time + UpdatedAt sql.NullTime + DeletedAt sql.NullTime + Deleted bool + DiskFormat sql.NullString + ContainerFormat sql.NullString + Checksum sql.NullString + Owner sql.NullString + MinDisk int32 + MinRam int32 + Protected bool + VirtualSize sql.NullInt64 + Visibility ImagesVisibility + OsHidden bool + OsHashAlgo sql.NullString + OsHashValue sql.NullString +} diff --git a/internal/db/glance/queries.sql.go b/internal/db/glance/queries.sql.go new file mode 100644 index 0000000..6a4a05e --- /dev/null +++ b/internal/db/glance/queries.sql.go @@ -0,0 +1,117 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.28.0 +// source: queries.sql + +package glance + +import ( + "context" + "database/sql" + "time" +) + +const GetAllImages = `-- name: GetAllImages :many +SELECT + id, + name, + size, + status, + owner, + visibility, + disk_format, + container_format, + checksum, + created_at, + updated_at, + min_disk, + min_ram, + protected, + virtual_size, + os_hidden, + os_hash_algo, + os_hash_value +FROM + images +WHERE + deleted = 0 +` + +type GetAllImagesRow struct { + ID string + Name sql.NullString + Size sql.NullInt64 + Status string + Owner sql.NullString + Visibility ImagesVisibility + DiskFormat sql.NullString + ContainerFormat sql.NullString + Checksum sql.NullString + CreatedAt time.Time + UpdatedAt sql.NullTime + MinDisk int32 + MinRam int32 + Protected bool + VirtualSize sql.NullInt64 + OsHidden bool + OsHashAlgo sql.NullString + OsHashValue sql.NullString +} + +func (q *Queries) GetAllImages(ctx context.Context) ([]GetAllImagesRow, error) { + rows, err := q.db.QueryContext(ctx, GetAllImages) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetAllImagesRow + for 
rows.Next() { + var i GetAllImagesRow + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Size, + &i.Status, + &i.Owner, + &i.Visibility, + &i.DiskFormat, + &i.ContainerFormat, + &i.Checksum, + &i.CreatedAt, + &i.UpdatedAt, + &i.MinDisk, + &i.MinRam, + &i.Protected, + &i.VirtualSize, + &i.OsHidden, + &i.OsHashAlgo, + &i.OsHashValue, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetImageCount = `-- name: GetImageCount :one +SELECT + COUNT(*) as count +FROM + images +WHERE + deleted = 0 +` + +func (q *Queries) GetImageCount(ctx context.Context) (int64, error) { + row := q.db.QueryRowContext(ctx, GetImageCount) + var count int64 + err := row.Scan(&count) + return count, err +} diff --git a/internal/db/keystone/db.go b/internal/db/keystone/db.go new file mode 100644 index 0000000..1b58fe3 --- /dev/null +++ b/internal/db/keystone/db.go @@ -0,0 +1,31 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.28.0 + +package keystone + +import ( + "context" + "database/sql" +) + +type DBTX interface { + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) + PrepareContext(context.Context, string) (*sql.Stmt, error) + QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) + QueryRowContext(context.Context, string, ...interface{}) *sql.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx *sql.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/internal/db/keystone/models.go b/internal/db/keystone/models.go new file mode 100644 index 0000000..e6a2c55 --- /dev/null +++ b/internal/db/keystone/models.go @@ -0,0 +1,50 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.28.0 + +package keystone + +import ( + "database/sql" +) + +type Group struct { + ID string + DomainID string + Name string + Description sql.NullString + Extra sql.NullString +} + +type Project struct { + ID string + Name string + Extra sql.NullString + Description sql.NullString + Enabled sql.NullBool + DomainID string + ParentID sql.NullString + IsDomain bool +} + +type ProjectTag struct { + ProjectID string + Name string +} + +type Region struct { + ID string + Description string + ParentRegionID sql.NullString + Extra sql.NullString +} + +type User struct { + ID string + Extra sql.NullString + Enabled sql.NullBool + DefaultProjectID sql.NullString + CreatedAt sql.NullTime + LastActiveAt sql.NullTime + DomainID string +} diff --git a/internal/db/keystone/queries.sql.go b/internal/db/keystone/queries.sql.go new file mode 100644 index 0000000..08fff3e --- /dev/null +++ b/internal/db/keystone/queries.sql.go @@ -0,0 +1,246 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.28.0 +// source: queries.sql + +package keystone + +import ( + "context" + "database/sql" +) + +const GetDomainMetrics = `-- name: GetDomainMetrics :many +SELECT + id, + name, + COALESCE(description, '') as description, + enabled +FROM project +WHERE is_domain = 1 AND id != '<>' +` + +type GetDomainMetricsRow struct { + ID string + Name string + Description string + Enabled sql.NullBool +} + +func (q *Queries) GetDomainMetrics(ctx context.Context) ([]GetDomainMetricsRow, error) { + rows, err := q.db.QueryContext(ctx, GetDomainMetrics) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetDomainMetricsRow + for rows.Next() { + var i GetDomainMetricsRow + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Description, + &i.Enabled, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetGroupMetrics = `-- name: GetGroupMetrics :many +SELECT + id, + domain_id, + name, + COALESCE(description, '') as description +FROM ` + "`" + `group` + "`" + ` +` + +type GetGroupMetricsRow struct { + ID string + DomainID string + Name string + Description string +} + +func (q *Queries) GetGroupMetrics(ctx context.Context) ([]GetGroupMetricsRow, error) { + rows, err := q.db.QueryContext(ctx, GetGroupMetrics) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetGroupMetricsRow + for rows.Next() { + var i GetGroupMetricsRow + if err := rows.Scan( + &i.ID, + &i.DomainID, + &i.Name, + &i.Description, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetProjectMetrics = `-- name: GetProjectMetrics :many +SELECT + p.id, + p.name, + COALESCE(p.description, '') as description, + 
p.enabled, + p.domain_id, + COALESCE(p.parent_id, '') as parent_id, + p.is_domain, + COALESCE(GROUP_CONCAT(pt.name SEPARATOR ','), '') as tags +FROM project p +LEFT JOIN project_tag pt ON p.id = pt.project_id +WHERE p.is_domain = 0 +GROUP BY p.id, p.name, p.description, p.enabled, p.domain_id, p.parent_id, p.is_domain +` + +type GetProjectMetricsRow struct { + ID string + Name string + Description string + Enabled sql.NullBool + DomainID string + ParentID string + IsDomain bool + Tags interface{} +} + +func (q *Queries) GetProjectMetrics(ctx context.Context) ([]GetProjectMetricsRow, error) { + rows, err := q.db.QueryContext(ctx, GetProjectMetrics) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetProjectMetricsRow + for rows.Next() { + var i GetProjectMetricsRow + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Description, + &i.Enabled, + &i.DomainID, + &i.ParentID, + &i.IsDomain, + &i.Tags, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetRegionMetrics = `-- name: GetRegionMetrics :many +SELECT + id, + COALESCE(description, '') as description, + COALESCE(parent_region_id, '') as parent_region_id +FROM region +` + +type GetRegionMetricsRow struct { + ID string + Description string + ParentRegionID string +} + +func (q *Queries) GetRegionMetrics(ctx context.Context) ([]GetRegionMetricsRow, error) { + rows, err := q.db.QueryContext(ctx, GetRegionMetrics) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetRegionMetricsRow + for rows.Next() { + var i GetRegionMetricsRow + if err := rows.Scan(&i.ID, &i.Description, &i.ParentRegionID); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + 
+const GetUserMetrics = `-- name: GetUserMetrics :many +SELECT + id, + enabled, + domain_id, + COALESCE(default_project_id, '') as default_project_id, + created_at, + last_active_at +FROM user +` + +type GetUserMetricsRow struct { + ID string + Enabled sql.NullBool + DomainID string + DefaultProjectID string + CreatedAt sql.NullTime + LastActiveAt sql.NullTime +} + +func (q *Queries) GetUserMetrics(ctx context.Context) ([]GetUserMetricsRow, error) { + rows, err := q.db.QueryContext(ctx, GetUserMetrics) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetUserMetricsRow + for rows.Next() { + var i GetUserMetricsRow + if err := rows.Scan( + &i.ID, + &i.Enabled, + &i.DomainID, + &i.DefaultProjectID, + &i.CreatedAt, + &i.LastActiveAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/internal/db/magnum/db.go b/internal/db/magnum/db.go new file mode 100644 index 0000000..393ec6e --- /dev/null +++ b/internal/db/magnum/db.go @@ -0,0 +1,31 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.28.0 + +package magnum + +import ( + "context" + "database/sql" +) + +type DBTX interface { + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) + PrepareContext(context.Context, string) (*sql.Stmt, error) + QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) + QueryRowContext(context.Context, string, ...interface{}) *sql.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx *sql.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/internal/db/magnum/models.go b/internal/db/magnum/models.go new file mode 100644 index 0000000..0617d82 --- /dev/null +++ b/internal/db/magnum/models.go @@ -0,0 +1,71 @@ +// Code generated by sqlc. 
DO NOT EDIT. +// versions: +// sqlc v1.28.0 + +package magnum + +import ( + "database/sql" +) + +type Cluster struct { + CreatedAt sql.NullTime + UpdatedAt sql.NullTime + ID int32 + Uuid sql.NullString + Name sql.NullString + ClusterTemplateID sql.NullString + ApiAddress sql.NullString + StackID sql.NullString + ProjectID sql.NullString + UserID sql.NullString + Status sql.NullString + DiscoveryUrl sql.NullString + StatusReason sql.NullString + CaCertRef sql.NullString + MagnumCertRef sql.NullString + CreateTimeout sql.NullInt32 + TrustID sql.NullString + TrusteeUsername sql.NullString + TrusteeUserID sql.NullString + TrusteePassword sql.NullString + CoeVersion sql.NullString + ContainerVersion sql.NullString + Keypair sql.NullString + DockerVolumeSize sql.NullInt32 + Labels sql.NullString + MasterFlavorID sql.NullString + FlavorID sql.NullString + HealthStatus sql.NullString + HealthStatusReason sql.NullString + FixedNetwork sql.NullString + FixedSubnet sql.NullString + FloatingIpEnabled sql.NullBool + MasterLbEnabled sql.NullBool + EtcdCaCertRef sql.NullString + FrontProxyCaCertRef sql.NullString +} + +type Nodegroup struct { + CreatedAt sql.NullTime + UpdatedAt sql.NullTime + ID int32 + Uuid string + Name string + ClusterID string + ProjectID string + DockerVolumeSize sql.NullInt32 + Labels sql.NullString + FlavorID sql.NullString + ImageID sql.NullString + NodeAddresses sql.NullString + NodeCount sql.NullInt32 + MaxNodeCount sql.NullInt32 + MinNodeCount sql.NullInt32 + Role sql.NullString + IsDefault sql.NullBool + StackID sql.NullString + Status sql.NullString + StatusReason sql.NullString + Version sql.NullString +} diff --git a/internal/db/magnum/queries.sql.go b/internal/db/magnum/queries.sql.go new file mode 100644 index 0000000..73dd490 --- /dev/null +++ b/internal/db/magnum/queries.sql.go @@ -0,0 +1,76 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.28.0 +// source: queries.sql + +package magnum + +import ( + "context" + "database/sql" +) + +const GetClusterMetrics = `-- name: GetClusterMetrics :many +SELECT + c.uuid, + c.name, + COALESCE(c.stack_id, '') as stack_id, + COALESCE(c.status, '') as status, + c.project_id, + COALESCE(master_ng.node_count, 0) as master_count, + COALESCE(worker_ng.node_count, 0) as node_count +FROM cluster c +LEFT JOIN ( + SELECT cluster_id, SUM(node_count) as node_count + FROM nodegroup + WHERE role = 'master' + GROUP BY cluster_id +) master_ng ON c.uuid = master_ng.cluster_id +LEFT JOIN ( + SELECT cluster_id, SUM(node_count) as node_count + FROM nodegroup + WHERE role = 'worker' + GROUP BY cluster_id +) worker_ng ON c.uuid = worker_ng.cluster_id +` + +type GetClusterMetricsRow struct { + Uuid sql.NullString + Name sql.NullString + StackID string + Status string + ProjectID sql.NullString + MasterCount interface{} + NodeCount interface{} +} + +func (q *Queries) GetClusterMetrics(ctx context.Context) ([]GetClusterMetricsRow, error) { + rows, err := q.db.QueryContext(ctx, GetClusterMetrics) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetClusterMetricsRow + for rows.Next() { + var i GetClusterMetricsRow + if err := rows.Scan( + &i.Uuid, + &i.Name, + &i.StackID, + &i.Status, + &i.ProjectID, + &i.MasterCount, + &i.NodeCount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/internal/db/manila/db.go b/internal/db/manila/db.go new file mode 100644 index 0000000..e15ce73 --- /dev/null +++ b/internal/db/manila/db.go @@ -0,0 +1,31 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.28.0 + +package manila + +import ( + "context" + "database/sql" +) + +type DBTX interface { + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) + PrepareContext(context.Context, string) (*sql.Stmt, error) + QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) + QueryRowContext(context.Context, string, ...interface{}) *sql.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx *sql.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/internal/db/manila/models.go b/internal/db/manila/models.go new file mode 100644 index 0000000..c2441e4 --- /dev/null +++ b/internal/db/manila/models.go @@ -0,0 +1,79 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.28.0 + +package manila + +import ( + "database/sql" +) + +type AvailabilityZone struct { + CreatedAt sql.NullTime + UpdatedAt sql.NullTime + DeletedAt sql.NullTime + Deleted sql.NullString + ID string + Name sql.NullString +} + +type Share struct { + CreatedAt sql.NullTime + UpdatedAt sql.NullTime + DeletedAt sql.NullTime + Deleted sql.NullString + ID string + UserID sql.NullString + ProjectID sql.NullString + Size sql.NullInt32 + DisplayName sql.NullString + DisplayDescription sql.NullString + SnapshotID sql.NullString + ShareProto sql.NullString + IsPublic sql.NullBool + SnapshotSupport sql.NullBool + ShareGroupID sql.NullString + SourceShareGroupSnapshotMemberID sql.NullString + TaskState sql.NullString + ReplicationType sql.NullString + CreateShareFromSnapshotSupport sql.NullBool + RevertToSnapshotSupport sql.NullBool + MountSnapshotSupport sql.NullBool + IsSoftDeleted bool + ScheduledToBeDeletedAt sql.NullTime + SourceBackupID sql.NullString +} + +type ShareInstance struct { + CreatedAt sql.NullTime + UpdatedAt sql.NullTime + DeletedAt sql.NullTime + Deleted sql.NullString + ID string + ShareID sql.NullString + Host sql.NullString + Status 
sql.NullString + ScheduledAt sql.NullTime + LaunchedAt sql.NullTime + TerminatedAt sql.NullTime + ShareNetworkID sql.NullString + ShareServerID sql.NullString + AvailabilityZoneID sql.NullString + AccessRulesStatus sql.NullString + ReplicaState sql.NullString + ShareTypeID sql.NullString + CastRulesToReadonly bool + Progress sql.NullString + MountPointName sql.NullString +} + +type ShareType struct { + CreatedAt sql.NullTime + UpdatedAt sql.NullTime + DeletedAt sql.NullTime + Deleted sql.NullString + ID string + Name sql.NullString + IsPublic sql.NullBool + Description sql.NullString +} diff --git a/internal/db/manila/queries.sql.go b/internal/db/manila/queries.sql.go new file mode 100644 index 0000000..80e35b6 --- /dev/null +++ b/internal/db/manila/queries.sql.go @@ -0,0 +1,74 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.28.0 +// source: queries.sql + +package manila + +import ( + "context" + "database/sql" +) + +const GetShareMetrics = `-- name: GetShareMetrics :many +SELECT + s.id, + s.display_name as name, + s.project_id, + s.size, + s.share_proto, + si.status, + COALESCE(st.name, '') as share_type_name, + COALESCE(az.name, '') as availability_zone +FROM shares s +LEFT JOIN share_instances si ON s.id = si.share_id AND si.deleted = 'False' +LEFT JOIN share_types st ON si.share_type_id = st.id AND st.deleted = 'False' +LEFT JOIN availability_zones az ON si.availability_zone_id = az.id AND az.deleted = 'False' +WHERE s.deleted = 'False' +ORDER BY s.created_at +` + +type GetShareMetricsRow struct { + ID string + Name sql.NullString + ProjectID sql.NullString + Size sql.NullInt32 + ShareProto sql.NullString + Status sql.NullString + ShareTypeName string + AvailabilityZone string +} + +// Get share metrics for openstack_sharev2_share_gb and openstack_sharev2_share_status +// This joins shares with share_instances to get current status and availability zone info +func (q *Queries) GetShareMetrics(ctx context.Context) ([]GetShareMetricsRow, 
error) { + rows, err := q.db.QueryContext(ctx, GetShareMetrics) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetShareMetricsRow + for rows.Next() { + var i GetShareMetricsRow + if err := rows.Scan( + &i.ID, + &i.Name, + &i.ProjectID, + &i.Size, + &i.ShareProto, + &i.Status, + &i.ShareTypeName, + &i.AvailabilityZone, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/internal/db/placement/db.go b/internal/db/placement/db.go new file mode 100644 index 0000000..5747582 --- /dev/null +++ b/internal/db/placement/db.go @@ -0,0 +1,31 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.28.0 + +package placement + +import ( + "context" + "database/sql" +) + +type DBTX interface { + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) + PrepareContext(context.Context, string) (*sql.Stmt, error) + QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) + QueryRowContext(context.Context, string, ...interface{}) *sql.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx *sql.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/internal/db/placement/models.go b/internal/db/placement/models.go new file mode 100644 index 0000000..be209fa --- /dev/null +++ b/internal/db/placement/models.go @@ -0,0 +1,52 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.28.0 + +package placement + +import ( + "database/sql" +) + +type Allocation struct { + ID int32 + ResourceProviderID int32 + ConsumerID string + ResourceClassID int32 + Used int32 + CreatedAt sql.NullTime + UpdatedAt sql.NullTime +} + +type Inventory struct { + ID int32 + ResourceProviderID int32 + ResourceClassID int32 + Total int32 + Reserved int32 + MinUnit int32 + MaxUnit int32 + StepSize int32 + AllocationRatio string + CreatedAt sql.NullTime + UpdatedAt sql.NullTime +} + +type ResourceClass struct { + ID int32 + Name string + CreatedAt sql.NullTime + UpdatedAt sql.NullTime +} + +type ResourceProvider struct { + ID int32 + Uuid string + Name sql.NullString + Generation int32 + CanHost int32 + CreatedAt sql.NullTime + UpdatedAt sql.NullTime + RootProviderID int32 + ParentProviderID sql.NullInt32 +} diff --git a/internal/db/placement/queries.sql.go b/internal/db/placement/queries.sql.go new file mode 100644 index 0000000..ebf69db --- /dev/null +++ b/internal/db/placement/queries.sql.go @@ -0,0 +1,71 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.28.0 +// source: queries.sql + +package placement + +import ( + "context" + "database/sql" +) + +const GetResourceMetrics = `-- name: GetResourceMetrics :many +SELECT + rp.name as hostname, + rc.name as resource_type, + i.total, + i.allocation_ratio, + i.reserved, + COALESCE(SUM(a.used), 0) as used +FROM resource_providers rp +JOIN inventories i ON rp.id = i.resource_provider_id +JOIN resource_classes rc ON i.resource_class_id = rc.id +LEFT JOIN allocations a ON rp.id = a.resource_provider_id AND rc.id = a.resource_class_id +GROUP BY rp.id, rp.name, rc.id, rc.name, i.total, i.allocation_ratio, i.reserved +ORDER BY rp.name, rc.name +` + +type GetResourceMetricsRow struct { + Hostname sql.NullString + ResourceType string + Total int32 + AllocationRatio string + Reserved int32 + Used interface{} +} + +// This is the main query that provides data for all four metrics: +// - resource_total: inventory total +// - resource_allocation_ratio: inventory allocation_ratio +// - resource_reserved: inventory reserved +// - resource_usage: sum of allocations per resource provider + class +func (q *Queries) GetResourceMetrics(ctx context.Context) ([]GetResourceMetricsRow, error) { + rows, err := q.db.QueryContext(ctx, GetResourceMetrics) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetResourceMetricsRow + for rows.Next() { + var i GetResourceMetricsRow + if err := rows.Scan( + &i.Hostname, + &i.ResourceType, + &i.Total, + &i.AllocationRatio, + &i.Reserved, + &i.Used, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/sql/glance/queries.sql b/sql/glance/queries.sql new file mode 100644 index 0000000..b2d1d4b --- /dev/null +++ b/sql/glance/queries.sql @@ -0,0 +1,32 @@ +-- name: GetAllImages :many +SELECT + id, + name, + size, + status, + owner, + 
visibility, + disk_format, + container_format, + checksum, + created_at, + updated_at, + min_disk, + min_ram, + protected, + virtual_size, + os_hidden, + os_hash_algo, + os_hash_value +FROM + images +WHERE + deleted = 0; + +-- name: GetImageCount :one +SELECT + COUNT(*) as count +FROM + images +WHERE + deleted = 0; \ No newline at end of file diff --git a/sql/glance/schema.sql b/sql/glance/schema.sql new file mode 100644 index 0000000..4e6a021 --- /dev/null +++ b/sql/glance/schema.sql @@ -0,0 +1,34 @@ +CREATE TABLE + `images` ( + `id` varchar(36) NOT NULL, + `name` varchar(255) DEFAULT NULL, + `size` bigint DEFAULT NULL, + `status` varchar(30) NOT NULL, + `created_at` datetime NOT NULL, + `updated_at` datetime DEFAULT NULL, + `deleted_at` datetime DEFAULT NULL, + `deleted` tinyint(1) NOT NULL, + `disk_format` varchar(20) DEFAULT NULL, + `container_format` varchar(20) DEFAULT NULL, + `checksum` varchar(32) DEFAULT NULL, + `owner` varchar(255) DEFAULT NULL, + `min_disk` int NOT NULL, + `min_ram` int NOT NULL, + `protected` tinyint(1) NOT NULL DEFAULT '0', + `virtual_size` bigint DEFAULT NULL, + `visibility` enum('private','public','shared','community') NOT NULL DEFAULT 'shared', + `os_hidden` tinyint(1) NOT NULL DEFAULT '0', + `os_hash_algo` varchar(64) DEFAULT NULL, + `os_hash_value` varchar(128) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `checksum_image_idx` (`checksum`), + KEY `ix_images_deleted` (`deleted`), + KEY `owner_image_idx` (`owner`), + KEY `created_at_image_idx` (`created_at`), + KEY `updated_at_image_idx` (`updated_at`), + KEY `visibility_image_idx` (`visibility`), + KEY `os_hidden_image_idx` (`os_hidden`), + KEY `os_hash_value_image_idx` (`os_hash_value`), + CONSTRAINT `images_chk_2` CHECK ((`deleted` in (0,1))), + CONSTRAINT `images_chk_3` CHECK ((`protected` in (0,1))) + ); diff --git a/sql/keystone/queries.sql b/sql/keystone/queries.sql new file mode 100644 index 0000000..f3ffa6b --- /dev/null +++ b/sql/keystone/queries.sql @@ -0,0 +1,48 @@ +-- name: 
GetProjectMetrics :many +SELECT + p.id, + p.name, + COALESCE(p.description, '') as description, + p.enabled, + p.domain_id, + COALESCE(p.parent_id, '') as parent_id, + p.is_domain, + COALESCE(GROUP_CONCAT(pt.name SEPARATOR ','), '') as tags +FROM project p +LEFT JOIN project_tag pt ON p.id = pt.project_id +WHERE p.is_domain = 0 +GROUP BY p.id, p.name, p.description, p.enabled, p.domain_id, p.parent_id, p.is_domain; + +-- name: GetDomainMetrics :many +SELECT + id, + name, + COALESCE(description, '') as description, + enabled +FROM project +WHERE is_domain = 1 AND id != '<>'; + +-- name: GetUserMetrics :many +SELECT + id, + enabled, + domain_id, + COALESCE(default_project_id, '') as default_project_id, + created_at, + last_active_at +FROM user; + +-- name: GetRegionMetrics :many +SELECT + id, + COALESCE(description, '') as description, + COALESCE(parent_region_id, '') as parent_region_id +FROM region; + +-- name: GetGroupMetrics :many +SELECT + id, + domain_id, + name, + COALESCE(description, '') as description +FROM `group`; \ No newline at end of file diff --git a/sql/keystone/schema.sql b/sql/keystone/schema.sql new file mode 100644 index 0000000..dd5f83e --- /dev/null +++ b/sql/keystone/schema.sql @@ -0,0 +1,59 @@ +CREATE TABLE + `project` ( + `id` varchar(64) NOT NULL, + `name` varchar(64) NOT NULL, + `extra` text, + `description` text, + `enabled` tinyint(1) DEFAULT NULL, + `domain_id` varchar(64) NOT NULL, + `parent_id` varchar(64) DEFAULT NULL, + `is_domain` tinyint(1) NOT NULL DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `ixu_project_name_domain_id` (`domain_id`,`name`), + KEY `project_parent_id_fkey` (`parent_id`), + CONSTRAINT `project_domain_id_fkey` FOREIGN KEY (`domain_id`) REFERENCES `project` (`id`), + CONSTRAINT `project_parent_id_fkey` FOREIGN KEY (`parent_id`) REFERENCES `project` (`id`) + ); + +CREATE TABLE + `region` ( + `id` varchar(255) NOT NULL, + `description` varchar(255) NOT NULL, + `parent_region_id` varchar(255) DEFAULT NULL, + `extra` 
-- (continued) tail of a keystone table whose CREATE TABLE header precedes
-- this chunk — presumably `project`; confirm against the full schema file.
    text,
    PRIMARY KEY (`id`)
  );

CREATE TABLE
  `project_tag` (
    `project_id` varchar(64) NOT NULL,
    `name` varchar(255) NOT NULL,
    PRIMARY KEY (`project_id`, `name`),
    CONSTRAINT `project_tag_ibfk_1` FOREIGN KEY (`project_id`) REFERENCES `project` (`id`) ON DELETE CASCADE
  );

CREATE TABLE
  `user` (
    `id` varchar(64) NOT NULL,
    `extra` text,
    `enabled` tinyint(1) DEFAULT NULL,
    `default_project_id` varchar(64) DEFAULT NULL,
    `created_at` datetime DEFAULT NULL,
    `last_active_at` date DEFAULT NULL,
    `domain_id` varchar(64) NOT NULL,
    PRIMARY KEY (`id`),
    UNIQUE KEY `ixu_user_id_domain_id` (`id`, `domain_id`),
    KEY `ix_default_project_id` (`default_project_id`),
    KEY `domain_id` (`domain_id`)
  );

CREATE TABLE
  `group` (
    `id` varchar(64) NOT NULL,
    `domain_id` varchar(64) NOT NULL,
    `name` varchar(64) NOT NULL,
    `description` text,
    `extra` text,
    PRIMARY KEY (`id`),
    UNIQUE KEY `ixu_group_name_domain_id` (`domain_id`, `name`)
  );

-- file: sql/magnum/queries.sql

-- name: GetClusterMetrics :many
-- One row per cluster with its master and worker node totals.
-- master_count sums nodegroups with role = 'master'; node_count sums every
-- other nodegroup. Magnum permits custom (non-'worker') roles for worker
-- nodegroups and defines Cluster.node_count as the sum over role != 'master',
-- so the worker subquery filters on role != 'master' rather than
-- role = 'worker' to avoid undercounting custom-role nodegroups.
SELECT
    c.uuid,
    c.name,
    COALESCE(c.stack_id, '') as stack_id,
    COALESCE(c.status, '') as status,
    c.project_id,
    COALESCE(master_ng.node_count, 0) as master_count,
    COALESCE(worker_ng.node_count, 0) as node_count
FROM cluster c
LEFT JOIN (
    SELECT cluster_id, SUM(node_count) as node_count
    FROM nodegroup
    WHERE role = 'master'
    GROUP BY cluster_id
) master_ng ON c.uuid = master_ng.cluster_id
LEFT JOIN (
    SELECT cluster_id, SUM(node_count) as node_count
    FROM nodegroup
    WHERE role != 'master'
    GROUP BY cluster_id
) worker_ng ON c.uuid = worker_ng.cluster_id;

-- file: sql/magnum/schema.sql

CREATE TABLE
  `cluster` (
    `created_at` datetime DEFAULT NULL,
    `updated_at` datetime DEFAULT NULL,
    `id` int NOT NULL AUTO_INCREMENT,
    `uuid` varchar(36) DEFAULT NULL,
    `name` varchar(255) DEFAULT NULL,
    `cluster_template_id` varchar(255) DEFAULT NULL,
    `api_address` varchar(255) DEFAULT NULL,
    `stack_id` varchar(255) DEFAULT NULL,
    `project_id` varchar(255) DEFAULT NULL,
    `user_id` varchar(255) DEFAULT NULL,
    `status` varchar(20) DEFAULT NULL,
    `discovery_url` varchar(255) DEFAULT NULL,
    `status_reason` text,
    `ca_cert_ref` varchar(512) DEFAULT NULL,
    `magnum_cert_ref` varchar(512) DEFAULT NULL,
    `create_timeout` int DEFAULT NULL,
    `trust_id` varchar(255) DEFAULT NULL,
    `trustee_username` varchar(255) DEFAULT NULL,
    `trustee_user_id` varchar(255) DEFAULT NULL,
    `trustee_password` varchar(255) DEFAULT NULL,
    `coe_version` varchar(255) DEFAULT NULL,
    `container_version` varchar(255) DEFAULT NULL,
    `keypair` varchar(255) DEFAULT NULL,
    `docker_volume_size` int DEFAULT NULL,
    `labels` text,
    `master_flavor_id` varchar(255) DEFAULT NULL,
    `flavor_id` varchar(255) DEFAULT NULL,
    `health_status` varchar(20) DEFAULT NULL,
    `health_status_reason` text,
    `fixed_network` varchar(255) DEFAULT NULL,
    `fixed_subnet` varchar(255) DEFAULT NULL,
    `floating_ip_enabled` tinyint(1) DEFAULT NULL,
    `master_lb_enabled` tinyint(1) DEFAULT NULL,
    `etcd_ca_cert_ref` varchar(512) DEFAULT NULL,
    `front_proxy_ca_cert_ref` varchar(512) DEFAULT NULL,
    PRIMARY KEY (`id`),
    -- historical index name: clusters were called "bays" in early Magnum
    UNIQUE KEY `uniq_bay0uuid` (`uuid`)
  );

CREATE TABLE
  `nodegroup` (
    `created_at` datetime DEFAULT NULL,
    `updated_at` datetime DEFAULT NULL,
    `id` int NOT NULL AUTO_INCREMENT,
    `uuid` varchar(36) NOT NULL,
    `name` varchar(255) NOT NULL,
    `cluster_id` varchar(255) NOT NULL,
    `project_id` varchar(255) NOT NULL,
    `docker_volume_size` int DEFAULT NULL,
    `labels` text,
    `flavor_id` varchar(255) DEFAULT NULL,
    `image_id` varchar(255) DEFAULT NULL,
    `node_addresses` text,
    `node_count` int DEFAULT NULL,
    `max_node_count` int DEFAULT NULL,
    `min_node_count` int DEFAULT NULL,
    `role` varchar(255) DEFAULT NULL,
    `is_default` tinyint(1) DEFAULT NULL,
    `stack_id` varchar(255) DEFAULT NULL,
    `status` varchar(20) DEFAULT NULL,
    `status_reason` text,
    `version` varchar(20) DEFAULT NULL,
    PRIMARY KEY (`id`),
    UNIQUE KEY `uniq_nodegroup0uuid` (`uuid`),
    UNIQUE KEY `uniq_nodegroup0cluster_id0name` (`cluster_id`, `name`)
  );

-- file: sql/manila/queries.sql

-- name: GetShareMetrics :many
-- Get share metrics for openstack_sharev2_share_gb and openstack_sharev2_share_status
-- This joins shares with share_instances to get current status and availability zone info
-- NOTE(review): a share with more than one live instance (replication or an
-- in-flight migration) will emit one row per instance — confirm the collector
-- deduplicates or that this matches the intended per-instance semantics.
SELECT
    s.id,
    s.display_name as name,
    s.project_id,
    s.size,
    s.share_proto,
    si.status,
    COALESCE(st.name, '') as share_type_name,
    COALESCE(az.name, '') as availability_zone
FROM shares s
LEFT JOIN share_instances si ON s.id = si.share_id AND si.deleted = 'False'
LEFT JOIN share_types st ON si.share_type_id = st.id AND st.deleted = 'False'
LEFT JOIN availability_zones az ON si.availability_zone_id = az.id AND az.deleted = 'False'
WHERE s.deleted = 'False'
ORDER BY s.created_at;

-- file: sql/manila/schema.sql
-- Soft-delete convention: `deleted` is a varchar holding 'False' for live rows
-- (and the row id once deleted), hence the string comparisons in queries.sql.

CREATE TABLE
  `shares` (
    `created_at` datetime(6) DEFAULT NULL,
    `updated_at` datetime(6) DEFAULT NULL,
    `deleted_at` datetime(6) DEFAULT NULL,
    `deleted` varchar(36) DEFAULT NULL,
    `id` varchar(36) NOT NULL,
    `user_id` varchar(255) DEFAULT NULL,
    `project_id` varchar(255) DEFAULT NULL,
    `size` int DEFAULT NULL,
    `display_name` varchar(255) DEFAULT NULL,
    `display_description` varchar(255) DEFAULT NULL,
    `snapshot_id` varchar(36) DEFAULT NULL,
    `share_proto` varchar(255) DEFAULT NULL,
    `is_public` tinyint(1) DEFAULT NULL,
    `snapshot_support` tinyint(1) DEFAULT NULL,
    `share_group_id` varchar(36) DEFAULT NULL,
    `source_share_group_snapshot_member_id` varchar(36) DEFAULT NULL,
    `task_state` varchar(255) DEFAULT NULL,
    `replication_type` varchar(255) DEFAULT NULL,
    `create_share_from_snapshot_support` tinyint(1) DEFAULT NULL,
    `revert_to_snapshot_support` tinyint(1) DEFAULT NULL,
    `mount_snapshot_support` tinyint(1) DEFAULT NULL,
    `is_soft_deleted` tinyint(1) NOT NULL DEFAULT '0',
    `scheduled_to_be_deleted_at` datetime DEFAULT NULL,
    `source_backup_id` varchar(36) DEFAULT NULL,
    PRIMARY KEY (`id`),
    KEY `fk_shares_share_group_id` (`share_group_id`),
    -- NOTE(review): `share_groups` is not defined in this schema file —
    -- confirm sqlc's MySQL engine accepts the dangling reference.
    CONSTRAINT `fk_shares_share_group_id` FOREIGN KEY (`share_group_id`) REFERENCES `share_groups` (`id`)
  );

CREATE TABLE
  `share_types` (
    `created_at` datetime(6) DEFAULT NULL,
    `updated_at` datetime(6) DEFAULT NULL,
    `deleted_at` datetime(6) DEFAULT NULL,
    `deleted` varchar(36) DEFAULT NULL,
    `id` varchar(36) NOT NULL,
    `name` varchar(255) DEFAULT NULL,
    `is_public` tinyint(1) DEFAULT NULL,
    `description` varchar(255) DEFAULT NULL,
    PRIMARY KEY (`id`),
    UNIQUE KEY `st_name_uc` (`name`, `deleted`)
  );

CREATE TABLE
  `availability_zones` (
    `created_at` datetime(6) DEFAULT NULL,
    `updated_at` datetime(6) DEFAULT NULL,
    `deleted_at` datetime(6) DEFAULT NULL,
    `deleted` varchar(36) DEFAULT NULL,
    `id` varchar(36) NOT NULL,
    `name` varchar(255) DEFAULT NULL,
    PRIMARY KEY (`id`),
    UNIQUE KEY `az_name_uc` (`name`, `deleted`)
  );

CREATE TABLE
  `share_instances` (
    `created_at` datetime(6) DEFAULT NULL,
    `updated_at` datetime(6) DEFAULT NULL,
    `deleted_at` datetime(6) DEFAULT NULL,
    `deleted` varchar(36) DEFAULT NULL,
    `id` varchar(36) NOT NULL,
    `share_id` varchar(36) DEFAULT NULL,
    `host` varchar(255) DEFAULT NULL,
    `status` varchar(255) DEFAULT NULL,
    `scheduled_at` datetime DEFAULT NULL,
    `launched_at` datetime DEFAULT NULL,
    `terminated_at` datetime DEFAULT NULL,
    `share_network_id` varchar(36) DEFAULT NULL,
    `share_server_id` varchar(36) DEFAULT NULL,
    `availability_zone_id` varchar(36) DEFAULT NULL,
    `access_rules_status` varchar(255) DEFAULT NULL,
    `replica_state` varchar(255) DEFAULT NULL,
    `share_type_id` varchar(36) DEFAULT NULL,
    `cast_rules_to_readonly` tinyint(1) NOT NULL,
    `progress` varchar(32) DEFAULT NULL,
    `mount_point_name` varchar(255) DEFAULT NULL,
    PRIMARY KEY (`id`),
    KEY `si_share_network_fk` (`share_network_id`),
    KEY `si_share_server_fk` (`share_server_id`),
    KEY `si_az_id_fk` (`availability_zone_id`),
    KEY `si_st_id_fk` (`share_type_id`),
    KEY `share_instances_share_id_idx` (`share_id`),
    CONSTRAINT `si_az_id_fk` FOREIGN KEY (`availability_zone_id`) REFERENCES `availability_zones` (`id`),
    CONSTRAINT `si_share_fk` FOREIGN KEY (`share_id`) REFERENCES `shares` (`id`),
    CONSTRAINT `si_st_id_fk` FOREIGN KEY (`share_type_id`) REFERENCES `share_types` (`id`)
  );

-- file: sql/placement/queries.sql

-- name: GetResourceMetrics :many
-- This is the main query that provides data for all four metrics:
-- - resource_total: inventory total
-- - resource_allocation_ratio: inventory allocation_ratio
-- - resource_reserved: inventory reserved
-- - resource_usage: sum of allocations per resource provider + class
SELECT
    rp.name as hostname,
    rc.name as resource_type,
    i.total,
    i.allocation_ratio,
    i.reserved,
    COALESCE(SUM(a.used), 0) as used
FROM resource_providers rp
JOIN inventories i ON rp.id = i.resource_provider_id
JOIN resource_classes rc ON i.resource_class_id = rc.id
LEFT JOIN allocations a ON rp.id = a.resource_provider_id AND rc.id = a.resource_class_id
GROUP BY rp.id, rp.name, rc.id, rc.name, i.total, i.allocation_ratio, i.reserved
ORDER BY rp.name, rc.name;

-- file: sql/placement/schema.sql
-- Placement stores inventory (capacity) per resource provider and class,
-- and allocations (usage) per consumer; the exporter joins all four tables.

CREATE TABLE
  `resource_providers` (
    `id` int(11) NOT NULL AUTO_INCREMENT,
    `uuid` varchar(36) NOT NULL,
    `name` varchar(200) DEFAULT NULL,
    `generation` int(11) NOT NULL,
    `can_host` int(11) NOT NULL DEFAULT '0',
    `created_at` datetime DEFAULT NULL,
    `updated_at` datetime DEFAULT NULL,
    `root_provider_id` int(11) NOT NULL,
    `parent_provider_id` int(11) DEFAULT NULL,
    PRIMARY KEY (`id`),
    UNIQUE KEY `uniq_resource_providers0uuid` (`uuid`),
    KEY `resource_providers_name_idx` (`name`),
    KEY `resource_providers_root_provider_id_idx` (`root_provider_id`),
    KEY `resource_providers_parent_provider_id_idx` (`parent_provider_id`)
  );

CREATE TABLE
  `resource_classes` (
    `id` int(11) NOT NULL AUTO_INCREMENT,
    `name` varchar(255) NOT NULL,
    `created_at` datetime DEFAULT NULL,
    `updated_at` datetime DEFAULT NULL,
    PRIMARY KEY (`id`),
    UNIQUE KEY `uniq_resource_classes0name` (`name`)
  );

CREATE TABLE
  `allocations` (
    `id` int(11) NOT NULL AUTO_INCREMENT,
    `resource_provider_id` int(11) NOT NULL,
    `consumer_id` varchar(36) NOT NULL,
    `resource_class_id` int(11) NOT NULL,
    `used` int(11) NOT NULL,
    `created_at` datetime DEFAULT NULL,
    `updated_at` datetime DEFAULT NULL,
    PRIMARY KEY (`id`),
    KEY `allocations_resource_provider_class_used_idx` (`resource_provider_id`, `resource_class_id`, `used`),
    KEY `allocations_resource_class_id_idx` (`resource_class_id`),
    KEY `allocations_consumer_id_idx` (`consumer_id`),
    CONSTRAINT `allocations_ibfk_1` FOREIGN KEY (`resource_provider_id`) REFERENCES `resource_providers` (`id`),
    CONSTRAINT `allocations_ibfk_2` FOREIGN KEY (`resource_class_id`) REFERENCES `resource_classes` (`id`)
  );

CREATE TABLE
  `inventories` (
    `id` int(11) NOT NULL AUTO_INCREMENT,
    `resource_provider_id` int(11) NOT NULL,
    `resource_class_id` int(11) NOT NULL,
    `total` int(11) NOT NULL,
    `reserved` int(11) NOT NULL DEFAULT '0',
    `min_unit` int(11) NOT NULL DEFAULT '1',
    `max_unit` int(11) NOT NULL,
    `step_size` int(11) NOT NULL DEFAULT '1',
    `allocation_ratio` decimal(16,4) NOT NULL DEFAULT '1.0000',
    `created_at` datetime DEFAULT NULL,
    `updated_at` datetime DEFAULT NULL,
    PRIMARY KEY (`id`),
    -- one inventory row per provider/class pair
    UNIQUE KEY `uniq_inventories0resource_provider_resource_class` (`resource_provider_id`, `resource_class_id`),
    KEY `inventories_resource_class_id_idx` (`resource_class_id`),
    CONSTRAINT `inventories_ibfk_1` FOREIGN KEY (`resource_provider_id`) REFERENCES `resource_providers` (`id`),
    CONSTRAINT `inventories_ibfk_2` FOREIGN KEY (`resource_class_id`) REFERENCES `resource_classes` (`id`)
  );

-- file: sqlc.yaml (tail of the existing octavia entry, followed by the five
-- generator entries added for glance, placement, manila, keystone, magnum)
      package: "octavia"
      out: "internal/db/octavia"
      emit_exported_queries: true
  - engine: "mysql"
    schema: "sql/glance/schema.sql"
    queries: "sql/glance/queries.sql"
    gen:
      go:
        package: "glance"
        out: "internal/db/glance"
        emit_exported_queries: true
  - engine: "mysql"
    schema: "sql/placement/schema.sql"
    queries: "sql/placement/queries.sql"
    gen:
      go:
        package: "placement"
        out: "internal/db/placement"
        emit_exported_queries: true
  - engine: "mysql"
    schema: "sql/manila/schema.sql"
    queries: "sql/manila/queries.sql"
    gen:
      go:
        package: "manila"
        out: "internal/db/manila"
        emit_exported_queries: true
  - engine: "mysql"
    schema: "sql/keystone/schema.sql"
    queries: "sql/keystone/queries.sql"
    gen:
      go:
        package: "keystone"
        out: "internal/db/keystone"
        emit_exported_queries: true
  - engine: "mysql"
    schema: "sql/magnum/schema.sql"
    queries: "sql/magnum/queries.sql"
    gen:
      go:
        package: "magnum"
        out: "internal/db/magnum"
        emit_exported_queries: true