Console add bucket usage report (#1706)
This commit is contained in:
parent ed8fc126aa, commit 71843632a0
@@ -32,8 +32,8 @@ const (
    FieldStorage = "storage"
    // FieldEgress is a field name for egress total
    FieldEgress = "egress"
    // FieldObjectsCount is a field name for objects count
    FieldObjectsCount = "objectsCount"
    // FieldObjectCount is a field name for objects count
    FieldObjectCount = "objectCount"
    // LimitArg is argument name for limit
    LimitArg = "limit"
    // OffsetArg is argument name for offset
@@ -174,7 +174,7 @@ func graphqlProjectUsage() *graphql.Object {
            FieldEgress: &graphql.Field{
                Type: graphql.Float,
            },
            FieldObjectsCount: &graphql.Field{
            FieldObjectCount: &graphql.Field{
                Type: graphql.Float,
            },
            SinceArg: &graphql.Field{

@@ -6,13 +6,17 @@ package consoleweb
import (
    "context"
    "encoding/json"
    "html/template"
    "net"
    "net/http"
    "path"
    "path/filepath"
    "strconv"
    "strings"
    "time"

    "github.com/graphql-go/graphql"
    "github.com/skyrings/skyring-common/tools/uuid"
    "github.com/zeebo/errs"
    "go.uber.org/zap"
    "golang.org/x/sync/errgroup"
@@ -90,6 +94,7 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, mail
    if server.config.StaticDir != "" {
        mux.Handle("/activation/", http.HandlerFunc(server.accountActivationHandler))
        mux.Handle("/registrationToken/", http.HandlerFunc(server.createRegistrationTokenHandler))
        mux.Handle("/usage-report/", http.HandlerFunc(server.bucketUsageReportHandler))
        mux.Handle("/static/", http.StripPrefix("/static", fs))
        mux.Handle("/", http.HandlerFunc(server.appHandler))
    }
@@ -106,6 +111,69 @@ func (s *Server) appHandler(w http.ResponseWriter, req *http.Request) {
    http.ServeFile(w, req, filepath.Join(s.config.StaticDir, "dist", "public", "index.html"))
}

// bucketUsageReportHandler generates a bucket usage report page for the project
func (s *Server) bucketUsageReportHandler(w http.ResponseWriter, req *http.Request) {
    var err error

    var projectID *uuid.UUID
    var since, before time.Time

    tokenCookie, err := req.Cookie("tokenKey")
    if err != nil {
        s.log.Error("bucket usage report error", zap.Error(err))

        w.WriteHeader(http.StatusUnauthorized)
        http.ServeFile(w, req, filepath.Join(s.config.StaticDir, "static", "errors", "404.html"))
        return
    }

    auth, err := s.service.Authorize(auth.WithAPIKey(req.Context(), []byte(tokenCookie.Value)))
    if err != nil {
        w.WriteHeader(http.StatusUnauthorized)
        http.ServeFile(w, req, filepath.Join(s.config.StaticDir, "static", "errors", "404.html"))
        return
    }

    defer func() {
        if err != nil {
            w.WriteHeader(http.StatusNotFound)
            http.ServeFile(w, req, filepath.Join(s.config.StaticDir, "static", "errors", "404.html"))
        }
    }()

    // parse query params
    projectID, err = uuid.Parse(req.URL.Query().Get("projectID"))
    if err != nil {
        return
    }
    since, err = time.Parse(time.RFC3339, req.URL.Query().Get("since"))
    if err != nil {
        return
    }
    before, err = time.Parse(time.RFC3339, req.URL.Query().Get("before"))
    if err != nil {
        return
    }

    s.log.Debug("querying bucket usage report",
        zap.String("projectID", projectID.String()),
        zap.String("since", since.String()),
        zap.String("before", before.String()))

    ctx := console.WithAuth(context.Background(), auth)
    bucketRollups, err := s.service.GetBucketUsageRollups(ctx, *projectID, since, before)
    if err != nil {
        return
    }

    report, err := template.ParseFiles(path.Join(s.config.StaticDir, "static", "reports", "UsageReport.html"))
    if err != nil {
        return
    }

    err = report.Execute(w, bucketRollups)
}

// accountActivationHandler is web app http handler function
func (s *Server) createRegistrationTokenHandler(w http.ResponseWriter, req *http.Request) {
    w.Header().Set(contentType, applicationJSON)

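The handler above expects the console auth token in a "tokenKey" cookie plus projectID, since, and before query parameters (RFC 3339 timestamps), mirroring the URL the frontend builds in onReportClick further down. A minimal client sketch of such a request, not part of this commit; the address, token, and project ID below are placeholders:

package main

import (
    "fmt"
    "net/http"
    "net/url"
    "time"
)

func main() {
    // Placeholders: substitute a real satellite console address, auth token and project ID.
    base := "http://127.0.0.1:8081"
    token := "<console-auth-token>"
    projectID := "00000000-0000-0000-0000-000000000000"

    // Build /usage-report?projectID=...&since=...&before=... the same way the frontend does.
    u, _ := url.Parse(base + "/usage-report/")
    q := u.Query()
    q.Set("projectID", projectID)
    q.Set("since", time.Now().AddDate(0, -1, 0).Format(time.RFC3339))
    q.Set("before", time.Now().Format(time.RFC3339))
    u.RawQuery = q.Encode()

    req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
    // The handler reads the auth token from the "tokenKey" cookie.
    req.AddCookie(&http.Cookie{Name: "tokenKey", Value: token})

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status) // 200 with the rendered HTML report on success
}
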
@@ -653,6 +653,24 @@ func (s *Service) GetProjectUsage(ctx context.Context, projectID uuid.UUID, sinc
    return s.store.UsageRollups().GetProjectTotal(ctx, projectID, since, before)
}

// GetBucketUsageRollups retrieves summed usage rollups for every bucket of a particular project for a given period
func (s *Service) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) ([]BucketUsageRollup, error) {
    var err error
    defer mon.Task()(&ctx)(&err)

    auth, err := GetAuth(ctx)
    if err != nil {
        return nil, err
    }

    _, err = s.isProjectMember(ctx, auth.User.ID, projectID)
    if err != nil {
        return nil, err
    }

    return s.store.UsageRollups().GetBucketUsageRollups(ctx, projectID, since, before)
}

// Authorize validates token from context and returns authorized Authorization
func (s *Service) Authorize(ctx context.Context) (a Authorization, err error) {
    defer mon.Task()(&ctx)(&err)

@@ -13,14 +13,37 @@ import (
// UsageRollups defines how console works with usage rollups
type UsageRollups interface {
    GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (*ProjectUsage, error)
    GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) ([]BucketUsageRollup, error)
}

// ProjectUsage consist of period total storage, egress
// and objects count per hour for certain Project
type ProjectUsage struct {
    Storage      float64
    Egress       float64
    ObjectsCount float64
    Storage     float64
    Egress      float64
    ObjectCount float64

    Since  time.Time
    Before time.Time
}

// BucketUsageRollup is total bucket usage info
// for certain period
type BucketUsageRollup struct {
    ProjectID  uuid.UUID
    BucketName []byte

    RemoteStoredData float64
    InlineStoredData float64

    RemoteSegments float64
    InlineSegments float64
    ObjectCount    float64
    MetadataSize   float64

    RepairEgress float64
    GetEgress    float64
    AuditEgress  float64

    Since  time.Time
    Before time.Time

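To keep the units of BucketUsageRollup straight (stored data and metadata in GB-hours, egress in GB, segments and objects in count-hours), here is a self-contained sketch; the local struct mirrors the numeric fields above and totalEgressGB is a hypothetical helper, not something the satellite defines:

package main

import "fmt"

// bucketUsageRollup mirrors the numeric fields of console.BucketUsageRollup
// from the diff above so this sketch compiles on its own.
type bucketUsageRollup struct {
    RemoteStoredData float64 // GB-hours
    InlineStoredData float64 // GB-hours
    RemoteSegments   float64 // segment count * hours
    InlineSegments   float64 // segment count * hours
    ObjectCount      float64 // object count * hours
    MetadataSize     float64 // GB-hours
    RepairEgress     float64 // GB
    GetEgress        float64 // GB
    AuditEgress      float64 // GB
}

// totalEgressGB is a hypothetical helper; the report itself keeps the three
// egress kinds in separate columns.
func totalEgressGB(r bucketUsageRollup) float64 {
    return r.GetEgress + r.AuditEgress + r.RepairEgress
}

func main() {
    r := bucketUsageRollup{GetEgress: 400, AuditEgress: 20, RepairEgress: 10}
    fmt.Printf("total egress: %.2f GB\n", totalEgressGB(r)) // total egress: 430.00 GB
}
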
@@ -471,6 +471,12 @@ type lockedUsageRollups struct {
    db console.UsageRollups
}

func (m *lockedUsageRollups) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since time.Time, before time.Time) ([]console.BucketUsageRollup, error) {
    m.Lock()
    defer m.Unlock()
    return m.db.GetBucketUsageRollups(ctx, projectID, since, before)
}

func (m *lockedUsageRollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since time.Time, before time.Time) (*console.ProjectUsage, error) {
    m.Lock()
    defer m.Unlock()

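lockedUsageRollups follows the satellite's locked-wrapper pattern: every method takes the shared lock before delegating to the wrapped console.UsageRollups. A stripped-down sketch of the same pattern with a made-up one-method interface, so it compiles on its own:

package main

import "sync"

// usageRollups is a made-up stand-in for console.UsageRollups.
type usageRollups interface {
    BucketCount() int
}

// lockedUsageRollups serializes all calls to the wrapped implementation,
// mirroring the wrapper shown in the diff above.
type lockedUsageRollups struct {
    sync.Locker
    db usageRollups
}

func (m *lockedUsageRollups) BucketCount() int {
    m.Lock()
    defer m.Unlock()
    return m.db.BucketCount()
}

type mapRollups struct{ buckets map[string]struct{} }

func (r *mapRollups) BucketCount() int { return len(r.buckets) }

func main() {
    var mu sync.Mutex
    var db usageRollups = &lockedUsageRollups{
        Locker: &mu,
        db:     &mapRollups{buckets: map[string]struct{}{"photos": {}}},
    }
    _ = db.BucketCount()
}
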
@@ -25,12 +25,12 @@ type usagerollups struct {
func (db *usagerollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (usage *console.ProjectUsage, err error) {
    storageQuery := db.db.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart

    roullupsQuery := `SELECT SUM(settled), SUM(inline), action
    roullupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
        FROM bucket_bandwidth_rollups
        WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?
        GROUP BY action`
        GROUP BY action`)

    rollupsRows, err := db.db.QueryContext(ctx, db.db.Rebind(roullupsQuery), []byte(projectID.String()), since, before)
    rollupsRows, err := db.db.QueryContext(ctx, roullupsQuery, []byte(projectID.String()), since, before)
    if err != nil {
        return nil, err
    }
@@ -52,23 +52,10 @@ func (db *usagerollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID
        }
    }

    bucketsQuery := "SELECT DISTINCT bucket_name FROM bucket_bandwidth_rollups where project_id = ? and interval_start >= ? and interval_start <= ?"
    bucketRows, err := db.db.QueryContext(ctx, db.db.Rebind(bucketsQuery), []byte(projectID.String()), since, before)
    buckets, err := db.getBuckets(ctx, projectID, since, before)
    if err != nil {
        return nil, err
    }
    defer func() { err = errs.Combine(err, bucketRows.Close()) }()

    var buckets []string
    for bucketRows.Next() {
        var bucket string
        err = bucketRows.Scan(&bucket)
        if err != nil {
            return nil, err
        }

        buckets = append(buckets, bucket)
    }

    bucketsTallies := make(map[string]*[]*dbx.BucketStorageTally)
    for _, bucket := range buckets {
@@ -97,7 +84,7 @@ func (db *usagerollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID

            usage.Storage += memory.Size(current.Inline).GB() * hours
            usage.Storage += memory.Size(current.Remote).GB() * hours
            usage.ObjectsCount += float64(current.ObjectCount) * hours
            usage.ObjectCount += float64(current.ObjectCount) * hours
        }
    }

@@ -105,3 +92,113 @@ func (db *usagerollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID
    usage.Before = before
    return usage, nil
}

// GetBucketUsageRollups retrieves summed usage rollups for every bucket of a particular project for a given period
func (db *usagerollups) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) ([]console.BucketUsageRollup, error) {
    buckets, err := db.getBuckets(ctx, projectID, since, before)
    if err != nil {
        return nil, err
    }

    roullupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
        FROM bucket_bandwidth_rollups
        WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
        GROUP BY action`)

    storageQuery := db.db.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart

    var bucketUsageRollups []console.BucketUsageRollup
    for _, bucket := range buckets {
        bucketRollup := console.BucketUsageRollup{
            ProjectID:  projectID,
            BucketName: []byte(bucket),
            Since:      since,
            Before:     before,
        }

        // get bucket_bandwidth_rollups
        rollupsRows, err := db.db.QueryContext(ctx, roullupsQuery, []byte(projectID.String()), []byte(bucket), since, before)
        if err != nil {
            return nil, err
        }
        defer func() { err = errs.Combine(err, rollupsRows.Close()) }()

        // fill egress
        for rollupsRows.Next() {
            var action pb.PieceAction
            var settled, inline int64

            err = rollupsRows.Scan(&settled, &inline, &action)
            if err != nil {
                return nil, err
            }

            switch action {
            case pb.PieceAction_GET:
                bucketRollup.GetEgress += memory.Size(settled + inline).GB()
            case pb.PieceAction_GET_AUDIT:
                bucketRollup.AuditEgress += memory.Size(settled + inline).GB()
            case pb.PieceAction_GET_REPAIR:
                bucketRollup.RepairEgress += memory.Size(settled + inline).GB()
            default:
                continue
            }
        }

        bucketStorageTallies, err := storageQuery(ctx,
            dbx.BucketStorageTally_ProjectId([]byte(projectID.String())),
            dbx.BucketStorageTally_BucketName([]byte(bucket)),
            dbx.BucketStorageTally_IntervalStart(since),
            dbx.BucketStorageTally_IntervalStart(before))

        if err != nil {
            return nil, err
        }

        // fill metadata, objects and stored data
        // hours calculated from previous tallies,
        // so we skip the most recent one
        for i := len(bucketStorageTallies) - 1; i > 0; i-- {
            current := bucketStorageTallies[i]

            hours := bucketStorageTallies[i-1].IntervalStart.Sub(current.IntervalStart).Hours()

            bucketRollup.RemoteStoredData += memory.Size(current.Remote).GB() * hours
            bucketRollup.InlineStoredData += memory.Size(current.Inline).GB() * hours
            bucketRollup.MetadataSize += memory.Size(current.MetadataSize).GB() * hours
            bucketRollup.RemoteSegments += float64(current.RemoteSegmentsCount) * hours
            bucketRollup.InlineSegments += float64(current.InlineSegmentsCount) * hours
            bucketRollup.ObjectCount += float64(current.ObjectCount) * hours
        }

        bucketUsageRollups = append(bucketUsageRollups, bucketRollup)
    }

    return bucketUsageRollups, nil
}

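The storage loop above weights each tally by the hours until the next, more recent tally, which is why the newest tally is only used as an end point. A self-contained sketch of that GB-hours arithmetic with invented sample tallies; plain division by 1e9 stands in for memory.Size(...).GB():

package main

import (
    "fmt"
    "time"
)

// tally is a stand-in for the dbx.BucketStorageTally rows used above.
type tally struct {
    IntervalStart time.Time
    RemoteBytes   int64
}

func main() {
    start := time.Date(2019, 3, 1, 0, 0, 0, 0, time.UTC)

    // Ordered newest first, matching the OrderBy_Desc_IntervalStart query.
    tallies := []tally{
        {IntervalStart: start.Add(2 * time.Hour), RemoteBytes: 3000000000}, // most recent, end point only
        {IntervalStart: start.Add(1 * time.Hour), RemoteBytes: 2000000000},
        {IntervalStart: start, RemoteBytes: 1000000000},
    }

    var remoteGBh float64
    // Walk from oldest to second-newest; hours come from the gap to the next tally.
    for i := len(tallies) - 1; i > 0; i-- {
        current := tallies[i]
        hours := tallies[i-1].IntervalStart.Sub(current.IntervalStart).Hours()
        remoteGBh += float64(current.RemoteBytes) / 1e9 * hours
    }

    fmt.Printf("remote stored data: %.1f GB-hours\n", remoteGBh) // 1 GB*1h + 2 GB*1h = 3.0 GB-hours
}
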
// getBuckets lists all buckets of a certain project for a given period
func (db *usagerollups) getBuckets(ctx context.Context, projectID uuid.UUID, since, before time.Time) ([]string, error) {
    bucketsQuery := db.db.Rebind(`SELECT DISTINCT bucket_name
        FROM bucket_bandwidth_rollups
        WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?`)

    bucketRows, err := db.db.QueryContext(ctx, bucketsQuery, []byte(projectID.String()), since, before)
    if err != nil {
        return nil, err
    }
    defer func() { err = errs.Combine(err, bucketRows.Close()) }()

    var buckets []string
    for bucketRows.Next() {
        var bucket string
        err = bucketRows.Scan(&bucket)
        if err != nil {
            return nil, err
        }

        buckets = append(buckets, bucket)
    }

    return buckets, nil
}

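These queries now pass their SQL through db.db.Rebind, so the literal ? placeholders can be rewritten for whichever database backs the satellite (PostgreSQL expects $1, $2, ...). db.db.Rebind here belongs to the project's generated dbx layer; purely as an illustration of the idea, github.com/jmoiron/sqlx exposes an equivalent standalone function:

package main

import (
    "fmt"

    "github.com/jmoiron/sqlx"
)

func main() {
    query := `SELECT DISTINCT bucket_name
        FROM bucket_bandwidth_rollups
        WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?`

    // Question-mark style stays as-is; PostgreSQL needs $1, $2, ...
    fmt.Println(sqlx.Rebind(sqlx.QUESTION, query))
    fmt.Println(sqlx.Rebind(sqlx.DOLLAR, query))
}
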
@@ -20,7 +20,7 @@ export async function fetchProjectUsage(projectID: string, since: Date, before:
    usage(since: "${since.toISOString()}", before: "${before.toISOString()}") {
        storage,
        egress,
        objectsCount,
        objectCount,
        since,
        before
    }

@@ -1,188 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

<template>
    <div>
        <table class="blueTable">
            <thead>
            <tr>
                <th>Bucket Name</th>
                <th>Roll Up Period</th>
                <th>Network Stored Data, GBh</th>
                <th colspan="3">Egress, GB</th>
                <th colspan="2">Objects, count*hours</th>
                <th>Metadata Size, GBh</th>
            </tr>
            <tr>
                <th></th>
                <th></th>
                <th></th>
                <th>Repair</th>
                <th>Get</th>
                <th>Audit</th>
                <th>Inline Segments</th>
                <th>Remote segments</th>
                <th></th>
            </tr>
            </thead>
            <tbody>
            <tr>
                <td class="tg-0lax">My Data Bucket 1</td>
                <td class="tg-0lax">01.02.2019-01.28.2019</td>
                <td class="tg-0lax">800</td>
                <td class="tg-0lax">10</td>
                <td class="tg-0lax">400</td>
                <td class="tg-0lax">20</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">0.1</td>
            </tr>
            <tr>
                <td class="tg-0lax">My Data Bucket 2</td>
                <td class="tg-0lax">01.02.2019-01.28.2019</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">10</td>
                <td class="tg-0lax">400</td>
                <td class="tg-0lax">20</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">0.2</td>
            </tr>
            <tr>
                <td class="tg-0lax">My Data Bucket 3</td>
                <td class="tg-0lax">01.02.2019-01.28.2019</td>
                <td class="tg-0lax">600</td>
                <td class="tg-0lax">10</td>
                <td class="tg-0lax">400</td>
                <td class="tg-0lax">20</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">0.3</td>
            </tr>
            <tr>
                <td class="tg-0lax">My Data Bucket 4</td>
                <td class="tg-0lax">01.02.2019-01.28.2019</td>
                <td class="tg-0lax">550</td>
                <td class="tg-0lax">10</td>
                <td class="tg-0lax">400</td>
                <td class="tg-0lax">20</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">0.15</td>
            </tr>
            <tr>
                <td class="tg-0lax">My Data Bucket 5</td>
                <td class="tg-0lax">01.02.2019-01.28.2019</td>
                <td class="tg-0lax">500</td>
                <td class="tg-0lax">10</td>
                <td class="tg-0lax">400</td>
                <td class="tg-0lax">20</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">0.11</td>
            </tr>
            <tr>
                <td class="tg-0lax">My Data Bucket 6</td>
                <td class="tg-0lax">01.02.2019-01.28.2019</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">10</td>
                <td class="tg-0lax">400</td>
                <td class="tg-0lax">20</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">0.13</td>
            </tr>
            <tr>
                <td class="tg-0lax">My Data Bucket 7</td>
                <td class="tg-0lax">01.02.2019-01.28.2019</td>
                <td class="tg-0lax">1200</td>
                <td class="tg-0lax">10</td>
                <td class="tg-0lax">400</td>
                <td class="tg-0lax">20</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">0.13</td>
            </tr>
            <tr>
                <td class="tg-0lax">My Data Bucket 8</td>
                <td class="tg-0lax">01.02.2019-01.28.2019</td>
                <td class="tg-0lax">1300</td>
                <td class="tg-0lax">10</td>
                <td class="tg-0lax">400</td>
                <td class="tg-0lax">20</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">0.13</td>
            </tr>
            <tr>
                <td class="tg-0lax">My Data Bucket 9</td>
                <td class="tg-0lax">01.02.2019-01.28.2019</td>
                <td class="tg-0lax">343</td>
                <td class="tg-0lax">10</td>
                <td class="tg-0lax">400</td>
                <td class="tg-0lax">20</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">0.13</td>
            </tr>
            <tr>
                <td class="tg-0lax">My Data Bucket 10</td>
                <td class="tg-0lax">01.02.2019-01.28.2019</td>
                <td class="tg-0lax">12345</td>
                <td class="tg-0lax">10</td>
                <td class="tg-0lax">400</td>
                <td class="tg-0lax">20</td>
                <td class="tg-0lax">1000</td>
                <td class="tg-0lax">700</td>
                <td class="tg-0lax">0.13</td>
            </tr>
            </tbody>
        </table>
    </div>
</template>

<script lang="ts">
import { Component, Vue } from 'vue-property-decorator';

@Component(
    {}
)

export default class ReportTable extends Vue {}
</script>

<style scoped lang="scss">
    table.blueTable {
        border: 1px solid #1C6EA4;
        background-color: #EEEEEE;
        width: 100%;
        text-align: center;
        border-collapse: collapse;
    }
    table.blueTable td, table.blueTable th {
        border: 1px solid #AAAAAA;
        padding: 3px 2px;
    }
    table.blueTable tbody td {
        font-size: 13px;
    }
    table.blueTable tbody tr:nth-child(even) {
        background: #D0E4F5;
    }
    table.blueTable thead {
        background: #1C6EA4;
        background: -moz-linear-gradient(top, #5592bb 0%, #327cad 66%, #1C6EA4 100%);
        background: -webkit-linear-gradient(top, #5592bb 0%, #327cad 66%, #1C6EA4 100%);
        background: linear-gradient(to bottom, #5592bb 0%, #327cad 66%, #1C6EA4 100%);
        border-bottom: 2px solid #444444;
    }
    table.blueTable thead th {
        font-size: 15px;
        font-weight: bold;
        color: #FFFFFF;
        border-left: 2px solid #D0E4F5;
    }
    table.blueTable thead th:first-child {
        border-left: none;
    }
</style>

@@ -160,8 +160,15 @@ import { NOTIFICATION_ACTIONS, PROJECT_USAGE_ACTIONS } from '@/utils/constants/a
            target.classList.add('active');
        },
        onReportClick: function (): void {
            let route = this.$router.resolve(ROUTES.REPORT_TABLE);
            window.open(route.href, '_blank');
            let projectID = this.$store.getters.selectedProject.id;

            let url = new URL(location.origin);
            url.pathname = "usage-report";
            url.searchParams.append('projectID', projectID);
            url.searchParams.append('since', this.$data.dateRange.startDate.toISOString());
            url.searchParams.append('before', this.$data.dateRange.endDate.toISOString());

            window.open(url.href, '_blank');
        },
    },
    computed: {
@@ -172,7 +179,7 @@ import { NOTIFICATION_ACTIONS, PROJECT_USAGE_ACTIONS } from '@/utils/constants/a
            return this.$store.state.usageModule.projectUsage.egress.toPrecision(5);
        },
        objectsCount: function () {
            return this.$store.state.usageModule.projectUsage.objectsCount.toPrecision(5);
            return this.$store.state.usageModule.projectUsage.objectCount.toPrecision(5);
        }
    }
}

@@ -14,7 +14,6 @@ import TeamArea from '@/components/team/TeamArea.vue';
import Page404 from '@/components/errors/Page404.vue';
import ApiKeysArea from '@/components/apiKeys/ApiKeysArea.vue';
import UsageReport from '@/components/project/UsageReport.vue';
import ReportTable from '@/components/project/ReportTable.vue';
import BucketArea from '@/components/buckets/BucketArea.vue';
import { getToken } from '@/utils/tokenManager';
import store from '@/store';
@@ -89,11 +88,6 @@ let router = new Router({
                // },
            ]
        },
        {
            path: ROUTES.REPORT_TABLE.path,
            name: ROUTES.REPORT_TABLE.name,
            component: ReportTable,
        },
        {
            path: '*',
            name: '404',

@@ -7,14 +7,14 @@ import { fetchProjectUsage } from '@/api/usage';

export const usageModule = {
    state: {
        projectUsage: {storage: 0, egress: 0, objectsCount: 0} as ProjectUsage
        projectUsage: {storage: 0, egress: 0, objectCount: 0} as ProjectUsage
    },
    mutations: {
        [PROJECT_USAGE_MUTATIONS.FETCH](state: any, projectUsage: ProjectUsage) {
            state.projectUsage = projectUsage;
        },
        [PROJECT_USAGE_MUTATIONS.CLEAR](state:any) {
            state.projectUsage = {storage: 0, egress: 0, objectsCount: 0} as ProjectUsage;
            state.projectUsage = {storage: 0, egress: 0, objectCount: 0} as ProjectUsage;
        }
    },
    actions: {

web/satellite/src/types/projects.d.ts (vendored, 2 changed lines)
@@ -33,7 +33,7 @@ declare type TeamMemberModel = {
declare type ProjectUsage = {
    storage: number,
    egress: number,
    objectsCount: number,
    objectCount: number,
    since: Date,
    before: Date
};

@@ -38,10 +38,6 @@ const ROUTES = {
        path: '/project-details/usage-report',
        name: 'UsageReport'
    },
    REPORT_TABLE: {
        path: '/project-details/usage-report/detailed-report',
        name: 'ReportTable'
    },
    BUCKETS: {
        path: '/buckets',
        name: 'Buckets'

web/satellite/static/reports/UsageReport.html (new file, 97 lines)
@@ -0,0 +1,97 @@
<!--Copyright (C) 2019 Storj Labs, Inc.-->
<!--See LICENSE for copying information.-->

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>bucket usage rollups</title>
    <link href="/static/static/fonts/font_regular.ttf" rel="stylesheet">
    <link rel="stylesheet" type="text/css" href="error.css">
    <style>
        table.blueTable {
            border: 1px solid #1C6EA4;
            background-color: #EEEEEE;
            width: 100%;
            text-align: center;
            border-collapse: collapse;
        }
        table.blueTable td, table.blueTable th {
            border: 1px solid #AAAAAA;
            padding: 3px 2px;
        }
        table.blueTable tbody td {
            font-size: 13px;
        }
        table.blueTable tbody tr:nth-child(even) {
            background: #D0E4F5;
        }
        table.blueTable thead {
            background: #1C6EA4;
            background: -moz-linear-gradient(top, #5592bb 0%, #327cad 66%, #1C6EA4 100%);
            background: -webkit-linear-gradient(top, #5592bb 0%, #327cad 66%, #1C6EA4 100%);
            background: linear-gradient(to bottom, #5592bb 0%, #327cad 66%, #1C6EA4 100%);
            border-bottom: 2px solid #444444;
        }
        table.blueTable thead th {
            font-size: 15px;
            font-weight: bold;
            color: #FFFFFF;
            border-left: 2px solid #D0E4F5;
        }
        table.blueTable thead th:first-child {
            border-left: none;
        }
    </style>
</head>
<body>
<div>
    <table class="blueTable">
        <thead>
        <tr>
            <th>Bucket Name</th>
            <th>Roll Up Period Start</th>
            <th>Roll Up Period End</th>
            <th colspan="2">Network Stored Data, GBh</th>
            <th colspan="3">Egress, GB</th>
            <th colspan="2">Segments, count*hours</th>
            <th>Objects, count*hours</th>
            <th>Metadata Size, GBh</th>
        </tr>
        <tr>
            <th></th>
            <th></th>
            <th></th>
            <th>Inline</th>
            <th>Remote</th>
            <th>Repair</th>
            <th>Get</th>
            <th>Audit</th>
            <th>Inline Segments</th>
            <th>Remote segments</th>
            <th></th>
            <th></th>
        </tr>
        </thead>
        <tbody>
        {{ range $i, $value := . }}
        <tr>
            <td class="tg-0lax">{{ printf "%s" $value.BucketName }}</td>
            <td class="tg-0lax">{{ printf "%s" $value.Since }}</td>
            <td class="tg-0lax">{{ printf "%s" $value.Before }}</td>
            <td class="tg-0lax">{{ printf "%.6f" $value.InlineStoredData }}</td>
            <td class="tg-0lax">{{ printf "%.6f" $value.RemoteStoredData }}</td>
            <td class="tg-0lax">{{ printf "%.6f" $value.RepairEgress }}</td>
            <td class="tg-0lax">{{ printf "%.6f" $value.GetEgress }}</td>
            <td class="tg-0lax">{{ printf "%.6f" $value.AuditEgress }}</td>
            <td class="tg-0lax">{{ printf "%.6f" $value.InlineSegments }}</td>
            <td class="tg-0lax">{{ printf "%.6f" $value.RemoteSegments }}</td>
            <td class="tg-0lax">{{ printf "%.6f" $value.ObjectCount }}</td>
            <td class="tg-0lax">{{ printf "%.6f" $value.MetadataSize }}</td>
        </tr>
        {{ end }}
        </tbody>
    </table>
</div>
</body>
</html>

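This template is what bucketUsageReportHandler parses and executes with the slice returned by GetBucketUsageRollups (report.Execute(w, bucketRollups)). A self-contained sketch of that rendering step, using a trimmed inline template with the same field references and invented sample data; the rollup struct below is a local stand-in, not the console type:

package main

import (
    "html/template"
    "os"
    "time"
)

// rollup mirrors the fields the template rows refer to, standing in for
// console.BucketUsageRollup so this sketch runs without the satellite code.
type rollup struct {
    BucketName   []byte
    Since        time.Time
    Before       time.Time
    GetEgress    float64
    AuditEgress  float64
    RepairEgress float64
}

const rows = `{{ range $i, $value := . }}<tr>
  <td>{{ printf "%s" $value.BucketName }}</td>
  <td>{{ printf "%s" $value.Since }}</td>
  <td>{{ printf "%s" $value.Before }}</td>
  <td>{{ printf "%.6f" $value.GetEgress }}</td>
  <td>{{ printf "%.6f" $value.AuditEgress }}</td>
  <td>{{ printf "%.6f" $value.RepairEgress }}</td>
</tr>
{{ end }}`

func main() {
    tmpl := template.Must(template.New("report").Parse(rows))

    rollups := []rollup{{
        BucketName:  []byte("photos"),
        Since:       time.Date(2019, 2, 1, 0, 0, 0, 0, time.UTC),
        Before:      time.Date(2019, 2, 28, 0, 0, 0, 0, time.UTC),
        GetEgress:   400,
        AuditEgress: 20,
    }}

    // In the handler this writes to the http.ResponseWriter; stdout is enough here.
    if err := tmpl.Execute(os.Stdout, rollups); err != nil {
        panic(err)
    }
}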