7ffa9ef914
Create a config to specify one-time prices and corresponding coupon ids for partners. github issue: https://github.com/storj/storj-private/issues/118 Change-Id: I67b26e7208b12ba8f0e6dc1b164dd9545b09cac0
1107 lines
35 KiB
Plaintext
Executable File
1107 lines
35 KiB
Plaintext
Executable File
# admin peer http listening address
|
|
# admin.address: ""
|
|
|
|
# the oauth host allowed to bypass token authentication.
|
|
# admin.allowed-oauth-host: ""
|
|
|
|
# an alternate directory path which contains the static assets to serve. When empty, it uses the embedded assets
|
|
# admin.static-dir: ""
|
|
|
|
# enable analytics reporting
|
|
# analytics.enabled: false
|
|
|
|
# the number of events that can be in the queue before dropping
|
|
# analytics.hub-spot.channel-size: 1000
|
|
|
|
# hubspot client ID
|
|
# analytics.hub-spot.client-id: ""
|
|
|
|
# hubspot client secret
|
|
# analytics.hub-spot.client-secret: ""
|
|
|
|
# the number of concurrent api requests that can be made
|
|
# analytics.hub-spot.concurrent-sends: 4
|
|
|
|
# the default timeout for the hubspot http client
|
|
# analytics.hub-spot.default-timeout: 10s
|
|
|
|
# hubspot refresh token
|
|
# analytics.hub-spot.refresh-token: ""
|
|
|
|
# hubspot token refresh API
|
|
# analytics.hub-spot.token-api: https://api.hubapi.com/oauth/v1/token
|
|
|
|
# segment write key
|
|
# analytics.segment-write-key: ""
|
|
|
|
# how often to run the reservoir chore
|
|
# audit.chore-interval: 24h0m0s
|
|
|
|
# max number of times to attempt updating a statdb batch
|
|
# audit.max-retries-stat-db: 3
|
|
|
|
# limit above which we consider an audit is failed
|
|
# audit.max-reverify-count: 3
|
|
|
|
# the minimum acceptable bytes that storage nodes can transfer per second to the satellite
|
|
# audit.min-bytes-per-second: 128 B
|
|
|
|
# the minimum duration for downloading a share from storage nodes before timing out
|
|
# audit.min-download-timeout: 5m0s
|
|
|
|
# how often to recheck an empty audit queue
|
|
# audit.queue-interval: 1h0m0s
|
|
|
|
# how long a single reverification job can take before it may be taken over by another worker
|
|
# audit.reverification-retry-interval: 6h0m0s
|
|
|
|
# number of workers to run reverify audits on pieces
|
|
# audit.reverify-worker-concurrency: 2
|
|
|
|
# number of reservoir slots allotted for nodes, currently capped at 3
|
|
# audit.slots: 3
|
|
|
|
# whether or not to use the ranged loop observer instead of the chore.
|
|
# audit.use-ranged-loop: false
|
|
|
|
# number of audit jobs to push at once to the verification queue
|
|
# audit.verification-push-batch-size: 4096
|
|
|
|
# number of workers to run audits on segments
|
|
# audit.worker-concurrency: 2
|
|
|
|
# how frequently checker should check for bad segments
|
|
# checker.interval: 30s
|
|
|
|
# the probability of a single node going down within the next checker iteration
|
|
# checker.node-failure-rate: 5.435e-05
|
|
|
|
# how stale reliable node cache can be
|
|
# checker.reliability-cache-staleness: 5m0s
|
|
|
|
# comma-separated override values for repair threshold in the format k/o/n-override (min/optimal/total-override)
|
|
# checker.repair-overrides: 29/80/110-52,29/80/95-52,29/80/130-52
|
|
|
|
# Number of damaged segments to buffer in-memory before flushing to the repair queue
|
|
# checker.repair-queue-insert-batch-size: 100
|
|
|
|
# percent of held amount disposed to node after leaving withheld
|
|
compensation.dispose-percent: 50
|
|
|
|
# rate for data at rest per GB/hour
|
|
compensation.rates.at-rest-gb-hours: "0.00000205"
|
|
|
|
# rate for audit egress bandwidth per TB
|
|
compensation.rates.get-audit-tb: "10"
|
|
|
|
# rate for repair egress bandwidth per TB
|
|
compensation.rates.get-repair-tb: "10"
|
|
|
|
# rate for egress bandwidth per TB
|
|
compensation.rates.get-tb: "20"
|
|
|
|
# rate for repair ingress bandwidth per TB
|
|
compensation.rates.put-repair-tb: "0"
|
|
|
|
# rate for ingress bandwidth per TB
|
|
compensation.rates.put-tb: "0"
|
|
|
|
# comma separated monthly withheld percentage rates
|
|
compensation.withheld-percents: 75,75,75,50,50,50,25,25,25,0,0,0,0,0,0
|
|
|
|
# expiration time for account recovery and activation tokens
|
|
# console-auth.token-expiration-time: 30m0s
|
|
|
|
# the Flagship API key
|
|
# console.ab-testing.api-key: ""
|
|
|
|
# whether or not AB testing is enabled
|
|
# console.ab-testing.enabled: false
|
|
|
|
# the Flagship environment ID
|
|
# console.ab-testing.env-id: ""
|
|
|
|
# the Flagship API URL
|
|
# console.ab-testing.flagship-url: https://decision.flagship.io/v2
|
|
|
|
# the Flagship hit tracking API URL
|
|
# console.ab-testing.hit-tracking-url: https://ariane.abtasty.com
|
|
|
|
# url link for account activation redirect
|
|
# console.account-activation-redirect-url: ""
|
|
|
|
# server address of the graphql api gateway and frontend app
|
|
# console.address: :10100
|
|
|
|
# default duration for AS OF SYSTEM TIME
|
|
# console.as-of-system-time-duration: -5m0s
|
|
|
|
# auth token needed for access to registration token creation endpoint
|
|
# console.auth-token: ""
|
|
|
|
# secret used to sign auth tokens
|
|
# console.auth-token-secret: ""
|
|
|
|
# url link for beta satellite feedback
|
|
# console.beta-satellite-feedback-url: ""
|
|
|
|
# url link for beta satellite support
|
|
# console.beta-satellite-support-url: ""
|
|
|
|
# whether or not captcha is enabled
|
|
# console.captcha.login.hcaptcha.enabled: false
|
|
|
|
# captcha secret key
|
|
# console.captcha.login.hcaptcha.secret-key: ""
|
|
|
|
# captcha site key
|
|
# console.captcha.login.hcaptcha.site-key: ""
|
|
|
|
# whether or not captcha is enabled
|
|
# console.captcha.login.recaptcha.enabled: false
|
|
|
|
# captcha secret key
|
|
# console.captcha.login.recaptcha.secret-key: ""
|
|
|
|
# captcha site key
|
|
# console.captcha.login.recaptcha.site-key: ""
|
|
|
|
# whether or not captcha is enabled
|
|
# console.captcha.registration.hcaptcha.enabled: false
|
|
|
|
# captcha secret key
|
|
# console.captcha.registration.hcaptcha.secret-key: ""
|
|
|
|
# captcha site key
|
|
# console.captcha.registration.hcaptcha.site-key: ""
|
|
|
|
# whether or not captcha is enabled
|
|
# console.captcha.registration.recaptcha.enabled: false
|
|
|
|
# captcha secret key
|
|
# console.captcha.registration.recaptcha.secret-key: ""
|
|
|
|
# captcha site key
|
|
# console.captcha.registration.recaptcha.site-key: ""
|
|
|
|
# url link to contacts page
|
|
# console.contact-info-url: https://forum.storj.io
|
|
|
|
# indicates if user is allowed to add coupon codes to account from billing
|
|
# console.coupon-code-billing-ui-enabled: false
|
|
|
|
# indicates if user is allowed to add coupon codes to account from signup
|
|
# console.coupon-code-signup-ui-enabled: false
|
|
|
|
# indicates if Content Security Policy is enabled
|
|
# console.csp-enabled: true
|
|
|
|
# default project limits for users
|
|
# console.default-project-limit: 1
|
|
|
|
# url link to documentation
|
|
# console.documentation-url: https://docs.storj.io/
|
|
|
|
# external endpoint of the satellite if hosted
|
|
# console.external-address: ""
|
|
|
|
# incremental duration of penalty for failed login attempts in minutes
|
|
# console.failed-login-penalty: 2
|
|
|
|
# indicates if file browser flow is disabled
|
|
# console.file-browser-flow-disabled: false
|
|
|
|
# allow domains to embed the satellite in a frame, space separated
|
|
# console.frame-ancestors: tardigrade.io storj.io
|
|
|
|
# url link for gateway credentials requests
|
|
# console.gateway-credentials-request-url: https://auth.storjshare.io
|
|
|
|
# url link to general request page
|
|
# console.general-request-url: https://supportdcs.storj.io/hc/en-us/requests/new?ticket_form_id=360000379291
|
|
|
|
# indicates if generated console api should be used
|
|
# console.generated-api-enabled: false
|
|
|
|
# url link to storj.io homepage
|
|
# console.homepage-url: https://www.storj.io
|
|
|
|
# indicates if satellite is in beta
|
|
# console.is-beta-satellite: false
|
|
|
|
# url link to let us know page
|
|
# console.let-us-know-url: https://storjlabs.atlassian.net/servicedesk/customer/portals
|
|
|
|
# url link for linksharing requests
|
|
# console.linksharing-url: https://link.storjshare.io
|
|
|
|
# number of times user can try to login without penalty
|
|
# console.login-attempts-without-penalty: 3
|
|
|
|
# indicates if storj native token payments system is enabled
|
|
# console.native-token-payments-enabled: false
|
|
|
|
# indicates if new billing screens should be used
|
|
# console.new-billing-screen: true
|
|
|
|
# indicates if new encryption passphrase flow is enabled
|
|
# console.new-encryption-passphrase-flow-enabled: true
|
|
|
|
# indicates if new project dashboard should be used
|
|
# console.new-project-dashboard: true
|
|
|
|
# how long oauth access tokens are issued for
|
|
# console.oauth-access-token-expiry: 24h0m0s
|
|
|
|
# how long oauth authorization codes are issued for
|
|
# console.oauth-code-expiry: 10m0s
|
|
|
|
# how long oauth refresh tokens are issued for
|
|
# console.oauth-refresh-token-expiry: 720h0m0s
|
|
|
|
# enable open registration
|
|
# console.open-registration-enabled: false
|
|
|
|
# optional url to external registration success page
|
|
# console.optional-signup-success-url: ""
|
|
|
|
# names and addresses of partnered satellites in JSON list format
|
|
# console.partnered-satellites: '[{"name":"US1","address":"https://us1.storj.io"},{"name":"EU1","address":"https://eu1.storj.io"},{"name":"AP1","address":"https://ap1.storj.io"}]'
|
|
|
|
# password hashing cost (0=automatic)
|
|
# console.password-cost: 0
|
|
|
|
# indicates if the overview onboarding step should render with pathways
|
|
# console.pathway-overview-enabled: true
|
|
|
|
# url link to project limit increase request page
|
|
# console.project-limits-increase-request-url: https://supportdcs.storj.io/hc/en-us/requests/new?ticket_form_id=360000683212
|
|
|
|
# number of events before the limit kicks in
|
|
# console.rate-limit.burst: 5
|
|
|
|
# the rate at which request are allowed
|
|
# console.rate-limit.duration: 5m0s
|
|
|
|
# number of clients whose rate limits we store
|
|
# console.rate-limit.num-limits: 1000
|
|
|
|
# used to display at web satellite console
|
|
# console.satellite-name: Storj
|
|
|
|
# name of organization which set up satellite
|
|
# console.satellite-operator: Storj Labs
|
|
|
|
# used to communicate with web crawlers and other web robots
|
|
# console.seo: "User-agent: *\nDisallow: \nDisallow: /cgi-bin/"
|
|
|
|
# duration a session is valid for (superseded by inactivity timer delay if inactivity timer is enabled)
|
|
# console.session.duration: 168h0m0s
|
|
|
|
# inactivity timer delay in seconds
|
|
# console.session.inactivity-timer-duration: 600
|
|
|
|
# indicates if a session can be timed out due to inactivity
|
|
# console.session.inactivity-timer-enabled: true
|
|
|
|
# indicates whether remaining session time is shown for debugging
|
|
# console.session.inactivity-timer-viewer-enabled: false
|
|
|
|
# path to static resources
|
|
# console.static-dir: ""
|
|
|
|
# url link to terms and conditions page
|
|
# console.terms-and-conditions-url: https://www.storj.io/terms-of-service/
|
|
|
|
# the default free-tier bandwidth usage limit
|
|
# console.usage-limits.bandwidth.free: 150.00 GB
|
|
|
|
# the default paid-tier bandwidth usage limit
|
|
# console.usage-limits.bandwidth.paid: 100.00 TB
|
|
|
|
# the default free-tier project limit
|
|
# console.usage-limits.project.free: 1
|
|
|
|
# the default paid-tier project limit
|
|
# console.usage-limits.project.paid: 3
|
|
|
|
# the default free-tier segment usage limit
|
|
# console.usage-limits.segment.free: 150000
|
|
|
|
# the default paid-tier segment usage limit
|
|
# console.usage-limits.segment.paid: 100000000
|
|
|
|
# the default free-tier storage usage limit
|
|
# console.usage-limits.storage.free: 150.00 GB
|
|
|
|
# the default paid-tier storage usage limit
|
|
# console.usage-limits.storage.paid: 25.00 TB
|
|
|
|
# whether to load templates on each request
|
|
# console.watch: false
|
|
|
|
# allow private IPs in CheckIn and PingMe
|
|
# contact.allow-private-ip: false
|
|
|
|
# the public address of the node, useful for nodes behind NAT
|
|
contact.external-address: ""
|
|
|
|
# the maximum burst size for the contact rate limit token bucket
|
|
# contact.rate-limit-burst: 2
|
|
|
|
# the number of nodes or addresses to keep token buckets for
|
|
# contact.rate-limit-cache-size: 1000
|
|
|
|
# the amount of time that should happen between contact attempts usually
|
|
# contact.rate-limit-interval: 10m0s
|
|
|
|
# timeout for pinging storage nodes
|
|
# contact.timeout: 10m0s
|
|
|
|
# satellite database connection string
|
|
# database: postgres://
|
|
|
|
# satellite database api key lru capacity
|
|
# database-options.api-keys-cache.capacity: 1000
|
|
|
|
# satellite database api key expiration
|
|
# database-options.api-keys-cache.expiration: 1m0s
|
|
|
|
# macaroon revocation cache capacity
|
|
# database-options.revocations-cache.capacity: 10000
|
|
|
|
# macaroon revocation cache expiration
|
|
# database-options.revocations-cache.expiration: 5m0s
|
|
|
|
# Maximum Database Connection Lifetime, -1ns means the stdlib default
|
|
# db.conn_max_lifetime: 30m0s
|
|
|
|
# Maximum Amount of Idle Database connections, -1 means the stdlib default
|
|
# db.max_idle_conns: 1
|
|
|
|
# Maximum Amount of Open Database connections, -1 means the stdlib default
|
|
# db.max_open_conns: 5
|
|
|
|
# address to listen on for debug endpoints
|
|
# debug.addr: 127.0.0.1:0
|
|
|
|
# expose control panel
|
|
# debug.control: false
|
|
|
|
# provide the name of the peer to enable continuous cpu/mem profiling for
|
|
# debug.profilername: ""
|
|
|
|
# If set, a path to write a process trace SVG to
|
|
# debug.trace-out: ""
|
|
|
|
# how often to send reminders to users who need to verify their email
|
|
# email-reminders.chore-interval: 24h0m0s
|
|
|
|
# enable sending emails reminding users to verify their email
|
|
# email-reminders.enable: true
|
|
|
|
# amount of time before sending first reminder to users who need to verify their email
|
|
# email-reminders.first-verification-reminder: 24h0m0s
|
|
|
|
# amount of time before sending second reminder to users who need to verify their email
|
|
# email-reminders.second-verification-reminder: 120h0m0s
|
|
|
|
# set if expired segment cleanup is enabled or not
|
|
# expired-deletion.enabled: true
|
|
|
|
# the time between each attempt to go through the db and clean up expired segments
|
|
# expired-deletion.interval: 24h0m0s
|
|
|
|
# how many expired objects to query in a batch
|
|
# expired-deletion.list-limit: 100
|
|
|
|
# Access Grant which will be used to upload bloom filters to the bucket
|
|
# garbage-collection-bf.access-grant: ""
|
|
|
|
# Bucket which will be used to upload bloom filters
|
|
# garbage-collection-bf.bucket: ""
|
|
|
|
# set if garbage collection bloom filters are enabled or not
|
|
# garbage-collection-bf.enabled: true
|
|
|
|
# how quickly uploaded bloom filters will be automatically deleted
|
|
# garbage-collection-bf.expire-in: 336h0m0s
|
|
|
|
# the false positive rate used for creating a garbage collection bloom filter
|
|
# garbage-collection-bf.false-positive-rate: 0.1
|
|
|
|
# the initial number of pieces expected for a storage node to have, used for creating a filter
|
|
# garbage-collection-bf.initial-pieces: 400000
|
|
|
|
# the time between each garbage collection executions
|
|
# garbage-collection-bf.interval: 120h0m0s
|
|
|
|
# set if garbage collection bloom filter process should only run once then exit
|
|
# garbage-collection-bf.run-once: false
|
|
|
|
# whether to use ranged loop instead of segment loop
|
|
# garbage-collection-bf.use-ranged-loop: false
|
|
|
|
# how many bloom filters will be packed in a single zip
|
|
# garbage-collection-bf.zip-batch-size: 500
|
|
|
|
# Access to download the bloom filters. Needs read and write permission.
|
|
# garbage-collection.access-grant: ""
|
|
|
|
# bucket where retain info is stored
|
|
# garbage-collection.bucket: ""
|
|
|
|
# the number of nodes to concurrently send garbage collection retain filters to
|
|
# garbage-collection.concurrent-sends: 1
|
|
|
|
# set if loop to send garbage collection retain filters is enabled
|
|
# garbage-collection.enabled: false
|
|
|
|
# Expiration of newly created objects. These objects store error messages.
|
|
# garbage-collection.expire-in: 336h0m0s
|
|
|
|
# the time between each attempt to download and send garbage collection retain filters to storage nodes
|
|
# garbage-collection.interval: 48h0m0s
|
|
|
|
# the amount of time to allow a node to handle a retain request
|
|
# garbage-collection.retain-send-timeout: 1m0s
|
|
|
|
# interval for AS OF SYSTEM TIME clause (crdb specific) to read from db at a specific time in the past
|
|
# graceful-exit.as-of-system-time-interval: -10s
|
|
|
|
# size of the buffer used to batch inserts into the transfer queue.
|
|
# graceful-exit.chore-batch-size: 500
|
|
|
|
# how often to run the transfer queue chore.
|
|
# graceful-exit.chore-interval: 30s
|
|
|
|
# whether or not graceful exit is enabled on the satellite side.
|
|
# graceful-exit.enabled: true
|
|
|
|
# size of the buffer used to batch transfer queue reads and sends to the storage node.
|
|
# graceful-exit.endpoint-batch-size: 300
|
|
|
|
# maximum number of transfer failures per piece.
|
|
# graceful-exit.max-failures-per-piece: 5
|
|
|
|
# maximum inactive time frame of transfer activities per node.
|
|
# graceful-exit.max-inactive-time-frame: 168h0m0s
|
|
|
|
# maximum number of order limits a satellite sends to a node before marking piece transfer failed
|
|
# graceful-exit.max-order-limit-send-count: 10
|
|
|
|
# minimum age for a node on the network in order to initiate graceful exit
|
|
# graceful-exit.node-min-age-in-months: 6
|
|
|
|
# maximum percentage of transfer failures per node.
|
|
# graceful-exit.overall-max-failures-percentage: 10
|
|
|
|
# the minimum duration for receiving a stream from a storage node before timing out
|
|
# graceful-exit.recv-timeout: 2h0m0s
|
|
|
|
# batch size (crdb specific) for deleting and adding items to the transfer queue
|
|
# graceful-exit.transfer-queue-batch-size: 1000
|
|
|
|
# whether or not to use the ranged loop observer instead of the chore.
|
|
# graceful-exit.use-ranged-loop: false
|
|
|
|
# path to the certificate chain for this identity
|
|
identity.cert-path: /root/.local/share/storj/identity/satellite/identity.cert
|
|
|
|
# path to the private key for this identity
|
|
identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
|
|
|
|
# as of system interval
|
|
# live-accounting.as-of-system-interval: -10s
|
|
|
|
# bandwidth cache key time to live
|
|
# live-accounting.bandwidth-cache-ttl: 5m0s
|
|
|
|
# what to use for storing real-time accounting data
|
|
# live-accounting.storage-backend: ""
|
|
|
|
# if true, log function filename and line number
|
|
# log.caller: false
|
|
|
|
# if true, set logging to development mode
|
|
# log.development: false
|
|
|
|
# configures log encoding. can either be 'console', 'json', 'pretty', or 'gcloudlogging'.
|
|
# log.encoding: ""
|
|
|
|
# the minimum log level to log
|
|
# log.level: info
|
|
|
|
# can be stdout, stderr, or a filename
|
|
# log.output: stderr
|
|
|
|
# if true, log stack traces
|
|
# log.stack: false
|
|
|
|
# smtp authentication type
|
|
# mail.auth-type: login
|
|
|
|
# oauth2 app's client id
|
|
# mail.client-id: ""
|
|
|
|
# oauth2 app's client secret
|
|
# mail.client-secret: ""
|
|
|
|
# sender email address
|
|
# mail.from: ""
|
|
|
|
# plain/login auth user login
|
|
# mail.login: ""
|
|
|
|
# plain/login auth user password
|
|
# mail.password: ""
|
|
|
|
# refresh token used to retrieve new access token
|
|
# mail.refresh-token: ""
|
|
|
|
# smtp server address
|
|
# mail.smtp-server-address: ""
|
|
|
|
# path to email templates source
|
|
# mail.template-path: ""
|
|
|
|
# uri which is used when retrieving new access token
|
|
# mail.token-uri: ""
|
|
|
|
# the database connection string to use
|
|
# metainfo.database-url: postgres://
|
|
|
|
# maximum time allowed to pass between creating and committing a segment
|
|
# metainfo.max-commit-interval: 48h0m0s
|
|
|
|
# maximum encrypted object key length
|
|
# metainfo.max-encrypted-object-key-length: 1280
|
|
|
|
# maximum inline segment size
|
|
# metainfo.max-inline-segment-size: 4.0 KiB
|
|
|
|
# maximum segment metadata size
|
|
# metainfo.max-metadata-size: 2.0 KiB
|
|
|
|
# maximum number of parts object can contain
|
|
# metainfo.max-number-of-parts: 10000
|
|
|
|
# maximum segment size
|
|
# metainfo.max-segment-size: 64.0 MiB
|
|
|
|
# minimum allowed part size (last part has no minimum size limit)
|
|
# metainfo.min-part-size: 5.0 MiB
|
|
|
|
# minimum remote segment size
|
|
# metainfo.min-remote-segment-size: 1.2 KiB
|
|
|
|
# feature flag to enable using multiple object versions in the system internally
|
|
# metainfo.multiple-versions: false
|
|
|
|
# toggle flag if overlay is enabled
|
|
# metainfo.overlay: true
|
|
|
|
# Which fraction of nodes should be contacted successfully until the deletion of a batch of pieces is considered complete
|
|
# metainfo.piece-deletion.delete-success-threshold: 0.75
|
|
|
|
# timeout for dialing nodes (0 means satellite default)
|
|
# metainfo.piece-deletion.dial-timeout: 3s
|
|
|
|
# threshold for retrying a failed node
|
|
# metainfo.piece-deletion.fail-threshold: 10m0s
|
|
|
|
# maximum number of concurrent requests to storage nodes
|
|
# metainfo.piece-deletion.max-concurrency: 100
|
|
|
|
# maximum number of concurrent pieces can be processed
|
|
# metainfo.piece-deletion.max-concurrent-pieces: 1000000
|
|
|
|
# maximum number of pieces per batch
|
|
# metainfo.piece-deletion.max-pieces-per-batch: 5000
|
|
|
|
# maximum number of pieces per single request
|
|
# metainfo.piece-deletion.max-pieces-per-request: 1000
|
|
|
|
# timeout for a single delete request
|
|
# metainfo.piece-deletion.request-timeout: 15s
|
|
|
|
# max bucket count for a project.
|
|
# metainfo.project-limits.max-buckets: 100
|
|
|
|
# number of projects to cache.
|
|
# metainfo.rate-limiter.cache-capacity: 10000
|
|
|
|
# how long to cache the projects limiter.
|
|
# metainfo.rate-limiter.cache-expiration: 10m0s
|
|
|
|
# whether rate limiting is enabled.
|
|
# metainfo.rate-limiter.enabled: true
|
|
|
|
# request rate per project per second.
|
|
# metainfo.rate-limiter.rate: 100
|
|
|
|
# redundancy scheme configuration in the format k/m/o/n-sharesize
|
|
# metainfo.rs: 29/35/80/110-256 B
|
|
|
|
# as of system interval
|
|
# metainfo.segment-loop.as-of-system-interval: -5m0s
|
|
|
|
# how long to wait for new observers before starting iteration
|
|
# metainfo.segment-loop.coalesce-duration: 5s
|
|
|
|
# how many items to query in a batch
|
|
# metainfo.segment-loop.list-limit: 2500
|
|
|
|
# rate limit (default is 0 which is unlimited segments per second)
|
|
# metainfo.segment-loop.rate-limit: 0
|
|
|
|
# ratio where to consider processed count as suspicious
|
|
# metainfo.segment-loop.suspicious-processed-ratio: 0.03
|
|
|
|
# enable code for server-side copy, deprecated. please leave this to true.
|
|
# metainfo.server-side-copy: true
|
|
|
|
# disable already enabled server-side copy. this is because once server side copy is enabled, delete code should stay changed, even if you want to disable server side copy
|
|
# metainfo.server-side-copy-disabled: false
|
|
|
|
# test the new query for non-recursive listing
|
|
# metainfo.test-listing-query: false
|
|
|
|
# address(es) to send telemetry to (comma-separated)
|
|
# metrics.addr: collectora.storj.io:9000
|
|
|
|
# application name for telemetry identification. Ignored for certain applications.
|
|
# metrics.app: satellite
|
|
|
|
# application suffix. Ignored for certain applications.
|
|
# metrics.app-suffix: -release
|
|
|
|
# address(es) to send telemetry to (comma-separated)
|
|
# metrics.event-addr: eventkitd.datasci.storj.io:9002
|
|
|
|
# instance id prefix
|
|
# metrics.instance-prefix: ""
|
|
|
|
# how frequently to send up telemetry. Ignored for certain applications.
|
|
# metrics.interval: 1m0s
|
|
|
|
# whether to use ranged loop instead of segment loop
|
|
# metrics.use-ranged-loop: false
|
|
|
|
# path to log for oom notices
|
|
# monkit.hw.oomlog: /var/log/kern.log
|
|
|
|
# api key for the customer.io api
|
|
# node-events.customerio.api-key: ""
|
|
|
|
# timeout for the http request to customer.io endpoint
|
|
# node-events.customerio.request-timeout: 30s
|
|
|
|
# the account id for the customer.io api
|
|
# node-events.customerio.site-id: ""
|
|
|
|
# the url for the customer.io endpoint to send node event data to
|
|
# node-events.customerio.url: https://track.customer.io/api/v1
|
|
|
|
# how long to wait before checking the node events DB again if there is nothing to work on
|
|
# node-events.interval: 5m0s
|
|
|
|
# which notification provider to use
|
|
# node-events.notifier: ""
|
|
|
|
# how long the earliest instance of an event for a particular email should exist in the DB before it is selected
|
|
# node-events.selection-wait-period: 5m0s
|
|
|
|
# how long to wait between sending Node Offline emails
|
|
# offline-nodes.cooldown: 24h0m0s
|
|
|
|
# how often to check for offline nodes and send them emails
|
|
# offline-nodes.interval: 1h0m0s
|
|
|
|
# Max number of nodes to return in a single query. Chore will iterate until rows returned is less than limit
|
|
# offline-nodes.limit: 1000
|
|
|
|
# max number of offline emails to send a node operator until the node comes back online
|
|
# offline-nodes.max-emails: 3
|
|
|
|
# encryption keys to encrypt info in orders
|
|
# orders.encryption-keys: ""
|
|
|
|
# how long until an order expires
|
|
# orders.expiration: 24h0m0s
|
|
|
|
# how many items in the rollups write cache before they are flushed to the database
|
|
# orders.flush-batch-size: 1000
|
|
|
|
# how often to flush the rollups write cache to the database
|
|
# orders.flush-interval: 1m0s
|
|
|
|
# how many concurrent orders to process at once. zero is unlimited
|
|
# orders.orders-semaphore-size: 2
|
|
|
|
# the location of the maxmind database containing geoip country information
|
|
# overlay.geo-ip.db: ""
|
|
|
|
# a mock list of countries the satellite will attribute to nodes (useful for testing)
|
|
# overlay.geo-ip.mock-countries: []
|
|
|
|
# the amount of time to wait before accepting a redundant check-in from a node (unmodified info since last check-in)
|
|
# overlay.node-check-in-wait-period: 2h0m0s
|
|
|
|
# disable node cache
|
|
# overlay.node-selection-cache.disabled: false
|
|
|
|
# how stale the node selection cache can be
|
|
# overlay.node-selection-cache.staleness: 3m0s
|
|
|
|
# the amount of time to wait between sending Node Software Update emails
|
|
# overlay.node-software-update-email-cooldown: 168h0m0s
|
|
|
|
# default duration for AS OF SYSTEM TIME
|
|
# overlay.node.as-of-system-time.default-interval: -10s
|
|
|
|
# enables the use of the AS OF SYSTEM TIME feature in CRDB
|
|
# overlay.node.as-of-system-time.enabled: true
|
|
|
|
# require distinct IPs when choosing nodes for upload
|
|
# overlay.node.distinct-ip: true
|
|
|
|
# how much disk space a node at minimum must have to be selected for upload
|
|
# overlay.node.minimum-disk-space: 500.00 MB
|
|
|
|
# the minimum node software version for node selection queries
|
|
# overlay.node.minimum-version: ""
|
|
|
|
# the fraction of new nodes allowed per request
|
|
# overlay.node.new-node-fraction: 0.05
|
|
|
|
# the amount of time without seeing a node before it's considered offline
|
|
# overlay.node.online-window: 4h0m0s
|
|
|
|
# list of country codes to exclude from node selection for uploads
|
|
# overlay.node.upload-excluded-country-codes: []
|
|
|
|
# list of country codes to exclude nodes from target repair selection
|
|
# overlay.repair-excluded-country-codes: []
|
|
|
|
# whether to send emails to nodes
|
|
# overlay.send-node-emails: false
|
|
|
|
# number of update requests to process per transaction
|
|
# overlay.update-stats-batch-size: 100
|
|
|
|
# flag to disable querying for new billing transactions by billing chore
|
|
# payments.billing-config.disable-loop: true
|
|
|
|
# billing chore interval to query for new transactions from all payment types
|
|
# payments.billing-config.interval: 15s
|
|
|
|
# amount of percents that user will earn as bonus credits by depositing in STORJ tokens
|
|
# payments.bonus-rate: 10
|
|
|
|
# price node receives for storing TB of audit in cents
|
|
# payments.node-audit-bandwidth-price: 1000
|
|
|
|
# price node receives for storing disk space in cents/TB
|
|
# payments.node-disk-space-price: 150
|
|
|
|
# price node receives for storing TB of egress in cents
|
|
# payments.node-egress-bandwidth-price: 2000
|
|
|
|
# price node receives for storing TB of repair in cents
|
|
# payments.node-repair-bandwidth-price: 1000
|
|
|
|
# semicolon-separated partner package plans in the format partner:couponID,price. Price is in cents USD.
|
|
# payments.package-plans: ""
|
|
|
|
# payments provider to use
|
|
# payments.provider: ""
|
|
|
|
# basic auth identifier
|
|
# payments.storjscan.auth.identifier: ""
|
|
|
|
# basic auth secret
|
|
# payments.storjscan.auth.secret: ""
|
|
|
|
# required number of following blocks in the chain to accept payment as confirmed
|
|
# payments.storjscan.confirmations: 15
|
|
|
|
# flag to disable querying new storjscan payments by storjscan chore
|
|
# payments.storjscan.disable-loop: true
|
|
|
|
# storjscan API endpoint
|
|
# payments.storjscan.endpoint: ""
|
|
|
|
# storjscan chore interval to query new payments for all satellite deposit wallets
|
|
# payments.storjscan.interval: 1m0s
|
|
|
|
# toggle autoadvance feature for invoice creation
|
|
# payments.stripe-coin-payments.auto-advance: false
|
|
|
|
# if set, skips the creation of empty invoices for customers with zero usage for the billing period
|
|
# payments.stripe-coin-payments.skip-empty-invoices: true
|
|
|
|
# stripe free tier coupon ID
|
|
# payments.stripe-coin-payments.stripe-free-tier-coupon-id: ""
|
|
|
|
# stripe API public key
|
|
# payments.stripe-coin-payments.stripe-public-key: ""
|
|
|
|
# stripe API secret key
|
|
# payments.stripe-coin-payments.stripe-secret-key: ""
|
|
|
|
# semicolon-separated usage price overrides in the format partner:storage,egress,segment
|
|
# payments.usage-price-overrides: ""
|
|
|
|
# price user should pay for egress in dollars/TB
|
|
# payments.usage-price.egress-tb: "7"
|
|
|
|
# price user should pay for segments stored on network per month in dollars/segment
|
|
# payments.usage-price.segment: "0.0000088"
|
|
|
|
# price user should pay for storage per month in dollars/TB
|
|
# payments.usage-price.storage-tb: "4"
|
|
|
|
# how often to remove unused project bandwidth rollups
|
|
# project-bw-cleanup.interval: 168h0m0s
|
|
|
|
# number of months of project bandwidth rollups to retain, not including the current month
|
|
# project-bw-cleanup.retain-months: 2
|
|
|
|
# number of projects to cache.
|
|
# project-limit.cache-capacity: 10000
|
|
|
|
# how long to cache the project limits.
|
|
# project-limit.cache-expiration: 10m0s
|
|
|
|
# as of system interval
|
|
# ranged-loop.as-of-system-interval: -5m0s
|
|
|
|
# how many items to query in a batch
|
|
# ranged-loop.batch-size: 2500
|
|
|
|
# how often to run the loop
|
|
# ranged-loop.interval: 2h0m0s
|
|
|
|
# how many chunks of segments to process in parallel
|
|
# ranged-loop.parallelism: 2
|
|
|
|
# time limit for downloading pieces from a node for repair
|
|
# repairer.download-timeout: 5m0s
|
|
|
|
# whether to download pieces for repair in memory (true) or download to disk (false)
|
|
# repairer.in-memory-repair: false
|
|
|
|
# how frequently repairer should try and repair more data
|
|
# repairer.interval: 5m0s
|
|
|
|
# maximum buffer memory (in bytes) to be allocated for read buffers
|
|
# repairer.max-buffer-mem: 4.0 MiB
|
|
|
|
# ratio applied to the optimal threshold to calculate the excess of the maximum number of repaired pieces to upload
|
|
# repairer.max-excess-rate-optimal-threshold: 0.05
|
|
|
|
# maximum segments that can be repaired concurrently
|
|
# repairer.max-repair: 5
|
|
|
|
# whether the audit score of nodes should be updated as a part of repair
|
|
# repairer.reputation-update-enabled: false
|
|
|
|
# time limit for uploading repaired pieces to new storage nodes
|
|
# repairer.timeout: 5m0s
|
|
|
|
# time limit for an entire repair job, from queue pop to upload completion
|
|
# repairer.total-timeout: 45m0s
|
|
|
|
# the number of times a node has been audited to not be considered a New Node
|
|
# reputation.audit-count: 100
|
|
|
|
# the reputation cut-off for disqualifying SNs based on audit history
|
|
# reputation.audit-dq: 0.96
|
|
|
|
# The length of time to give suspended SNOs to diagnose and fix issues causing downtime. Afterwards, they will have one tracking period to reach the minimum online score before disqualification
|
|
# reputation.audit-history.grace-period: 168h0m0s
|
|
|
|
# whether nodes will be disqualified if they have low online score after a review period
|
|
# reputation.audit-history.offline-dq-enabled: false
|
|
|
|
# whether nodes will be suspended if they have low online score
|
|
# reputation.audit-history.offline-suspension-enabled: true
|
|
|
|
# The point below which a node is punished for offline audits. Determined by calculating the ratio of online/total audits within each window and finding the average across windows within the tracking period.
|
|
# reputation.audit-history.offline-threshold: 0.6
|
|
|
|
# The length of time to track audit windows for node suspension and disqualification
|
|
# reputation.audit-history.tracking-period: 720h0m0s
|
|
|
|
# The length of time spanning a single audit window
|
|
# reputation.audit-history.window-size: 12h0m0s
|
|
|
|
# the forgetting factor used to update storage node reputation due to audits
|
|
# reputation.audit-lambda: 0.999
|
|
|
|
# weight to apply to audit reputation for total repair reputation calculation
|
|
# reputation.audit-repair-weight: 1
|
|
|
|
# weight to apply to audit reputation for total uplink reputation calculation
|
|
# reputation.audit-uplink-weight: 1
|
|
|
|
# the normalization weight used to calculate the audit SNs reputation
|
|
# reputation.audit-weight: 1
|
|
|
|
# the amount of time that should elapse before the cache retries failed database operations
|
|
# reputation.error-retry-interval: 1m0s
|
|
|
|
# the maximum amount of time that should elapse before cached reputation writes are flushed to the database (if 0, no reputation cache is used)
|
|
# reputation.flush-interval: 2h0m0s
|
|
|
|
# the value to which an alpha reputation value should be initialized
|
|
# reputation.initial-alpha: 1000
|
|
|
|
# the value to which a beta reputation value should be initialized
|
|
# reputation.initial-beta: 0
|
|
|
|
# whether nodes will be disqualified if they have been suspended for longer than the suspended grace period
|
|
# reputation.suspension-dq-enabled: false
|
|
|
|
# the time period that must pass before suspended nodes will be disqualified
|
|
# reputation.suspension-grace-period: 168h0m0s
|
|
|
|
# the reputation cut-off for disqualifying SNs based on returning 'unknown' errors during audit
|
|
# reputation.unknown-audit-dq: 0.6
|
|
|
|
# the forgetting factor used to update storage node reputation due to returning 'unknown' errors during audit
|
|
# reputation.unknown-audit-lambda: 0.95
|
|
|
|
# expiration to use if user does not specify a rest key expiration
|
|
# rest-keys.default-expiration: 720h0m0s
|
|
|
|
# age at which a rollup is archived
|
|
# rollup-archive.archive-age: 2160h0m0s
|
|
|
|
# number of records to delete per delete execution. Used only for crdb which is slow without limit.
|
|
# rollup-archive.batch-size: 500
|
|
|
|
# whether or not the rollup archive is enabled.
|
|
# rollup-archive.enabled: true
|
|
|
|
# how frequently rollup archiver should run
|
|
# rollup-archive.interval: 24h0m0s
|
|
|
|
# option for deleting tallies after they are rolled up
|
|
# rollup.delete-tallies: true
|
|
|
|
# how many tallies to delete in a batch
|
|
# rollup.delete-tallies-batch-size: 10000
|
|
|
|
# how frequently rollup should run
|
|
# rollup.interval: 24h0m0s
|
|
|
|
# public address to listen on
|
|
server.address: :7777
|
|
|
|
# if true, client leaves may contain the most recent certificate revocation for the current certificate
|
|
# server.extensions.revocation: true
|
|
|
|
# if true, client leaves must contain a valid "signed certificate extension" (NB: verified against certs in the peer ca whitelist; i.e. if true, a whitelist must be provided)
|
|
# server.extensions.whitelist-signed-leaf: false
|
|
|
|
# path to the CA cert whitelist (peer identities must be signed by one of these to be verified). this will override the default peer whitelist
|
|
# server.peer-ca-whitelist-path: ""
|
|
|
|
# identity version(s) the server will be allowed to talk to
|
|
# server.peer-id-versions: latest
|
|
|
|
# private address to listen on
|
|
server.private-address: 127.0.0.1:7778
|
|
|
|
# url for revocation database (e.g. bolt://some.db OR redis://127.0.0.1:6378?db=2&password=abc123)
|
|
# server.revocation-dburl: bolt://testdata/revocations.db
|
|
|
|
# if true, uses peer ca whitelist checking
|
|
# server.use-peer-ca-whitelist: true
|
|
|
|
# whether nodes will be disqualified if they have not been contacted in some time
|
|
# stray-nodes.enable-dq: true
|
|
|
|
# how often to check for and DQ stray nodes
|
|
# stray-nodes.interval: 168h0m0s
|
|
|
|
# Max number of nodes to return in a single query. Chore will iterate until rows returned is less than limit
|
|
# stray-nodes.limit: 1000
|
|
|
|
# length of time a node can go without contacting satellite before being disqualified
|
|
# stray-nodes.max-duration-without-contact: 720h0m0s
|
|
|
|
# as of system interval
|
|
# tally.as-of-system-interval: -5m0s
|
|
|
|
# how frequently the tally service should run
|
|
# tally.interval: 1h0m0s
|
|
|
|
# how many objects to query in a batch
|
|
# tally.list-limit: 2500
|
|
|
|
# how large of batches GetBandwidthSince should process at a time
|
|
# tally.read-rollup-batch-size: 10000
|
|
|
|
# how large of batches SaveRollup should process at a time
|
|
# tally.save-rollup-batch-size: 1000
|
|
|
|
# flag to switch between calculating bucket tallies using objects loop or custom query
|
|
# tally.use-objects-loop: false
|
|
|
|
# flag whether to use ranged loop instead of segment loop
|
|
# tally.use-ranged-loop: false
|
|
|
|
# address for jaeger agent
|
|
# tracing.agent-addr: agent.tracing.datasci.storj.io:5775
|
|
|
|
# application name for tracing identification
|
|
# tracing.app: satellite
|
|
|
|
# application suffix
|
|
# tracing.app-suffix: -release
|
|
|
|
# buffer size for collector batch packet size
|
|
# tracing.buffer-size: 0
|
|
|
|
# whether tracing collector is enabled
|
|
# tracing.enabled: true
|
|
|
|
# how frequently to flush traces to tracing agent
|
|
# tracing.interval: 0s
|
|
|
|
# buffer size for collector queue size
|
|
# tracing.queue-size: 0
|
|
|
|
# how frequently to sample traces
|
|
# tracing.sample: 0
|
|
|
|
# A comma delimited list of peers (IDs/addresses) allowed to use this endpoint.
|
|
# userinfo.allowed-peers: ""
|
|
|
|
# Whether the private Userinfo rpc endpoint is enabled
|
|
# userinfo.enabled: false
|
|
|
|
# Interval to check the version
|
|
# version.check-interval: 15m0s
|
|
|
|
# Request timeout for version checks
|
|
# version.request-timeout: 1m0s
|
|
|
|
# server address to check its version against
|
|
# version.server-address: https://version.storj.io
|
|
|
|
# set if zombie object cleanup is enabled or not
|
|
# zombie-deletion.enabled: true
|
|
|
|
# after what time an object will be deleted if there was no new upload activity
|
|
# zombie-deletion.inactive-for: 24h0m0s
|
|
|
|
# the time between each attempt to go through the db and clean up zombie objects
|
|
# zombie-deletion.interval: 12h0m0s
|
|
|
|
# how many objects to query in a batch
|
|
# zombie-deletion.list-limit: 100
|