# how frequently segments are audited
# audit.interval: 30s

# max number of times to attempt updating a statdb batch
# audit.max-retries-stat-db: 3

# limit above which we consider an audit is failed
# audit.max-reverify-count: 3

# the minimum acceptable bytes that storage nodes can transfer per second to the satellite
# audit.min-bytes-per-second: 128 B

# the minimum duration for downloading a share from storage nodes before timing out
# audit.min-download-timeout: 25s

# how frequently the checker should check for bad segments
# checker.interval: 30s

# how frequently the irreparable checker should check for lost pieces
# checker.irreparable-interval: 30m0s

# how stale the reliable node cache can be
# checker.reliability-cache-staleness: 5m0s

# server address of the graphql api gateway and frontend app
# console.address: ":10100"

# auth token needed for access to the registration token creation endpoint
# console.auth-token: ""

# secret used to sign auth tokens
# console.auth-token-secret: ""

# external endpoint of the satellite if hosted
# console.external-address: ""

# path to static resources
# console.static-dir: ""

# stripe api key
# console.stripe-key: ""

# satellite database connection string
# database: "postgres://"

# maximum database connection lifetime, -1ns means the stdlib default
# db.conn_max_lifetime: -1ns

# maximum number of idle database connections, -1 means the stdlib default
# db.max_idle_conns: 20

# maximum number of open database connections, -1 means the stdlib default
# db.max_open_conns: 25

# address to listen on for debug endpoints
# debug.addr: "127.0.0.1:0"

# if set, a path to write a process trace SVG to
# debug.trace-out: ""

# determines which set of configuration defaults to use. can either be 'dev' or 'release'
defaults: "release"

# the interval at which the satellite attempts to find new nodes via random node ID lookups
# discovery.discovery-interval: 1s

# the number of nodes refreshed in parallel
# discovery.refresh-concurrency: 8

# the interval at which the cache refreshes itself in seconds
# discovery.refresh-interval: 1s

# the number of nodes read from the overlay cache in a single pagination call
# discovery.refresh-limit: 100

# the number of nodes to concurrently send garbage collection bloom filters to
# garbage-collection.concurrent-sends: 1

# whether garbage collection is enabled
# garbage-collection.enabled: false

# the false positive rate used for creating a garbage collection bloom filter
# garbage-collection.false-positive-rate: 0.1

# the initial number of pieces expected for a storage node to have, used for creating a filter
# garbage-collection.initial-pieces: 400000

# the time between each send of garbage collection filters to storage nodes
# garbage-collection.interval: 168h0m0s

# help for setup
# help: false

# path to the certificate chain for this identity
identity.cert-path: "/root/.local/share/storj/identity/satellite/identity.cert"

# path to the private key for this identity
identity.key-path: "/root/.local/share/storj/identity/satellite/identity.key"

# alpha is a system-wide concurrency parameter
# kademlia.alpha: 5

# the Kademlia node to bootstrap against
# kademlia.bootstrap-addr: "bootstrap.storj.io:8888"

# the base interval to wait when retrying bootstrap
# kademlia.bootstrap-backoff-base: 1s

# the maximum amount of time to wait when retrying bootstrap
# kademlia.bootstrap-backoff-max: 30s

# size of each Kademlia bucket
# kademlia.bucket-size: 20

# the path for storage node db services to be created on
# kademlia.db-path: "testdata/kademlia"

# the public address of the Kademlia node, useful for nodes behind NAT
kademlia.external-address: ""

# operator email address
kademlia.operator.email: ""

# operator wallet address
kademlia.operator.wallet: ""

# size of the Kademlia replacement cache
# kademlia.replacement-cache-size: 5
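# Illustrative example (placeholder values only, not shipped defaults): a
# publicly hosted satellite would typically fill in the Kademlia contact
# fields along these lines:
# kademlia.external-address: "satellite.example.com:7777"
# kademlia.operator.email: "ops@example.com"
# kademlia.operator.wallet: "0x0000000000000000000000000000000000000000"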
"testdata/kademlia" # the public address of the Kademlia node, useful for nodes behind NAT kademlia.external-address: "" # operator email address kademlia.operator.email: "" # operator wallet address kademlia.operator.wallet: "" # size of Kademlia replacement cache # kademlia.replacement-cache-size: 5 # what to use for storing real-time accounting data # live-accounting.storage-backend: "plainmemory" # if true, log function filename and line number # log.caller: false # if true, set logging to development mode # log.development: false # configures log encoding. can either be 'console' or 'json' # log.encoding: "console" # the minimum log level to log # log.level: info # can be stdout, stderr, or a filename # log.output: "stderr" # if true, log stack traces # log.stack: false # smtp authentication type # mail.auth-type: "login" # oauth2 app's client id # mail.client-id: "" # oauth2 app's client secret # mail.client-secret: "" # sender email address # mail.from: "" # plain/login auth user login # mail.login: "" # plain/login auth user password # mail.password: "" # refresh token used to retrieve new access token # mail.refresh-token: "" # smtp server address # mail.smtp-server-address: "" # path to email templates source # mail.template-path: "" # uri which is used when retrieving new access token # mail.token-uri: "" # server address of the marketing Admin GUI # marketing.address: "127.0.0.1:8090" # path to static resources # marketing.static-dir: "" # the database connection string to use # metainfo.database-url: "postgres://" # how long to wait for new observers before starting iteration # metainfo.loop.coalesce-duration: 5s # maximum inline segment size # metainfo.max-inline-segment-size: 8.0 KB # minimum remote segment size # metainfo.min-remote-segment-size: 1.2 KiB # toggle flag if overlay is enabled # metainfo.overlay: true # the size of each new erasure share in bytes # metainfo.rs.erasure-share-size: 256 B # maximum buffer memory to be allocated for read buffers # metainfo.rs.max-buffer-mem: 4.0 MiB # maximum segment size # metainfo.rs.max-segment-size: 64.0 MiB # the largest amount of pieces to encode to. n. # metainfo.rs.max-threshold: 130 # the minimum pieces required to recover a segment. k. # metainfo.rs.min-threshold: 29 # the minimum safe pieces before a repair is triggered. m. # metainfo.rs.repair-threshold: 35 # the desired total pieces for a segment. o. 
# address to send telemetry to
# metrics.addr: "collectora.storj.io:9000"

# application name for telemetry identification
# metrics.app: "satellite"

# application suffix
# metrics.app-suffix: "-release"

# instance id prefix
# metrics.instance-prefix: ""

# how frequently to send up telemetry
# metrics.interval: 1m0s

# path to log for oom notices
# monkit.hw.oomlog: "/var/log/kern.log"

# how long until an order expires
# orders.expiration: 168h0m0s

# the number of times a node has been audited to not be considered a New Node
# overlay.node.audit-count: 100

# the initial shape 'alpha' used to calculate audit SNs reputation
# overlay.node.audit-reputation-alpha0: 1

# the initial shape 'beta' value used to calculate audit SNs reputation
# overlay.node.audit-reputation-beta0: 0

# the reputation cut-off for disqualifying SNs based on audit history
# overlay.node.audit-reputation-dq: 0.6

# the forgetting factor used to calculate the audit SNs reputation
# overlay.node.audit-reputation-lambda: 0.95

# weight to apply to audit reputation for total repair reputation calculation
# overlay.node.audit-reputation-repair-weight: 1

# weight to apply to audit reputation for total uplink reputation calculation
# overlay.node.audit-reputation-uplink-weight: 1

# the normalization weight used to calculate the audit SNs reputation
# overlay.node.audit-reputation-weight: 1

# require distinct IPs when choosing nodes for upload
# overlay.node.distinct-ip: true

# the minimum node software version for node selection queries
# overlay.node.minimum-version: ""

# the percentage of new nodes allowed per request
# overlay.node.new-node-percentage: 0.05

# the amount of time without seeing a node before it's considered offline
# overlay.node.online-window: 1h0m0s

# the number of times a node's uptime has been checked to not be considered a New Node
# overlay.node.uptime-count: 100

# the initial shape 'alpha' used to calculate uptime SNs reputation
# overlay.node.uptime-reputation-alpha0: 2

# the initial shape 'beta' value used to calculate uptime SNs reputation
# overlay.node.uptime-reputation-beta0: 0

# the reputation cut-off for disqualifying SNs based on uptime history
# overlay.node.uptime-reputation-dq: 0

# the forgetting factor used to calculate the uptime SNs reputation
# overlay.node.uptime-reputation-lambda: 0.99

# weight to apply to uptime reputation for total repair reputation calculation
# overlay.node.uptime-reputation-repair-weight: 1

# weight to apply to uptime reputation for total uplink reputation calculation
# overlay.node.uptime-reputation-uplink-weight: 1

# the normalization weight used to calculate the uptime SNs reputation
# overlay.node.uptime-reputation-weight: 1

# how frequently the repairer should try to repair more data
# repairer.interval: 1h0m0s

# maximum buffer memory (in bytes) to be allocated for read buffers
# repairer.max-buffer-mem: 4.0 MB

# ratio applied to the optimal threshold to determine how many excess repaired pieces may be uploaded
# repairer.max-excess-rate-optimal-threshold: 0.05

# maximum segments that can be repaired concurrently
# repairer.max-repair: 5

# time limit for uploading repaired pieces to new storage nodes
# repairer.timeout: 2h0m0s

# option for deleting tallies after they are rolled up
# rollup.delete-tallies: false

# how frequently rollup should run
# rollup.interval: 24h0m0s

# the bandwidth and storage usage limit for the alpha release
# rollup.max-alpha-usage: 25.0 GB
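# Sketch of how the overlay.node.audit-reputation-* parameters above are
# typically combined (based on the beta-reputation model their names suggest;
# not authoritative): starting from alpha = alpha0 and beta = beta0, each
# audit outcome v (+1 for success, -1 for failure) with weight w updates
#   alpha = lambda * alpha + w * (1 + v) / 2
#   beta  = lambda * beta  + w * (1 - v) / 2
# and the node's audit reputation is alpha / (alpha + beta); falling below
# audit-reputation-dq (0.6) disqualifies the node.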
# public address to listen on
server.address: ":7777"

# log all GRPC traffic to the zap logger
server.debug-log-traffic: false

# if true, client leaf certificates may contain the most recent certificate revocation for the current certificate
# server.extensions.revocation: true

# if true, client leaf certificates must contain a valid "signed certificate extension" (NB: verified against certs in the peer ca whitelist; i.e. if true, a whitelist must be provided)
# server.extensions.whitelist-signed-leaf: false

# path to the CA cert whitelist (peer identities must be signed by one of these to be verified). this will override the default peer whitelist
# server.peer-ca-whitelist-path: ""

# identity version(s) the server will be allowed to talk to
# server.peer-id-versions: "latest"

# private address to listen on
server.private-address: "127.0.0.1:7778"

# url for the revocation database (e.g. bolt://some.db OR redis://127.0.0.1:6378?db=2&password=abc123)
# server.revocation-dburl: "bolt://testdata/revocations.db"

# if true, uses peer ca whitelist checking
# server.use-peer-ca-whitelist: true

# how frequently the tally service should run
# tally.interval: 1h0m0s

# interval to check the version
# version.check-interval: 15m0s

# request timeout for version checks
# version.request-timeout: 1m0s

# server address to check its version against
# version.server-address: "https://version.alpha.storj.io"

# length of time before a voucher expires
# vouchers.expiration: 720h0m0s
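# Note on units: durations follow Go duration syntax, so "168h0m0s"
# (orders.expiration) is 7 days and "720h0m0s" (vouchers.expiration) is 30
# days. For example, a 60-day voucher lifetime would be written as:
# vouchers.expiration: 1440h0m0s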