diff --git a/documentation/configuration.md b/documentation/configuration.md
index 5d46483b..6835fb55 100644
--- a/documentation/configuration.md
+++ b/documentation/configuration.md
@@ -24,7 +24,7 @@ USAGE:
    liftbridge [global options] command [command options] [arguments...]
 
 VERSION:
-   v1.4.0
+   v1.4.1
 
 COMMANDS:
    help, h  Shows a list of commands or help for one command
diff --git a/documentation/quick_start.md b/documentation/quick_start.md
index 06b6cf76..2fb61d3d 100644
--- a/documentation/quick_start.md
+++ b/documentation/quick_start.md
@@ -43,7 +43,7 @@ only be set on one server when bootstrapping a cluster.**
 
 ```shell
 $ liftbridge --raft-bootstrap-seed
-INFO[2020-10-15 14:29:50] Liftbridge Version: v1.4.0
+INFO[2020-10-15 14:29:50] Liftbridge Version: v1.4.1
 INFO[2020-10-15 14:29:50] Server ID: 4nbhBr66WnRsy0I5oKF9bo
 INFO[2020-10-15 14:29:50] Namespace: liftbridge-default
 INFO[2020-10-15 14:29:50] Default Retention Policy: [Age: 1 week, Compact: false]
@@ -57,7 +57,7 @@ We set the `--data-dir` and `--port` flags to avoid clobbering the first server.
 
 ```shell
 $ liftbridge --data-dir /tmp/liftbridge/server-2 --port=9293
-INFO[2020-10-15 14:30:48] Liftbridge Version: v1.4.0
+INFO[2020-10-15 14:30:48] Liftbridge Version: v1.4.1
 INFO[2020-10-15 14:30:48] Server ID: lbW05esZTab3guEwcmWD9M
 INFO[2020-10-15 14:30:48] Namespace: liftbridge-default
 INFO[2020-10-15 14:30:48] Default Retention Policy: [Age: 1 week, Compact: false]
diff --git a/go.mod b/go.mod
index 8710f2cb..842aa656 100644
--- a/go.mod
+++ b/go.mod
@@ -16,7 +16,7 @@ require (
 	github.com/hashicorp/golang-lru v0.5.4
 	github.com/hashicorp/raft v1.1.2
 	github.com/liftbridge-io/go-liftbridge/v2 v2.0.2-0.20201118225953-b849cccb6467
-	github.com/liftbridge-io/liftbridge-api v1.4.0
+	github.com/liftbridge-io/liftbridge-api v1.4.1
 	github.com/liftbridge-io/nats-on-a-log v0.0.0-20200818183806-bb17516cf3a3
 	github.com/liftbridge-io/raft-boltdb v0.0.0-20200414234651-aaf6e08d8f73
 	github.com/mattn/go-colorable v0.1.7 // indirect
diff --git a/go.sum b/go.sum
index c745cd08..dec12240 100644
--- a/go.sum
+++ b/go.sum
@@ -192,8 +192,8 @@ github.com/liftbridge-io/go-liftbridge/v2 v2.0.2-0.20201118225953-b849cccb6467 h
 github.com/liftbridge-io/go-liftbridge/v2 v2.0.2-0.20201118225953-b849cccb6467/go.mod h1:24NMu02Ba2sMO2y+IYstP1UFKVA4a6/p54Lc7KccSLc=
 github.com/liftbridge-io/liftbridge-api v1.1.1-0.20201029165056-10f2aa65f256 h1:2pZtC3v6IBTwE70xfb/k0DPlOJ6BlXpthCUWrxCnhwo=
 github.com/liftbridge-io/liftbridge-api v1.1.1-0.20201029165056-10f2aa65f256/go.mod h1:6IFEFZ4ncnOgeDVjSt0vh1lKNhlJ5YT9xnG1eRa9LC8=
-github.com/liftbridge-io/liftbridge-api v1.4.0 h1:2axkVGBEnQdXvXVHG+GyTYQucqxuvk7W9vJLXOCJy68=
-github.com/liftbridge-io/liftbridge-api v1.4.0/go.mod h1:6IFEFZ4ncnOgeDVjSt0vh1lKNhlJ5YT9xnG1eRa9LC8=
+github.com/liftbridge-io/liftbridge-api v1.4.1 h1:7LUThKH8z9Nr1Es6Arec4r5yI3JFOz166el5WFepp7A=
+github.com/liftbridge-io/liftbridge-api v1.4.1/go.mod h1:6IFEFZ4ncnOgeDVjSt0vh1lKNhlJ5YT9xnG1eRa9LC8=
 github.com/liftbridge-io/nats-on-a-log v0.0.0-20200818183806-bb17516cf3a3 h1:O4mg1NEmukgY8hxen3grrG5RY34LadMTzpbjf8kM2tA=
 github.com/liftbridge-io/nats-on-a-log v0.0.0-20200818183806-bb17516cf3a3/go.mod h1:wmIIYVq+psahPlB1rvtTkGiltdihsKJbqwE1DkIPwj4=
 github.com/liftbridge-io/raft-boltdb v0.0.0-20200414234651-aaf6e08d8f73 h1:8r/ReB1ns87pVDwSnPj87HIbOu/5y0uDyGChx9mUGSQ=
diff --git a/server/version.go b/server/version.go
index 907ab7ce..a745f898 100644
--- a/server/version.go
+++ b/server/version.go
@@ -1,4 +1,4 @@
 package server
 
 // Version of the Liftbridge server.
-const Version = "v1.4.0"
+const Version = "v1.4.1"
diff --git a/website/versioned_docs/version-v1.4.1/configuration.md b/website/versioned_docs/version-v1.4.1/configuration.md
new file mode 100644
index 00000000..801da1b7
--- /dev/null
+++ b/website/versioned_docs/version-v1.4.1/configuration.md
@@ -0,0 +1,220 @@
+---
+id: version-v1.4.1-configuration
+title: Configuration
+original_id: configuration
+---
+
+Liftbridge provides limited configuration through command-line flags and full
+configuration using a configuration file. Flags will always take precedence
+over settings in the configuration file.
+
+The configuration file is passed in using the `--config` flag:
+
+```shell
+$ liftbridge --config liftbridge.yaml
+```
+
+To get a full list of the CLI flags, use the `--help` flag:
+
+```shell
+$ liftbridge --help
+NAME:
+   liftbridge - Lightweight, fault-tolerant message streams
+
+USAGE:
+   liftbridge [global options] command [command options] [arguments...]
+
+VERSION:
+   v1.4.1
+
+COMMANDS:
+   help, h  Shows a list of commands or help for one command
+
+GLOBAL OPTIONS:
+   --config FILE, -c FILE                      load configuration from FILE
+   --server-id value, --id value               ID of the server in the cluster if there is no stored ID (default: random ID)
+   --namespace value, --ns value               cluster namespace (default: "liftbridge-default")
+   --nats-servers ADDR[,ADDR], -n ADDR[,ADDR]  connect to NATS cluster at ADDR[,ADDR] (default: "nats://127.0.0.1:4222")
+   --data-dir DIR, -d DIR                      store data in DIR (default: "/tmp/liftbridge/<namespace>")
+   --port value, -p value                      port to bind to (default: 9292)
+   --tls-cert value                            server certificate file
+   --tls-key value                             private key for server certificate
+   --level value, -l value                     logging level [debug|info|warn|error] (default: "info")
+   --raft-bootstrap-seed                       bootstrap the Raft cluster by electing self as leader if there is no existing state
+   --raft-bootstrap-peers value                bootstrap the Raft cluster with the provided list of peer IDs if there is no existing state
+   --help, -h                                  show help
+   --version, -v                               print the version
+```
+
+## Configuration File Format
+
+The configuration file uses a YAML format. The configuration settings are
+described below. Settings follow a hierarchical pattern delimited by periods.
+The full, flattened name or nested names can be used. For example:
+
+```yaml
+logging.level: debug
+logging.recovery: true
+logging.raft: true
+```
+
+The above configuration is equivalent to:
+
+```yaml
+logging:
+  level: debug
+  recovery: true
+  raft: true
+```
+
+## Example Configuration File
+
+An example configuration file is shown below.
+
+```yaml
+---
+listen: localhost:9293
+host: localhost
+data.dir: /tmp/liftbridge/server-2
+activity.stream.enabled: true
+
+# Configure logging.
+logging:
+  level: debug
+  raft: true
+
+# Define NATS cluster to connect to.
+nats.servers:
+  - nats://localhost:4300
+  - nats://localhost:4301
+
+# Specify message stream settings.
+streams:
+  retention.max:
+    age: 24h
+    messages: 100
+  compact.enabled: true
+
+# Specify cluster settings.
+clustering:
+  server.id: server-2
+  raft.bootstrap.seed: true
+  replica.max.lag.time: 20s
+```
+
+## Overriding configuration settings with environment variables
+
+Any setting in the configuration file can be overridden with an environment
+variable prefixed with `LIFTBRIDGE_`. The key must exist in the configuration
+file for the override to take effect.
+
+For example, using the config file from above, one could override the host and
+logging level with:
+
+```sh
+env LIFTBRIDGE_HOST=liftbridge.example.com \
+    LIFTBRIDGE_LOGGING_LEVEL=error \
+    liftbridge --config config.yaml
+```
+
+## Configuration Settings
+
+Below is the list of Liftbridge configuration settings, including the name of
+the setting in the configuration file and the CLI flag if it exists.
+
+| Name | Flag | Description | Type | Default | Valid Values |
+|:----|:----|:----|:----|:----|:----|
+| listen | | The server listen host/port. This is the host and port the server will bind to. If this is not specified but `host` and `port` are specified, these values will be used. If neither `listen` nor `host`/`port` are specified, the default listen address will be used. | string | 0.0.0.0:9292 | |
+| host | | The server host that is advertised to clients, i.e. the address clients will attempt to connect to based on metadata API responses. If not set, `listen` will be returned to clients. This value may differ from `listen` in situations where the external address differs from the internal address, e.g. when running in a container. If `listen` is not specified, the server will also bind to this host. | string | localhost | |
+| port | port | The server port that is advertised to clients. See `host` for more information on how this behaves. | int | 9292 | |
+| tls.key | tls-key | The private key file for the server certificate. This must be set in combination with `tls.cert` to enable TLS. | string | | |
+| tls.cert | tls-cert | The server certificate file. This must be set in combination with `tls.key` to enable TLS. | string | | |
+| tls.client.auth.enabled | tls-client-auth | Enforce client-side authentication via certificate. | bool | false | |
+| tls.client.auth.ca | tls-client-auth-ca | The CA certificate file to use when authenticating clients. | string | | |
+| logging.level | level | The logging level. | string | info | [debug, info, warn, error] |
+| logging.recovery | | Log messages resulting from the replay of the Raft log on server recovery. | bool | false | |
+| logging.raft | | Enables logging in the Raft subsystem. | bool | false | |
+| data.dir | data-dir | The directory to store data in. | string | /tmp/liftbridge/<namespace> | |
+| batch.max.messages | | The maximum number of messages to batch when writing to disk. | int | 1024 | |
+| batch.max.time | | The maximum time to wait to batch more messages when writing to disk. | duration | 0 | |
+| metadata.cache.max.age | | The maximum age of cached broker metadata. | duration | 2m | |
+| nats | | NATS configuration. | map | | [See below](#nats-configuration-settings) |
+| streams | | Write-ahead log configuration for message streams. | map | | [See below](#streams-configuration-settings) |
+| clustering | | Broker cluster configuration. | map | | [See below](#clustering-configuration-settings) |
+| activity | | Meta activity event stream configuration. | map | | [See below](#activity-configuration-settings) |
+| cursors | | Cursor management configuration. | map | | [See below](#cursors-configuration-settings) |
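+
+As a rough sketch, the top-level server and TLS settings from the table above
+could be combined as follows. The host name, data directory, and certificate
+paths here are illustrative assumptions, not defaults:
+
+```yaml
+# Bind on all interfaces but advertise a routable host to clients.
+listen: 0.0.0.0:9292
+host: liftbridge.example.com
+data.dir: /var/lib/liftbridge
+
+# TLS is enabled only when both the certificate and key are provided.
+tls:
+  cert: /etc/liftbridge/server.crt
+  key: /etc/liftbridge/server.key
+
+logging:
+  level: info
+```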
+
+### NATS Configuration Settings
+
+Below is the list of the configuration settings for the `nats` section of
+the configuration file.
+
+| Name | Flag | Description | Type | Default | Valid Values |
+|:----|:----|:----|:----|:----|:----|
+| servers | nats-servers | List of NATS hosts to connect to. | list | nats://localhost:4222 | |
+| user | | Username to use to connect to NATS servers. | string | | |
+| password | | Password to use to connect to NATS servers. | string | | |
+| tls.cert | | Path to NATS certificate file. | string | | |
+| tls.key | | Path to NATS key file. | string | | |
+| tls.ca | | Path to NATS CA Root file. | string | | |
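+
+As an illustrative sketch, connecting to a secured NATS cluster might look
+like the following (hosts, credentials, and file paths are assumptions):
+
+```yaml
+nats:
+  servers:
+    - nats://nats-1.example.com:4222
+    - nats://nats-2.example.com:4222
+  user: liftbridge          # assumed username
+  password: changeme        # assumed password
+  tls:
+    cert: /etc/liftbridge/nats-client.crt
+    key: /etc/liftbridge/nats-client.key
+    ca: /etc/liftbridge/nats-ca.crt
+```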
+
+### Streams Configuration Settings
+
+Below is the list of the configuration settings for the `streams` section of the
+configuration file. These settings are applied globally to all streams.
+However, streams can be individually configured when they are created,
+overriding these settings.
+
+| Name | Flag | Description | Type | Default | Valid Values |
+|:----|:----|:----|:----|:----|:----|
+| retention.max.bytes | | The maximum size a stream's log can grow to, in bytes, before we will discard old log segments to free up space. A value of 0 indicates no limit. | int64 | 0 | |
+| retention.max.messages | | The maximum size a stream's log can grow to, in number of messages, before we will discard old log segments to free up space. A value of 0 indicates no limit. | int64 | 0 | |
+| retention.max.age | | The TTL for stream log segment files, after which they are deleted. A value of 0 indicates no TTL. | duration | 168h | |
+| cleaner.interval | | The frequency to check if a new stream log segment file should be rolled and whether any segments are eligible for deletion based on the retention policy or compaction if enabled. | duration | 5m | |
+| segment.max.bytes | | The maximum size of a single stream log segment file in bytes. Retention is always done a file at a time, so a larger segment size means fewer files but less granular control over retention. | int64 | 268435456 | |
+| segment.max.age | | The maximum time before a new stream log segment is rolled out. A value of 0 means new segments will only be rolled when `segment.max.bytes` is reached. Retention is always done a file at a time, so a larger value means fewer files but less granular control over retention. | duration | value of `retention.max.age` | |
+| compact.enabled | | Enables stream log compaction. Compaction works by retaining only the latest message for each key and discarding older messages. The frequency with which compaction runs is controlled by `cleaner.interval`. | bool | false | |
+| compact.max.goroutines | | The maximum number of concurrent goroutines to use for compaction on a stream log (only applicable if `compact.enabled` is `true`). | int | 10 | |
+| auto.pause.time | | The amount of time a stream partition can go idle, i.e. not receive a message, before it is automatically paused. A value of 0 disables auto pausing. | duration | 0 | |
+| auto.pause.disable.if.subscribers | | Disables automatic stream partition pausing when there are subscribers. | bool | false | |
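+
+For example, a size- and age-based retention policy with compaction could be
+sketched as follows (the limits are arbitrary illustrations, not
+recommendations):
+
+```yaml
+streams:
+  # Keep at most 7 days or ~10 GB of log data per partition, whichever is hit first.
+  retention.max:
+    age: 168h
+    bytes: 10737418240
+  # Roll smaller segments so retention can be enforced at finer granularity.
+  segment.max.bytes: 67108864
+  # Retain only the latest message per key.
+  compact.enabled: true
+  # Pause partitions that have been idle for an hour.
+  auto.pause.time: 1h
+```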
+
+### Clustering Configuration Settings
+
+Below is the list of the configuration settings for the `clustering` section of
+the configuration file.
+
+| Name | Flag | Description | Type | Default | Valid Values |
+|:----|:----|:----|:----|:----|:----|
+| server.id | server-id | ID of the server in the cluster. | string | random id | string with no spaces or periods |
+| namespace | namespace | Cluster namespace. | string | liftbridge-default | string with no spaces or periods |
+| raft.snapshot.retain | | The number of Raft log snapshots to retain on disk. | int | 2 | |
+| raft.snapshot.threshold | | Controls how many outstanding logs there must be before taking a snapshot. This prevents excessive snapshots when a small set of logs can be replayed. | int | 8192 | |
+| raft.cache.size | | The number of Raft logs to hold in memory for quick lookup. | int | 512 | |
+| raft.bootstrap.seed | raft-bootstrap-seed | Bootstrap the Raft cluster by electing self as leader if there is no existing state. If this is enabled, `raft.bootstrap.peers` should generally not be used, either on this node or peer nodes, since cluster topology is not being explicitly defined. Instead, peers should be started without bootstrap flags, which will cause them to automatically discover the bootstrapped leader and join the cluster. | bool | false | |
+| raft.bootstrap.peers | raft-bootstrap-peers | Bootstrap the Raft cluster with the provided list of peer IDs if there is no existing state. This should generally not be used in combination with `raft.bootstrap.seed` since it explicitly defines the cluster topology, and the configured topology will elect a leader. Note that once the cluster is established, new nodes can join without setting bootstrap flags since they will automatically discover the elected leader and join the cluster. | list | | |
+| replica.max.lag.time | | If a follower hasn't sent any replication requests or hasn't caught up to the leader's log end offset for at least this time, the leader will remove the follower from the ISR. | duration | 15s | |
+| replica.max.leader.timeout | | If a leader hasn't sent any replication responses for at least this time, the follower will report the leader to the controller. If a majority of the replicas report the leader, a new leader is selected by the controller. | duration | 15s | |
+| replica.max.idle.wait | | The maximum amount of time a follower will wait before making a replication request once the follower is caught up with the leader. This value should always be less than `replica.max.lag.time` to avoid frequent shrinking of the ISR for low-throughput streams. | duration | 10s | |
+| replica.fetch.timeout | | Timeout duration for follower replication requests. | duration | 3s | |
+| min.insync.replicas | | Specifies the minimum number of replicas that must acknowledge a stream write before it can be committed. If the ISR drops below this size, messages cannot be committed. | int | 1 | [1,...] |
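+
+As a sketch, a three-node cluster could be formed by seeding a single server
+and starting the others with only a unique server ID (the IDs below are
+assumptions):
+
+```yaml
+# Server 1: seeds the cluster by electing itself as the initial leader.
+clustering:
+  server.id: server-1
+  raft.bootstrap.seed: true
+```
+
+```yaml
+# Servers 2 and 3: no bootstrap flags; they discover the leader and join.
+clustering:
+  server.id: server-2
+```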
+
+### Activity Configuration Settings
+
+Below is the list of the configuration settings for the `activity` section of
+the configuration file.
+
+| Name | Flag | Description | Type | Default | Valid Values |
+|:----|:----|:----|:----|:----|:----|
+| stream.enabled | | Enables the activity stream. This will create an internal stream called `__activity` to which events will be published. | bool | false | |
+| stream.publish.timeout | | The timeout for publishes to the activity stream. This is the time to wait for an ack from the activity stream, which means it's related to `stream.publish.ack.policy`. If the ack policy is `none`, this has no effect. | duration | 5s | |
+| stream.publish.ack.policy | | The ack policy to use for publishes to the activity stream. The value `none` means publishes will not wait for an ack, `leader` means publishes will wait for the ack sent when the leader has committed the event, and `all` means publishes will wait for the ack sent when all replicas have committed the event. | string | all | [none, leader, all] |
+
+### Cursors Configuration Settings
+
+Below is the list of the configuration settings for the `cursors` section of
+the configuration file.
+
+| Name | Flag | Description | Type | Default | Valid Values |
+|:----|:----|:----|:----|:----|:----|
+| stream.partitions | | Sets the number of partitions for the internal `__cursors` stream which stores consumer cursors. A value of 0 disables the cursors stream. This cannot be changed once it is set. | int | 0 | |
+| stream.auto.pause.time | | The amount of time a partition in the internal `__cursors` stream can go idle, i.e. not receive a cursor update or fetch, before it is automatically paused. A value of 0 disables auto pausing. | duration | 1m | |
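+
+A brief sketch enabling both internal streams (the partition count and ack
+policy are illustrative choices):
+
+```yaml
+# Publish server events such as stream creation or pausing to __activity.
+activity:
+  stream.enabled: true
+  stream.publish.ack.policy: leader
+
+# Store consumer cursors on a 10-partition __cursors stream.
+cursors:
+  stream.partitions: 10
+  stream.auto.pause.time: 1m
+```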
diff --git a/website/versioned_docs/version-v1.4.1/quick_start.md b/website/versioned_docs/version-v1.4.1/quick_start.md
new file mode 100644
index 00000000..0cca73f3
--- /dev/null
+++ b/website/versioned_docs/version-v1.4.1/quick_start.md
@@ -0,0 +1,105 @@
+---
+id: version-v1.4.1-quick-start
+title: Quick Start
+original_id: quick-start
+---
+
+There are three ways to get started running Liftbridge on your machine:
+[downloading a pre-built binary](#binary), [building from source](#building-from-source),
+or [running a Docker container](#docker-container). There are also several
+options for running a Liftbridge cluster [described below](#running-a-liftbridge-cluster-locally).
+
+## Binary
+
+A pre-built Liftbridge binary can be downloaded for a specific platform from
+the [releases](https://github.com/liftbridge-io/liftbridge/releases) page. Once
+you have installed the binary, refer to the steps under [Building From
+Source](#building-from-source) for running the server.
+
+## Building From Source
+
+A Liftbridge binary can be built and installed from source using
+[Go](https://golang.org/doc/install). Follow the step below to install from
+source.
+
+```shell
+$ go get github.com/liftbridge-io/liftbridge
+```
+*Liftbridge uses [Go modules](https://github.com/golang/go/wiki/Modules), so
+ensure this is enabled, e.g. `export GO111MODULE=on`.*
+
+Liftbridge currently relies on an externally running
+[NATS server](https://github.com/nats-io/gnatsd). By default, it will connect
+to a NATS server running on localhost. The `--nats-servers` flag allows
+configuring the NATS server(s) to connect to.
+
+Also note that Liftbridge is clustered by default and relies on Raft for
+coordination. This means a cluster of three or more servers is normally run
+for high availability, and Raft manages electing a leader. A single server is
+actually a cluster of size 1. For safety purposes, the server cannot elect
+itself as leader without the `--raft-bootstrap-seed` flag, which instructs the
+server to elect itself as leader. This will start a single server that can
+begin handling requests. **Use this flag with caution as it should only be set
+on one server when bootstrapping a cluster.**
+
+```shell
+$ liftbridge --raft-bootstrap-seed
+INFO[2020-10-15 14:29:50] Liftbridge Version: v1.4.1
+INFO[2020-10-15 14:29:50] Server ID: 4nbhBr66WnRsy0I5oKF9bo
+INFO[2020-10-15 14:29:50] Namespace: liftbridge-default
+INFO[2020-10-15 14:29:50] Default Retention Policy: [Age: 1 week, Compact: false]
+INFO[2020-10-15 14:29:50] Default Partition Pausing: disabled
+INFO[2020-10-15 14:29:50] Starting server on 0.0.0.0:9292...
+INFO[2020-10-15 14:29:51] Server became metadata leader, performing leader promotion actions
+```
+
+Once a leader has been elected, other servers will automatically join the cluster.
+We set the `--data-dir` and `--port` flags to avoid clobbering the first server.
+
+```shell
+$ liftbridge --data-dir /tmp/liftbridge/server-2 --port=9293
+INFO[2020-10-15 14:30:48] Liftbridge Version: v1.4.1
+INFO[2020-10-15 14:30:48] Server ID: lbW05esZTab3guEwcmWD9M
+INFO[2020-10-15 14:30:48] Namespace: liftbridge-default
+INFO[2020-10-15 14:30:48] Default Retention Policy: [Age: 1 week, Compact: false]
+INFO[2020-10-15 14:30:48] Default Partition Pausing: disabled
+INFO[2020-10-15 14:30:48] Starting server on 0.0.0.0:9293...
+```
+
+We can also bootstrap a cluster by providing the explicit cluster configuration.
+To do this, we provide the IDs of the participating peers in the cluster using the
+`--raft-bootstrap-peers` flag. Raft will then handle electing a leader.
+
+```shell
+$ liftbridge --raft-bootstrap-peers server-2,server-3
+```
+
+## Docker Container
+
+Instead of running a binary, you can run Liftbridge using a container. There is
+a [container image](https://hub.docker.com/r/liftbridge/standalone-dev)
+available which runs an instance of Liftbridge and NATS inside a single Docker
+container. This is meant for development and testing purposes. Use the
+following Docker commands to run this container:
+
+```shell
+$ docker pull liftbridge/standalone-dev
+$ docker run -d --name=liftbridge-main -p 4222:4222 -p 9292:9292 -p 8222:8222 -p 6222:6222 liftbridge/standalone-dev
+```
+
+This will run the container, which starts both the NATS and Liftbridge
+servers. To check the logs to see if the container started properly, run:
+
+```shell
+$ docker logs liftbridge-main
+```
+
+See the [deployment guide](./deployment.md) for more information.
+
+## Running a Liftbridge Cluster Locally
+
+The quickest way to get a multi-node Liftbridge cluster up and running on your
+machine is with either [Docker Compose](https://docs.docker.com/compose) or
+[Kind](https://kind.sigs.k8s.io) (Kubernetes in Docker). Follow the
+[deployment guide](./deployment.md) for help running a cluster locally for
+development or testing.
diff --git a/website/versions.json b/website/versions.json
index c84e64ee..5ebd785b 100644
--- a/website/versions.json
+++ b/website/versions.json
@@ -1,4 +1,5 @@
 [
+  "v1.4.1",
   "v1.4.0",
   "v1.3.0",
   "v1.2.0",