diff --git a/docusaurus.config.js b/docusaurus.config.js index f722195d..a8e88f67 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -98,7 +98,7 @@ export default async function createConfigAsync() { ], rehypePlugins: [(await require("rehype-katex")).default], editUrl: ({ docPath }) => { - return `https://github.com/MultiQC/MultiQC/blob/main/docs${docPath.replace('multiqc_docs/multiqc_repo/docs', '')}` + return `https://github.com/MultiQC/MultiQC/blob/main/docs/markdown/${docPath.replace('multiqc_docs/multiqc_repo/docs', '')}` }, sidebarPath: "./multiqc_docs/sidebar.js", }, diff --git a/fusion_docs/faq.mdx b/fusion_docs/faq.mdx index 06239bf9..ee9c2939 100644 --- a/fusion_docs/faq.mdx +++ b/fusion_docs/faq.mdx @@ -2,53 +2,67 @@ title: FAQ --- -# Frequently Asked Questions +### Which cloud object stores does Fusion support? -## Which object storage are supported by Fusion +Fusion supports AWS S3, Azure Blob, and Google Cloud Storage. Fusion can also be used with local storage solutions that support the AWS S3 API. -Fusion currently supports AWS S3 and Google Storage. In the near future it will also support Azure Blob storage. +### How does Fusion work? -## Can I use Fusion with Minio? +Fusion implements a FUSE driver that mounts the cloud storage bucket in the job execution context as +a POSIX file system. This allows the job script to read and write data files in cloud object storage as if they were local files. -Yes. [Minio](https://min.io/), implements a S3-compatible API, therefore it can be used in place of AWS S3. -See the documentation how to configure your pipeline execution to use Fusion and Minio. (link to guide TBD). +### Why is Fusion faster than other FUSE drivers? -## Can I download Fusion? +Fusion is not a general purpose file system. It has been designed to optimize the data transfer of bioinformatics pipelines by taking advantage of the Nextflow data model. -No. 
Currently, Fusion can only be used by enabling Wave containers in the configuration of your Nextflow pipeline. +### Why do I need Wave containers to use Fusion? -## Why I need Wave containers to use Fusion? +Fusion is designed to work at the job execution level. This means it must run in a containerized job execution context. -Fusion is designed to work at level of job executions. For this reason, it needs to run in containerised job -execution context. +Downloading and installing Fusion manually would require you to rebuild all the containers used by your data pipeline to include the Fusion client each time a new version of the client is released. You would also need to maintain a custom mirror or existing container +collections, such as [BioContainers](https://biocontainers.pro/). -This would require to rebuild all containers used by your data pipeline to include the Fusion client each time a new -version of the Fusion client is released, and it would make necessary to maintain a custom mirror or existing containers -collections, such as [BioContainers](https://biocontainers.pro/) which is definitively not desirable. +Wave enables you to add the Fusion client to your pipeline containers at deploy time, without the need to rebuild them or +maintain a separate container image collection. -Wave allows adding the Fusion client in your pipeline containers at deploy time, without having to rebuild them or -to maintainer a separate container images collection. +### Can Fusion mount more than one bucket in the job's file system? -## How Fusion works behind the scene? +Yes. Any access to cloud object storage is automatically detected by Fusion and the corresponding buckets are mounted +on demand. -Fusion is implemented a FUSE driver that mounts the storage bucket in the job execution context as -a POSIX file system. This allows the job script to read and write data over the object storage like it were local files. 
+### Can Fusion mount buckets of different vendors in the same execution? -## Can Fusion mount more than one bucket in job file system +No. Fusion can mount multiple buckets per execution, but all from the same vendor, such as AWS S3 or Google Cloud Storage. -Yes. Fusion any access to an object storage is automatically detected by Fusion and the corresponding bucket is mounted -on-demand. +### I tried Fusion, but I didn't notice any performance improvement. Why? -## Can Fusion mount buckets of different vendors in the same execution? +If you didn’t notice any performance improvement with Fusion, the bottleneck may lie in other factors, such as network latency or memory limitations. Fusion’s caching strategy relies heavily on NVMe SSD or similar storage technology, so ensure your computing nodes are using the recommended storage. Check your Platform compute environment page for optimal instance and storage configurations: -No. Fusion can mount multiple buckets but the must be of the vendor e.g. AWS S3 or Google Storage. +- [AWS Batch](https://docs.seqera.io/platform/latest/compute-envs/aws-batch) +- [Azure Batch](https://docs.seqera.io/platform/latest/compute-envs/azure-batch) +- [Google Cloud Batch](https://docs.seqera.io/platform/latest/compute-envs/google-cloud-batch) +- [Amazon EKS](https://docs.seqera.io/platform/latest/compute-envs/eks) +- [Google GKE](https://docs.seqera.io/platform/latest/compute-envs/gke) -## How Fusion can be faster of other existing FUSE driver? +### Can I pin a specific Fusion version to use with Nextflow? -Fusion is not a general purpose file system. Instead, it has been designed to optimise the data transfer of Nextflow -data pipeline taking advantage of the data model used by Nextflow. [to be improved] +Yes. 
Add the Fusion version's config URL using the `containerConfigUrl` option in the Fusion block of your Nextflow configuration (replace `v2.4.2` with the version of your choice): -## I tried Fusion, but I didn't notice any performance improvement. Why? +```groovy +fusion { + enabled = true + containerConfigUrl = 'https://fusionfs.seqera.io/releases/v2.4.2-amd64.json' +} +``` -Make sure the computing nodes in your cluster have NVMe SSD storage or equivalent technology. Fusion implements an -aggressive caching strategy that requires the use of local scratch storage bases on solid-state disks. +:::note +For ARM CPU architectures, use https://fusionfs.seqera.io/releases/v2.4.2-arm64.json. +::: + +### Can I use Fusion with Minio? + +Yes. [Minio](https://min.io/) implements an S3-compatible API, therefore it can be used instead of AWS S3. See [Local execution with Minio](https://www.nextflow.io/docs/latest/fusion.html#local-execution-with-minio) for more information. + +### Can I download Fusion? + +No. Fusion can only be used directly in supported [Seqera Platform compute environments](https://docs.seqera.io/platform/latest/compute-envs/overview), or by enabling [Wave containers](https://docs.seqera.io/wave) in your Nextflow configuration. diff --git a/fusion_docs/get-started.mdx b/fusion_docs/get-started.mdx new file mode 100644 index 00000000..c501634c --- /dev/null +++ b/fusion_docs/get-started.mdx @@ -0,0 +1,56 @@ +--- +title: Get started +description: "Use the Fusion v2 file system in Seqera Platform and Nextflow" +date: "23 Aug 2024" +tags: [fusion, storage, compute, file system, posix, client] +--- + +Use Fusion directly in Seqera Platform compute environments, or add Fusion to your Nextflow pipeline configuration. 
+ +### Seqera Platform + +Use Fusion directly in the following Seqera Platform compute environments: +- [AWS Batch](https://docs.seqera.io/platform/latest/compute-envs/aws-batch) +- [Azure Batch](https://docs.seqera.io/platform/latest/compute-envs/azure-batch) +- [Google Cloud Batch](https://docs.seqera.io/platform/latest/compute-envs/google-cloud-batch) +- [Amazon EKS](https://docs.seqera.io/platform/latest/compute-envs/eks) +- [Google GKE](https://docs.seqera.io/platform/latest/compute-envs/gke) + +See the Platform compute environment page for your cloud provider for Fusion configuration instructions and optimal compute and storage recommendations. + +### Nextflow + +:::note +Fusion requires Nextflow `22.10.0` or later. +::: + +Fusion integrates with Nextflow directly and does not require any installation or change in pipeline code. It only requires the use of a container runtime or a container computing service such as Kubernetes, AWS Batch, or Google Cloud Batch. + +#### Nextflow installation + +If you already have Nextflow installed, update to the latest version using this command: + +```bash +nextflow -self-update +``` + +Otherwise, install Nextflow with this command: + +```bash +curl get.nextflow.io | bash +``` + +#### Fusion configuration + +To enable Fusion in your Nextflow pipeline, add the following snippet to your `nextflow.config` file: + +```groovy +fusion.enabled = true +wave.enabled = true +tower.accessToken = '' //optional +``` + +:::tip +The use of the Platform access token is not mandatory, however, it's required to enable access to private repositories +and it allows higher service rate limits compared to anonymous users. 
::: \ No newline at end of file diff --git a/fusion_docs/guide.mdx b/fusion_docs/guide.mdx index 21eb5a3a..c80bc5b0 100644 --- a/fusion_docs/guide.mdx +++ b/fusion_docs/guide.mdx @@ -1,5 +1,8 @@ --- title: User guide +description: "Overview of the Fusion v2 file system" +date: "23 Aug 2024" +tags: [fusion, storage, compute, file system, posix, client] --- # User guide @@ -23,13 +26,13 @@ Fusion smoothly integrates with Nextflow and does not require any installation o ### Nextflow installation -If you have already installed Nextflow, update to the latest version using this command:: +If you have already installed Nextflow, update to the latest version using this command: ```bash nextflow -self-update ``` -If you don't have Nextflow already installed, install it with the command below:: +If you don't have Nextflow already installed, install it with the command below: ```bash curl get.nextflow.io | bash @@ -37,7 +40,7 @@ curl get.nextflow.io | bash ### Fusion configuration -To enable Fusion in your Nextflow pipeline add the following snippet to your `nextflow.config` file:: +To enable Fusion in your Nextflow pipeline add the following snippet to your `nextflow.config` file: ```groovy fusion.enabled = true diff --git a/fusion_docs/guide/aws-batch-s3.mdx b/fusion_docs/guide/aws-batch-s3.mdx deleted file mode 100644 index cc4f1688..00000000 --- a/fusion_docs/guide/aws-batch-s3.mdx +++ /dev/null @@ -1,36 +0,0 @@ -# Fusion using AWS Batch and S3 object storage - -Fusion simplifies and makes more efficient the execution of Nextflow pipelines with [AWS Batch](https://aws.amazon.com/batch/) in several ways: - -1. No need to use the AWS CLI tool for copying data from and to S3. -2. No need to create a custom AMI or create custom containers to include the AWS CLI tool. -3. Fusion uses an efficient data transfer and caching algorithm that provides much faster throughput compared to AWS CLI and does not require the local copy of the data files. -4. 
Replacing the AWS CLI with a native API client, the transfer is much more robust at scale. - -A minimal pipeline configuration looks like the following: - -```groovy -process.executor = 'awsbatch' -process.queue = '' -aws.region = '' -fusion.enaled = true -wave.enabled = true -``` - -In the above snippet replace `YOUR AWS BATCH QUEUE` and `YOUR AWS REGION` with corresponding AWS Batch queue and region -of your choice, and save it to a file named `nextflow.config` into the pipeline launching directory. - -Then launch the pipeline execution with the usual run command: - -``` -nextflow run -w s3:///work -``` - -Replacing `YOUR PIPELINE SCRIPT` with the URI of your pipeline Git repository -and `YOUR-BUCKET` with a S3 bucket of your choice. - -:::tip - - To achieve best performance make sure to setup a SSD volumes as temporary directory. - See the section [SSD storage](#ssd-storage) for details. -::: diff --git a/fusion_docs/guide/aws-batch.mdx b/fusion_docs/guide/aws-batch.mdx new file mode 100644 index 00000000..d1576b33 --- /dev/null +++ b/fusion_docs/guide/aws-batch.mdx @@ -0,0 +1,50 @@ +--- +title: AWS Batch +description: "Use Fusion with AWS Batch and S3 storage" +date: "23 Aug 2024" +tags: [fusion, storage, compute, aws batch, s3] +--- + +Fusion simplifies and improves the efficiency of Nextflow pipelines in [AWS Batch](https://aws.amazon.com/batch/) in several ways: + +- No need to use the AWS CLI tool for copying data to and from S3 storage. +- No need to create a custom AMI or create custom containers to include the AWS CLI tool. +- Fusion uses an efficient data transfer and caching algorithm that provides much faster throughput compared to AWS CLI and does not require a local copy of data files. +- By replacing the AWS CLI with a native API client, the transfer is much more robust at scale. + +### Platform AWS Batch compute environments + +Seqera Platform supports Fusion in Batch Forge and manual AWS Batch compute environments. 
+ +See [AWS Batch](https://docs.seqera.io/platform/latest/compute-envs/aws-batch) for compute and storage recommendations and instructions to enable Fusion. + +### Nextflow CLI + +:::tip +Fusion file system implements a lazy download and upload algorithm that runs in the background to transfer files in +parallel to and from the object storage into the container-local temporary directory (`/tmp`). To achieve optimal performance, set up an SSD volume as the temporary directory. + +Several AWS EC2 instance types include one or more NVMe SSD volumes. These volumes must be formatted to be used. See [SSD instance storage](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html) for details. Seqera Platform automatically formats and configures NVMe instance storage with the “Fast instance storage” option when you create an AWS Batch compute environment. +::: + +1. Add the following to your `nextflow.config` file: + + ```groovy + process.executor = 'awsbatch' + process.queue = '' + process.scratch = false + process.containerOptions = '-v /path/to/ssd:/tmp' // Required for SSD volumes + aws.region = '' + fusion.enabled = true + wave.enabled = true + ``` + + Replace `` and `` with your AWS Batch queue and region. + +1. Run the pipeline with the usual run command: + + ``` + nextflow run -w s3:///work + ``` + + Replace `` with your pipeline Git repository URI and `` with your S3 bucket. diff --git a/fusion_docs/guide/aws-eks-s3.mdx b/fusion_docs/guide/aws-eks-s3.mdx deleted file mode 100644 index 6825018a..00000000 --- a/fusion_docs/guide/aws-eks-s3.mdx +++ /dev/null @@ -1,130 +0,0 @@ -# Fusion with AWS EKS and S3 object storage - -Fusion streamlines the deployment of Nextflow pipeline in a Kubernetes cluster, because it replaces the need to configure -and maintain a shared file system in your cluster. 
- -## Kubernetes config - -You will need to create a namespace and a service account in your Kubernetes cluster to run the job submitted by the pipeline execution. - -The following manifest shows the bare minimum configuration. - -```yaml ---- -apiVersion: v1 -kind: Namespace -metadata: - name: fusion-demo ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - namespace: fusion-demo - name: fusion-sa - annotations: - eks.amazonaws.com/role-arn: "arn:aws:iam:::role/fusion-demo-role" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: fusion-demo - name: fusion-role -rules: - - apiGroups: [""] - resources: ["pods", "pods/status", "pods/log", "pods/exec"] - verbs: ["get", "list", "watch", "create", "delete"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - namespace: fusion-demo - name: fusion-rolebind -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: fusion-role -subjects: - - kind: ServiceAccount - name: fusion-sa -``` - -The AWS IAM role should provide read-write permission to the S3 bucket used as the pipeline work directory. For example: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["s3:ListBucket"], - "Resource": ["arn:aws:s3:::"] - }, - { - "Action": [ - "s3:GetObject", - "s3:PutObject", - "s3:PutObjectTagging", - "s3:DeleteObject" - ], - "Resource": ["arn:aws:s3:::/*"], - "Effect": "Allow" - } - ] -} -``` - -In the above policy replace `` with a bucket name of your choice. 
- -Also, make sure that the role defines a trust relationship similar to this: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - "oidc.eks.eu-west-2.amazonaws.com/id/:aud": "sts.amazonaws.com", - "oidc.eks.eu-west-2.amazonaws.com/id/:sub": "system:serviceaccount:fusion-demo:fusion-sa" - } - } - } - ] -} -``` - -## Nextflow configuration - -The minimal Nextflow configuration looks like the following: - -```groovy -wave.enabled = true -fusion.enabled = true -process.executor = 'k8s' -k8s.context = '' -k8s.namespace = 'fusion-demo' -k8s.serviceAccount = 'fusion-sa' -``` - -In the above snippet replace `YOUR K8S CLUSTER CONTEXT` with Kubernetes context in your Kubernetes config, and save it -to a file named `nextflow.config` into the pipeline launching directory. - -Then launch the pipeline execution with the usual run command: - -``` -nextflow run -w s3:///work -``` - -Replacing `YOUR PIPELINE SCRIPT` with the URI of your pipeline Git repository -and `YOUR-BUCKET` with a S3 bucket of your choice. - -:::tip - - To achieve best performance make sure to setup a SSD volumes as temporary directory. - See the section [SSD storage](#ssd-storage) for details. -::: diff --git a/fusion_docs/guide/aws-eks.mdx b/fusion_docs/guide/aws-eks.mdx new file mode 100644 index 00000000..bee14749 --- /dev/null +++ b/fusion_docs/guide/aws-eks.mdx @@ -0,0 +1,145 @@ +--- +title: Amazon EKS +description: "Use Fusion with AWS EKS and S3 storage" +date: "23 Aug 2024" +tags: [fusion, storage, compute, aws eks, s3] +--- + +Fusion streamlines the deployment of Nextflow pipelines in Kubernetes because it replaces the need to configure +and maintain a shared file system in your cluster. 
+ +### Platform Amazon EKS compute environments + +Seqera Platform supports Fusion in Amazon EKS compute environments. + +See [Amazon EKS](https://docs.seqera.io/platform/latest/compute-envs/eks) for Platform instructions to enable Fusion. + +### Nextflow CLI + +:::tip +Fusion file system implements a lazy download and upload algorithm that runs in the background to transfer files in +parallel to and from the object storage into the container-local temporary directory (`/tmp`). To achieve optimal performance, set up an SSD volume as the temporary directory. + +Several AWS EC2 instance types include one or more NVMe SSD volumes. These volumes must be formatted to be used. See [SSD instance storage](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html) for details. +::: + +To use Fusion directly in Nextflow with an Amazon EKS cluster, you must configure a namespace and service account and update your Nextflow configuration. + +#### Kubernetes configuration + +You must create a namespace and a service account in your Kubernetes cluster to run the jobs submitted during pipeline execution. + +1. 
Create a manifest that includes the following configuration at minimum: + + ```yaml + --- + apiVersion: v1 + kind: Namespace + metadata: + name: fusion-demo + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: fusion-demo + name: fusion-sa + annotations: + eks.amazonaws.com/role-arn: "arn:aws:iam:::role/fusion-demo-role" + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + namespace: fusion-demo + name: fusion-role + rules: + - apiGroups: [""] + resources: ["pods", "pods/status", "pods/log", "pods/exec"] + verbs: ["get", "list", "watch", "create", "delete"] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + namespace: fusion-demo + name: fusion-rolebind + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: fusion-role + subjects: + - kind: ServiceAccount + name: fusion-sa + ``` + +1. The AWS IAM role must provide read-write permission to the S3 bucket used as the pipeline work directory: + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": ["s3:ListBucket"], + "Resource": ["arn:aws:s3:::"] + }, + { + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:PutObjectTagging", + "s3:DeleteObject" + ], + "Resource": ["arn:aws:s3:::/*"], + "Effect": "Allow" + } + ] + } + ``` + + Replace `` with a bucket name of your choice. + +1. The role must define a trust relationship similar to this: + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam:::oidc-provider/oidc.eks..amazonaws.com/id/" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "oidc.eks.eu-west-2.amazonaws.com/id/:aud": "sts.amazonaws.com", + "oidc.eks.eu-west-2.amazonaws.com/id/:sub": "system:serviceaccount:fusion-demo:fusion-sa" + } + } + } + ] + } + ``` + +#### Nextflow configuration + +1. 
Add the following to your `nextflow.config` file: + + ```groovy + wave.enabled = true + fusion.enabled = true + process.executor = 'k8s' + k8s.context = '' + k8s.namespace = 'fusion-demo' + k8s.serviceAccount = 'fusion-sa' + ``` + + Replace `` with the Kubernetes context in your Kubernetes config. + +1. Run the pipeline with the usual run command: + + ``` + nextflow run -w s3:///work + ``` + + Replace `` with your pipeline Git repository URI and `` with your S3 bucket. + diff --git a/fusion_docs/guide/azure-batch.mdx b/fusion_docs/guide/azure-batch.mdx new file mode 100644 index 00000000..f98543f5 --- /dev/null +++ b/fusion_docs/guide/azure-batch.mdx @@ -0,0 +1,41 @@ +--- +title: Azure Batch +description: "Use Fusion with Azure Batch and Azure Blob storage" +date: "23 Aug 2024" +tags: [fusion, storage, compute, azure batch, blob storage] +--- + +Fusion simplifies and improves the efficiency of Nextflow pipelines in [Azure Batch](https://azure.microsoft.com/en-us/products/batch) in several ways: + +- No need to use the Azure CLI tool for copying data to and from Azure Blob Storage. +- No need to install the Azure CLI tool on the node machine. +- By replacing the Azure CLI with a native API client, the transfer is much more robust at scale. +- By streaming relevant data and monitoring the virtual machine storage, Fusion can use more data than the capacity of the attached storage drive. + +### Platform Azure Batch compute environments + +Seqera Platform supports Fusion in Batch Forge and manual Azure Batch compute environments. + +See [Azure Batch](https://docs.seqera.io/platform/latest/compute-envs/azure-batch) for compute and storage recommendations and instructions to enable Fusion. + +### Nextflow CLI + +:::tip +We recommend selecting machine types with a local temp storage disk of at least 200 GB and a random read speed of 1000 MBps or more for large and long-lived production pipelines. 
The suffix `d` after the core number (e.g., `Standard_E16*d*_v5`) denotes a VM with a local temp disk. Select instances with Standard SSDs — Fusion does not support Azure network-attached storage (Premium SSDv2, Ultra Disk, etc.). Larger local storage increases Fusion's throughput and reduces the chance of overloading the machine. See [Sizes for virtual machines in Azure](https://learn.microsoft.com/en-us/azure/virtual-machines/sizes/overview) for more information. +::: + +1. Add the following to your `nextflow.config` file: + + ```groovy + fusion.enabled = true + wave.enabled = true + process.executor = 'azure-batch' + ``` + +1. Run the pipeline with the usual run command: + + ``` + nextflow run -w az:///scratch + ``` + + Replace `` with your pipeline Git repository URI and `` with your Blob Storage container. \ No newline at end of file diff --git a/fusion_docs/guide/gcp-batch-object.mdx b/fusion_docs/guide/gcp-batch-object.mdx deleted file mode 100644 index 80ddc71c..00000000 --- a/fusion_docs/guide/gcp-batch-object.mdx +++ /dev/null @@ -1,35 +0,0 @@ -# Fusion with Google cloud Batch and Google object storage - -Fusion allows the use of Google Storage as a virtual distributed file system with [Google Cloud Batch](https://cloud.google.com/batch). - -The minimal Nextflow configuration looks like the following: - -```groovy -fusion.enabled = true -wave.enabled = true -process.scratch = false -process.executor = 'google-batch' -google.location = '' -``` - -In the above snippet replace `YOUR GOOGLE LOCATION` with the Google region of your choice e.g. `europe-west2`, -and save it to a file named `nextflow.config` into the pipeline launching directory. - -Then launch the pipeline execution with the usual run command: - -``` -nextflow run -w gs:///work -``` - -Make sure to specify a Google Storage bucket to which you have read-write access as work directory. 
- -Google credentials should be provided via the `GOOGLE_APPLICATION_CREDENTIALS` environment variable -or by using the `gcloud` auth application-default login command. You can find more details at in the -[Nextflow documentation](https://www.nextflow.io/docs/latest/google.html#credentials). - -:::note - - When Fusion is enabled, by default, only machine types that allow the mount of local SSD disks will be used. - If you specify your own machine type or machine series make sure they allow the use of local SSD disks, otherwise - the job scheduling will fail. -::: diff --git a/fusion_docs/guide/gcp-batch.mdx b/fusion_docs/guide/gcp-batch.mdx new file mode 100644 index 00000000..b4f3a5e9 --- /dev/null +++ b/fusion_docs/guide/gcp-batch.mdx @@ -0,0 +1,56 @@ +--- +title: Google Cloud Batch +description: "Use Fusion with Google Cloud Batch and Google Cloud Storage" +date: "23 Aug 2024" +tags: [fusion, storage, compute, gcp batch, gcs, google cloud] +--- + +Fusion simplifies and improves the efficiency of Nextflow pipelines in [Google Cloud Batch](https://cloud.google.com/batch) in several ways: + +- No need to use the gcloud CLI tool for copying data to and from Google Cloud storage. +- No need to create custom containers to include the gcloud CLI tool. +- Fusion uses an efficient data transfer and caching algorithm that provides much faster throughput compared to gcloud CLI and does not require a local copy of data files. +- Replacing the gcloud CLI with a native API client, the transfer is much more robust at scale. + +### Platform Google Cloud Batch compute environments + +Seqera Platform supports Fusion in Google Cloud Batch compute environments. + +See [Google Cloud Batch](https://docs.seqera.io/platform/latest/compute-envs/google-cloud-batch) for compute and storage recommendations and instructions to enable Fusion. 
+ +### Nextflow CLI + +:::tip +When Fusion v2 is enabled, the following virtual machine settings are applied: + - A 375 GB local NVMe SSD is selected for all compute jobs. + - If you do not specify a machine type, a VM from the following families that support local SSDs will be selected: `n1-*`, `n2-*`, `n2d-*`, `c2-*`, `c2d-*`, `m3-*`. + - Any machine types you specify in the Nextflow config must support local SSDs. + - Local SSDs are only offered in multiples of 375 GB. You can increment the number of SSDs used per process with the `disk` directive to request multiples of 375 GB. + - Fusion v2 can also use persistent disks for caching. Override the disk requested by Fusion using the `disk` directive and the `type: pd-standard`. + - The `machineType` directive can be used to specify a VM instance type, family, or custom machine type in a comma-separated list of patterns. For example, `c2-*`, `n1-standard-1`, `custom-2-4`, `n*`, `m?-standard-*`. +::: + +1. Provide your Google credentials via the `GOOGLE_APPLICATION_CREDENTIALS` environment variable +or with the `gcloud` auth application-default login command. See [Credentials](https://www.nextflow.io/docs/latest/google.html#credentials) for more information. + +1. Add the following to your `nextflow.config` file: + + ```groovy + fusion.enabled = true + wave.enabled = true + process.scratch = false + process.executor = 'google-batch' + google.location = '' + ``` + + Replace `` with the Google region of your choice, such as `europe-west2`. + +1. Run the pipeline with the usual run command: + + ``` + nextflow run -w gs:///work + ``` + + Replace `` with a Google Cloud Storage bucket to which you have read-write access. 
+ + diff --git a/fusion_docs/guide/gcp-gke-object.mdx b/fusion_docs/guide/gcp-gke-object.mdx deleted file mode 100644 index a2950be6..00000000 --- a/fusion_docs/guide/gcp-gke-object.mdx +++ /dev/null @@ -1,90 +0,0 @@ -# Fusion with Google GKE and Google object storage - -Fusion streamlines the deployment of Nextflow pipeline in a Kubernetes cluster, because it replaces the need to configure -and maintain a shared file system in your cluster. - -:::note - - This feature requires Nextflow 23.02.1-edge or later. -::: - -## Cluster preparation - -1. Create a GKE "standard" cluster ("Autopilot" is not supported yet). See [Google documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-zonal-cluster) for details. -2. Make sure to use instance types with 2 or more CPUs and providing SSD instance storage (families: `n1`, `n2`, `c2`, `m1`, `m2`, `m3`) -3. Make sure to enable the [Workload identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) feature when creating (or updating) the cluster - - "Enable Workload Identity" in the cluster "Security" setting - - "Enable GKE Metadata Server" in the node group "Security" settings - - Configure the cluster following the See the [Google documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubectl) for details. documentation - - The following values were used in this example (replace them with values corresponding your environment): - - `CLUSTER_NAME`: the GKE cluster name e.g. `cluster-1` - - `COMPUTE_REGION`: the GKE cluster region e.g. `europe-west1` - - `NAMESPACE`: the GKE namespace e.g. `fusion-demo` - - `KSA_NAME`: the GKE service account name e.g. `fusion-sa` - - `GSA_NAME`: the Google service account e.g. `gsa-demo` - - `GSA_PROJECT`: the Google project id e.g. `my-nf-project-261815` - - `PROJECT_ID`: the Google project id e.g. `my-nf-project-261815` - - `ROLE_NAME`: the role to grant access permission to the Google Storage bucket e.g. 
`roles/storage.admin` -4. Create the K8s _role_ and _rolebinding_ required to run Nextflow applying the Kubernetes config shown below: - -```yaml ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: fusion-demo - name: fusion-role -rules: - - apiGroups: [""] - resources: ["pods", "pods/status", "pods/log", "pods/exec"] - verbs: ["get", "list", "watch", "create", "delete"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - namespace: fusion-demo - name: fusion-rolebind -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: fusion-role -subjects: - - kind: ServiceAccount - name: fusion-sa ---- -apiVersion: v1 -kind: Secret -metadata: - namespace: fusion-demo - name: fusion-sa-token - annotations: - kubernetes.io/service-account.name: fusion-sa -type: kubernetes.io/service-account-token -... -``` - -## Nextflow configuration - -The minimal Nextflow configuration looks like the following: - -``` -wave.enabled = true -fusion.enabled = true -process.executor = 'k8s' -process.scratch = false -k8s.context = '' -k8s.namespace = 'fusion-demo' -k8s.serviceAccount = 'fusion-sa' -k8s.pod.nodeSelector = 'iam.gke.io/gke-metadata-server-enabled=true' -``` - -In the above snippet replace `` with the name of the context in you Kubernetes configuration, -and save it to a file named `nextflow.config` into the pipeline launching directory. - -Then launch the pipeline execution with the usual run command: - -``` -nextflow run -w gs:///work -``` - -Make sure to specify a Google Storage bucket to which you have read-write access as work directory. 
diff --git a/fusion_docs/guide/gcp-gke.mdx b/fusion_docs/guide/gcp-gke.mdx new file mode 100644 index 00000000..bab32793 --- /dev/null +++ b/fusion_docs/guide/gcp-gke.mdx @@ -0,0 +1,103 @@ +--- +title: Google GKE +description: "Use Fusion with Google GKE and Google Cloud Storage" +date: "23 Aug 2024" +tags: [fusion, storage, compute, gke, gcs, google cloud, kubernetes] +--- + +Fusion streamlines the deployment of Nextflow pipelines in Kubernetes because it replaces the need to configure +and maintain a shared file system in your cluster. + +### Platform Google GKE compute environments + +Seqera Platform supports Fusion in Google GKE compute environments. + +See [Google GKE](https://docs.seqera.io/platform/latest/compute-envs/gke) for Platform instructions to enable Fusion. + +### Nextflow CLI + +:::note +This feature requires Nextflow 23.02.1-edge or later. +::: + +To use Fusion directly in Nextflow with a Google GKE cluster, you must configure a cluster, namespace, and service account, and update your Nextflow configuration. + +#### Kubernetes configuration + +1. Create a GKE "standard" cluster ("Autopilot" is not supported). See [Creating a zonal cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-zonal-cluster) for more information. +1. Use instance types with 2 or more CPUs and SSD storage (families: `n1`, `n2`, `c2`, `m1`, `m2`, `m3`). +1. Enable the [Workload identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) feature when creating (or updating) the cluster: + - **Enable Workload Identity** in the cluster **Security** settings. + - **Enable GKE Metadata Server** in the node group **Security** settings. +1. See [Authenticate to Google Cloud APIs from GKE workloads](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubectl) to configure the cluster. +1. 
Replace the following example values with values corresponding to your environment: + - `CLUSTER_NAME`: the GKE cluster name — `cluster-1` + - `COMPUTE_REGION`: the GKE cluster region — `europe-west1` + - `NAMESPACE`: the GKE namespace — `fusion-demo` + - `KSA_NAME`: the GKE service account name — `fusion-sa` + - `GSA_NAME`: the Google service account — `gsa-demo` + - `GSA_PROJECT`: the Google project id — `my-nf-project-261815` + - `PROJECT_ID`: the Google project id — `my-nf-project-261815` + - `ROLE_NAME`: the role to grant access permissions to the Google Storage bucket — `roles/storage.admin` +1. Create the K8s _role_ and _rolebinding_ required to run Nextflow by applying the following Kubernetes config: + + ```yaml + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + namespace: fusion-demo + name: fusion-role + rules: + - apiGroups: [""] + resources: ["pods", "pods/status", "pods/log", "pods/exec"] + verbs: ["get", "list", "watch", "create", "delete"] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + namespace: fusion-demo + name: fusion-rolebind + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: fusion-role + subjects: + - kind: ServiceAccount + name: fusion-sa + --- + apiVersion: v1 + kind: Secret + metadata: + namespace: fusion-demo + name: fusion-sa-token + annotations: + kubernetes.io/service-account.name: fusion-sa + type: kubernetes.io/service-account-token + ... + ``` + +#### Nextflow configuration + +1. Add the following to your `nextflow.config` file: + + ``` + wave.enabled = true + fusion.enabled = true + process.executor = 'k8s' + process.scratch = false + k8s.context = '' + k8s.namespace = 'fusion-demo' + k8s.serviceAccount = 'fusion-sa' + k8s.pod.nodeSelector = 'iam.gke.io/gke-metadata-server-enabled=true' + ``` + + Replace `` with the context name in your Kubernetes configuration. + +1. 
Run the pipeline with the usual run command: + + ``` + nextflow run -w gs:///work + ``` + + Replace `` with a Google Cloud Storage bucket to which you have read-write access. diff --git a/fusion_docs/guide/local-s3.mdx b/fusion_docs/guide/local-s3.mdx deleted file mode 100644 index f40ca24c..00000000 --- a/fusion_docs/guide/local-s3.mdx +++ /dev/null @@ -1,31 +0,0 @@ -# Fusion using local execution and S3 object storage - -Fusion allow running Nextflow pipeline using the local executor and a S3 bucket as the pipeline scratch directory. This -can be useful to scale vertically your pipeline execution using a large EC2 instance and without requiring to allocate -a large EBS volume for the pipeline temporary data. - -The pipeline configuration looks like the following: - -```groovy -wave.enabled = true -docker.enabled = true -fusion.enabled = true -fusion.exportAwsAccessKeys = true -``` - -Save the above snippet to a file named `nextflow.config` into the pipeline launching directory. - -Then launch the pipeline execution with the usual run command: - -``` -nextflow run -w s3:///work -``` - -Replacing `YOUR PIPELINE SCRIPT` with the URI of your pipeline Git repository -and `YOUR-BUCKET` with a S3 bucket of your choice. - -:::tip - - To achieve best performance make sure to setup a SSD volumes as temporary directory. - See the section [SSD storage](#ssd-storage) for details. -::: diff --git a/fusion_docs/guide/local.mdx b/fusion_docs/guide/local.mdx new file mode 100644 index 00000000..05b9cf61 --- /dev/null +++ b/fusion_docs/guide/local.mdx @@ -0,0 +1,101 @@ +--- +title: Local execution +description: "Use Fusion with the Nextflow local executor and cloud storage" +date: "23 Aug 2024" +tags: [fusion, storage, compute, local, s3] +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +With Fusion, you can run Nextflow pipelines using the local executor and a cloud storage bucket as the pipeline scratch directory. 
This +is useful to scale your pipeline execution vertically with a large compute instance, without the need to allocate +a large storage volume for temporary pipeline data. + +:::note +This configuration requires the use of Docker (or a similar container engine) for the execution of your pipeline tasks. +::: + + + + + 1. Set `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables to grant Nextflow and Fusion access to your storage credentials. + + 1. Add the following to your `nextflow.config` file: + + ```groovy + wave.enabled = true + docker.enabled = true + fusion.enabled = true + fusion.exportStorageCredentials = true + ``` + + 1. Run the pipeline with the usual run command: + + ``` + nextflow run -w s3:///work + ``` + + Replace `YOUR PIPELINE SCRIPT` with your pipeline Git repository URI + and `YOUR-BUCKET` with an S3 bucket of your choice. + + :::tip + To achieve optimal performance, set up an SSD volume as the temporary directory. + ::: + + + + + 1. Set `AZURE_STORAGE_ACCOUNT_NAME` and `AZURE_STORAGE_ACCOUNT_KEY` or `AZURE_STORAGE_SAS_TOKEN` environment variables to grant Nextflow and Fusion access to your storage credentials. + + 1. Add the following to your `nextflow.config` file: + + ```groovy + wave.enabled = true + docker.enabled = true + fusion.enabled = true + fusion.exportStorageCredentials = true + ``` + + 1. Run the pipeline with the usual run command: + + ``` + nextflow run -w az:///work + ``` + + Replace `YOUR PIPELINE SCRIPT` with your pipeline Git repository URI + and `YOUR-BUCKET` with an Azure Blob Storage container of your choice. + + :::tip + To achieve optimal performance, set up an SSD volume as the temporary directory. + ::: + + + + + 1. Set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable with your service account JSON key to grant Nextflow and Fusion access to your storage credentials. + + 1. 
Add the following to your `nextflow.config` file: + + ```groovy + wave.enabled = true + docker.enabled = true + fusion.enabled = true + fusion.exportStorageCredentials = true + ``` + + 1. Run the pipeline with the usual run command: + + ``` + nextflow run -w gs:///work + ``` + + Replace `YOUR PIPELINE SCRIPT` with your pipeline Git repository URI + and `YOUR-BUCKET` with a Google Cloud Storage bucket of your choice. + + :::tip + To achieve optimal performance, set up an SSD volume as the temporary directory. + ::: + + + \ No newline at end of file diff --git a/fusion_docs/guide/ssd.mdx b/fusion_docs/guide/ssd.mdx deleted file mode 100644 index bbb2b0d4..00000000 --- a/fusion_docs/guide/ssd.mdx +++ /dev/null @@ -1,23 +0,0 @@ -# SSD storage - -Fusion file system implements a lazy download and upload algorithm that runs in the background to transfer files in -parallel to and from the object storage into the container-local temporary directory (`/tmp`). - -To achieve optimal performance it's recommended the use of SSD volume as temporary directory in the cluster computing -nodes and launch the Nextflow execution setting [scratch directive](https://www.nextflow.io/docs/latest/process.html#scratch) -to `false` to avoid stage-out transfer time - -Add the following snippet in your `nextflow.config` file to apply those settings: - -```groovy -process.scratch = false -process.containerOptions = '-v /path/to/ssd:/tmp' -``` - -In the above snipped replace the path `/path/to/ssd` with the real path where the SSD volume has been mounted. - -:::tip - - Several AWS EC2 instance types include one or more NVMe SSD volumes. Note, however those volumes - need to be formatted to be used. See the [Amazon documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html) for details. 
-::: diff --git a/fusion_docs/index.mdx b/fusion_docs/index.mdx index 232d316c..e603cc7e 100644 --- a/fusion_docs/index.mdx +++ b/fusion_docs/index.mdx @@ -1,48 +1,46 @@ --- title: Fusion file system +description: "Overview of the Fusion v2 file system" +date: "23 Aug 2024" +tags: [fusion, storage, compute, file system, posix, client] --- -# Fusion file system - -Cloud object stores such as AWS S3 are scalable and cost-effective, but they don't present a POSIX interface. -This means containerized applications must copy data to and from S3 for every task — a slow and inefficient process. +Cloud object stores such as AWS S3 are scalable and cost-effective, but they don't present a [POSIX](https://en.wikipedia.org/wiki/POSIX) (Portable Operating System Interface) interface. This means containerized applications must copy data to and from cloud storage for every task — a slow and inefficient process. Fusion is a virtual, lightweight, distributed file system that bridges the gap between pipelines and cloud-native -storage. Fusion enables seamless filesystem I/O to cloud object stores via a standard POSIX interface resulting in +storage. Fusion enables seamless filesystem I/O to cloud object stores via a standard POSIX interface, resulting in simpler pipeline logic and faster, more efficient pipeline execution. :::note -Fusion requires a license for use beyond limited testing and validation within Seqera Platform or directly within Nextflow. [Contact Seqera](https://seqera.io/contact-us/) for more details. +Fusion requires a license for use in Seqera Platform compute environments or directly in Nextflow. Fusion can be trialed at no cost. [Contact Seqera](https://seqera.io/contact-us/) for more details. ::: -## Features - ### Transparent, automated installation -Traditionally, pipeline developers needed to bundle utilities in containers to copy data in and out of S3 storage. 
+Traditionally, pipeline developers needed to bundle utilities in containers to copy data in and out of cloud object storage. -With Fusion, there is nothing to install or manage.The Fusion thin client is automatically installed using Wave's container augmentation facilities, enabling containerized applications to read and write to S3 buckets as if they were local storage. +With Fusion, there is nothing to install or manage. The Fusion thin client is automatically installed using [Wave](https://docs.seqera.io/wave)'s container augmentation facilities, enabling containerized applications to read from and write to cloud storage buckets as if they were local storage. ### No shared file system required To share data among pipeline tasks, organizations often turn to shared file systems such as Amazon EFS, Amazon FSx for Lustre, or NFS. -Fusion avoids the need to deploy, manage, and mount shared file systems on every cloud instance by providing the same functionality over S3 – significantly reducing cost and complexity. +Fusion removes the need to deploy, manage, and mount shared file systems on every cloud instance by providing the same functionality directly over cloud object storage – significantly reducing cost and complexity. ### Maximize pipeline performance and efficiency -Copying data to and from S3 adds latency for every task, lengthening the time containers and cloud instances are deployed. This translates into longer runtimes and significantly higher costs for pipelines with thousands of tasks. +Copying data to and from cloud storage adds latency for every task, increasing the time containers and cloud instances are deployed. This translates into longer runtimes and significantly higher costs for pipelines with thousands of tasks. Fusion eliminates these bottlenecks and delays, reducing execution time and cloud spending and using compute instances more efficiently. 
### Dramatically reduce data movement -When pipelines run with S3 storage, tasks typically read data from a bucket, copy it to EBS storage for processing, and copy results back to S3. +When pipelines run with cloud storage, tasks typically read data from a bucket, copy it to compute instance storage for processing, and copy the results back to the cloud storage bucket. -The result is significant overhead for every task. Fusion enables direct file access to S3 storage, eliminating unnecessary I/O and dramatically reducing data movement and overall runtime. +The result is significant overhead for every task. Fusion enables direct file access to cloud object storage, eliminating unnecessary I/O and dramatically reducing data movement and overall runtime. ### Seamless access to cloud object storage -While some open-source projects provide a POSIX interface over S3 storage, they require developers to install and configure additional software and package it in containers or VMs. +While some open-source projects provide a POSIX interface over cloud storage, they require developers to install and configure additional software and add packages to containers or VMs. -Unlike third-party solutions, Fusion is optimized for Nextflow and handles these tasks automatically, delivering fast, seamless access to cloud object storage. +Unlike third-party solutions, Fusion is optimized for Nextflow and handles these tasks automatically. Fusion delivers fast, seamless access to cloud object storage. 
\ No newline at end of file diff --git a/fusion_docs/sidebar.json b/fusion_docs/sidebar.json index 06638c45..90ff5106 100644 --- a/fusion_docs/sidebar.json +++ b/fusion_docs/sidebar.json @@ -1,21 +1,18 @@ { "sidebar": [ "index", + "get-started", { "type": "category", "label": "User guide", "collapsed": false, - "link": { - "type": "doc", - "id": "guide" - }, "items": [ - "guide/local-s3", - "guide/aws-batch-s3", - "guide/aws-eks-s3", - "guide/gcp-batch-object", - "guide/gcp-gke-object", - "guide/ssd" + "guide/aws-batch", + "guide/gcp-batch", + "guide/azure-batch", + "guide/aws-eks", + "guide/gcp-gke", + "guide/local" ] }, "troubleshooting", diff --git a/fusion_docs/troubleshooting.md b/fusion_docs/troubleshooting.mdx similarity index 55% rename from fusion_docs/troubleshooting.md rename to fusion_docs/troubleshooting.mdx index 1657f99f..e20c712f 100644 --- a/fusion_docs/troubleshooting.md +++ b/fusion_docs/troubleshooting.mdx @@ -1,10 +1,10 @@ --- -title: Troubleshooting Fusion issues +title: Troubleshooting --- ## Too many open files -If you're experiencing an error about too many open files, increase the `ulimit` for the container. Append the following configuration to your Nextflow configuration: +If you're experiencing an error about too many open files, increase the `ulimit` for the container. 
Append the following to your Nextflow configuration: ```groovy process.containerOptions = '--ulimit nofile=1048576:1048576' diff --git a/lychee.toml b/lychee.toml index e642e9a1..70a9640e 100644 --- a/lychee.toml +++ b/lychee.toml @@ -7,5 +7,6 @@ exclude = [ '^https://github.com/organizations/', '^https://github.com/pipeline-repo', '^https://api.tower.nf/ephemeral/example.json', - '^https://api.cloud.seqera.io/workflow/launch' + '^https://api.cloud.seqera.io/workflow/launch', + '^https://git.seqera.io' ] diff --git a/multiqc_docs/multiqc_repo b/multiqc_docs/multiqc_repo index db2d3539..b55f70fc 160000 --- a/multiqc_docs/multiqc_repo +++ b/multiqc_docs/multiqc_repo @@ -1 +1 @@ -Subproject commit db2d3539220b4cbbe609170799142e21550206cd +Subproject commit b55f70fc5898c94e71bb7c51f442481860c550cd diff --git a/package-lock.json b/package-lock.json index 94bb9852..f29890b2 100644 --- a/package-lock.json +++ b/package-lock.json @@ -8,10 +8,10 @@ "name": "seqera-docs", "version": "0.0.0", "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/plugin-google-gtag": "3.5.2", - "@docusaurus/plugin-google-tag-manager": "3.5.2", - "@docusaurus/preset-classic": "3.5.2", + "@docusaurus/core": "^3.5.2", + "@docusaurus/plugin-google-gtag": "^3.5.2", + "@docusaurus/plugin-google-tag-manager": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", "@mdx-js/react": "^3.0.0", "clsx": "^1.2.1", "docusaurus-remark-plugin-tab-blocks": "^3.1.0", @@ -29,8 +29,8 @@ "remark-yaml-to-table": "github:seqeralabs/remark-yaml-to-table" }, "devDependencies": { - "@docusaurus/eslint-plugin": "^3.1.0", - "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/eslint-plugin": "^3.5.2", + "@docusaurus/module-type-aliases": "^3.5.2", "@tsconfig/docusaurus": "^1.0.5", "autoprefixer": "^10.4.16", "dotenv": "^16.3.1", diff --git a/package.json b/package.json index d054979d..94ac5684 100644 --- a/package.json +++ b/package.json @@ -19,10 +19,10 @@ "markdownlint": "markdownlint-cli2 
'platform_versioned_docs/**/*.mdx' 'wave_docs/**/*.mdx' 'fusion_docs/**/*.mdx' --config .markdownlint-cli2.cjs" }, "dependencies": { - "@docusaurus/core": "3.5.2", - "@docusaurus/plugin-google-gtag": "3.5.2", - "@docusaurus/plugin-google-tag-manager": "3.5.2", - "@docusaurus/preset-classic": "3.5.2", + "@docusaurus/core": "^3.5.2", + "@docusaurus/plugin-google-gtag": "^3.5.2", + "@docusaurus/plugin-google-tag-manager": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", "@mdx-js/react": "^3.0.0", "clsx": "^1.2.1", "docusaurus-remark-plugin-tab-blocks": "^3.1.0", @@ -40,8 +40,8 @@ "remark-yaml-to-table": "github:seqeralabs/remark-yaml-to-table" }, "devDependencies": { - "@docusaurus/eslint-plugin": "^3.1.0", - "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/eslint-plugin": "^3.5.2", + "@docusaurus/module-type-aliases": "^3.5.2", "@tsconfig/docusaurus": "^1.0.5", "autoprefixer": "^10.4.16", "dotenv": "^16.3.1", diff --git a/platform_versioned_docs/version-22.4/datasets/overview.mdx b/platform_versioned_docs/version-22.4/datasets/overview.mdx index e211ae72..133c6993 100644 --- a/platform_versioned_docs/version-22.4/datasets/overview.mdx +++ b/platform_versioned_docs/version-22.4/datasets/overview.mdx @@ -50,7 +50,7 @@ The size of the dataset file cannot exceed 10MB. Datasets in Tower can accommodate multiple versions of a dataset. To add a new version for an existing dataset, follow these steps: 1. Select **Edit** next to the dataset you wish to update. -2. In the Edit dialog, select **Add a new version**. +2. Select **Add a new version**. 3. Upload the newer version of the dataset and select **Update**. :::caution @@ -62,7 +62,7 @@ All subsequent versions of a dataset must be in the same format (.csv or .tsv) a To use a dataset with the saved pipelines in your workspace, follow these steps: 1. Open any pipeline that contains a pipeline-schema from the Launchpad. -2. Select the input field for the pipeline, removing any default value. +2. 
Select the input field for the pipeline, removing any default values. 3. Pick the dataset to use as input to your pipeline. :::note diff --git a/platform_versioned_docs/version-23.1/datasets/overview.mdx b/platform_versioned_docs/version-23.1/datasets/overview.mdx index 279f55fe..f9d17af6 100644 --- a/platform_versioned_docs/version-23.1/datasets/overview.mdx +++ b/platform_versioned_docs/version-23.1/datasets/overview.mdx @@ -63,7 +63,7 @@ The size of the dataset file cannot exceed 10MB. Tower can accommodate multiple versions of a dataset. To add a new version for an existing dataset, follow these steps: 1. Select **Edit** next to the dataset you wish to update. -2. In the Edit dialog, select **Add a new version**. +2. Select **Add a new version**. 3. Upload the newer version of the dataset and select **Update**. :::caution @@ -75,7 +75,7 @@ All subsequent versions of a dataset must be the same format (.csv or .tsv) as t To use a dataset with the saved pipelines in your workspace, follow these steps: 1. Open any pipeline that contains a pipeline-schema from the Launchpad. -2. Select the input field for the pipeline, removing any default value. +2. Select the input field for the pipeline, removing any default values. 3. Pick the dataset to use as input to your pipeline. :::note diff --git a/platform_versioned_docs/version-23.2/datasets/overview.mdx b/platform_versioned_docs/version-23.2/datasets/overview.mdx index 279f55fe..f9d17af6 100644 --- a/platform_versioned_docs/version-23.2/datasets/overview.mdx +++ b/platform_versioned_docs/version-23.2/datasets/overview.mdx @@ -63,7 +63,7 @@ The size of the dataset file cannot exceed 10MB. Tower can accommodate multiple versions of a dataset. To add a new version for an existing dataset, follow these steps: 1. Select **Edit** next to the dataset you wish to update. -2. In the Edit dialog, select **Add a new version**. +2. Select **Add a new version**. 3. Upload the newer version of the dataset and select **Update**. 
:::caution @@ -75,7 +75,7 @@ All subsequent versions of a dataset must be the same format (.csv or .tsv) as t To use a dataset with the saved pipelines in your workspace, follow these steps: 1. Open any pipeline that contains a pipeline-schema from the Launchpad. -2. Select the input field for the pipeline, removing any default value. +2. Select the input field for the pipeline, removing any default values. 3. Pick the dataset to use as input to your pipeline. :::note diff --git a/platform_versioned_docs/version-23.3/data/datasets.mdx b/platform_versioned_docs/version-23.3/data/datasets.mdx index be0278d7..6dbdfca3 100644 --- a/platform_versioned_docs/version-23.3/data/datasets.mdx +++ b/platform_versioned_docs/version-23.3/data/datasets.mdx @@ -62,7 +62,7 @@ The size of the dataset file must not exceed 10 MB. Seqera can accommodate multiple versions of a dataset. To add a new version for an existing dataset, follow these steps: 1. Select **Edit** next to the dataset you wish to update. -2. In the Edit dialog, select **Add a new version**. +2. Select **Add a new version**. 3. Upload the newer version of the dataset and select **Update**. :::caution @@ -74,7 +74,7 @@ All subsequent versions of a dataset must be the same format (CSV or TSV) as the To use a dataset with the saved pipelines in your workspace: 1. Open any pipeline that contains a pipeline schema from the [Launchpad](../launch/launchpad.mdx). -2. Select the input field for the pipeline, removing any default value. +2. Select the input field for the pipeline, removing any default values. 3. Pick the dataset to use as input to your pipeline. 
:::note diff --git a/platform_versioned_docs/version-23.3/enterprise/configuration/pipeline_optimization.mdx b/platform_versioned_docs/version-23.3/enterprise/configuration/pipeline_optimization.mdx index 578f165c..cc51e09d 100644 --- a/platform_versioned_docs/version-23.3/enterprise/configuration/pipeline_optimization.mdx +++ b/platform_versioned_docs/version-23.3/enterprise/configuration/pipeline_optimization.mdx @@ -17,15 +17,15 @@ Docker Compose makes use of a separate container to set up the pipeline resource To use the pipeline resource optimization service in a new Docker Compose installation of Seqera Enterprise, use the following steps: -1. To run the service from a custom URL, declare the URL with the `GROUNDSWELL_SERVER_URL` environment variable in the `tower.env` file. A non-zero value for this environment variable activates the optimization service automatically, so the `TOWER_ENABLE_GROUNDSWELL` variable does not need to be set when you declare a custom URL. +1. To run the service from a custom URL, declare the URL with the `GROUNDSWELL_SERVER_URL` environment variable in `tower.env`. A non-zero value for this environment variable activates the optimization service automatically, so `TOWER_ENABLE_GROUNDSWELL` does not need to be set when you declare a custom URL. -2. Set the `TOWER_ENABLE_GROUNDSWELL` environment variable in the `tower.env` file to `true`. This enables the service at the default service URL of `http://groundswell:8090`. +2. Set the `TOWER_ENABLE_GROUNDSWELL` environment variable in `tower.env` to `true`. This enables the service at the default service URL `http://groundswell:8090`. 3. In your [docker-compose.yml](../_templates/docker/docker-compose.yml) file, uncomment the `groundswell` section at the bottom. - To create a schema for the optimization service on the same local MySQL container, uncomment the `init.sql` script in the `volumes` section. -4. Download the [init.sql](../_templates/docker/init.sql) file. 
Store this file in the mount path of your `docker-compose.yml` file or update the `source: ./init.sql` line in your `docker-compose.yml` with the file path. +4. Download the [init.sql](../_templates/docker/init.sql) file. Store this file in the mount path of your `docker-compose.yml` file, else update the `source: ./init.sql` line in your `docker-compose.yml` with the file path. 5. When the pipeline resource optimization service is active, pipelines that can be optimized display a lightbulb icon in your Launchpad. Any pipeline with at least one successful run can be optimized. @@ -33,7 +33,7 @@ To use the pipeline resource optimization service in a new Docker Compose instal To use the pipeline resource optimization service in an existing Docker Compose installation of Seqera Enterprise, use the following steps: -1. To run the service from a custom URL, declare the URL with the `GROUNDSWELL_SERVER_URL` environment variable. A non-zero value for this environment variable activates the optimization service automatically, so the `TOWER_ENABLE_GROUNDSWELL` variable does not need to be set when you declare a custom URL. +1. To run the service from a custom URL, declare the URL with the `GROUNDSWELL_SERVER_URL` environment variable. A non-zero value for this environment variable activates the optimization service automatically, so `TOWER_ENABLE_GROUNDSWELL` does not need to be set when you declare a custom URL. 2. Set the `TOWER_ENABLE_GROUNDSWELL` environment variable to `true`. This enables the service at the default service URL `http://groundswell:8090`. @@ -53,11 +53,11 @@ To use the pipeline resource optimization service in an existing Docker Compose ```sql CREATE DATABASE IF NOT EXISTS `swell`; CREATE USER 'swell'@'%' IDENTIFIED BY 'swell'; - GRANT ALL PRIVILEGES ON `%`.* TO 'swell'@'%'; + GRANT ALL PRIVILEGES ON `%`.* TO 'swell'@'%'; FLUSH PRIVILEGES; ``` -6. Download the [groundswell.env](../_templates/docker/groundswell.env) file. 
Store this file in the mount path of your `docker-compose.yml` file. Update the `TOWER_DB_URL` and `SWELL_DB_URL` environment variable values: +6. Download the [groundswell.env](../_templates/docker/groundswell.env) file. Store this file in the mount path of your `docker-compose.yml` file. Update the `TOWER_DB_URL` and `SWELL_DB_URL` values: ```env # Uncomment for container DB instances @@ -75,28 +75,38 @@ To use the pipeline resource optimization service in an existing Docker Compose Kubernetes deployments use an [initContainer](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) that runs during pod initialization to set up the pipeline resource optimization service. To use the service in new or existing Kubernetes installations of Seqera Enterprise, do the following: -1. Download the [groundswell manifest](../_templates/k8s/groundswell.yml). +1. Download the [groundswell manifest](../_templates/k8s/groundswell.yml): -2. Define a set of credentials for the optimization database. This can be the same database used for Seqera, but in a different schema. + ```yaml file=../_templates/k8s/groundswell.yml + ``` -3. Log in to your database server and run the following commands: +1. To run the service from a custom URL, declare the URL with the `GROUNDSWELL_SERVER_URL` environment variable in the `configmap.yml` file that you downloaded for your [Platform installation][platform-k8s]. A non-zero value for this environment variable activates the optimization service automatically, so `TOWER_ENABLE_GROUNDSWELL` does not need to be set when you declare a custom URL. - ```sql - CREATE DATABASE IF NOT EXISTS `swell`; - CREATE USER 'swell'@'%' IDENTIFIED BY 'swell'; - GRANT ALL PRIVILEGES ON *.* TO 'swell'@'%'; - FLUSH PRIVILEGES; - ``` +1. Define a set of credentials for the optimization database. This can be the same database used for Seqera, but in a different schema. -4. 
If you use Amazon RDS or other managed database services, run the following commands in your database instance: +1. Log in to your database server and run the following commands: - ```sql - CREATE DATABASE IF NOT EXISTS `swell`; - CREATE USER 'swell'@'%' IDENTIFIED BY 'swell'; - GRANT ALL PRIVILEGES ON `%`.* TO 'swell'@'%'; - FLUSH PRIVILEGES; - ``` + - If you use Amazon RDS or other managed database services, run the following commands in your database instance: + + ```sql + CREATE DATABASE IF NOT EXISTS `swell`; + CREATE USER 'swell'@'%' IDENTIFIED BY 'swell'; + GRANT ALL PRIVILEGES ON `%`.* TO 'swell'@'%'; + FLUSH PRIVILEGES; + ``` + + - If you do not use a managed database service, run the following commands in your database instance: + + ```sql + CREATE DATABASE IF NOT EXISTS `swell`; + CREATE USER 'swell'@'%' IDENTIFIED BY 'swell'; + GRANT ALL PRIVILEGES ON *.* TO 'swell'@'%'; + FLUSH PRIVILEGES; + ``` + +The initContainers process will wait until both the Seqera and pipeline resource optimization service databases are ready before starting the migration in the Seqera database and finally starting the optimization container. + +When the pipeline resource optimization service is active, pipelines that can be optimized display a lightbulb icon in your Launchpad. Any pipeline with at least one successful run can be optimized. -5. The initContainers process will wait until both the Seqera and pipeline resource optimization service databases are ready before starting the migration in the Seqera database and finally starting the optimization container. -6. When the pipeline resource optimization service is active, pipelines that can be optimized display a lightbulb icon in your Launchpad. Any pipeline with at least one successful run can be optimized. 
+[platform-k8s]: ../kubernetes.mdx diff --git a/platform_versioned_docs/version-23.3/monitoring/dashboard.mdx b/platform_versioned_docs/version-23.3/monitoring/dashboard.mdx index b1a74efb..e20ccffa 100644 --- a/platform_versioned_docs/version-23.3/monitoring/dashboard.mdx +++ b/platform_versioned_docs/version-23.3/monitoring/dashboard.mdx @@ -5,25 +5,25 @@ date: "21 Apr 2023" tags: [dashboard, runs, monitoring] --- -The Seqera Platform **Dashboard**, accessed from the user menu, provides an overview of runs in your personal and organization workspaces. +The Seqera Platform **Dashboard**, accessed from the user menu, provides an overview of pipeline runs in your personal and organization workspaces. ### Filters and summary The **Dashboard** view defaults to all organizations and workspaces you can access. Select the **View** dropdown menu to filter by specific organizations and workspaces, or to view statistics for your personal workspace only. -You can filter by time, including a custom date range of up to 12 months. To filter the set of runs, select **Filter**. When a filter is applied, the button icon and color changes. +You can filter by time, including a custom date range of up to 12 months. To filter the set of pipeline runs, select **Filter**. When a filter is applied, the button icon and color changes. ### Export data Select **Export data** in the filter panel near the top of the page to export dashboard data, based on the filters you have applied, in a CSV file. -### Runs per organization +### Pipeline runs per organization -Run totals for your selected filters are displayed for each organization that you have access to. +Pipeline run totals for your selected filters are displayed for each organization that you have access to. -Depending on the filter selected, each card details a separate workspace or organization. Total runs for each organization are arranged by workspace and status. 
+Depending on the filter selected, each card details a separate workspace or organization. Total pipeline runs for each organization are arranged by workspace and status. For a detailed view of runs, you can do one of the following: -- Select a run integer value in the table to navigate to a run list filtered by the status and time range selected. -- Select a workspace name in the table to navigate to a run list filtered by the workspace selected. +- Select a pipeline run integer value in the table to navigate to a list filtered by the status and time range selected. +- Select a workspace name in the table to navigate to a list filtered by the workspace selected. diff --git a/platform_versioned_docs/version-23.4/api/_images/api_example_call.png b/platform_versioned_docs/version-23.4/api/_images/api_example_call.png deleted file mode 100644 index 0633e4ea..00000000 Binary files a/platform_versioned_docs/version-23.4/api/_images/api_example_call.png and /dev/null differ diff --git a/platform_versioned_docs/version-23.4/api/overview.mdx b/platform_versioned_docs/version-23.4/api/overview.mdx index a603eaab..7df326d6 100644 --- a/platform_versioned_docs/version-23.4/api/overview.mdx +++ b/platform_versioned_docs/version-23.4/api/overview.mdx @@ -5,42 +5,59 @@ date: "15 Mar 2024" tags: [api] --- -The Seqera Platform public API provides endpoints to manage Nextflow workflows programmatically, allowing organizations to incorporate Seqera seamlessly into their existing processes. +import MDXComponents from "@theme-original/MDXComponents"; +import Button from "@site/src/components/Button"; -:::note -As of version 23.4, the Seqera API is live on `https://api.cloud.seqera.io`. The legacy API `https://api.tower.nf` remains fully operational, so existing API integrations will continue to perform as expected. Deprecation of the legacy API will be communicated well in advance to avoid any breaking changes to your integrations. 
+The Seqera Platform services API is a programmatic interface for all operations available in the Platform web UI. This allows organizations to integrate Platform seamlessly into existing processes. + +### API reference + +The Platform API reference is an interactive list of all API endpoints and includes request and response payload examples to test requests in the browser. + +:::info +The API requires authentication with a Platform access token to perform requests. + + - ); -}; - -export default Button; + ); +} diff --git a/src/components/Button/styles.module.css b/src/components/Button/styles.module.css index 29f2e49f..349c7949 100644 --- a/src/components/Button/styles.module.css +++ b/src/components/Button/styles.module.css @@ -7,7 +7,7 @@ border-radius: 99px; transition: all 0.2s; line-height: 1; - white-space: nowrap; + white-space: normal; } .button:hover { text-decoration: none; diff --git a/src/components/Card/styles.module.css b/src/components/Card/styles.module.css index 337076c5..3d0f3465 100644 --- a/src/components/Card/styles.module.css +++ b/src/components/Card/styles.module.css @@ -26,7 +26,7 @@ box-shadow: 0 0 15px rgba(226, 134, 80); } .card.platform:hover { - box-shadow: 0 0 15px rgba(255, 255, 255, 0.5); + box-shadow: 0 0 15px #110C1C; } .card.platform svg { height: 25px; @@ -41,4 +41,7 @@ html[data-theme="dark"] { fill: var(--color-brand-200) !important; } } + & .card.platform:hover { + box-shadow: 0 0 15px rgba(255, 255, 255, 0.5); + } } diff --git a/src/css/main.css b/src/css/main.css index b3b0b9c4..58b102e7 100644 --- a/src/css/main.css +++ b/src/css/main.css @@ -85,3 +85,69 @@ svg.excalidraw { svg.excalidraw path[fill="#fff"] { @apply fill-transparent; } + +/* Hide the top navbar on desktop */ +@media (min-width: 996px) { + .navbar { + height: 0; + width: 0; + display: none !important; + } +} + + +/* Custom styling for the Algolia search box */ +[data-theme='light'] .DocSearch { + --docsearch-searchbox-background: var(--ifm-color-white); 
+} +[data-theme='dark'] .DocSearch { + --docsearch-searchbox-background: var(--ifm-color-black); +} +.DocSearch-Button { + width: 100%; +} +[data-theme='light'] .DocSearch-Button { + box-shadow: inset 0 0 0 1px #D0CFD4; +} +[data-theme='dark'] .DocSearch-Button { + box-shadow: inset 0 0 0 1px var(--color-brand-800); +} + +/* Custom styling for sidebar nav links */ +.theme-doc-sidebar-container > div { + top: 74px; +} +nav.menu .menu__list .menu__link { + border-radius: 5px; + font-weight: 400; + font-size: 0.9rem; +} +nav.menu .menu__list .menu__link--active:not(.menu__link--sublist) { + font-weight: 700; +} +nav.menu .menu__list .menu__link--sublist-caret:after { + background: url('data:image/svg+xml;utf8,') + 50% / 24px 24px; + transform: rotate(0deg); +} +nav.menu .menu__list-item--collapsed .menu__link--sublist:after, .menu__list-item--collapsed .menu__caret:before { + transform: rotate(-90deg); +} +[data-theme='light'] nav.menu .menu__list { + & .menu__link { + color: #000; + } + & .menu__link--active:not(.menu__link--sublist) { + background: #E8EBFC; + color: #4256E7; + } +} +[data-theme='dark'] nav.menu .menu__list { + & .menu__link { + color: rgba(255, 255, 255, 0.9); + } + & .menu__link--active:not(.menu__link--sublist) { + background: rgba(0, 0, 0, 0.2); + color: #98a3ff; + } +} diff --git a/src/css/misc.css b/src/css/misc.css index 6c4a7eef..c46fba54 100644 --- a/src/css/misc.css +++ b/src/css/misc.css @@ -13,80 +13,6 @@ html:not(.plugin-id-platform) .navbar .dropdown { font-size: 0.9em; } -/** - * Product theming - */ - -/* Platform */ -html.plugin-id-platform { - & nav.navbar .navbar__items { - & a[href="/platform"]:after { - background: var(--color-product-600); - } - } - & nav.menu .menu__list { - & .menu__link--active { - background: var(--color-product-600); - } - } -} - -/* Nextflow */ -html.plugin-id-nextflow { - & nav.navbar .navbar__items { - & a[href="/nextflow"]:after { - background: var(--color-nextflow); - } - } - & nav.menu .menu__list { - 
& .menu__link--active { - background: var(--color-nextflow); - } - } -} - -/* MultiQC */ -html.plugin-id-multiqc { - & nav.navbar .navbar__items { - & a[href="/multiqc"]:after { - background: var(--color-multiqc); - } - } - & nav.menu .menu__list { - & .menu__link--active { - background: var(--color-multiqc); - } - } -} - -/* Wave */ -html.plugin-id-wave { - & nav.navbar .navbar__items { - & a[href="/wave"]:after { - background: var(--color-wave); - } - } - & nav.menu .menu__list { - & .menu__link--active { - background: var(--color-wave); - } - } -} - -/* Fusion */ -html.plugin-id-fusion { - & nav.navbar .navbar__items { - & a[href="/fusion"]:after { - background: var(--color-fusion); - } - } - & nav.menu .menu__list { - & .menu__link--active { - background: var(--color-fusion); - } - } -} - ol li > p:first-child { margin-top: 0 !important; } diff --git a/src/css/theme-colors.css b/src/css/theme-colors.css index 8aecb416..66da09e7 100644 --- a/src/css/theme-colors.css +++ b/src/css/theme-colors.css @@ -151,18 +151,6 @@ html[data-theme="dark"] .navbar__items--right button:not(.DocSearch):hover { background-color: var(--color-wave); } -/* Sidebar */ -html[data-theme="dark"] - .menu__link.menu__link--sublist.menu__link--sublist-caret.menu__link--active { - color: var(--color-wave); -} -html[data-theme="dark"] .menu__link--sublist-caret:after { - background-image: url("data:image/svg+xml,%3Csvg width='25' height='24' viewBox='0 0 25 24' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12.4943 10.4038L7.7174 15.1615L7.16162 14.6057L12.4943 9.29225L17.8078 14.6057L17.252 15.1615L12.4943 10.4038Z' fill='%23160F26'/%3E%3C/svg%3E%0A"); -} -html[data-theme="dark"] .menu__list-item-collapsible > a.menu__link--active { - color: var(--color-wave) !important; -} - /* Footer */ html[data-theme="dark"] .footer--dark { border-top: 1px solid var(--ifm-toc-border-color); diff --git a/src/modules/Homepage/index.tsx b/src/modules/Homepage/index.tsx index 85b1b939..cbf47eae 
100644 --- a/src/modules/Homepage/index.tsx +++ b/src/modules/Homepage/index.tsx @@ -2,6 +2,9 @@ import React from "react"; import Layout from "@theme/Layout"; import clsx from "clsx"; +import useMediaQuery from "../../theme/Navbar/Layout/SeqeraHeader/hooks/useMediaQuery"; +import Sidebar from "../../theme/DocSidebar/Desktop"; + import styles from "./styles.module.css"; import Fusion from "./images/fusion.inline.svg"; @@ -16,80 +19,97 @@ import Grid from "../../components/Grid"; import Resources from "./Resources"; export default function Home(): JSX.Element { + const isMobile = useMediaQuery("(max-width: 996px)"); + const disabled = true; return ( -
-
-
-
-
-

Getting started with Seqera

-

- Welcome to your central resource for analysis development with - Seqera. -

-

- Here you will learn how to compose data analysis pipelines, - optimize resource utilization, safeguard reproducibility, and - ensure data integrity. -

-

- You can also learn how to establish your own centralized hub - for managing & executing pipelines, and how to leverage the - cloud to scale for your data analysis requirements. -

-

Read the docs, take part, and join the community today!

-
-
+
+ {!isMobile && !disabled && ( +
+
-
- -
- - Fully integrated and scalable tools for modern bioinformatics - -
-
- - Open-source orchestrator for deploying workflows - + )} +
+
+
+
+
+
+

Getting started with Seqera

+

+ Welcome to your central resource for analysis development + with Seqera. +

+

+ Here you will learn how to compose data analysis + pipelines, optimize resource utilization, safeguard + reproducibility, and ensure data integrity. +

+

+ You can also learn how to establish your own centralized + hub for managing & executing pipelines, and how to + leverage the cloud to scale for your data analysis + requirements. +

+

+ Read the docs, take part, and join the community today! +

+
+
-
- - Open-source tool to aggregate bioinformatics analysis results - +
+ +
+ + Fully integrated and scalable tools for modern + bioinformatics + +
+
+ + Open-source orchestrator for deploying workflows + +
+
+ + Open-source tool to aggregate bioinformatics analysis + results + +
+
+ + Next-generation container provisioning for data analysis + +
+
+ + Distributed, lightweight file system for cloud data + pipelines + +
+
-
- - Next-generation container provisioning for data analysis - +
+
-
- - Distributed, lightweight file system for cloud data pipelines - -
- -
-
- +
-
+
); diff --git a/src/modules/Homepage/styles.module.css b/src/modules/Homepage/styles.module.css index 6a05f002..c783ab81 100644 --- a/src/modules/Homepage/styles.module.css +++ b/src/modules/Homepage/styles.module.css @@ -12,3 +12,11 @@ html[data-theme="light"] { background-image: url("./images/home-1a-light.jpg"); } } + +.sidebarProductLogos a { + @apply flex justify-between items-center my-4 border-solid border border-gray-500 rounded px-4 py-3 bg-white hover:bg-[#e8ebfc] w-full leading-none; +} +.sidebarContainer { + flex: none; + background: rgba(0, 0, 0, 0.02); +} diff --git a/src/theme/DocBreadcrumbs/Items/Home/index.js b/src/theme/DocBreadcrumbs/Items/Home/index.js new file mode 100644 index 00000000..c6703f83 --- /dev/null +++ b/src/theme/DocBreadcrumbs/Items/Home/index.js @@ -0,0 +1,30 @@ +import React from 'react'; +import { useLocation } from '@docusaurus/router'; +import Home from '@theme-original/DocBreadcrumbs/Items/Home'; + +export default function HomeWrapper(props) { + + const location = useLocation(); + + const getCurrentProduct = () => { + if (location.pathname.startsWith("/fusion")) return "Fusion"; + if (location.pathname.startsWith("/nextflow")) return "Nextflow"; + if (location.pathname.startsWith("/multiqc")) return "MultiQC"; + if (location.pathname.startsWith("/platform")) return "Platform"; + if (location.pathname.startsWith("/wave")) return "Wave"; + return null; + }; + + const currentProduct = getCurrentProduct(); + + return ( + <> + +
  • + + {currentProduct} + +
  • + + ); +} diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/LinkOut.inline.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/LinkOut.inline.svg new file mode 100644 index 00000000..8daf08de --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/LinkOut.inline.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/ProductLogo/index.jsx b/src/theme/DocSidebar/Desktop/ProductSwitcher/ProductLogo/index.jsx new file mode 100644 index 00000000..db2049d6 --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/ProductLogo/index.jsx @@ -0,0 +1,71 @@ +import React from "react"; +import clsx from "clsx"; +import { useLocation } from "@docusaurus/router"; +import styles from "./styles.module.css"; + +import Fusion from "../images/fusion.svg"; +import Nextflow from "../images/nextflow.svg"; +import MultiQC from "../images/multiqc.svg"; +import Platform from "../images/platform.svg"; +import Wave from "../images/wave.svg"; + +import FusionDark from "../images/fusion.dark.svg"; +import NextflowDark from "../images/nextflow.dark.svg"; +import MultiQCDark from "../images/multiqc.dark.svg"; +import PlatformDark from "../images/platform.dark.svg"; +import WaveDark from "../images/wave.dark.svg"; + +const ProductSwitcher = ({ product }) => { + const location = useLocation(); + if (product === undefined) { + if (location.pathname.startsWith("/fusion")) { + product = "Fusion"; + } + if (location.pathname.startsWith("/nextflow")) { + product = "Nextflow"; + } + if (location.pathname.startsWith("/multiqc")) { + product = "MultiQC"; + } + if (location.pathname.startsWith("/platform")) { + product = "Platform"; + } + if (location.pathname.startsWith("/wave")) { + product = "Wave"; + } + } + const logos = { + Platform: { + light: Platform, + dark: PlatformDark, + }, + Nextflow: { + light: Nextflow, + dark: NextflowDark, + }, + MultiQC: { + light: MultiQC, + dark: MultiQCDark, + }, + Wave: { + light: Wave, + dark: 
WaveDark, + }, + Fusion: { + light: Fusion, + dark: FusionDark, + }, + }; + const Logo = logos[product].light; + const LogoDark = logos[product].dark; + return ( + + + + + ); +}; + +export default ProductSwitcher; diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/ProductLogo/styles.module.css b/src/theme/DocSidebar/Desktop/ProductSwitcher/ProductLogo/styles.module.css new file mode 100644 index 00000000..0d98259b --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/ProductLogo/styles.module.css @@ -0,0 +1,15 @@ +[data-theme="dark"] .themeLight, +[data-theme="light"] .themeDark { + display: none; +} + +.item { + max-width: 178px; + display: flex; + align-items: center; + flex: none; + & svg { + width: 100%; + height: 15px; + } +} diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/VersionSwitcher.tsx b/src/theme/DocSidebar/Desktop/ProductSwitcher/VersionSwitcher.tsx new file mode 100644 index 00000000..eb2493e5 --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/VersionSwitcher.tsx @@ -0,0 +1,88 @@ +import React, { useState, useRef, useEffect } from "react"; +import { + useVersions, + useDocsVersion, + useDocsPreferredVersion, +} from "@docusaurus/plugin-content-docs/client"; +import { useLocation } from "@docusaurus/router"; +import Link from "@docusaurus/Link"; + +import styles from "./styles.module.css"; +import clsx from "clsx"; + +const VersionSwitcher = ({ isOpen, setIsOpen }) => { + const dropdownRef = useRef(null); + const location = useLocation(); + const { preferredVersion, savePreferredVersionName } = + useDocsPreferredVersion("platform"); + const versions = useVersions("platform"); + const currentVersion = useDocsVersion(); + + useEffect(() => { + const handleClickOutside = (event) => { + if (dropdownRef.current?.contains(event.target)) return; + setTimeout(() => setIsOpen(false), 100); + }; + + document.addEventListener("mousedown", handleClickOutside); + return () => { + document.removeEventListener("mousedown", 
handleClickOutside); + }; + }, []); + + const toggleDropdown = () => setIsOpen((prev) => !prev); + + function handleSelectVersion(version) { + savePreferredVersionName(version); + } + + if (typeof window === "undefined") return null; + if (!versions) return null; + if (!location.pathname.startsWith("/platform")) return null; + + const items = versions.filter( + (version) => version.label !== currentVersion.label, + ); + + // Extract the part of the URL after the current version + const currentVersionPrefix = `/platform/${currentVersion.label}`; + const urlSuffix = location.pathname.replace(currentVersionPrefix, ""); + + return ( + <> + + {isOpen && ( +
    + {items?.map((version) => ( + handleSelectVersion(version.name)} + > + v{version.label}{" "} + {version.label === versions[0].label ? " (current)" : ""} + + ))} +
    + )} + + ); +}; + +export default VersionSwitcher; diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/caret.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/caret.svg new file mode 100644 index 00000000..a7e8084e --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/caret.svg @@ -0,0 +1,3 @@ + + + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/fusion.dark.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/fusion.dark.svg new file mode 100644 index 00000000..abe1dd8e --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/fusion.dark.svg @@ -0,0 +1 @@ + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/fusion.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/fusion.svg new file mode 100644 index 00000000..0ef3f928 --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/fusion.svg @@ -0,0 +1 @@ + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/multiqc.dark.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/multiqc.dark.svg new file mode 100644 index 00000000..5bfc9a9c --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/multiqc.dark.svg @@ -0,0 +1 @@ + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/multiqc.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/multiqc.svg new file mode 100644 index 00000000..cdb1c6f7 --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/multiqc.svg @@ -0,0 +1 @@ + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/nextflow.dark.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/nextflow.dark.svg new file mode 100644 index 00000000..484b7ced --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/nextflow.dark.svg @@ -0,0 +1 @@ + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/nextflow.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/nextflow.svg new file mode 100644 
index 00000000..29df04a6 --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/nextflow.svg @@ -0,0 +1 @@ + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/platform.dark.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/platform.dark.svg new file mode 100644 index 00000000..ff438334 --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/platform.dark.svg @@ -0,0 +1 @@ + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/platform.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/platform.svg new file mode 100644 index 00000000..f7288381 --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/platform.svg @@ -0,0 +1 @@ + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/wave.dark.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/wave.dark.svg new file mode 100644 index 00000000..e3036e51 --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/wave.dark.svg @@ -0,0 +1 @@ + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/images/wave.svg b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/wave.svg new file mode 100644 index 00000000..9feb620a --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/images/wave.svg @@ -0,0 +1 @@ + diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/index.jsx b/src/theme/DocSidebar/Desktop/ProductSwitcher/index.jsx new file mode 100644 index 00000000..2d27c06d --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/index.jsx @@ -0,0 +1,108 @@ +import React, { useState, useRef, useEffect } from "react"; +import clsx from "clsx"; +import { useLocation } from "@docusaurus/router"; +import Link from "@docusaurus/Link"; + +import ProductLogo from "./ProductLogo"; +import LinkOut from "./LinkOut.inline.svg"; + +import styles from "./styles.module.css"; +import VersionSwitcher from "./VersionSwitcher"; + +const products = [ + { + name: "Platform", + url: "/platform/", + }, + { 
+ name: "Nextflow", + url: "https://www.nextflow.io/docs/latest/", + }, + { name: "MultiQC", url: "/multiqc/" }, + { name: "Wave", url: "/wave/" }, + { name: "Fusion", url: "/fusion/" }, +]; + +const ProductSwitcher = ({ isDropdown }) => { + const [isOpen, setIsOpen] = useState(false); + const [isSecondaryOpen, setIsSecondaryOpen] = useState(false); + const dropdownRef = useRef(null); + const location = useLocation(); + + useEffect(() => { + const handleClickOutside = (event) => { + const ref = dropdownRef.current; + if (!ref?.contains(event.target)) { + setTimeout(() => setIsOpen(false), 100); + } + }; + + document.addEventListener("mousedown", handleClickOutside); + return () => { + document.removeEventListener("mousedown", handleClickOutside); + }; + }, []); + + const toggleDropdown = () => setIsOpen(!isOpen); + + const getCurrentProduct = () => { + if (location.pathname.startsWith("/fusion")) return "Fusion"; + if (location.pathname.startsWith("/nextflow")) return "Nextflow"; + if (location.pathname.startsWith("/multiqc")) return "MultiQC"; + if (location.pathname.startsWith("/platform")) return "Platform"; + if (location.pathname.startsWith("/wave")) return "Wave"; + return null; + }; + + const currentProduct = getCurrentProduct(); + + let items = products.filter((product) => product.name !== currentProduct); + if (!isDropdown) items = products; + + return ( +
    + {isDropdown && ( +
    + + +
    + )} +
    + {items.map((product) => ( + + + {product.name === "Nextflow" && } + + ))} +
    +
    + ); +}; + +export default ProductSwitcher; diff --git a/src/theme/DocSidebar/Desktop/ProductSwitcher/styles.module.css b/src/theme/DocSidebar/Desktop/ProductSwitcher/styles.module.css new file mode 100644 index 00000000..761e8cdb --- /dev/null +++ b/src/theme/DocSidebar/Desktop/ProductSwitcher/styles.module.css @@ -0,0 +1,144 @@ +.switcher { + position: relative; + margin: 8px 0 0 0; + .button { + border-bottom-color: var(--color-brand-200) !important; + z-index: 10; + &:after { + background: url('data:image/svg+xml;utf8,') + 50% / 24px 24px; + content: ""; + margin-left: auto; + min-width: 1.25rem; + height: 100%; + transform: rotate(180deg); + width: 1.25rem; + transition: transform var(--ifm-transition-fast) linear; + transform: rotate(-90deg); + } + } +} + +.item { + height: 34px; + display: flex; + align-items: center; + justify-content: space-between; + padding: 0 12px; + cursor: pointer; + position: relative; + width: 100%; + border: none; + border-bottom: 1px solid var(--color-brand-300); + background: transparent; + font-size: 13px; + font-family: Inter, sans-serif; + & .caret { + fill: var(--color-brand-800) !important; + width: 18px; + height: 18px; + fill: #000; + transform: rotate(-90deg); + transition: transform 200ms ease-in-out; + } + &:hover { + background: #e8ebfc; + text-decoration: none; + color: var(--color-brand); + } + &:last-child { + border-bottom: 1px solid var(--color-brand-300); + } +} + +.items { + background-color: white; + border-radius: 9px; + border: 1px solid var(--color-brand-300); + &.active { + border-bottom-left-radius: 0px; + border-bottom-right-radius: 0px; + & .button { + border-bottom-left-radius: 0px; + border-bottom-right-radius: 0px; + &:after { + transform: rotate(0); + } + } + } +} +.item { + text-decoration: none; + width: 100%; + transition: background 100ms; + @apply text-gray-1000; + border-bottom: 1px solid var(--color-brand-200); + &:hover { + background: #e8ebfc; + } + &:last-child { + border-bottom: none; 
+ border-bottom-left-radius: 8px; + border-bottom-right-radius: 8px; + } + &:first-child { + border-top-left-radius: 8px; + border-top-right-radius: 8px; + } +} +.dropdown { + visibility: hidden; + transition: + opacity 100ms ease-in-out, + visibility 100ms ease-in-out; + position: absolute; + top: 34px; + left: 0; + right: 0; + opacity: 0; + border-top-left-radius: 0px; + border-top-right-radius: 0px; + z-index: 1000; + border-top-color: var(--color-brand-200); + &.secondary { + top: calc(100% - 2px); + } + & .item:first-child { + border-top-left-radius: 0px; + border-top-right-radius: 0px; + } + &.open { + opacity: 1; + visibility: visible; + } +} + +/* Dark theme */ + +[data-theme="dark"] { + & .switcher { + & .button { + border-bottom-color: var(--color-brand-800) !important; + & .caret { + fill: #fff !important; + } + } + & .items { + border-color: var(--color-brand-800); + & .item { + background-color: var(--color-brand-1400); + border-color: var(--color-brand-800); + color: #fff; + &:hover { + background-color: var(--color-brand-1300); + } + } + } + } + & .dropdown.open.secondary:after { + background: var(--color-brand-1400); + border-left: 1px solid var(--color-brand-800); + border-right: 1px solid var(--color-brand-800); + border-bottom: 1px solid var(--color-brand-800); + } +} diff --git a/src/theme/DocSidebar/Desktop/index.tsx b/src/theme/DocSidebar/Desktop/index.tsx new file mode 100644 index 00000000..87410d36 --- /dev/null +++ b/src/theme/DocSidebar/Desktop/index.tsx @@ -0,0 +1,44 @@ +import React from "react"; +import TOC from "@theme-original/DocSidebar/Desktop"; +import SearchBar from "@theme-original/SearchBar"; +import NavbarColorModeToggle from "@theme/Navbar/ColorModeToggle"; +import ProductSwitcher from "./ProductSwitcher"; + +import styles from "./styles.module.css"; + +type SidebarItem = { + docId?: string; + href?: string; + label: string; + type: "category" | "link"; + items?: SidebarItem[]; + collapsed?: boolean; + collapsible?: boolean; 
+ unlisted?: boolean; +}; + +type Props = { + sidebar?: SidebarItem[]; + isHidden?: boolean; + onCollapse?: () => void; + path: string; +}; + +const DesktopWrapper: React.FC = (props) => { + return ( +
    +
    + + +
    +
    +
    + {!!props.sidebar && } +
    + +
    +
    + ); +}; + +export default DesktopWrapper; diff --git a/src/theme/DocSidebar/Desktop/styles.module.css b/src/theme/DocSidebar/Desktop/styles.module.css new file mode 100644 index 00000000..89327094 --- /dev/null +++ b/src/theme/DocSidebar/Desktop/styles.module.css @@ -0,0 +1,57 @@ +:global(.theme-doc-sidebar-container) { + border-right: 0 !important; + margin-top: unset !important; +} +[data-theme="light"] :global(.theme-doc-sidebar-container) { + background: rgb(249, 249, 249); +} +[data-theme="dark"] :global(.theme-doc-sidebar-container) { + background: rgb(27, 24, 35); +} + +.sidebar { + padding-top: 30px; + height: 100dvh; + width: 300px; + position: relative; + & nav { + padding: 0; + } + /* Remove strange padding around docs sidebar nav */ + & div[class^="sidebar"] { + padding-top: 0 !important; + width: auto !important; + } +} +.sidebarHeader { + position: relative; + z-index: 10; + padding: 0 30px; +} +.sidebarHeaderFade { + height: 15px; + background: linear-gradient(180deg, rgba(249,249,249,1) 0%, rgba(249,249,249,0) 100%); +} +[data-theme="dark"] .sidebarHeaderFade { + background: linear-gradient(180deg, rgba(27, 24, 35,0.5) 0%, rgba(27, 24, 35,0) 100%) ; +} +.sidebarNav { + margin-top: -15px; + padding: 30px; + height: calc(100% - 180px); + overflow-y: auto; + & > div { + margin-top: 15px; + } + & div { + height: auto; + } +} + +.colorModeToggle { + margin-bottom: 16px; +} + +[data-theme="dark"] hr { + opacity: 0.5; +} diff --git a/src/theme/MDXComponents.js b/src/theme/MDXComponents.js index 0f8821fe..36b89b17 100644 --- a/src/theme/MDXComponents.js +++ b/src/theme/MDXComponents.js @@ -2,9 +2,11 @@ import React from "react"; // Import the original mapper import MDXComponents from "@theme-original/MDXComponents"; import CodeBlock from "@theme-original/CodeBlock"; +import Button from "@site/src/components/Button"; export default { // Re-use the default mapping ...MDXComponents, CodeBlock, + Button, }; diff --git a/src/theme/Navbar/Layout/index.jsx 
b/src/theme/Navbar/Layout/index.jsx index a1455931..9f70683a 100644 --- a/src/theme/Navbar/Layout/index.jsx +++ b/src/theme/Navbar/Layout/index.jsx @@ -25,6 +25,9 @@ function NavbarBackdrop(props) { ); } +{ + /* Desktop nav */ +} function Container({ children, isMobile }) { if (isMobile) return children; return ( @@ -32,13 +35,22 @@ function Container({ children, isMobile }) {
    -
    -
    {children}
    -
    + {/* Navbar normally goes here. + Docusaurus expects a classname, so we have a dummy element + with zero dimensions and display:none in main.css. + See https://github.com/facebook/docusaurus/issues/7505 + + Note - navbar is used for mobile styles, so CSS + only hides it on bigger screens. + */} +
    ); } +{ + /* Mobile nav */ +} export default function NavbarLayout({ children }) { const { navbar: { hideOnScroll, style }, diff --git a/src/theme/Navbar/Layout/styles.custom.module.css b/src/theme/Navbar/Layout/styles.custom.module.css index ef1ec519..1b18bf1b 100644 --- a/src/theme/Navbar/Layout/styles.custom.module.css +++ b/src/theme/Navbar/Layout/styles.custom.module.css @@ -1,9 +1,6 @@ html { @media (min-width: 997px) { scroll-padding-top: 110px; - & :global(.theme-doc-toc-desktop) { - top: calc(var(--ifm-navbar-height) + 6.5rem) !important; - } } } .siteHeader { @@ -23,7 +20,8 @@ html { } .seqeraHeader { position: relative; - height: 72px; + height: 73px; + border-bottom: 1px solid var(--ifm-toc-border-color); & > header { z-index: 301; position: absolute; @@ -36,7 +34,6 @@ html { .siteNav { background: var(--ifm-background-surface-color); border-bottom: 1px solid var(--ifm-toc-border-color); - border-top: 1px solid var(--ifm-toc-border-color); & :global(.navbar__brand) { display: none; } @@ -79,16 +76,6 @@ html { } } } -/* Fix for new header height (safe?) */ -aside { - margin-top: -165px !important; - & > div > div { - padding-top: 165px !important; - & nav { - margin-right: 0.5rem; - } - } -} html[data-theme="light"] { & .siteHeader { diff --git a/wave_docs/wave_repo b/wave_docs/wave_repo index 02fde8c7..2f5a1fe6 160000 --- a/wave_docs/wave_repo +++ b/wave_docs/wave_repo @@ -1 +1 @@ -Subproject commit 02fde8c781fdd40b640837f1131f7e545b66c387 +Subproject commit 2f5a1fe6811b49378c62531c18898c49162d992b