diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 9ec5ea16e..ea84709bc 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ FROM ghcr.io/nextflow-io/training:latest ENV NXF_VER=24.10.0 -ENV NXF_EDGE=0 \ No newline at end of file +ENV NXF_EDGE=0 diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..4399db27c --- /dev/null +++ b/.editorconfig @@ -0,0 +1,16 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +indent_size = 4 +indent_style = space + +[*.{md,yml,yaml,html,css,scss,js}] +indent_size = 2 + +# ignore python and markdown +[*.{py,md}] +indent_style = unset diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 6e41a7fec..bc91d3ae7 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -5,40 +5,40 @@ name: Build GitPod Docker image on: - pull_request: - push: - branches: [master] - release: - types: [published] + pull_request: + push: + branches: [master] + release: + types: [published] jobs: - push_to_registry: - if: github.repository == 'nextflow-io/training' - name: Build + Push Gitpod Docker image - runs-on: ubuntu-latest - steps: - - name: Check out the repo - uses: actions/checkout@v4 + push_to_registry: + if: github.repository == 'nextflow-io/training' + name: Build + Push Gitpod Docker image + runs-on: ubuntu-latest + steps: + - name: Check out the repo + uses: actions/checkout@v4 - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} - - name: Build / push latest image - uses: docker/build-push-action@v5 - if: github.event_name != 'release' - with: - file: .github/gitpod.Dockerfile - push: ${{ github.event_name == 'push' }} - tags: ghcr.io/nextflow-io/training:latest + - name: Build / push latest image + uses: docker/build-push-action@v5 + if: github.event_name != 'release' + with: + file: .github/gitpod.Dockerfile + push: ${{ github.event_name == 'push' }} + tags: ghcr.io/nextflow-io/training:latest - - name: Push release image - uses: docker/build-push-action@v5 - if: github.event_name == 'release' - with: - file: .github/gitpod.Dockerfile - push: true - tags: ghcr.io/nextflow-io/training:${{ github.event.release.tag_name }} + - name: Push release image + uses: docker/build-push-action@v5 + if: github.event_name == 'release' + with: + file: .github/gitpod.Dockerfile + push: true + tags: ghcr.io/nextflow-io/training:${{ github.event.release.tag_name }} diff --git a/.github/workflows/mkdocs-docker.yml b/.github/workflows/mkdocs-docker.yml index 5aa285947..21877a34f 100644 --- a/.github/workflows/mkdocs-docker.yml +++ b/.github/workflows/mkdocs-docker.yml @@ -3,29 +3,29 @@ name: Build mkdocs docker image on: - pull_request: - push: - branches: [master] + pull_request: + push: + branches: [master] jobs: - mkdocs_docker: - if: github.repository == 'nextflow-io/training' - name: Build + push mkdocs docker image - runs-on: ubuntu-latest - steps: - - name: Check out the repo - uses: actions/checkout@v4 + mkdocs_docker: + if: github.repository == 'nextflow-io/training' + name: Build + push mkdocs docker image + runs-on: ubuntu-latest + steps: + - name: Check out the repo + 
uses: actions/checkout@v4 - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} - - name: Build / push latest image - uses: docker/build-push-action@v5 - with: - file: .github/mkdocs.Dockerfile - push: ${{ github.event_name == 'push' }} - tags: ghcr.io/nextflow-io/training-mkdocs:latest + - name: Build / push latest image + uses: docker/build-push-action@v5 + with: + file: .github/mkdocs.Dockerfile + push: ${{ github.event_name == 'push' }} + tags: ghcr.io/nextflow-io/training-mkdocs:latest diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml new file mode 100644 index 000000000..02801e8e8 --- /dev/null +++ b/.github/workflows/precommit.yml @@ -0,0 +1,15 @@ +name: Pre-commit checks + +on: + push: + pull_request: + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: "3.x" + - uses: pre-commit/action@v3.0.1 diff --git a/.github/workflows/prettier.yml b/.github/workflows/prettier.yml deleted file mode 100644 index 7f3dccc2e..000000000 --- a/.github/workflows/prettier.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: Check formatting with Prettier - -on: - push: - pull_request: - -jobs: - prettier: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-node@v3 - with: - node-version: 16 - cache: "npm" - cache-dependency-path: .prettierrc - - name: Run prettier - run: npx prettier --check docs/ diff --git a/.gitpod.yml b/.gitpod.yml index 3cdc01b60..da79e25ce 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -2,51 +2,51 @@ workspaceLocation: gitpod/gitpod-ws.code-workspace checkoutLocation: gitpod github: - prebuilds: - # enable for the master/default branch (defaults to true) - master: true - # enable for all branches in this repo (defaults to false) - branches: false - # enable for pull requests coming from this repo (defaults to true) - pullRequests: true - # enable for pull requests coming from forks (defaults to false) - pullRequestsFromForks: true - # add a "Review in Gitpod" button as a comment to pull requests (defaults to true) - addComment: true - # add a "Review in Gitpod" button to pull requests (defaults to false) - addBadge: false + prebuilds: + # enable for the master/default branch (defaults to true) + master: true + # enable for all branches in this repo (defaults to false) + branches: false + # enable for pull requests coming from this repo (defaults to true) + pullRequests: true + # enable for pull requests coming from forks (defaults to false) + pullRequestsFromForks: true + # add a "Review in Gitpod" button as a comment to pull requests (defaults to true) + addComment: true + # add a "Review in Gitpod" button to pull requests (defaults to false) + addBadge: false # See .github/Dockerfile image: ghcr.io/nextflow-io/training:latest tasks: - - before: printf 'unset JAVA_TOOL_OPTIONS\n' >> $HOME/.bashrc && exit + - before: printf 'unset JAVA_TOOL_OPTIONS\n' >> $HOME/.bashrc && exit - - name: Start web server - command: gp ports await 23000 && gp preview https://training.nextflow.io/hello_nextflow + - name: Start web server + command: gp ports await 23000 && gp preview https://training.nextflow.io/hello_nextflow - - name: Load 
Nextflow Tutorial - command: docker pull -q nextflow/rnaseq-nf + - name: Load Nextflow Tutorial + command: docker pull -q nextflow/rnaseq-nf - - name: Start Nextflow Tutorial - env: - NXF_HOME: "/workspace/gitpod/.nextflow" - command: | - cd hello-nextflow - source $HOME/.bashrc - export PS1='\[\e[3;36m\]${PWD/*\//} ->\[\e[0m\] ' - unset JAVA_TOOL_OPTIONS - clear + - name: Start Nextflow Tutorial + env: + NXF_HOME: "/workspace/gitpod/.nextflow" + command: | + cd hello-nextflow + source $HOME/.bashrc + export PS1='\[\e[3;36m\]${PWD/*\//} ->\[\e[0m\] ' + unset JAVA_TOOL_OPTIONS + clear vscode: - extensions: # based on nf-core.nf-core-extensionpack - - codezombiech.gitignore # Language support for .gitignore files - - cssho.vscode-svgviewer # SVG viewer - - esbenp.prettier-vscode # Markdown/CommonMark linting and style checking for Visual Studio Code - - EditorConfig.EditorConfig # override user/workspace settings with settings found in .editorconfig files - - Gruntfuggly.todo-tree # Display TODO and FIXME in a tree view in the activity bar - # - mechatroner.rainbow-csv # Highlight columns in csv files in different colors - - nextflow.nextflow # Nextflow syntax highlighting - # - oderwat.indent-rainbow # Highlight indentation level - - streetsidesoftware.code-spell-checker # Spelling checker for source code - - ms-vscode.live-server + extensions: # based on nf-core.nf-core-extensionpack + - codezombiech.gitignore # Language support for .gitignore files + - cssho.vscode-svgviewer # SVG viewer + - esbenp.prettier-vscode # Markdown/CommonMark linting and style checking for Visual Studio Code + - EditorConfig.EditorConfig # override user/workspace settings with settings found in .editorconfig files + - Gruntfuggly.todo-tree # Display TODO and FIXME in a tree view in the activity bar + # - mechatroner.rainbow-csv # Highlight columns in csv files in different colors + - nextflow.nextflow # Nextflow syntax highlighting + # - oderwat.indent-rainbow # Highlight indentation level + - streetsidesoftware.code-spell-checker # Spelling checker for source code + - ms-vscode.live-server diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bc079d9cd..bae00d00c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,23 +1,23 @@ repos: - - repo: https://github.com/pre-commit/mirrors-prettier - rev: "v3.1.0" - hooks: - - id: prettier - additional_dependencies: - - prettier@3.2.5 + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v3.1.0" + hooks: + - id: prettier + additional_dependencies: + - prettier@3.2.5 - - repo: https://github.com/editorconfig-checker/editorconfig-checker.python - rev: "2.7.3" - hooks: - - id: editorconfig-checker - alias: ec + - repo: https://github.com/editorconfig-checker/editorconfig-checker.python + rev: "2.7.3" + hooks: + - id: editorconfig-checker + alias: ec - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v5.0.0 - hooks: - - id: trailing-whitespace - exclude_types: - - svg - - id: end-of-file-fixer - exclude_types: - - svg + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + exclude_types: + - svg + - id: end-of-file-fixer + exclude_types: + - svg diff --git a/.prettierrc b/.prettierrc deleted file mode 100644 index 55ea2b2d1..000000000 --- a/.prettierrc +++ /dev/null @@ -1,4 +0,0 @@ -{ - "tabWidth": 4, - "useTabs": false -} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fb3d0876a..f60257e03 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,20 +2,20 @@ 
Table of contents: -- [Contributing](#contributing) - - [Contribution model](#contribution-model) - - [Installation](#installation) - - [Docker](#docker) - - [Python](#python) - - [Social cards](#social-cards) - - [Figures \& diagrams](#figures--diagrams) - - [Content style and formatting](#content-style-and-formatting) - - [Formatting / linting](#formatting--linting) - - [Admonitions](#admonitions) - - [Known limitations](#known-limitations) - - [Code annotations](#code-annotations) - - [Word highlighting](#word-highlighting) - - [TODO / FIXME](#todo--fixme) +- [Contributing](#contributing) + - [Contribution model](#contribution-model) + - [Installation](#installation) + - [Docker](#docker) + - [Python](#python) + - [Social cards](#social-cards) + - [Figures \& diagrams](#figures--diagrams) + - [Content style and formatting](#content-style-and-formatting) + - [Formatting / linting](#formatting--linting) + - [Admonitions](#admonitions) + - [Known limitations](#known-limitations) + - [Code annotations](#code-annotations) + - [Word highlighting](#word-highlighting) + - [TODO / FIXME](#todo--fixme) ## Contribution model @@ -92,11 +92,11 @@ If there is an announcement banner, you can enable and customise it using the fo ```yaml extra: - # Announcement banner for upcoming training - announcement: - active: false - date_text: March 5-6, 2024 - register_url: https://nf-co.re/events/2024/training-foundational-march + # Announcement banner for upcoming training + announcement: + active: false + date_text: March 5-6, 2024 + register_url: https://nf-co.re/events/2024/training-foundational-march ``` If you need more customisation, edit `docs/assets/overrides/main.html` @@ -136,9 +136,9 @@ There is a GitHub action that checks pull-requests for valid formatting. We use admonitions extensively to make certain pieces of content stand out. Please see the [official docs](https://squidfunk.github.io/mkdocs-material/reference/admonitions/) for an explanation. -- Note that we have two custom admonitions: `exercise` and `result` (alias `solution`). -- `!!!` does a regular admonition, `???` makes it collapsed (click to expand). -- Intendation is important! Make sure you check the rendered site, as it's easy to make a mistake. +- Note that we have two custom admonitions: `exercise` and `result` (alias `solution`). +- `!!!` does a regular admonition, `???` makes it collapsed (click to expand). +- Intendation is important! Make sure you check the rendered site, as it's easy to make a mistake. ## Known limitations @@ -168,4 +168,4 @@ I recommend the [Todo Tree VSCode extension](https://marketplace.visualstudio.co A list of key ones also included here: -- Remove plugin install from Phil's GitHub fork in `requirements.txt` and `.github/mkdocs.Dockerfile` when [this PR](https://github.com/timvink/mkdocs-enumerate-headings-plugin/pull/33) is merged +- Remove plugin install from Phil's GitHub fork in `requirements.txt` and `.github/mkdocs.Dockerfile` when [this PR](https://github.com/timvink/mkdocs-enumerate-headings-plugin/pull/33) is merged diff --git a/LICENSE.md b/LICENSE.md index d693cdeff..8d58a93b8 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -8,18 +8,18 @@ You are free to: -- Share — copy and redistribute the material in any medium or format +- Share — copy and redistribute the material in any medium or format -- Adapt — remix, transform, and build upon the material +- Adapt — remix, transform, and build upon the material -- The licensor cannot revoke these freedoms as long as you follow the license terms. 
+- The licensor cannot revoke these freedoms as long as you follow the license terms. Under the following terms: -- Attribution — You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. +- Attribution — You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. -- NonCommercial — You may not use the material for commercial purposes. +- NonCommercial — You may not use the material for commercial purposes. -- ShareAlike — If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. +- ShareAlike — If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. -- No additional restrictions — You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits. +- No additional restrictions — You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits. diff --git a/README.md b/README.md index edc78da2e..a4f38e420 100644 --- a/README.md +++ b/README.md @@ -7,11 +7,11 @@ Welcome to the Nextflow training repository! We are excited to have you on the path to writing reproducible and scalable scientific workflows using Nextflow. -- 👉🏻 Written training material: +- 👉🏻 Written training material: -- 👩🏻‍💻 Instructions on loading this repository within a GitPod environment: +- 👩🏻‍💻 Instructions on loading this repository within a GitPod environment: -- 📚 Nextflow documentation: +- 📚 Nextflow documentation: ## Contributions diff --git a/TRANSLATING.md b/TRANSLATING.md index 255fea119..97a01f465 100644 --- a/TRANSLATING.md +++ b/TRANSLATING.md @@ -6,9 +6,9 @@ The typical workflow for contributing with translation is as follows: 1. Make a _fork_ of the GitHub repository to your own account 2. Work locally (see below) and make your changes - - Check if the language you want to translate to is already enabled in the `mkdocs.yml` file. If it isn't do it according to [this](https://github.com/nextflow-io/training/pull/163/files#diff-98d0f806abc9af24e6a7c545d3d77e8f9ad57643e27211d7a7b896113e420ed2) example. - - If you want to improve an already existing translation, the file already exists and the language is already set up. Simply open the file and work on it. Otherwise, create the new file with the following pattern: If the original file in English is `filename.md`, you will create in the same folder a new file named `filename.language_code.md`, where `language_code` is the language code that you can find [here](https://en.wikipedia.org/wiki/ISO_639-1), for the language you wish to translate to. Pay attention to the language code, as it has to be the same that is specified in the `mkdocs.yml` file for that language. - - Ideally, for new files, copy-paste the original English contents into the new file and commit _before_ starting to translate. This way, it's easier to review your pull request by seeing the original file in English vs the translated changes you did. + - Check if the language you want to translate to is already enabled in the `mkdocs.yml` file. 
If it isn't do it according to [this](https://github.com/nextflow-io/training/pull/163/files#diff-98d0f806abc9af24e6a7c545d3d77e8f9ad57643e27211d7a7b896113e420ed2) example. + - If you want to improve an already existing translation, the file already exists and the language is already set up. Simply open the file and work on it. Otherwise, create the new file with the following pattern: If the original file in English is `filename.md`, you will create in the same folder a new file named `filename.language_code.md`, where `language_code` is the language code that you can find [here](https://en.wikipedia.org/wiki/ISO_639-1), for the language you wish to translate to. Pay attention to the language code, as it has to be the same that is specified in the `mkdocs.yml` file for that language. + - Ideally, for new files, copy-paste the original English contents into the new file and commit _before_ starting to translate. This way, it's easier to review your pull request by seeing the original file in English vs the translated changes you did. 3. Commit and push to your forked repository 4. Open a pull-request against the main repo, which can be reviewed and merged 5. Tag other contributors with @ requesting a review. Pull Requests should only be merged if they have at least a single review. @@ -68,23 +68,23 @@ Even though it's required that you read this document in order to contribute wit In order to keep consistency in translations, every language should have a translation glossary where common technical terms and terms related to Nextflow have an official translation to be followed by future translators. Ideally, these links should point to an online spreadsheet where anyone can comment and make suggestions, but not edit. -- [Portuguese](https://docs.google.com/spreadsheets/d/1HUa3BO2kwukhX4EXQ-1blXeP5iueUdM23OwDRpfarDg/edit?usp=sharing) +- [Portuguese](https://docs.google.com/spreadsheets/d/1HUa3BO2kwukhX4EXQ-1blXeP5iueUdM23OwDRpfarDg/edit?usp=sharing) ## Merging Aiming at a more comprehensive git history, pull request commits will be [squashed](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/about-pull-request-merges#squash-and-merge-your-commits) with a commit message outlining: -- What file(s)/page(s) the PR translated (preferably, 1 PR per file) -- What training material the translated file(s)/page(s) is/are part of -- What language it was translated into +- What file(s)/page(s) the PR translated (preferably, 1 PR per file) +- What training material the translated file(s)/page(s) is/are part of +- What language it was translated into The squashing is done by the maintainers of the GitHub repository, so if you're doing a contribution, you don't have to worry about that. This practice gives more freedom/space for contributors to get into more detail in their commit title/message without having to add the info outlined above. 
For example: PR: Translate to Brazilian Portuguese the Seqera Platform section of the basic training -- Commit #1: Add missing translations to sections 1 and 2 -- Commit #2: Add translated image to section 3 -- Commit #3: Translate sections 4, 5 and 6 +- Commit #1: Add missing translations to sections 1 and 2 +- Commit #2: Add translated image to section 3 +- Commit #3: Translate sections 4, 5 and 6 Squashed commit to merge: diff --git a/docs/assets/overrides/main.html b/docs/assets/overrides/main.html index 57e455dd5..8fc886089 100644 --- a/docs/assets/overrides/main.html +++ b/docs/assets/overrides/main.html @@ -1,32 +1,32 @@ {% extends "base.html" %} {% block announce %}
- Next community training: - {{ config.extra.announcement.date_text }} on YouTube, with - support on Slack - - {% include ".icons/fontawesome/brands/youtube.svg" %} - - - {% include ".icons/fontawesome/brands/slack.svg" %} - - - Register here - - - + Next community training: + {{ config.extra.announcement.date_text }} on YouTube, with + support on Slack + + {% include ".icons/fontawesome/brands/youtube.svg" %} + + + {% include ".icons/fontawesome/brands/slack.svg" %} + + + Register here + + +
{% if not config.extra.announcement.active %} {% endif %} {% endblock %} diff --git a/docs/assets/overrides/partials/copyright.html b/docs/assets/overrides/partials/copyright.html index 99d31f47c..556db4993 100644 --- a/docs/assets/overrides/partials/copyright.html +++ b/docs/assets/overrides/partials/copyright.html @@ -2,33 +2,33 @@ https://github.com/squidfunk/mkdocs-material/blob/6c8c6155c6eb7eebfd8a1ac30495330960e816af/material/partials/copyright.html -#} diff --git a/docs/assets/overrides/partials/social.html b/docs/assets/overrides/partials/social.html index a439be5a6..fe8d86fc5 100644 --- a/docs/assets/overrides/partials/social.html +++ b/docs/assets/overrides/partials/social.html @@ -1,10 +1,10 @@ diff --git a/docs/assets/overrides/partials/toc.html b/docs/assets/overrides/partials/toc.html index e9286fa23..47279588b 100644 --- a/docs/assets/overrides/partials/toc.html +++ b/docs/assets/overrides/partials/toc.html @@ -4,143 +4,134 @@ config.mdx_configs.toc.title %} {% set title = config.mdx_configs.toc.title %} {% endif %} diff --git a/docs/assets/stylesheets/extra.css b/docs/assets/stylesheets/extra.css index a853f8ad6..15b8385c1 100644 --- a/docs/assets/stylesheets/extra.css +++ b/docs/assets/stylesheets/extra.css @@ -1,242 +1,242 @@ :root, [data-md-color-scheme="default"], [data-md-color-scheme="slate"] { - --md-primary-fg-color: #58bd9f; - --md-primary-fg-color--light: #c3ead3; - --md-primary-fg-color--dark: #1a7540; - --md-admonition-icon--exercise: url('data:image/svg+xml;charset=utf-8,'); - --md-admonition-icon--result: url('data:image/svg+xml;charset=utf-8,'); - --md-admonition-icon--solution: url('data:image/svg+xml;charset=utf-8,'); + --md-primary-fg-color: #58bd9f; + --md-primary-fg-color--light: #c3ead3; + --md-primary-fg-color--dark: #1a7540; + --md-admonition-icon--exercise: url('data:image/svg+xml;charset=utf-8,'); + --md-admonition-icon--result: url('data:image/svg+xml;charset=utf-8,'); + --md-admonition-icon--solution: url('data:image/svg+xml;charset=utf-8,'); } :root > * { - --md-code-hl-comment-color: var(--md-default-fg-color--lighter); + --md-code-hl-comment-color: var(--md-default-fg-color--lighter); } .md-footer { - z-index: 1; + z-index: 1; } /* Seqera style typography */ .md-typeset h1 { - color: var(--md-default-fg-color); - font-size: 2rem; - font-family: "Degular", sans-serif; - line-height: 2.5rem; - font-weight: 600; - letter-spacing: 0; - margin-bottom: 0.5em; + color: var(--md-default-fg-color); + font-size: 2rem; + font-family: "Degular", sans-serif; + line-height: 2.5rem; + font-weight: 600; + letter-spacing: 0; + margin-bottom: 0.5em; } .md-typeset h2 { - font-size: 1.5rem; - font-family: "Degular", sans-serif; - line-height: 2rem; - font-weight: 600; - letter-spacing: 0; + font-size: 1.5rem; + font-family: "Degular", sans-serif; + line-height: 2rem; + font-weight: 600; + letter-spacing: 0; } .md-typeset h3 { - font-size: 1.2rem; - font-family: "Degular", sans-serif; - line-height: 1.5rem; - font-weight: 600; - letter-spacing: 0; + font-size: 1.2rem; + font-family: "Degular", sans-serif; + line-height: 1.5rem; + font-weight: 600; + letter-spacing: 0; } @media (min-width: 768px) { - .md-typeset h1 { - font-family: "Degular", sans-serif; - font-size: 3.2rem; - line-height: 4rem; - } - .md-typeset h2 { - font-family: "Degular", sans-serif; - font-size: 2rem; - line-height: 2.5rem; - } - .md-typeset h3 { - font-family: "Degular", sans-serif; - font-size: 1.5rem; - line-height: 2rem; - } + .md-typeset h1 { + font-family: "Degular", sans-serif; + 
font-size: 3.2rem; + line-height: 4rem; + } + .md-typeset h2 { + font-family: "Degular", sans-serif; + font-size: 2rem; + line-height: 2.5rem; + } + .md-typeset h3 { + font-family: "Degular", sans-serif; + font-size: 1.5rem; + line-height: 2rem; + } } /* Dark mode figures */ body[data-md-color-scheme="slate"] figure img, body[data-md-color-scheme="slate"] .excalidraw svg { - filter: invert(100%) hue-rotate(180deg); + filter: invert(100%) hue-rotate(180deg); } .excalidraw svg rect { - fill: transparent; + fill: transparent; } /* Only show heading enumerations at levels we care about */ .enumerate-headings-plugin { - display: none; + display: none; } h2 .enumerate-headings-plugin, h3 .enumerate-headings-plugin { - display: inline; + display: inline; } .md-typeset h1 .headerlink { - display: none; + display: none; } /* Homepage Seqera-style buttons */ *:is(.md-typeset .md-button, .md-banner .md-button) { - border-radius: 2rem; - transition: padding 0.2s; - transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); - position: relative; + border-radius: 2rem; + transition: padding 0.2s; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); + position: relative; } *:is(.md-typeset .md-button, .md-banner .md-button) .twemoji { - height: 1.15rem; - position: absolute; - right: 0.5rem; - top: 25%; - opacity: 0; + height: 1.15rem; + position: absolute; + right: 0.5rem; + top: 25%; + opacity: 0; } .md-banner .md-button { - padding: 0.3em 0.8em; - font-size: 0.8em; + padding: 0.3em 0.8em; + font-size: 0.8em; } .md-banner .md-button .twemoji { - height: 0.6rem; + height: 0.6rem; } *:is(.md-typeset .md-button, .md-banner .md-button) svg { - transition: opacity 0.1s; - transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); - opacity: 1; - height: 1.15rem; - width: 1.15rem; + transition: opacity 0.1s; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); + opacity: 1; + height: 1.15rem; + width: 1.15rem; } *:is(.md-typeset .md-button, .md-banner .md-button):hover, *:is(.md-typeset .md-button, .md-banner .md-button):focus, *:is(.md-typeset .md-button, .md-banner .md-button):active { - padding-right: 2.4rem; - background-color: transparent; - border-color: var(--md-primary-fg-color); - color: var(--md-primary-fg-color); + padding-right: 2.4rem; + background-color: transparent; + border-color: var(--md-primary-fg-color); + color: var(--md-primary-fg-color); } .md-banner .md-button:hover, .md-banner .md-button:focus, .md-banner .md-button:active { - padding-right: 1.6rem; + padding-right: 1.6rem; } *:is(.md-typeset .md-button, .md-banner .md-button).md-button--primary:hover, *:is(.md-typeset .md-button, .md-banner .md-button).md-button--primary:focus, *:is(.md-typeset .md-button, .md-banner .md-button).md-button--primary:active { - background-color: var(--md-primary-fg-color); - color: var(--md-primary-bg-color); + background-color: var(--md-primary-fg-color); + color: var(--md-primary-bg-color); } *:is(.md-typeset .md-button, .md-banner .md-button):hover .twemoji, *:is(.md-typeset .md-button, .md-banner .md-button):focus .twemoji, *:is(.md-typeset .md-button, .md-banner .md-button):active .twemoji { - opacity: 1; + opacity: 1; } .md-banner { - background-color: #17191e; + background-color: #17191e; } /* Homepage logos */ .homepage_logos { - text-align: center; + text-align: center; } .homepage_logos img { - height: 2rem; - max-width: 100%; - margin: 1rem auto 0; + height: 2rem; + max-width: 100%; + margin: 1rem auto 0; } /* Custom right-hand-side sidebar */ .sidebar_resources { - margin-top: 
2rem; + margin-top: 2rem; } .sidebar_resources svg { - fill: currentcolor; - max-height: 100%; - width: 1.125em; - margin-right: 0.5rem; - opacity: 0.6; + fill: currentcolor; + max-height: 100%; + width: 1.125em; + margin-right: 0.5rem; + opacity: 0.6; } .sidebar_resources .md-nav__link { - justify-content: left; + justify-content: left; } @media screen and (min-width: 59.9375em) { - .md-sidebar--secondary .md-sidebar__inner > nav > .md-nav__list { - border-left: 3px solid var(--md-default-fg-color--lightest); - } + .md-sidebar--secondary .md-sidebar__inner > nav > .md-nav__list { + border-left: 3px solid var(--md-default-fg-color--lightest); + } } @media screen and (max-width: 59.9375em) { - .sidebar_resources { - display: none !important; - } + .sidebar_resources { + display: none !important; + } } /* Custom Footer */ .footer_cc_link svg { - fill: currentcolor; - width: 0.8rem; - height: 0.8rem; - vertical-align: -18%; + fill: currentcolor; + width: 0.8rem; + height: 0.8rem; + vertical-align: -18%; } .footer_seqera_logo img { - opacity: 0.5; - height: 1.5rem; - margin-top: 0.6rem; + opacity: 0.5; + height: 1.5rem; + margin-top: 0.6rem; } .footer_seqera_logo:hover img, .footer_seqera_logo:active img, .footer_seqera_logo:focus img { - opacity: 0.8; + opacity: 0.8; } /* Custom admonitions */ .md-typeset .admonition.exercise, .md-typeset details.exercise { - border-color: rgb(142, 142, 142); - font-size: 0.8rem; + border-color: rgb(142, 142, 142); + font-size: 0.8rem; } .md-typeset .exercise > .admonition-title, .md-typeset .exercise > summary { - background-color: rgba(153, 153, 153, 0.1); + background-color: rgba(153, 153, 153, 0.1); } .md-typeset .exercise > .admonition-title::before, .md-typeset .exercise > summary::before { - background-color: rgb(146, 146, 146); - -webkit-mask-image: var(--md-admonition-icon--exercise); - mask-image: var(--md-admonition-icon--exercise); + background-color: rgb(146, 146, 146); + -webkit-mask-image: var(--md-admonition-icon--exercise); + mask-image: var(--md-admonition-icon--exercise); } .md-typeset .admonition.result, .md-typeset details.result { - border-color: rgb(0, 200, 83); - font-size: 0.8rem; + border-color: rgb(0, 200, 83); + font-size: 0.8rem; } .md-typeset .result > .admonition-title, .md-typeset .result > summary { - background-color: rgba(0, 200, 83, 0.102); + background-color: rgba(0, 200, 83, 0.102); } .md-typeset .result > .admonition-title::before, .md-typeset .result > summary::before { - background-color: rgb(0, 200, 83); - -webkit-mask-image: var(--md-admonition-icon--result); - mask-image: var(--md-admonition-icon--result); + background-color: rgb(0, 200, 83); + -webkit-mask-image: var(--md-admonition-icon--result); + mask-image: var(--md-admonition-icon--result); } .md-typeset .admonition.solution, .md-typeset details.solution { - border-color: rgb(0, 200, 83); - font-size: 0.8rem; + border-color: rgb(0, 200, 83); + font-size: 0.8rem; } .md-typeset .solution > .admonition-title, .md-typeset .solution > summary { - background-color: rgba(0, 200, 83, 0.102); + background-color: rgba(0, 200, 83, 0.102); } .md-typeset .solution > .admonition-title::before, .md-typeset .solution > summary::before { - background-color: rgb(0, 200, 83); - -webkit-mask-image: var(--md-admonition-icon--result); - mask-image: var(--md-admonition-icon--result); + background-color: rgb(0, 200, 83); + -webkit-mask-image: var(--md-admonition-icon--result); + mask-image: var(--md-admonition-icon--result); } /* Bigger table font size */ .md-typeset 
table:not([class]) { - font-size: 0.8rem; + font-size: 0.8rem; } diff --git a/docs/assets/stylesheets/fonts.css b/docs/assets/stylesheets/fonts.css index a632a56b9..6208fd025 100644 --- a/docs/assets/stylesheets/fonts.css +++ b/docs/assets/stylesheets/fonts.css @@ -1,15 +1,15 @@ @font-face { - font-family: Degular; - src: url("../fonts/degular/Degular-Semibold.woff2") format("woff2"); - src: url("../fonts/degular/Degular-Semibold.woff") format("woff"); - font-weight: 600; - font-style: normal; + font-family: Degular; + src: url("../fonts/degular/Degular-Semibold.woff2") format("woff2"); + src: url("../fonts/degular/Degular-Semibold.woff") format("woff"); + font-weight: 600; + font-style: normal; } @font-face { - font-family: "Virgil"; - src: url("../fonts/excalidraw/Virgil.woff2") format("woff2"); + font-family: "Virgil"; + src: url("../fonts/excalidraw/Virgil.woff2") format("woff2"); } @font-face { - font-family: "Cascadia"; - src: url("../fonts/excalidraw/Cascadia.woff2") format("woff2"); + font-family: "Cascadia"; + src: url("../fonts/excalidraw/Cascadia.woff2") format("woff2"); } diff --git a/docs/basic_training/cache_and_resume.fr.md b/docs/basic_training/cache_and_resume.fr.md index ea993951a..2f4a1e146 100644 --- a/docs/basic_training/cache_and_resume.fr.md +++ b/docs/basic_training/cache_and_resume.fr.md @@ -173,18 +173,18 @@ Enfin, l'option `-t` permet de créer un rapport de provenance personnalisé de ```html
-<div>
-    <h2>${name}</h2>
-    <div>
-        Script:
-        <pre>${script}</pre>
-    </div>
-
-    <ul>
-        <li>Exit: ${exit}</li>
-        <li>Status: ${status}</li>
-        <li>Work dir: ${workdir}</li>
-        <li>Container: ${container}</li>
-    </ul>
-</div>
+<div>
+  <h2>${name}</h2>
+  <div>
+    Script:
+    <pre>${script}</pre>
+  </div>
+
+  <ul>
+    <li>Exit: ${exit}</li>
+    <li>Status: ${status}</li>
+    <li>Work dir: ${workdir}</li>
+    <li>Container: ${container}</li>
+  </ul>
+</div>
``` diff --git a/docs/basic_training/cache_and_resume.md b/docs/basic_training/cache_and_resume.md index 264f829f9..3d4713c08 100644 --- a/docs/basic_training/cache_and_resume.md +++ b/docs/basic_training/cache_and_resume.md @@ -181,18 +181,18 @@ Finally, the `-t` option enables the creation of a basic custom provenance repor ```html
-<div>
-    <h2>${name}</h2>
-    <div>
-        Script:
-        <pre>${script}</pre>
-    </div>
-
-    <ul>
-        <li>Exit: ${exit}</li>
-        <li>Status: ${status}</li>
-        <li>Work dir: ${workdir}</li>
-        <li>Container: ${container}</li>
-    </ul>
-</div>
+<div>
+  <h2>${name}</h2>
+  <div>
+    Script:
+    <pre>${script}</pre>
+  </div>
+
+  <ul>
+    <li>Exit: ${exit}</li>
+    <li>Status: ${status}</li>
+    <li>Work dir: ${workdir}</li>
+    <li>Container: ${container}</li>
+  </ul>
+</div>
``` diff --git a/docs/basic_training/cache_and_resume.pt.md b/docs/basic_training/cache_and_resume.pt.md index 274dd22e6..313c2b3ef 100644 --- a/docs/basic_training/cache_and_resume.pt.md +++ b/docs/basic_training/cache_and_resume.pt.md @@ -173,18 +173,18 @@ Finalmente, a opção `-t` permite a criação de um relatório básico e custom ```html
-<div>
-    <h2>${name}</h2>
-    <div>
-        Script:
-        <pre>${script}</pre>
-    </div>
-
-    <ul>
-        <li>Exit: ${exit}</li>
-        <li>Status: ${status}</li>
-        <li>Work dir: ${workdir}</li>
-        <li>Contêiner: ${container}</li>
-    </ul>
-</div>
+<div>
+  <h2>${name}</h2>
+  <div>
+    Script:
+    <pre>${script}</pre>
+  </div>
+
+  <ul>
+    <li>Exit: ${exit}</li>
+    <li>Status: ${status}</li>
+    <li>Work dir: ${workdir}</li>
+    <li>Contêiner: ${container}</li>
+  </ul>
+</div>
``` diff --git a/docs/basic_training/config.md b/docs/basic_training/config.md index 7b2aab997..8358eeba8 100644 --- a/docs/basic_training/config.md +++ b/docs/basic_training/config.md @@ -41,7 +41,7 @@ Instead of including each parameter on the command line, parameters can also be ```json linenums="1" title="params.json" { - "greeting": "Bonjour le monde!" + "greeting": "Bonjour le monde!" } ``` diff --git a/docs/basic_training/index.es.md b/docs/basic_training/index.es.md index caa9cb4a1..3bb4cde19 100644 --- a/docs/basic_training/index.es.md +++ b/docs/basic_training/index.es.md @@ -1,7 +1,7 @@ --- description: Descripción general del material de formación básico de Nextflow hide: - - toc + - toc --- # Bienvenido diff --git a/docs/basic_training/index.fr.md b/docs/basic_training/index.fr.md index 2202ffe5e..3c07c22ad 100644 --- a/docs/basic_training/index.fr.md +++ b/docs/basic_training/index.fr.md @@ -1,7 +1,7 @@ --- description: Aperçu du matériel de formation Nextflow de base hide: - - toc + - toc --- # Bienvenue diff --git a/docs/basic_training/index.pt.md b/docs/basic_training/index.pt.md index 10016e6fc..7300f6ca5 100644 --- a/docs/basic_training/index.pt.md +++ b/docs/basic_training/index.pt.md @@ -1,7 +1,7 @@ --- description: Visão geral do material de treinamento básico do Nextflow hide: - - toc + - toc --- # Bem vindo ao treinamento básico do Nextflow diff --git a/docs/basic_training/operators.md b/docs/basic_training/operators.md index 338394c20..8ad11f659 100644 --- a/docs/basic_training/operators.md +++ b/docs/basic_training/operators.md @@ -638,8 +638,8 @@ You can also parse JSON files directly: ```json title="file.json" [ - { "name": "Bob", "height": 180, "champion": false }, - { "name": "Alice", "height": 170, "champion": false } + { "name": "Bob", "height": 180, "champion": false }, + { "name": "Alice", "height": 170, "champion": false } ] ``` diff --git a/docs/basic_training/seqera_platform.fr.md b/docs/basic_training/seqera_platform.fr.md index 22b32c7d8..fd5c787b9 100644 --- a/docs/basic_training/seqera_platform.fr.md +++ b/docs/basic_training/seqera_platform.fr.md @@ -163,10 +163,10 @@ En bref, voici les étapes à suivre pour mettre en place une filière. 6. Entrez le(s) nom(s) de chacun des **profils de configuration** de Nextflow suivi de la touche `Enter`. Voir la documentation Nextflow [Configuration des profiles](https://www.nextflow.io/docs/latest/config.html#config-profiles) pour plus de détails. 7. Saisissez les paramètres du workflow au format YAML ou JSON. Exemple YAML : - ```yaml - reads: "s3://nf-bucket/exome-data/ERR013140_{1,2}.fastq.bz2" - paired_end: true - ``` + ```yaml + reads: "s3://nf-bucket/exome-data/ERR013140_{1,2}.fastq.bz2" + paired_end: true + ``` 8. Sélectionnez Launch (Lancer) pour commencer l'exécution du pipeline. diff --git a/docs/basic_training/seqera_platform.md b/docs/basic_training/seqera_platform.md index e85f413a7..86c74671e 100644 --- a/docs/basic_training/seqera_platform.md +++ b/docs/basic_training/seqera_platform.md @@ -165,10 +165,10 @@ In brief, these are the steps you need to follow to set up a pipeline. 6. Enter the name(s) of each of the Nextflow **Config profiles** followed by the `Enter` key. See the Nextflow [Config profiles](https://www.nextflow.io/docs/latest/config.html#config-profiles) documentation for more details. 7. Enter any workflow parameters in YAML or JSON format. 
YAML example: - ```yaml - reads: "s3://nf-bucket/exome-data/ERR013140_{1,2}.fastq.bz2" - paired_end: true - ``` + ```yaml + reads: "s3://nf-bucket/exome-data/ERR013140_{1,2}.fastq.bz2" + paired_end: true + ``` 8. Select Launch to begin the pipeline execution. diff --git a/docs/basic_training/seqera_platform.pt.md b/docs/basic_training/seqera_platform.pt.md index 836372286..cc5ee7d66 100644 --- a/docs/basic_training/seqera_platform.pt.md +++ b/docs/basic_training/seqera_platform.pt.md @@ -164,10 +164,10 @@ Em resumo, essas são as etapas que você precisa seguir para configurar um pipe 6. Digite o(s) nome(s) de cada um dos **perfis de configuração** do Nextflow seguido da tecla `enter`. Veja mais [na documentação oficial](https://www.nextflow.io/docs/latest/config.html#config-profiles) sobre a configuração de perfis. 7. Insira quaisquer parâmetros do fluxo de trabalho no formato YAML ou JSON. Exemplo com YAML: - ```yaml - leituras: "s3://nf-bucket/exome-data/ERR013140_{1,2}.fastq.bz2" - pares_de_leituras: true - ``` + ```yaml + leituras: "s3://nf-bucket/exome-data/ERR013140_{1,2}.fastq.bz2" + pares_de_leituras: true + ``` 8. Selecione Launch para iniciar a execução do pipeline. diff --git a/docs/basic_training/setup.fr.md b/docs/basic_training/setup.fr.md index 83b43c5f7..42032ac2c 100644 --- a/docs/basic_training/setup.fr.md +++ b/docs/basic_training/setup.fr.md @@ -94,7 +94,7 @@ Un environnement de développement Nextflow préconfiguré est disponible via Gi Pour exécuter Gitpod : - Cliquez sur l'URL suivante : - - Il s'agit de l'URL de notre repositoire GitHub, préfixée par `https://gitpod.io/#`. + - Il s'agit de l'URL de notre repositoire GitHub, préfixée par `https://gitpod.io/#`. - Connectez-vous à votre compte GitHub (et autorisez l'accès). Une fois que vous vous êtes connecté, Gitpod devrait se charger (sautez le prebuild si on vous le demande). diff --git a/docs/basic_training/setup.pt.md b/docs/basic_training/setup.pt.md index cd38a6005..52f2a4619 100644 --- a/docs/basic_training/setup.pt.md +++ b/docs/basic_training/setup.pt.md @@ -94,7 +94,7 @@ Um ambiente de desenvolvimento Nextflow pré-configurado está disponível no Gi Para executar o Gitpod: - Clique na URL a seguir: - - Essa URL é o link do repositório do treinamento no GitHub, prefixado com `https://gitpod.io/#` + - Essa URL é o link do repositório do treinamento no GitHub, prefixado com `https://gitpod.io/#` - Faça login na sua conta do GitHub (e permita a autorização). Depois de fazer login, o Gitpod deve carregar (pule a pré-compilação, se solicitado). 
diff --git a/docs/hands_on/02_workflow.md b/docs/hands_on/02_workflow.md index e35f5f9d7..f3c089931 100644 --- a/docs/hands_on/02_workflow.md +++ b/docs/hands_on/02_workflow.md @@ -15,12 +15,12 @@ Documentation for all software used in the workflow can be found at the followin - [STAR](http://labshare.cshl.edu/shares/gingeraslab/www-data/dobin/STAR/STAR.posix/doc/STARmanual.pdf) - [vcftools](https://vcftools.github.io/man_latest.html) - [GATK tools](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/index) - - [`SplitNCigarReads`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_rnaseq_SplitNCigarReads.php) - - [`BaseRecalibrator`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_bqsr_BaseRecalibrator.php) - - [`PrintReads`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_readutils_PrintReads.php) - - [`HaplotypeCaller`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_haplotypecaller_HaplotypeCaller.php) - - [`VariantFiltration`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_filters_VariantFiltration.php) - - [`ASEReadCounter`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_rnaseq_ASEReadCounter.php) + - [`SplitNCigarReads`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_rnaseq_SplitNCigarReads.php) + - [`BaseRecalibrator`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_bqsr_BaseRecalibrator.php) + - [`PrintReads`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_readutils_PrintReads.php) + - [`HaplotypeCaller`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_haplotypecaller_HaplotypeCaller.php) + - [`VariantFiltration`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_filters_VariantFiltration.php) + - [`ASEReadCounter`](https://software.broadinstitute.org/gatk/gatkdocs/3.6-0/org_broadinstitute_gatk_tools_walkers_rnaseq_ASEReadCounter.php) ## Pipeline steps diff --git a/docs/hands_on/04_implementation.md b/docs/hands_on/04_implementation.md index c11455446..24a567147 100644 --- a/docs/hands_on/04_implementation.md +++ b/docs/hands_on/04_implementation.md @@ -7,13 +7,13 @@ A first step in any pipeline is to prepare the input data. You will find all the There are four data inputs that we will use in this tutorial: 1. **Genome File** (`data/genome.fa`) - - Human chromosome 22 in FASTA file format + - Human chromosome 22 in FASTA file format 2. **Read Files** (`data/reads/`) - - Sample ENCSR000COQ1: 76bp paired-end reads (`ENCSR000COQ1_1.fq.gz` and `ENCSR000COQ1_2.fq.gz`). + - Sample ENCSR000COQ1: 76bp paired-end reads (`ENCSR000COQ1_1.fq.gz` and `ENCSR000COQ1_2.fq.gz`). 3. **Variants File** (`data/known_variants.vcf.gz`) - - Known variants, gzipped as a Variant Calling File (VCF) format. + - Known variants, gzipped as a Variant Calling File (VCF) format. 4. **Blacklist File** (`data/blacklist.bed`) - - Genomic locations which are known to produce artifacts and spurious variants in Browser Extensible Data (BED) format. + - Genomic locations which are known to produce artifacts and spurious variants in Browser Extensible Data (BED) format. 
## Input parameters @@ -406,8 +406,8 @@ The next process has the following structure: - **Name**: `prepare_vcf_file` - **Command**: create a filtered and recoded set of variants - **Input**: - - the variants file - - the blacklisted regions file + - the variants file + - the blacklisted regions file - **Output**: a tuple containing the filtered/recoded VCF file and the tab index (TBI) file. !!! exercise "Problem #5" @@ -527,9 +527,9 @@ The process has the following structure: - **Name**: `rnaseq_mapping_star` - **Command**: mapping of the RNA-Seq reads using STAR - **Input**: - - the genome fasta file - - the STAR genome index - - a tuple containing the replicate id and paired read files + - the genome fasta file + - the STAR genome index + - a tuple containing the replicate id and paired read files - **Output**: a tuple containing replicate id, aligned bam file & aligned bam file index !!! Exercise "Problem #6" @@ -728,10 +728,10 @@ The next process has the following structure: - **Name**: `rnaseq_gatk_splitNcigar` - **Command**: split reads on Ns in CIGAR string using GATK - **Input**: - - the genome fasta file - - the genome index made with samtools - - the genome dictionary made with picard - - a tuple containing replicate id, aligned bam file and aligned bam file index from the STAR mapping + - the genome fasta file + - the genome index made with samtools + - the genome dictionary made with picard + - a tuple containing replicate id, aligned bam file and aligned bam file index from the STAR mapping - **Output**: a tuple containing the replicate id, the split bam file and the split bam index file !!! exercise "Problem #7" @@ -888,11 +888,11 @@ The next process has the following structure: - **Name**: `rnaseq_gatk_recalibrate` - **Command**: recalibrate reads from each replicate using GATK - **Input** - - the genome fasta file - - the genome index made with samtools - - the genome dictionary made with picard - - a tuple containing replicate id, aligned bam file and aligned bam file index from process 3 - - a tuple containing the filtered/recoded VCF file and the tab index (TBI) file from process 1D + - the genome fasta file + - the genome index made with samtools + - the genome dictionary made with picard + - a tuple containing replicate id, aligned bam file and aligned bam file index from process 3 + - a tuple containing the filtered/recoded VCF file and the tab index (TBI) file from process 1D - **Output**: a tuple containing the sample id, the unique bam file and the unique bam index file !!! exercise "Problem #8" @@ -1097,10 +1097,10 @@ The next process has the following structure: - **Name**: `rnaseq_call_variants` - **Command**: variant calling of each sample using GATK - **Input**: - - the genome fasta file - - the genome index made with samtools - - the genome dictionary made with picard - - a tuple containing replicate id, aligned bam file and aligned bam file index from process 4 + - the genome fasta file + - the genome index made with samtools + - the genome dictionary made with picard + - a tuple containing replicate id, aligned bam file and aligned bam file index from process 4 - **Output**: a tuple containing the sample id the resulting variant calling file (vcf) !!! 
exercise "Problem #9" @@ -1292,19 +1292,19 @@ We must process the VCF result to prepare variants file for allele specific expr You should implement two processes having the following structure: - _1st process_ - - **Name**: `post_process_vcf` - - **Command**: post-process the variant calling file (vcf) of each sample - - **Input**: - - tuple containing the sample ID and vcf file - - a tuple containing the filtered/recoded VCF file and the tab index (TBI) file from process 1D - - **Output**: a tuple containing the sample id, the variant calling file (vcf) and a file containing common SNPs + - **Name**: `post_process_vcf` + - **Command**: post-process the variant calling file (vcf) of each sample + - **Input**: + - tuple containing the sample ID and vcf file + - a tuple containing the filtered/recoded VCF file and the tab index (TBI) file from process 1D + - **Output**: a tuple containing the sample id, the variant calling file (vcf) and a file containing common SNPs - _2nd process_ - - **Name**: `prepare_vcf_for_ase` - - **Command**: prepare the VCF for allele specific expression (ASE) and generate a figure in R. - - **Input**: a tuple containing the sample id, the variant calling file (vcf) and a file containing common SNPs - - **Output**: - - a tuple containing the sample ID and known SNPs in the sample for ASE - - a figure of the SNPs generated in R as a PDF file + - **Name**: `prepare_vcf_for_ase` + - **Command**: prepare the VCF for allele specific expression (ASE) and generate a figure in R. + - **Input**: a tuple containing the sample id, the variant calling file (vcf) and a file containing common SNPs + - **Output**: + - a tuple containing the sample ID and known SNPs in the sample for ASE + - a figure of the SNPs generated in R as a PDF file !!! exercise "Problem #10" @@ -1618,10 +1618,10 @@ The next process has the following structure: - **Name**: `ASE_knownSNPs` - **Command**: calculate allele counts at a set of positions with GATK tools - **Input**: - - genome fasta file - - genome index file from samtools - - genome dictionary file - - the `grouped_vcf_bam_bai_ch` channel + - genome fasta file + - genome index file from samtools + - genome dictionary file + - the `grouped_vcf_bam_bai_ch` channel - **Output**: the allele specific expression file (`ASE.tsv`) !!! exercise "Problem #12" diff --git a/docs/hands_on/index.md b/docs/hands_on/index.md index 3a5857051..b0f42f00c 100644 --- a/docs/hands_on/index.md +++ b/docs/hands_on/index.md @@ -1,7 +1,7 @@ --- title: Introduction hide: - - toc + - toc --- # Nextflow course - Hands-on diff --git a/docs/hello_nextflow/05_hello_operators.md b/docs/hello_nextflow/05_hello_operators.md index 946c6640e..fcceff174 100644 --- a/docs/hello_nextflow/05_hello_operators.md +++ b/docs/hello_nextflow/05_hello_operators.md @@ -561,15 +561,15 @@ With a concrete example, it looks like this: 1. We have three files: - `[A.ext, B.ext, C.ext]` + `[A.ext, B.ext, C.ext]` 2. The closure modifies each one to create the strings: - `"-V A.ext", "-V B.ext", "-V C.ext"` + `"-V A.ext", "-V B.ext", "-V C.ext"` 3. 
The `.join(' ')` operation generates the final string: - `"-V A.ext -V B.ext -V C.ext"` + `"-V A.ext -V B.ext -V C.ext"` Once we have that string, we can assign it to a local variable, `gvcfs_line`, defined with the `def` keyword: diff --git a/docs/hello_nextflow/06_hello_config.md b/docs/hello_nextflow/06_hello_config.md index d6f672f99..a780aaef4 100644 --- a/docs/hello_nextflow/06_hello_config.md +++ b/docs/hello_nextflow/06_hello_config.md @@ -38,17 +38,17 @@ hello-config - **`nextflow.config`** is a copy of the original `nextflow.config` file from the `hello-nextflow` directory, one level up (where we've been working so far). Whenever there is a file named `nextflow.config` in the current directory, Nextflow will automatically load configuration from it. The one we have been using contains the following lines: - ```console title="nextflow.config" linenums="1" - docker.fixOwnership = true - docker.enabled = true - ``` + ```console title="nextflow.config" linenums="1" + docker.fixOwnership = true + docker.enabled = true + ``` - The `docker.fixOwnership = true` line is not really interesting. - It's a workaround for an issue that sometimes occur with containerized tools that set the wrong permissions on the files they write (which is the case with GenomicsDBImport in the GATK container image in our workflow). + The `docker.fixOwnership = true` line is not really interesting. + It's a workaround for an issue that sometimes occur with containerized tools that set the wrong permissions on the files they write (which is the case with GenomicsDBImport in the GATK container image in our workflow). - The `docker.enabled = true` line is what we care about here. - It specifies that Nextflow should use Docker to run process calls that specify a container image. - We're going to be playing with that shortly. + The `docker.enabled = true` line is what we care about here. + It specifies that Nextflow should use Docker to run process calls that specify a container image. + We're going to be playing with that shortly. !!!note @@ -956,13 +956,13 @@ The values are the same input files and reference files we've been using so far. ```json title="demo-params.json" linenums="1" { - "reads_bam": "data/sample_bams.txt", - "outdir": "results_genomics", - "reference": "data/ref/ref.fasta", - "reference_index": "data/ref/ref.fasta.fai", - "reference_dict": "data/ref/ref.dict", - "intervals": "data/ref/intervals.bed", - "cohort_name": "family_trio" + "reads_bam": "data/sample_bams.txt", + "outdir": "results_genomics", + "reference": "data/ref/ref.fasta", + "reference_index": "data/ref/ref.fasta.fai", + "reference_dict": "data/ref/ref.dict", + "intervals": "data/ref/intervals.bed", + "cohort_name": "family_trio" } ``` diff --git a/docs/hello_nextflow/09_hello_nf-core.md b/docs/hello_nextflow/09_hello_nf-core.md index 0f8b01861..c0e3ade52 100644 --- a/docs/hello_nextflow/09_hello_nf-core.md +++ b/docs/hello_nextflow/09_hello_nf-core.md @@ -297,24 +297,24 @@ Template features can be flexibly included or excluded at the time of creation, 3. Select **Custom** on the Choose pipeline type screen 4. 
Enter your pipeline details, replacing < YOUR NAME > with your own name, then select **Next** - - **GitHub organisation:** myorg - - **Workflow name:** myfirstpipeline - - **A short description of your pipeline:** My first pipeline - - **Name of the main author / authors:** < YOUR NAME > + - **GitHub organisation:** myorg + - **Workflow name:** myfirstpipeline + - **A short description of your pipeline:** My first pipeline + - **Name of the main author / authors:** < YOUR NAME > 5. On the Template features screen, turn **off**: - - `Use a GitHub repository` - - `Add Github CI tests` - - `Use reference genomes` - - `Add Github badges` - - `Include citations` - - `Include a gitpod environment` - - `Include GitHub Codespaces` - - `Use fastqc` - - `Add a changelog` - - `Support Microsoft Teams notifications` - - `Support Slack notifications` + - `Use a GitHub repository` + - `Add Github CI tests` + - `Use reference genomes` + - `Add Github badges` + - `Include citations` + - `Include a gitpod environment` + - `Include GitHub Codespaces` + - `Use fastqc` + - `Add a changelog` + - `Support Microsoft Teams notifications` + - `Support Slack notifications` 6. Select **Finish** on the Final details screen 7. Wait for the pipeline to be created, then select **Continue** diff --git a/docs/hello_nextflow/index.md b/docs/hello_nextflow/index.md index 02797df1e..cfcb9c8bc 100644 --- a/docs/hello_nextflow/index.md +++ b/docs/hello_nextflow/index.md @@ -1,7 +1,7 @@ --- title: Hello Nextflow hide: - - toc + - toc --- # Hello Nextflow diff --git a/docs/hello_nextflow/index.pt.md b/docs/hello_nextflow/index.pt.md index ce1166743..761a67fd6 100644 --- a/docs/hello_nextflow/index.pt.md +++ b/docs/hello_nextflow/index.pt.md @@ -1,7 +1,7 @@ --- title: Hello Nextflow hide: - - toc + - toc --- # Hello Nextflow diff --git a/docs/hello_nextflow/seqera/02_run_with_launchpad.md b/docs/hello_nextflow/seqera/02_run_with_launchpad.md index 4b2f41fb1..2b3786895 100644 --- a/docs/hello_nextflow/seqera/02_run_with_launchpad.md +++ b/docs/hello_nextflow/seqera/02_run_with_launchpad.md @@ -62,10 +62,10 @@ Click on the task to see the task details: 1. Find the following details on the "About" page for the the task you're inspecting: - - [ ] How long did the task script run (not including scheduling time)? - - [ ] How many CPUs were allocated to the task? - - [ ] What was the virtual machine type that the task ran on? - - [ ] What was the estimated cost of the task? + - [ ] How long did the task script run (not including scheduling time)? + - [ ] How many CPUs were allocated to the task? + - [ ] What was the virtual machine type that the task ran on? + - [ ] What was the estimated cost of the task? 2. Explore the Execution Log tab. What information is available here? diff --git a/docs/index.es.md b/docs/index.es.md index 0b6b221b1..3d1164d3d 100644 --- a/docs/index.es.md +++ b/docs/index.es.md @@ -2,9 +2,9 @@ title: Nextflow Training description: ¡Bienvenido al portal de capacitación de la comunidad Nextflow! hide: - - navigation - - toc - - footer + - navigation + - toc + - footer --- # Capacitación en Nextflow diff --git a/docs/index.fr.md b/docs/index.fr.md index 0fe324968..2e2590c06 100644 --- a/docs/index.fr.md +++ b/docs/index.fr.md @@ -2,9 +2,9 @@ title: Formation Nextflow description: Bienvenue sur le portail de formation de la communauté Nextflow ! 
hide: - - navigation - - toc - - footer + - navigation + - toc + - footer --- # Formation Nextflow diff --git a/docs/index.md b/docs/index.md index 8ef2b9010..d1a01d491 100644 --- a/docs/index.md +++ b/docs/index.md @@ -2,9 +2,9 @@ title: Nextflow Training description: Welcome to the Nextflow community training portal! hide: - - navigation - - toc - - footer + - navigation + - toc + - footer --- # Nextflow Training diff --git a/docs/index.pt.md b/docs/index.pt.md index 72286bfeb..45eafbabe 100644 --- a/docs/index.pt.md +++ b/docs/index.pt.md @@ -2,9 +2,9 @@ title: Portal de Treinamento Nextflow description: Seja bem vindo ao portal de treinamento da comunidade do Nextflow! hide: - - navigation - - toc - - footer + - navigation + - toc + - footer --- # Treinamentos Nextflow diff --git a/docs/nf_customize/02_nf-core.md b/docs/nf_customize/02_nf-core.md index 00fe254c5..bbda8f7ad 100644 --- a/docs/nf_customize/02_nf-core.md +++ b/docs/nf_customize/02_nf-core.md @@ -11,17 +11,17 @@ One of the key benefits of nf-core is that it promotes open development, testing **Key Features of nf-core pipelines** - **Documentation** - - nf-core pipelines have extensive documentation covering installation, usage, and description of output files to ensure that you won't be left in the dark. + - nf-core pipelines have extensive documentation covering installation, usage, and description of output files to ensure that you won't be left in the dark. - **CI Testing** - - Every time a change is made to the pipeline code, nf-core pipelines use continuous-integration testing to ensure that nothing has broken. + - Every time a change is made to the pipeline code, nf-core pipelines use continuous-integration testing to ensure that nothing has broken. - **Stable Releases** - - nf-core pipelines use GitHub releases to tag stable versions of the code and software, making pipeline runs totally reproducible. + - nf-core pipelines use GitHub releases to tag stable versions of the code and software, making pipeline runs totally reproducible. - **Packaged software** - - Pipeline dependencies are automatically downloaded and handled using Docker, Singularity, Conda, or other software management tools. There is no need for any software installations. + - Pipeline dependencies are automatically downloaded and handled using Docker, Singularity, Conda, or other software management tools. There is no need for any software installations. - **Portable and reproducible** - - nf-core pipelines follow best practices to ensure maximum portability and reproducibility. The large community makes the pipelines exceptionally well-tested and easy to execute. + - nf-core pipelines follow best practices to ensure maximum portability and reproducibility. The large community makes the pipelines exceptionally well-tested and easy to execute. - **Cloud-ready** - - nf-core pipelines are tested on AWS after every major release. You can even browse results live on the website and use outputs for your own benchmarking. + - nf-core pipelines are tested on AWS after every major release. You can even browse results live on the website and use outputs for your own benchmarking. nf-core is published in Nature Biotechnology: [Nat Biotechnol 38, 276–278 (2020). Nature Biotechnology](https://www.nature.com/articles/s41587-020-0439-x). An updated preprint is available at [bioRxiv](https://www.biorxiv.org/content/10.1101/2024.05.10.592912v1). 
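As a rough, illustrative sketch of how the packaged-software and profile features described above come together on the command line (the profile combination shown is typical rather than prescriptive), an nf-core pipeline such as `nf-core/demo` can be launched with a software-management profile plus its bundled test profile:

```bash
# Illustrative launch: the docker profile provisions the tool containers,
# while the test profile supplies a small bundled dataset and parameters.
nextflow run nf-core/demo -profile docker,test --outdir results
```

Swapping `docker` for `singularity` or `conda` changes only how the dependencies are provisioned; the pipeline code itself stays the same.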
diff --git a/docs/nf_customize/03_execution.md b/docs/nf_customize/03_execution.md index de6933278..d4c170a5c 100644 --- a/docs/nf_customize/03_execution.md +++ b/docs/nf_customize/03_execution.md @@ -17,18 +17,18 @@ The [`nf-core/demo`](https://nf-co.re/demo/) pipeline consists of three processe [`nf-core/demo`](https://nf-co.re/demo/) takes a samplesheet that contains paths to FASTQ files as an input and will produce four output folders with logs and reports: - `fastqc/` - - `*_fastqc.html`: FastQC report containing quality metrics. - - `*_fastqc.zip`: Zip archive containing the FastQC report, tab-delimited data file and plot images. + - `*_fastqc.html`: FastQC report containing quality metrics. + - `*_fastqc.zip`: Zip archive containing the FastQC report, tab-delimited data file and plot images. - `fq/` - - `*.fastp.html`: Trimmed fq files. + - `*.fastp.html`: Trimmed fq files. - `multiqc/` - - `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser. - - `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline. - - `multiqc_plots/`: directory containing static images from the report in various formats. + - `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser. + - `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline. + - `multiqc_plots/`: directory containing static images from the report in various formats. - `pipeline_info/` - - Reports generated by Nextflow - - Reports generated by nf-core - - Parameters file + - Reports generated by Nextflow + - Reports generated by nf-core + - Parameters file You can view the code for this pipeline on the [`nf-core/demo` GitHub repository](https://github.com/nf-core/demo). diff --git a/docs/nf_customize/04_config.md b/docs/nf_customize/04_config.md index 1eaafe7e8..24ef31101 100644 --- a/docs/nf_customize/04_config.md +++ b/docs/nf_customize/04_config.md @@ -57,23 +57,23 @@ All parameters have a default configuration that is defined using the `nextflow. There are also several `includeConfig` statements in the `nextflow.config` file that are used to include additional `.config` files from the `conf/` folder. Each additional `.config` file contains categorized configuration information for your pipeline execution, some of which can be optionally included: - `base.config` - - Included by the pipeline by default. - - Generous resource allocations using labels. - - Does not specify any method for software management and expects software to be available (or specified elsewhere). + - Included by the pipeline by default. + - Generous resource allocations using labels. + - Does not specify any method for software management and expects software to be available (or specified elsewhere). - `igenomes.config` - - Included by the pipeline by default. - - Default configuration to access reference files stored on [AWS iGenomes](https://ewels.github.io/AWS-iGenomes/). + - Included by the pipeline by default. + - Default configuration to access reference files stored on [AWS iGenomes](https://ewels.github.io/AWS-iGenomes/). - `igenomes_ignored.config` - - Empty genomes dictionary to use when igenomes is ignored. + - Empty genomes dictionary to use when igenomes is ignored. - `modules.config` - - Included by the pipeline by default. - - Module-specific configuration options (both mandatory and optional). + - Included by the pipeline by default. + - Module-specific configuration options (both mandatory and optional). 
- `test.config` - - Only included if specified as a profile. - - A configuration profile to test the pipeline with a small test dataset. + - Only included if specified as a profile. + - A configuration profile to test the pipeline with a small test dataset. - `test_full.config` - - Only included if specified as a profile. - - A configuration profile to test the pipeline with a full-size test dataset. + - Only included if specified as a profile. + - A configuration profile to test the pipeline with a full-size test dataset. !!! note @@ -82,9 +82,9 @@ There are also several `includeConfig` statements in the `nextflow.config` file Profiles used by nf-core pipelines can be broadly categorized into two groups: - **Software management profiles** - - Profiles for the management of software using software management tools, for example, `docker`, `singularity`, and `conda`. + - Profiles for the management of software using software management tools, for example, `docker`, `singularity`, and `conda`. - **Test profiles** - - Profiles to execute the pipeline with a standardized set of test data and parameters, for example, `test` and `test_full`. + - Profiles to execute the pipeline with a standardized set of test data and parameters, for example, `test` and `test_full`. nf-core pipelines are required to define software containers and environments that can be activated using profiles. Although it is possible to run the pipelines with software installed by other methods (e.g., environment modules or manual installation), using Docker or Singularity is more sharable, convenient, and reproducible. @@ -103,8 +103,8 @@ Nextflow will also look for files that are external to the pipeline project dire - The config file `$HOME/.nextflow/config` - A config file named `nextflow.config` in your current directory - Custom files specified using the command line - - A parameter file that is provided using the `-params-file` option - - A config file that are provided using the `-c` option + - A parameter file that is provided using the `-params-file` option + - A config file that are provided using the `-c` option **You do not need to use all of these files to run your pipeline.** @@ -114,9 +114,9 @@ Parameter files are `.json` files that can contain an unlimited number of parame ```json title="my-params.json" linenums="1" { - "": 1, - "": "", - "": true + "": 1, + "": "", + "": true } ``` diff --git a/docs/nf_customize/05_tools.md b/docs/nf_customize/05_tools.md index 82ff170e5..802e14c95 100644 --- a/docs/nf_customize/05_tools.md +++ b/docs/nf_customize/05_tools.md @@ -102,14 +102,14 @@ nf-core pipelines download The download tool will interactively prompt you for the required information is no arguments are supplied. Each prompt option has a flag and if all flags are supplied then it will run without a request for any additional user input: - **Pipeline name** - - Name of pipeline you would like to download. + - Name of pipeline you would like to download. - **Pipeline revision** - - The revision you would like to download. + - The revision you would like to download. - **Pull containers** - - If you would like to download Singularity images. - - The path to a folder where you would like to store these images if you have not set your `NXF_SINGULARITY_CACHEDIR`. + - If you would like to download Singularity images. + - The path to a folder where you would like to store these images if you have not set your `NXF_SINGULARITY_CACHEDIR`. - **Choose compression type** - - The compression type for Singularity images. 
+ - The compression type for Singularity images. Alternatively, you could build your own execution command with the command line options. diff --git a/docs/nf_develop/1_02_create.md b/docs/nf_develop/1_02_create.md index 1eb1ccc77..57da74e6d 100644 --- a/docs/nf_develop/1_02_create.md +++ b/docs/nf_develop/1_02_create.md @@ -229,42 +229,42 @@ The `.nf-core.yml` file is used to specify the repository type and manage lintin ```yml title=".nf-core.yml" linenums="1" bump_version: null lint: - files_exist: - - CODE_OF_CONDUCT.md - - assets/nf-core-myfirstpipeline_logo_light.png - - docs/images/nf-core-myfirstpipeline_logo_light.png - - docs/images/nf-core-myfirstpipeline_logo_dark.png - - .github/ISSUE_TEMPLATE/config.yml - - .github/workflows/awstest.yml - - .github/workflows/awsfulltest.yml - files_unchanged: - - CODE_OF_CONDUCT.md - - assets/nf-core-myfirstpipeline_logo_light.png - - docs/images/nf-core-myfirstpipeline_logo_light.png - - docs/images/nf-core-myfirstpipeline_logo_dark.png - - .github/ISSUE_TEMPLATE/bug_report.yml - multiqc_config: - - report_comment - nextflow_config: - - manifest.name - - manifest.homePage - - validation.help.beforeText - - validation.help.afterText - - validation.summary.beforeText - - validation.summary.afterText + files_exist: + - CODE_OF_CONDUCT.md + - assets/nf-core-myfirstpipeline_logo_light.png + - docs/images/nf-core-myfirstpipeline_logo_light.png + - docs/images/nf-core-myfirstpipeline_logo_dark.png + - .github/ISSUE_TEMPLATE/config.yml + - .github/workflows/awstest.yml + - .github/workflows/awsfulltest.yml + files_unchanged: + - CODE_OF_CONDUCT.md + - assets/nf-core-myfirstpipeline_logo_light.png + - docs/images/nf-core-myfirstpipeline_logo_light.png + - docs/images/nf-core-myfirstpipeline_logo_dark.png + - .github/ISSUE_TEMPLATE/bug_report.yml + multiqc_config: + - report_comment + nextflow_config: + - manifest.name + - manifest.homePage + - validation.help.beforeText + - validation.help.afterText + - validation.summary.beforeText + - validation.summary.afterText nf_core_version: 3.0.1 org_path: null repository_type: pipeline template: - author: Chris - description: My first pipeline - force: true - is_nfcore: false - name: myfirstpipeline - org: myorg - outdir: . - skip_features: [] - version: 1.0.0dev + author: Chris + description: My first pipeline + force: true + is_nfcore: false + name: myfirstpipeline + org: myorg + outdir: . + skip_features: [] + version: 1.0.0dev update: null ``` diff --git a/docs/nf_develop/extra.md b/docs/nf_develop/extra.md index 9e10eda1a..c5c17c30e 100644 --- a/docs/nf_develop/extra.md +++ b/docs/nf_develop/extra.md @@ -84,8 +84,8 @@ However, if you wish to remove or modify this file you would need to ignore this ```yml title=".nf-core.yml" repository_type: pipeline lint: - files_unchanged: - - CODE_OF_CONDUCT.md + files_unchanged: + - CODE_OF_CONDUCT.md ``` If you run `nf-core lint` again, you would see that the test is now ignored and there are no more failed tests. 
diff --git a/hands-on/bin/gghist.R b/hands-on/bin/gghist.R index 603e6ee2e..107aecc8d 100755 --- a/hands-on/bin/gghist.R +++ b/hands-on/bin/gghist.R @@ -12,92 +12,92 @@ suppressPackageStartupMessages(library("optparse")) option_list <- list( make_option(c("-i", "--input"), default="stdin", - help="File or stdin [default=%default]"), + help="File or stdin [default=%default]"), make_option(c("-o", "--output"), default="gghist.out.pdf", - help="Output file name [default=%default]"), + help="Output file name [default=%default]"), make_option(c("-x", "--x_axis"), default=1, - help="Index of the column with values, or labels if you already have counts [default=%default]"), + help="Index of the column with values, or labels if you already have counts [default=%default]"), make_option(c("-y", "--y_axis"), default=NULL, type="integer", - help="Index of the column with values, in case x provides counts. This will plot identity. Leave empty for default histogram [default=%default]"), + help="Index of the column with values, in case x provides counts. This will plot identity. Leave empty for default histogram [default=%default]"), make_option(c("--header"), action="store_true", default=FALSE, - help="Use this if the input has a header [default=%default]"), + help="Use this if the input has a header [default=%default]"), make_option(c("--position"), default='dodge', - help="Position for histogram [default=%default]"), + help="Position for histogram [default=%default]"), make_option(c("--scale_x_log10"), action="store_true", default=FALSE, - help="log10-transform x scale [default=%default]"), + help="log10-transform x scale [default=%default]"), make_option(c("--scale_y_log10"), action="store_true", default=FALSE, - help="log10-transform y scale [default=%default]"), + help="log10-transform y scale [default=%default]"), make_option(c("--y_title"), type="character", default="count", - help="Title for the y axis [default=%default]"), + help="Title for the y axis [default=%default]"), make_option(c("--x_title"), type="character", default=NULL, - help="Title for the x axis [default=%default]"), + help="Title for the x axis [default=%default]"), make_option(c("--title"), type="character", default=NULL, - help="Title for the plot [default=%default]"), + help="Title for the plot [default=%default]"), make_option(c("-f", "--fill"), default="aquamarine", - help="choose the color which you want to fill the histogram with"), + help="choose the color which you want to fill the histogram with"), make_option(c("-c", "--color"), default="white", - help="choose the color which you want to contour the histogram with"), + help="choose the color which you want to contour the histogram with"), make_option(c("-F", "--fill_by"), type='numeric', - help="the column index with the factor to fill by. Leave empty for no factor."), + help="the column index with the factor to fill by. Leave empty for no factor."), make_option(c("-C", "--color_by"), type='numeric', - help="the column index with the factor to color by. Leave empty for no factor."), + help="the column index with the factor to color by. Leave empty for no factor."), make_option(c("-A", "--alpha_by"), type='numeric', - help="the column index with the factor to fill by. Leave empty for no factor."), + help="the column index with the factor to fill by. Leave empty for no factor."), make_option(c("-P", "--palette"), help='File with colors for the lines. 
Leave empty to use even color spacing'), make_option(c("--sort"), action="store_true", default=FALSE, - help="Sort the columns in decreasing order [default=%default]"), + help="Sort the columns in decreasing order [default=%default]"), make_option(c("--facet_by"), type='numeric', - help="the column index with the factor to facet by. Leave empty for no factor."), + help="the column index with the factor to facet by. Leave empty for no factor."), make_option(c("--facet_scale"), type='character', default="fixed", - help="the scale of faceting: [default=%default]"), + help="the scale of faceting: [default=%default]"), make_option(c("--facet_nrow"), type="numeric", - help="Number of row for faceting. Leave empty for auto [default=%default]"), + help="Number of row for faceting. Leave empty for auto [default=%default]"), make_option(c("-W", "--width"), default=7, - help="width of the plot in inches. [default=%default]"), + help="width of the plot in inches. [default=%default]"), make_option(c("-H", "--height"), default=5, - help="height of the plot in inches. [default=%default]"), + help="height of the plot in inches. [default=%default]"), make_option(c("-B", "--base_size"), default=20, - help="BAse size. [default=%default]"), + help="BAse size. [default=%default]"), make_option(c("-b", "--binwidth"), type="double", - help="Specify binwidth. Leave empty for default"), + help="Specify binwidth. Leave empty for default"), make_option(c("--flip"), action="store_true", default=FALSE, - help="Flip coordinates [default=%default]"), + help="Flip coordinates [default=%default]"), make_option(c("-v", "--verbose"), action="store_true", default=FALSE, - help="if you want more output [default=%default]") + help="if you want more output [default=%default]") ) parser <- OptionParser( - usage = "%prog [options] file", - option_list=option_list, - description = "Reads the values on the first column and outputs a histogram" + usage = "%prog [options] file", + option_list=option_list, + description = "Reads the values on the first column and outputs a histogram" ) arguments <- parse_args(parser, positional_arguments = TRUE) opt <- arguments$options @@ -136,20 +136,20 @@ if (!is.null(opt$color_by)) {C_col = colnames(df)[opt$color_by]} if (!is.null(opt$alpha_by)) {A_col = colnames(df)[opt$alpha_by]} if (!is.null(opt$fill_by)) { - if (F_col == x_col) { - df[paste(x_col, "fill", sep=".")] = df[,F_col] - F_col = paste(x_col, "fill", sep=".") - } + if (F_col == x_col) { + df[paste(x_col, "fill", sep=".")] = df[,F_col] + F_col = paste(x_col, "fill", sep=".") + } } # Read palette if (!is.null(opt$palette)) { - palette = as.character(read.table(opt$palette, h=F, comment.char="%")$V1) + palette = as.character(read.table(opt$palette, h=F, comment.char="%")$V1) } # Correct newlines if column is character if (is.character(df[,x_col])) { - df[,x_col] <- gsub("\\\\n", "\n", df[,x_col]) + df[,x_col] <- gsub("\\\\n", "\n", df[,x_col]) } #================ @@ -158,10 +158,10 @@ if (is.character(df[,x_col])) { theme_set(theme_bw(base_size=opt$base_size)) theme_update( - axis.text.x=element_text(angle=45, hjust=1, vjust=1), - legend.key = element_rect(color='white'), - panel.grid.minor = element_blank(), - panel.grid.major = element_blank() + axis.text.x=element_text(angle=45, hjust=1, vjust=1), + legend.key = element_rect(color='white'), + panel.grid.minor = element_blank(), + panel.grid.major = element_blank() ) @@ -169,15 +169,15 @@ theme_update( # Sort bars by abundance if (opt$sort) { - oldLev = levels(factor(df[,x_col])) - if 
(is.null(opt$y_axis)) { - lev = levels(as.factor(df[,x_col]))[order(table(df[,x_col]), decreasing=TRUE)] - df[x_col] <- factor(df[,x_col], levels=lev) - } - if (!is.null(opt$y_axis)) { - lev = df[order(df[,y_col], decreasing=TRUE), x_col] # have to remove duplicated x - df[x_col] <- factor(df[,x_col], levels=lev) - } + oldLev = levels(factor(df[,x_col])) + if (is.null(opt$y_axis)) { + lev = levels(as.factor(df[,x_col]))[order(table(df[,x_col]), decreasing=TRUE)] + df[x_col] <- factor(df[,x_col], levels=lev) + } + if (!is.null(opt$y_axis)) { + lev = df[order(df[,y_col], decreasing=TRUE), x_col] # have to remove duplicated x + df[x_col] <- factor(df[,x_col], levels=lev) + } } # Params @@ -192,7 +192,7 @@ geom_params$binwidth = opt$binwidth stat = "bin" if (is.factor(df[,x_col]) | is.character(df[,x_col])) { - stat = "count" + stat = "count" } # Stat parameters @@ -200,8 +200,8 @@ stat = ifelse(is.null(opt$y_axis), stat, "identity") stat_params = list( - right=TRUE, - include.lowest=TRUE + right=TRUE, + include.lowest=TRUE ) mapping = list() @@ -209,27 +209,27 @@ mapping = list() mapping <- modifyList(mapping, aes_string(x=x_col)) if (!is.null(opt$y_axis)) { - mapping <- modifyList(mapping, aes_string(y=y_col)) + mapping <- modifyList(mapping, aes_string(y=y_col)) } # specify fill column if (!is.null(opt$fill_by)) { - mapping <- modifyList(mapping, aes_string(fill=F_col, order=rev(F_col))) + mapping <- modifyList(mapping, aes_string(fill=F_col, order=rev(F_col))) } else { - geom_params$fill = opt$fill + geom_params$fill = opt$fill } # specify color column if (!is.null(opt$color_by)) { - mapping <- modifyList(mapping, aes_string(color=F_col, order=rev(F_col))) + mapping <- modifyList(mapping, aes_string(color=F_col, order=rev(F_col))) } else { - geom_params$color = opt$color + geom_params$color = opt$color } # specify alpha column if (!is.null(opt$alpha_by)) { - mapping <- modifyList(mapping, aes_string(alpha=A_col)) + mapping <- modifyList(mapping, aes_string(alpha=A_col)) } @@ -239,8 +239,8 @@ class(mapping) <- "uneval" histLayer <- layer( geom = "bar", params = geom_params, - position = opt$position, - mapping = mapping, + position = opt$position, + mapping = mapping, stat = stat ) @@ -249,36 +249,36 @@ histLayer <- layer( gp = ggplot(df) + histLayer if (!is.character(df[,x_col]) & !is.factor(df[,x_col])) { - avg = mean(df[,x_col], na.rm=TRUE) - med = median(df[,x_col], na.rm=TRUE) - gp = gp + geom_point(aes(x=avg, y=0), size=2) - gp = gp + geom_vline(xintercept=med, linetype=2) + avg = mean(df[,x_col], na.rm=TRUE) + med = median(df[,x_col], na.rm=TRUE) + gp = gp + geom_point(aes(x=avg, y=0), size=2) + gp = gp + geom_vline(xintercept=med, linetype=2) } # Fill scale if (!is.null(opt$fill_by)) { - if (!is.null(opt$palette)) { - gp = gp + scale_fill_manual(values=palette) - } else { - gp = gp + scale_fill_hue() - } + if (!is.null(opt$palette)) { + gp = gp + scale_fill_manual(values=palette) + } else { + gp = gp + scale_fill_hue() + } } # Color scale if (!is.null(opt$color_by)) { - if (!is.null(opt$palette)) { - gp = gp + scale_color_manual(values=palette) - } else { - gp = gp + scale_color_hue() - } + if (!is.null(opt$palette)) { + gp = gp + scale_color_manual(values=palette) + } else { + gp = gp + scale_color_hue() + } } if (!is.null(opt$alpha_by)) { - gp = gp + scale_alpha_discrete(range=rev(c(0.4,1))) + gp = gp + scale_alpha_discrete(range=rev(c(0.4,1))) } if (!is.null(opt$facet_by)) { - gp = gp + facet_wrap(facet_formula, scales=opt$facet_scale, nrow=opt$facet_nrow) + gp = gp + 
facet_wrap(facet_formula, scales=opt$facet_scale, nrow=opt$facet_nrow) } if (opt$scale_x_log10) {gp = gp + scale_x_log10()} @@ -292,7 +292,7 @@ gp = gp + labs(y=opt$y_title) gp = gp + coord_cartesian() if (opt$flip) { - gp = gp + coord_flip() + gp = gp + coord_flip() } #gp = gp + geom_density(aes_string(x=x_col)) diff --git a/hands-on/final_main.nf b/hands-on/final_main.nf index f029bf0fd..608f0d946 100755 --- a/hands-on/final_main.nf +++ b/hands-on/final_main.nf @@ -66,9 +66,9 @@ process prepare_star_genome_index { mkdir -p genome_dir STAR --runMode genomeGenerate \ - --genomeDir genome_dir \ - --genomeFastaFiles ${genome} \ - --runThreadN ${task.cpus} + --genomeDir genome_dir \ + --genomeFastaFiles ${genome} \ + --runThreadN ${task.cpus} """ } @@ -85,14 +85,14 @@ process prepare_vcf_file { output: tuple path("${variantsFile.baseName}.filtered.recode.vcf.gz"), - path("${variantsFile.baseName}.filtered.recode.vcf.gz.tbi") + path("${variantsFile.baseName}.filtered.recode.vcf.gz.tbi") script: """ vcftools --gzvcf ${variantsFile} -c \ - --exclude-bed ${blacklisted} \ - --recode | bgzip -c \ - > ${variantsFile.baseName}.filtered.recode.vcf.gz + --exclude-bed ${blacklisted} \ + --recode | bgzip -c \ + > ${variantsFile.baseName}.filtered.recode.vcf.gz tabix ${variantsFile.baseName}.filtered.recode.vcf.gz """ @@ -112,42 +112,42 @@ process rnaseq_mapping_star { output: tuple val(replicateId), - path('Aligned.sortedByCoord.out.bam'), - path('Aligned.sortedByCoord.out.bam.bai') + path('Aligned.sortedByCoord.out.bam'), + path('Aligned.sortedByCoord.out.bam.bai') script: """ # ngs-nf-dev Align reads to genome STAR --genomeDir ${genomeDir} \ - --readFilesIn ${reads} \ - --runThreadN ${task.cpus} \ - --readFilesCommand zcat \ - --outFilterType BySJout \ - --alignSJoverhangMin 8 \ - --alignSJDBoverhangMin 1 \ - --outFilterMismatchNmax 999 + --readFilesIn ${reads} \ + --runThreadN ${task.cpus} \ + --readFilesCommand zcat \ + --outFilterType BySJout \ + --alignSJoverhangMin 8 \ + --alignSJDBoverhangMin 1 \ + --outFilterMismatchNmax 999 # 2nd pass (improve alignments using table of splice # junctions and create a new index) mkdir -p genomeDir STAR --runMode genomeGenerate \ - --genomeDir genomeDir \ - --genomeFastaFiles ${genome} \ - --sjdbFileChrStartEnd SJ.out.tab \ - --sjdbOverhang 75 \ - --runThreadN ${task.cpus} + --genomeDir genomeDir \ + --genomeFastaFiles ${genome} \ + --sjdbFileChrStartEnd SJ.out.tab \ + --sjdbOverhang 75 \ + --runThreadN ${task.cpus} # Final read alignments STAR --genomeDir genomeDir \ - --readFilesIn ${reads} \ - --runThreadN ${task.cpus} \ - --readFilesCommand zcat \ - --outFilterType BySJout \ - --alignSJoverhangMin 8 \ - --alignSJDBoverhangMin 1 \ - --outFilterMismatchNmax 999 \ - --outSAMtype BAM SortedByCoordinate \ - --outSAMattrRGline ID:${replicateId} LB:library PL:illumina \ + --readFilesIn ${reads} \ + --runThreadN ${task.cpus} \ + --readFilesCommand zcat \ + --outFilterType BySJout \ + --alignSJoverhangMin 8 \ + --alignSJDBoverhangMin 1 \ + --outFilterMismatchNmax 999 \ + --outSAMtype BAM SortedByCoordinate \ + --outSAMattrRGline ID:${replicateId} LB:library PL:illumina \ PU:machine SM:GM12878 # Index the BAM file @@ -168,8 +168,8 @@ process rnaseq_gatk_splitNcigar { path index path genome_dict tuple val(replicateId), - path(bam), - path(bai) + path(bam), + path(bai) output: tuple val(replicateId), path('split.bam'), path('split.bai') @@ -178,12 +178,12 @@ process rnaseq_gatk_splitNcigar { """ # SplitNCigarReads and reassign mapping qualities java -jar 
/usr/gitc/GATK35.jar -T SplitNCigarReads \ - -R ${genome} -I ${bam} \ - -o split.bam \ - -rf ReassignOneMappingQuality \ - -RMQF 255 -RMQT 60 \ - -U ALLOW_N_CIGAR_READS \ - --fix_misencoded_quality_scores + -R ${genome} -I ${bam} \ + -o split.bam \ + -rf ReassignOneMappingQuality \ + -RMQF 255 -RMQT 60 \ + -U ALLOW_N_CIGAR_READS \ + --fix_misencoded_quality_scores """ } @@ -205,30 +205,30 @@ process rnaseq_gatk_recalibrate { output: tuple val(sampleId), - path("${replicateId}.final.uniq.bam"), - path("${replicateId}.final.uniq.bam.bai") + path("${replicateId}.final.uniq.bam"), + path("${replicateId}.final.uniq.bam.bai") script: sampleId = replicateId.replaceAll(/[12]$/,'') """ # Indel Realignment and Base Recalibration gatk3 -T BaseRecalibrator \ - --default_platform illumina \ - -cov ReadGroupCovariate \ - -cov QualityScoreCovariate \ - -cov CycleCovariate \ - -knownSites ${prepared_variants_file} \ - -cov ContextCovariate \ - -R ${genome} -I ${bam} \ - --downsampling_type NONE \ - -nct ${task.cpus} \ - -o final.rnaseq.grp + --default_platform illumina \ + -cov ReadGroupCovariate \ + -cov QualityScoreCovariate \ + -cov CycleCovariate \ + -knownSites ${prepared_variants_file} \ + -cov ContextCovariate \ + -R ${genome} -I ${bam} \ + --downsampling_type NONE \ + -nct ${task.cpus} \ + -o final.rnaseq.grp gatk3 -T PrintReads \ - -R ${genome} -I ${bam} \ - -BQSR final.rnaseq.grp \ - -nct ${task.cpus} \ - -o final.bam + -R ${genome} -I ${bam} \ + -BQSR final.rnaseq.grp \ + -nct ${task.cpus} \ + -o final.bam # Select only unique alignments, no multimaps (samtools view -H final.bam; samtools view final.bam | \ @@ -240,9 +240,9 @@ process rnaseq_gatk_recalibrate { } - /* - * Process 5: GATK Variant Calling - */ +/* +* Process 5: GATK Variant Calling +*/ process rnaseq_call_variants { container 'quay.io/broadinstitute/gotc-prod-gatk:1.0.0-4.1.8.0-1626439571' @@ -270,11 +270,11 @@ process rnaseq_call_variants { # Variant filtering java -jar /usr/gitc/GATK35.jar -T VariantFiltration \ - -R ${genome} -V output.gatk.vcf.gz \ - -window 35 -cluster 3 \ - -filterName FS -filter "FS > 30.0" \ - -filterName QD -filter "QD < 2.0" \ - -o final.vcf + -R ${genome} -V output.gatk.vcf.gz \ + -window 35 -cluster 3 \ + -filterName FS -filter "FS > 30.0" \ + -filterName QD -filter "QD < 2.0" \ + -o final.vcf """ } @@ -290,12 +290,12 @@ process post_process_vcf { input: tuple val(sampleId), path('final.vcf') tuple path('filtered.recode.vcf.gz'), - path('filtered.recode.vcf.gz.tbi') + path('filtered.recode.vcf.gz.tbi') output: tuple val(sampleId), - path('final.vcf'), - path('commonSNPs.diff.sites_in_files') + path('final.vcf'), + path('commonSNPs.diff.sites_in_files') script: ''' @@ -304,7 +304,7 @@ process post_process_vcf { if($dp>=8){print $_."\\n"};' > result.DP8.vcf vcftools --vcf result.DP8.vcf --gzdiff filtered.recode.vcf.gz --diff-site \ - --out commonSNPs + --out commonSNPs ''' } @@ -315,8 +315,8 @@ process prepare_vcf_for_ase { input: tuple val(sampleId), - path('final.vcf'), - path('commonSNPs.diff.sites_in_files') + path('final.vcf'), + path('commonSNPs.diff.sites_in_files') output: tuple val(sampleId), path('known_snps.vcf'), emit: vcf_for_ASE @@ -327,7 +327,7 @@ process prepare_vcf_for_ase { awk 'BEGIN{OFS="\t"} $4~/B/{print $1,$2,$3}' commonSNPs.diff.sites_in_files > test.bed vcftools --vcf final.vcf --bed test.bed --recode --keep-INFO-all \ - --stdout > known_snps.vcf + --stdout > known_snps.vcf grep -v '#' known_snps.vcf | awk -F '\\t' '{print $10}' \ | awk -F ':' '{print $2}' | perl -ne 'chomp($_); \ 
@@ -362,10 +362,10 @@ process ASE_knownSNPs { echo "${bam.join('\n')}" > bam.list java -jar /usr/gitc/GATK35.jar -R ${genome} \ - -T ASEReadCounter \ - -o ASE.tsv \ - -I bam.list \ - -sites ${vcf} + -T ASEReadCounter \ + -o ASE.tsv \ + -I bam.list \ + -sites ${vcf} """ } @@ -385,11 +385,11 @@ workflow { prepare_genome_picard.out, rnaseq_mapping_star.out) - rnaseq_gatk_recalibrate(params.genome, - prepare_genome_samtools.out, - prepare_genome_picard.out, - rnaseq_gatk_splitNcigar.out, - prepare_vcf_file.out) + rnaseq_gatk_recalibrate(params.genome, + prepare_genome_samtools.out, + prepare_genome_picard.out, + rnaseq_gatk_splitNcigar.out, + prepare_vcf_file.out) // New channel to aggregate bam from different replicates into sample level. rnaseq_gatk_recalibrate.out @@ -397,9 +397,9 @@ workflow { | set { recalibrated_samples } rnaseq_call_variants(params.genome, - prepare_genome_samtools.out, - prepare_genome_picard.out, - recalibrated_samples) + prepare_genome_samtools.out, + prepare_genome_picard.out, + recalibrated_samples) post_process_vcf(rnaseq_call_variants.out, prepare_vcf_file.out) @@ -412,7 +412,7 @@ workflow { .set { grouped_vcf_bam_bai_ch } ASE_knownSNPs(params.genome, - prepare_genome_samtools.out, - prepare_genome_picard.out, - grouped_vcf_bam_bai_ch) + prepare_genome_samtools.out, + prepare_genome_picard.out, + grouped_vcf_bam_bai_ch) } diff --git a/hello-nextflow/hello-modules/nextflow.config b/hello-nextflow/hello-modules/nextflow.config index 2efec7cb4..86142d64b 100644 --- a/hello-nextflow/hello-modules/nextflow.config +++ b/hello-nextflow/hello-modules/nextflow.config @@ -7,7 +7,7 @@ docker.fixOwnership = true params { // Primary input (file of input files, one per line) reads_bam = null - + // Output directory params.outdir = "results_genomics" @@ -45,8 +45,8 @@ profiles { // Primary input (file of input files, one per line) params.reads_bam = "data/sample_bams.txt" - // Output directory - params.outdir = "results_genomics" + // Output directory + params.outdir = "results_genomics" // Accessory files params.reference = "data/ref/ref.fasta" diff --git a/hello-nextflow/hello-nf-core/data/sequencer_samplesheet.csv b/hello-nextflow/hello-nf-core/data/sequencer_samplesheet.csv index 3d02bafef..f2c3b7805 100644 --- a/hello-nextflow/hello-nf-core/data/sequencer_samplesheet.csv +++ b/hello-nextflow/hello-nf-core/data/sequencer_samplesheet.csv @@ -2,4 +2,4 @@ sample,sequencer,fastq_1,fastq_2 SAMPLE1_PE,sequencer1,https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/illumina/amplicon/sample1_R1.fastq.gz,https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/illumina/amplicon/sample1_R2.fastq.gz SAMPLE2_PE,sequencer2,https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/illumina/amplicon/sample2_R1.fastq.gz,https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/illumina/amplicon/sample2_R2.fastq.gz SAMPLE3_SE,sequencer3,https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/illumina/amplicon/sample1_R1.fastq.gz, -SAMPLE3_SE,sequencer3,https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/illumina/amplicon/sample2_R1.fastq.gz, \ No newline at end of file +SAMPLE3_SE,sequencer3,https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/illumina/amplicon/sample2_R1.fastq.gz, diff --git a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/conf/test.config b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/conf/test.config index 50ae17eee..474ef3196 100644 --- 
a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/conf/test.config +++ b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/conf/test.config @@ -27,5 +27,5 @@ params { // TODO nf-core: Give any required params for the test so that command line flags are not needed input = params.pipelines_testdata_base_path + 'viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv' - + } diff --git a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/main.nf b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/main.nf index a84e393a6..de383a8bd 100644 --- a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/main.nf +++ b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/main.nf @@ -77,7 +77,7 @@ workflow { params.plaintext_email, params.outdir, params.monochrome_logs, - + MYORG_MYFIRSTPIPELINE.out.multiqc_report ) } diff --git a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/modules/nf-core/multiqc/tests/main.nf.test.snap b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/modules/nf-core/multiqc/tests/main.nf.test.snap index 2fcbb5ff7..261dc0fac 100644 --- a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/modules/nf-core/multiqc/tests/main.nf.test.snap +++ b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/modules/nf-core/multiqc/tests/main.nf.test.snap @@ -38,4 +38,4 @@ }, "timestamp": "2024-10-02T17:52:09.185842" } -} \ No newline at end of file +} diff --git a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/modules/nf-core/seqtk/trim/tests/main.nf.test.snap b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/modules/nf-core/seqtk/trim/tests/main.nf.test.snap index da181dcf6..90da25d2b 100644 --- a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/modules/nf-core/seqtk/trim/tests/main.nf.test.snap +++ b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/modules/nf-core/seqtk/trim/tests/main.nf.test.snap @@ -75,4 +75,4 @@ }, "timestamp": "2024-05-03T06:11:38.487227" } -} \ No newline at end of file +} diff --git a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/local/utils_nfcore_myfirstpipeline_pipeline/main.nf b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/local/utils_nfcore_myfirstpipeline_pipeline/main.nf index 107036dd4..4e4a4ece4 100644 --- a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/local/utils_nfcore_myfirstpipeline_pipeline/main.nf +++ b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/local/utils_nfcore_myfirstpipeline_pipeline/main.nf @@ -105,7 +105,7 @@ workflow PIPELINE_COMPLETION { plaintext_email // boolean: Send plain-text email instead of HTML outdir // path: Path to output directory where results will be published monochrome_logs // boolean: Disable ANSI colour codes in log output - + multiqc_report // string: Path to MultiQC report main: @@ -164,7 +164,7 @@ def toolCitationText() { // Uncomment function in methodsDescriptionText to render in MultiQC report def citation_text = [ "Tools used in the workflow included:", - + "MultiQC (Ewels et al. 2016)", "." ].join(' ').trim() @@ -177,7 +177,7 @@ def toolBibliographyText() { // Can use ternary operators to dynamically construct based conditions, e.g. params["run_xyz"] ? "
  • Author (2023) Pub name, Journal, DOI
  • " : "", // Uncomment function in methodsDescriptionText to render in MultiQC report def reference_text = [ - + "
  • Ewels, P., Magnusson, M., Lundin, S., & Käller, M. (2016). MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics , 32(19), 3047–3048. doi: /10.1093/bioinformatics/btw354
  • " ].join(' ').trim() @@ -220,4 +220,3 @@ def methodsDescriptionText(mqc_methods_yaml) { return description_html.toString() } - diff --git a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test.snap b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test.snap index e3f0baf47..846287c41 100644 --- a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test.snap +++ b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nextflow_pipeline/tests/main.function.nf.test.snap @@ -17,4 +17,4 @@ }, "timestamp": "2024-02-28T12:02:12.425833" } -} \ No newline at end of file +} diff --git a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test.snap b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test.snap index 1037232c9..facbdcfdc 100644 --- a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test.snap +++ b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.function.nf.test.snap @@ -163,4 +163,4 @@ }, "timestamp": "2024-02-28T12:03:21.714424" } -} \ No newline at end of file +} diff --git a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test.snap b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test.snap index 859d1030f..84ee1e1d1 100644 --- a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test.snap +++ b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfcore_pipeline/tests/main.workflow.nf.test.snap @@ -16,4 +16,4 @@ }, "timestamp": "2024-02-28T12:03:25.726491" } -} \ No newline at end of file +} diff --git a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfschema_plugin/main.nf b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfschema_plugin/main.nf index 4994303ea..93de2a524 100644 --- a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfschema_plugin/main.nf +++ b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfschema_plugin/main.nf @@ -43,4 +43,3 @@ workflow UTILS_NFSCHEMA_PLUGIN { emit: dummy_emit = true } - diff --git a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow.config b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow.config index 0907ac58f..478fb8a05 100644 --- a/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow.config +++ b/hello-nextflow/hello-nf-core/solution/myorg-myfirstpipeline/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow.config @@ -5,4 +5,4 @@ plugins { validation { parametersSchema = "${projectDir}/subworkflows/nf-core/utils_nfschema_plugin/tests/nextflow_schema.json" 
monochromeLogs = true -} \ No newline at end of file +} diff --git a/hello-nextflow/hello-nf-test/main.nf b/hello-nextflow/hello-nf-test/main.nf index 8d380487e..3e55a0d15 100644 --- a/hello-nextflow/hello-nf-test/main.nf +++ b/hello-nextflow/hello-nf-test/main.nf @@ -27,7 +27,7 @@ workflow { ref_dict_file, intervals_file ) - + // Collect variant calling outputs across samples all_gvcfs_ch = GATK_HAPLOTYPECALLER.out.vcf.collect() all_idxs_ch = GATK_HAPLOTYPECALLER.out.idx.collect() diff --git a/hello-nextflow/hello-nf-test/nextflow.config b/hello-nextflow/hello-nf-test/nextflow.config index 2efec7cb4..86142d64b 100644 --- a/hello-nextflow/hello-nf-test/nextflow.config +++ b/hello-nextflow/hello-nf-test/nextflow.config @@ -7,7 +7,7 @@ docker.fixOwnership = true params { // Primary input (file of input files, one per line) reads_bam = null - + // Output directory params.outdir = "results_genomics" @@ -45,8 +45,8 @@ profiles { // Primary input (file of input files, one per line) params.reads_bam = "data/sample_bams.txt" - // Output directory - params.outdir = "results_genomics" + // Output directory + params.outdir = "results_genomics" // Accessory files params.reference = "data/ref/ref.fasta" diff --git a/hello-nextflow/solutions/hello-config/final-main.nf b/hello-nextflow/solutions/hello-config/final-main.nf index 0f46c1dac..316b0c3a8 100644 --- a/hello-nextflow/solutions/hello-config/final-main.nf +++ b/hello-nextflow/solutions/hello-config/final-main.nf @@ -15,7 +15,7 @@ process SAMTOOLS_INDEX { output: tuple path(input_bam), path("${input_bam}.bai") - + script: """ samtools index '$input_bam' diff --git a/hello-nextflow/solutions/hello-genomics/hello-genomics-2.nf b/hello-nextflow/solutions/hello-genomics/hello-genomics-2.nf index 459227759..bb414c783 100644 --- a/hello-nextflow/solutions/hello-genomics/hello-genomics-2.nf +++ b/hello-nextflow/solutions/hello-genomics/hello-genomics-2.nf @@ -53,7 +53,7 @@ process GATK_HAPLOTYPECALLER { output: path "${input_bam}.vcf" , emit: vcf - path "${input_bam}.vcf.idx" , emit: idx + path "${input_bam}.vcf.idx" , emit: idx script: """ diff --git a/hello-nextflow/solutions/hello-genomics/hello-genomics-3.nf b/hello-nextflow/solutions/hello-genomics/hello-genomics-3.nf index 43c64f5e4..3b38f252e 100644 --- a/hello-nextflow/solutions/hello-genomics/hello-genomics-3.nf +++ b/hello-nextflow/solutions/hello-genomics/hello-genomics-3.nf @@ -56,7 +56,7 @@ process GATK_HAPLOTYPECALLER { output: path "${input_bam}.vcf" , emit: vcf - path "${input_bam}.vcf.idx" , emit: idx + path "${input_bam}.vcf.idx" , emit: idx script: """ diff --git a/hello-nextflow/solutions/hello-genomics/hello-genomics-4.nf b/hello-nextflow/solutions/hello-genomics/hello-genomics-4.nf index e613d4688..bb3b821a3 100644 --- a/hello-nextflow/solutions/hello-genomics/hello-genomics-4.nf +++ b/hello-nextflow/solutions/hello-genomics/hello-genomics-4.nf @@ -52,7 +52,7 @@ process GATK_HAPLOTYPECALLER { output: path "${input_bam}.vcf" , emit: vcf - path "${input_bam}.vcf.idx" , emit: idx + path "${input_bam}.vcf.idx" , emit: idx script: """ diff --git a/hello-nextflow/solutions/hello-nf-test/modules/local/gatk/haplotypecaller/main.nf b/hello-nextflow/solutions/hello-nf-test/modules/local/gatk/haplotypecaller/main.nf index c2e1a5633..63c615daa 100644 --- a/hello-nextflow/solutions/hello-nf-test/modules/local/gatk/haplotypecaller/main.nf +++ b/hello-nextflow/solutions/hello-nf-test/modules/local/gatk/haplotypecaller/main.nf @@ -19,7 +19,7 @@ process GATK_HAPLOTYPECALLER { output: path 
"${input_bam}.g.vcf" , emit: vcf - path "${input_bam}.g.vcf.idx" , emit: idx + path "${input_bam}.g.vcf.idx" , emit: idx script: """ diff --git a/hello-nextflow/solutions/hello-operators/hello-operators-1.nf b/hello-nextflow/solutions/hello-operators/hello-operators-1.nf index d6ec48c6d..a21486b1d 100644 --- a/hello-nextflow/solutions/hello-operators/hello-operators-1.nf +++ b/hello-nextflow/solutions/hello-operators/hello-operators-1.nf @@ -52,7 +52,7 @@ process GATK_HAPLOTYPECALLER { output: path "${input_bam}.g.vcf" , emit: vcf - path "${input_bam}.g.vcf.idx" , emit: idx + path "${input_bam}.g.vcf.idx" , emit: idx script: """ diff --git a/hello-nextflow/solutions/hello-operators/hello-operators-2.nf b/hello-nextflow/solutions/hello-operators/hello-operators-2.nf index a1dc8dfe9..5ed7c738e 100644 --- a/hello-nextflow/solutions/hello-operators/hello-operators-2.nf +++ b/hello-nextflow/solutions/hello-operators/hello-operators-2.nf @@ -55,7 +55,7 @@ process GATK_HAPLOTYPECALLER { output: path "${input_bam}.g.vcf" , emit: vcf - path "${input_bam}.g.vcf.idx" , emit: idx + path "${input_bam}.g.vcf.idx" , emit: idx script: """ diff --git a/hello-nextflow/solutions/hello-operators/hello-operators-3.nf b/hello-nextflow/solutions/hello-operators/hello-operators-3.nf index e3aaf6a80..1d7439107 100644 --- a/hello-nextflow/solutions/hello-operators/hello-operators-3.nf +++ b/hello-nextflow/solutions/hello-operators/hello-operators-3.nf @@ -55,7 +55,7 @@ process GATK_HAPLOTYPECALLER { output: path "${input_bam}.g.vcf" , emit: vcf - path "${input_bam}.g.vcf.idx" , emit: idx + path "${input_bam}.g.vcf.idx" , emit: idx script: """ diff --git a/mkdocs.yml b/mkdocs.yml index ad3ff7520..6e981c7ce 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -4,211 +4,211 @@ repo_url: https://github.com/nextflow-io/training repo_name: nextflow-io/training nav: - - Home: index.md - - Environment setup: - - envsetup/index.md - - envsetup/01_setup.md - - envsetup/02_local.md - - Hello Nextflow: - - hello_nextflow/index.md - - hello_nextflow/01_orientation.md - - hello_nextflow/02_hello_world.md - - hello_nextflow/03_hello_containers.md - - hello_nextflow/04_hello_genomics.md - - hello_nextflow/05_hello_operators.md - - hello_nextflow/06_hello_config.md - - hello_nextflow/07_hello_modules.md - - hello_nextflow/08_hello_nf-test.md - - hello_nextflow/09_hello_nf-core.md - - hello_nextflow/10_hello_seqera.md - - Fundamentals Training: - - basic_training/index.md - - basic_training/orientation.md - - basic_training/intro.md - - basic_training/rnaseq_pipeline.md - - basic_training/containers.md - - basic_training/channels.md - - basic_training/processes.md - - basic_training/operators.md - - basic_training/groovy.md - - basic_training/modules.md - - basic_training/config.md - - basic_training/executors.md - - basic_training/seqera_platform.md - - basic_training/cache_and_resume.md - - basic_training/debugging.md - - Advanced Training: - - advanced/index.md - - advanced/orientation.md - - advanced/operators.md - - advanced/metadata.md - - advanced/grouping.md - - advanced/groovy.md - - advanced/structure.md - - advanced/configuration.md - - advanced/summary.md - - advanced/support.md - - Configure nf-core: - - nf_customize/index.md - - nf_customize/01_orientation.md - - nf_customize/02_nf-core.md - - nf_customize/03_execution.md - - nf_customize/04_config.md - - nf_customize/05_tools.md - - Develop nf-core: - - nf_develop/index.md - - nf_develop/1_01_orientation.md - - nf_develop/1_02_create.md - - nf_develop/1_03_pipeline.md - - 
nf_develop/1_04_parameters.md - - Troubleshooting: - - troubleshoot/index.md - - troubleshoot/01_orientation.md - - troubleshoot/01_exercise.md - - troubleshoot/02_exercise.md - - troubleshoot/03_exercise.md - - troubleshoot/04_exercise.md - - troubleshoot/05_exercise.md - - troubleshoot/06_exercise.md + - Home: index.md + - Environment setup: + - envsetup/index.md + - envsetup/01_setup.md + - envsetup/02_local.md + - Hello Nextflow: + - hello_nextflow/index.md + - hello_nextflow/01_orientation.md + - hello_nextflow/02_hello_world.md + - hello_nextflow/03_hello_containers.md + - hello_nextflow/04_hello_genomics.md + - hello_nextflow/05_hello_operators.md + - hello_nextflow/06_hello_config.md + - hello_nextflow/07_hello_modules.md + - hello_nextflow/08_hello_nf-test.md + - hello_nextflow/09_hello_nf-core.md + - hello_nextflow/10_hello_seqera.md + - Fundamentals Training: + - basic_training/index.md + - basic_training/orientation.md + - basic_training/intro.md + - basic_training/rnaseq_pipeline.md + - basic_training/containers.md + - basic_training/channels.md + - basic_training/processes.md + - basic_training/operators.md + - basic_training/groovy.md + - basic_training/modules.md + - basic_training/config.md + - basic_training/executors.md + - basic_training/seqera_platform.md + - basic_training/cache_and_resume.md + - basic_training/debugging.md + - Advanced Training: + - advanced/index.md + - advanced/orientation.md + - advanced/operators.md + - advanced/metadata.md + - advanced/grouping.md + - advanced/groovy.md + - advanced/structure.md + - advanced/configuration.md + - advanced/summary.md + - advanced/support.md + - Configure nf-core: + - nf_customize/index.md + - nf_customize/01_orientation.md + - nf_customize/02_nf-core.md + - nf_customize/03_execution.md + - nf_customize/04_config.md + - nf_customize/05_tools.md + - Develop nf-core: + - nf_develop/index.md + - nf_develop/1_01_orientation.md + - nf_develop/1_02_create.md + - nf_develop/1_03_pipeline.md + - nf_develop/1_04_parameters.md + - Troubleshooting: + - troubleshoot/index.md + - troubleshoot/01_orientation.md + - troubleshoot/01_exercise.md + - troubleshoot/02_exercise.md + - troubleshoot/03_exercise.md + - troubleshoot/04_exercise.md + - troubleshoot/05_exercise.md + - troubleshoot/06_exercise.md theme: - name: material - custom_dir: docs/assets/overrides - language: en - logo: assets/img/nextflow-logo-white.png - favicon: assets/img/nextflow-icon.png - palette: - # Palette toggle for dark mode - - media: "(prefers-color-scheme: dark)" - scheme: slate - toggle: - icon: material/weather-night - name: Switch to light mode - # Palette toggle for light mode - - media: "(prefers-color-scheme: light)" - scheme: default - toggle: - icon: material/weather-sunny - name: Switch to dark mode - font: - text: Inter - code: Roboto Mono - features: - - content.action.edit - - content.code.annotate - - content.code.copy - - navigation.footer - - navigation.tabs - - navigation.top - - navigation.tracking - - search.share - - toc.follow - icon: - repo: fontawesome/brands/github + name: material + custom_dir: docs/assets/overrides + language: en + logo: assets/img/nextflow-logo-white.png + favicon: assets/img/nextflow-icon.png + palette: + # Palette toggle for dark mode + - media: "(prefers-color-scheme: dark)" + scheme: slate + toggle: + icon: material/weather-night + name: Switch to light mode + # Palette toggle for light mode + - media: "(prefers-color-scheme: light)" + scheme: default + toggle: + icon: material/weather-sunny + name: Switch 
to dark mode + font: + text: Inter + code: Roboto Mono + features: + - content.action.edit + - content.code.annotate + - content.code.copy + - navigation.footer + - navigation.tabs + - navigation.top + - navigation.tracking + - search.share + - toc.follow + icon: + repo: fontawesome/brands/github extra_css: - - assets/stylesheets/fonts.css - - assets/stylesheets/extra.css + - assets/stylesheets/fonts.css + - assets/stylesheets/extra.css # Set in docs/assets/overrides/partials/copyright.html # so that we can have links and stuff # copyright: Seqera extra: - # Announcement banner for upcoming training - announcement: - active: false - date_text: March 5-6, 2024 - register_url: https://nf-co.re/events/2024/training-foundational-march - # Analytics - analytics: - provider: google - property: G-244N3GEN75 - consent: - title: Cookie consent - description: >- - We use cookies to recognize your repeated visits and preferences, as well - as to measure the effectiveness of our documentation and whether users - find what they're searching for. With your consent, you're helping us to - make our documentation better. - # Overridden in docs/assets/overrides/partials/social.html - # Still needs to be set to something here so that the partial is used - social: - - icon: fontawesome/brands/twitter - link: https://twitter.com/nextflowio - name: Nextflow on twitter - version: - provider: mike - alias: true + # Announcement banner for upcoming training + announcement: + active: false + date_text: March 5-6, 2024 + register_url: https://nf-co.re/events/2024/training-foundational-march + # Analytics + analytics: + provider: google + property: G-244N3GEN75 + consent: + title: Cookie consent + description: >- + We use cookies to recognize your repeated visits and preferences, as well + as to measure the effectiveness of our documentation and whether users + find what they're searching for. With your consent, you're helping us to + make our documentation better. 
+ # Overridden in docs/assets/overrides/partials/social.html + # Still needs to be set to something here so that the partial is used + social: + - icon: fontawesome/brands/twitter + link: https://twitter.com/nextflowio + name: Nextflow on twitter + version: + provider: mike + alias: true markdown_extensions: - - admonition - - attr_list - - md_in_html - - pymdownx.details - - pymdownx.emoji: - emoji_index: !!python/name:material.extensions.emoji.twemoji - emoji_generator: !!python/name:material.extensions.emoji.to_svg - - pymdownx.highlight: - anchor_linenums: true - - pymdownx.inlinehilite - - pymdownx.keys - - pymdownx.snippets: - base_path: ["."] - - pymdownx.snippets - - pymdownx.superfences: - preserve_tabs: true - - pymdownx.tabbed: - alternate_style: true - - pymdownx.tasklist: - custom_checkbox: true - clickable_checkbox: true - - tables - - toc: - title: On this page - permalink: true + - admonition + - attr_list + - md_in_html + - pymdownx.details + - pymdownx.emoji: + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.inlinehilite + - pymdownx.keys + - pymdownx.snippets: + base_path: ["."] + - pymdownx.snippets + - pymdownx.superfences: + preserve_tabs: true + - pymdownx.tabbed: + alternate_style: true + - pymdownx.tasklist: + custom_checkbox: true + clickable_checkbox: true + - tables + - toc: + title: On this page + permalink: true plugins: - - enumerate-headings: - restart_increment_after: - - envsetup/01_setup.md - - hello_nextflow/01_orientation.md - - basic_training/orientation.md - - advanced/orientation.md - - nf_customize/01_orientation.md - - nf_develop/1_01_orientation.md - - troubleshoot/01_orientation.md + - enumerate-headings: + restart_increment_after: + - envsetup/01_setup.md + - hello_nextflow/01_orientation.md + - basic_training/orientation.md + - advanced/orientation.md + - nf_customize/01_orientation.md + - nf_develop/1_01_orientation.md + - troubleshoot/01_orientation.md - exclude: - - index.md - - help.md - - envsetup/index.md - - basic_training/index.md - - advanced/index.md - - hello_nextflow/index.md - - nf_customize/index.md - - nf_develop/index.md - - troubleshoot/index.md - - hands_on/index.md - - hands_on/solutions/*md - - hello_nextflow/*.md - - i18n: - docs_structure: suffix - fallback_to_default: true - languages: - - build: true - default: true - locale: en - name: English - - build: true - default: false - locale: pt - name: Português - - build: true - default: false - locale: es - name: Español - - build: true - default: false - locale: fr - name: Français - reconfigure_material: true - reconfigure_search: true - - search + exclude: + - index.md + - help.md + - envsetup/index.md + - basic_training/index.md + - advanced/index.md + - hello_nextflow/index.md + - nf_customize/index.md + - nf_develop/index.md + - troubleshoot/index.md + - hands_on/index.md + - hands_on/solutions/*md + - hello_nextflow/*.md + - i18n: + docs_structure: suffix + fallback_to_default: true + languages: + - build: true + default: true + locale: en + name: English + - build: true + default: false + locale: pt + name: Português + - build: true + default: false + locale: es + name: Español + - build: true + default: false + locale: fr + name: Français + reconfigure_material: true + reconfigure_search: true + - search diff --git a/nf-training/data/index/params.yml b/nf-training/data/index/params.yml index 8ef073fa9..458b31fa0 100644 --- 
a/nf-training/data/index/params.yml +++ b/nf-training/data/index/params.yml @@ -1,11 +1,11 @@ --- samples: - - - gut - - /data/ggal/gut_1.fq - - /data/ggal/gut_2.fq - - - lung - - /data/ggal/lung_1.fq - - /data/ggal/lung_2.fq - - - liver - - /data/ggal/liver_1.fq - - /data/ggal/liver_2.fq + - - gut + - /data/ggal/gut_1.fq + - /data/ggal/gut_2.fq + - - lung + - /data/ggal/lung_1.fq + - /data/ggal/lung_2.fq + - - liver + - /data/ggal/liver_1.fq + - /data/ggal/liver_2.fq diff --git a/nf-training/env.yml b/nf-training/env.yml index 7e8b003d5..dc1f0e977 100644 --- a/nf-training/env.yml +++ b/nf-training/env.yml @@ -1,10 +1,10 @@ name: nf-tutorial channels: - - conda-forge - - defaults - - bioconda + - conda-forge + - defaults + - bioconda dependencies: - - bioconda::salmon=1.5.1 - - bioconda::fastqc=0.11.9 - - bioconda::multiqc=1.12 - - conda-forge::tbb=2020.2 + - bioconda::salmon=1.5.1 + - bioconda::fastqc=0.11.9 + - bioconda::multiqc=1.12 + - conda-forge::tbb=2020.2
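The `params.yml` and `env.yml` files reformatted in the last two hunks are consumed at launch time. As a hedged sketch (the `script.nf` name below is a placeholder, not a file from this repository), a run that reads the sample list from the YAML parameter file and builds its tool environment from `env.yml` might be started like this:

```bash
# Illustrative only: -params-file loads the YAML parameters shown above,
# and -with-conda builds the environment defined in env.yml for each task.
nextflow run script.nf -params-file data/index/params.yml -with-conda env.yml
```

Alternatively, the environment can be referenced from the script itself with a `conda 'env.yml'` process directive, leaving only `-params-file` on the command line.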