
Merge branch 'main' into liquidjs

This commit is contained in:
Courtney Wilson
2022-09-01 22:06:30 -05:00
committed by GitHub
1223 changed files with 52520 additions and 10871 deletions

View File

@@ -53,6 +53,14 @@ jobs:
# Don't care about CDN caching image URLs
DISABLE_REWRITE_ASSET_URLS: true
run: |
# Note as of Aug 2022, we *don't* check external links
# on the pages you touched in the PR. We could enable that
# but it has the added risk of false positives blocking CI.
# We are using this script for the daily/nightly checker that
# checks external links too. Once we're confident it really works
# well, we can consider enabling it here on every content PR too.
./script/rendered-content-link-checker.js \
--language en \
--max 100 \
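The comment above flags the false-positive risk of external checks: to an HTTP client, a transient network failure and a permanently dead link look the same. As a toy sketch of the idea behind such a checker (this is not the actual `script/rendered-content-link-checker.js`, and the URLs are placeholders):

```js
// Toy external-link checker sketch. Assumes Node 18+ (global fetch), run as an ES module.
const urls = ['https://example.com/', 'https://example.org/missing']

async function checkLink(url) {
  try {
    const res = await fetch(url, { method: 'HEAD', redirect: 'follow' })
    return { url, ok: res.ok, status: res.status }
  } catch (err) {
    // Transient DNS/network errors land here and are indistinguishable
    // from permanently broken links, hence the CI false-positive risk.
    return { url, ok: false, status: null, error: err.message }
  }
}

const results = await Promise.all(urls.map(checkLink))
for (const r of results.filter((r) => !r.ok)) {
  console.error(`possibly broken: ${r.url} (${r.status ?? r.error})`)
}
```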

14 binary image files added (screenshots, 6.3 KiB to 232 KiB); previews not shown.

View File

@@ -81,7 +81,8 @@ export function getCssTheme(cookieValue = ''): CssColorTheme {
darkTheme: filterTheme(dark_theme) || defaultCSSTheme.darkTheme,
}
} catch (err) {
console.warn("Unable to parse 'color_mode' cookie", err)
if (process.env.NODE_ENV === 'development')
console.warn("Unable to parse 'color_mode' cookie", err)
return defaultCSSTheme
}
}
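In context, the guarded function follows a common pattern: tolerate malformed cookies, but only log about them in development so production logs stay quiet. A minimal self-contained sketch (the parse step, `filterTheme`, and `defaultCSSTheme` are simplified stand-ins, not the module's real implementations):

```js
// Stand-ins for the module's real helpers, for illustration only.
const defaultCSSTheme = { colorMode: 'auto', lightTheme: 'light', darkTheme: 'dark' }
const filterTheme = (name) => (['light', 'dark'].includes(name) ? name : undefined)

export function getCssTheme(cookieValue = '') {
  try {
    const { color_mode, light_theme, dark_theme } = JSON.parse(cookieValue) // parse step assumed
    return {
      colorMode: color_mode || defaultCSSTheme.colorMode,
      lightTheme: filterTheme(light_theme) || defaultCSSTheme.lightTheme,
      darkTheme: filterTheme(dark_theme) || defaultCSSTheme.darkTheme,
    }
  } catch (err) {
    // Malformed cookies are routine in production; only warn locally.
    if (process.env.NODE_ENV === 'development')
      console.warn("Unable to parse 'color_mode' cookie", err)
    return defaultCSSTheme
  }
}
```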

View File

@@ -45,7 +45,7 @@ export function getShellExample(operation: Operation, codeSample: CodeSample) {
const args = [
operation.verb !== 'get' && `-X ${operation.verb.toUpperCase()}`,
`-H "Accept: ${defaultAcceptHeader}" \\ \n -H "Authorization: token <TOKEN>"`,
`-H "Accept: ${defaultAcceptHeader}" \\ \n -H "Authorization: Bearer <YOUR-TOKEN>"`,
`${operation.serverUrl}${requestPath}`,
requestBodyParams,
].filter(Boolean)
@@ -86,12 +86,12 @@ export function getGHExample(operation: Operation, codeSample: CodeSample) {
requestBodyParams = Object.keys(codeSample.request.bodyParameters)
.map((key) => {
if (typeof codeSample.request.bodyParameters[key] === 'string') {
return `-f ${key}='${codeSample.request.bodyParameters[key]}'\n`
return `-f ${key}='${codeSample.request.bodyParameters[key]}' `
} else {
return `-F ${key}=${codeSample.request.bodyParameters[key]}\n`
return `-F ${key}=${codeSample.request.bodyParameters[key]} `
}
})
.join(' ')
.join('\\\n ')
}
const args = [
operation.verb !== 'get' && `--method ${operation.verb.toUpperCase()}`,
@@ -141,11 +141,7 @@ export function getJSExample(operation: Operation, codeSample: CodeSample) {
}
}
const comment = `// Octokit.js\n// https://github.com/octokit/core.js#readme\n`
const require = `const octokit = new Octokit(${stringify(
{ auth: 'personal-access-token123' },
null,
2
)})\n\n`
const require = `const octokit = new Octokit(${stringify({ auth: 'YOUR-TOKEN' }, null, 2)})\n\n`
return `${comment}${require}await octokit.request('${operation.verb.toUpperCase()} ${
operation.requestPath
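The net effect of the `gh` changes is easiest to see from the generated output: each body parameter now ends with a space, and the list is joined with a backslash plus newline, yielding one flag per line. A standalone sketch with hypothetical `bodyParameters`:

```js
// Reproduces the .map/.join change above with hypothetical body parameters.
const bodyParameters = { name: 'octocat-repo', private: false }

const requestBodyParams = Object.keys(bodyParameters)
  .map((key) =>
    typeof bodyParameters[key] === 'string'
      ? `-f ${key}='${bodyParameters[key]}' `
      : `-F ${key}=${bodyParameters[key]} `
  )
  .join('\\\n ')

console.log(requestBodyParams)
// -f name='octocat-repo' \
//  -F private=false
```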

View File

@@ -53,18 +53,26 @@ const article: PlaygroundArticleT = {
To set up your repository to use a custom dev container, you will need to create one or more \`devcontainer.json\` files. You can add these either from a template, in Visual Studio Code, or you can write your own. For more information on dev container configurations, see "[Introduction to dev containers](/codespaces/setting-up-your-codespace/configuring-codespaces-for-your-project)".
1. Access the Command Palette (<kbd>Shift</kbd> + <kbd>Command</kbd> + <kbd>P</kbd> / <kbd>Ctrl</kbd> + <kbd>Shift</kbd> + <kbd>P</kbd>), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
![Codespaces: Add Development Container Configuration Files... in the command palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
1. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
![Codespaces: Add Development Container Configuration Files... in the Command Palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
2. For this example, click **C# (.NET)**. If you need additional features, you can select any container that's specific to C# (.NET) or a combination of tools such as C# (.NET) and MS SQL.
![Select C# (.NET) option from the list](/assets/images/help/codespaces/add-dotnet-prebuilt-container.png)
![Select C# (.NET) option from the list](/assets/images/help/codespaces/add-dotnet-prebuilt-container.png)
3. Click the recommended version of .NET.
![.NET version selection](/assets/images/help/codespaces/add-dotnet-version.png)
![.NET version selection](/assets/images/help/codespaces/add-dotnet-version.png)
4. Accept the default option to add Node.js to your customization.
![Add Node.js selection](/assets/images/help/codespaces/dotnet-options.png)
![Add Node.js selection](/assets/images/help/codespaces/dotnet-options.png)
5. Select any additional features to install and click **OK**.
6. Access the command palette (<kbd>Shift</kbd> + <kbd>Command</kbd> + <kbd>P</kbd> / <kbd>Ctrl</kbd> + <kbd>Shift</kbd> + <kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
6. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
`,
},
{
@@ -197,13 +205,16 @@ const article: PlaygroundArticleT = {
"postCreateCommand": "dotnet restore",
\`\`\`
4. Access the command palette (\`Shift + Command + P\`/ \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
4. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
5. Check your changes were successfully applied by verifying the "Code Spell Checker" extension was installed.
![Extensions list](/assets/images/help/codespaces/dotnet-extensions.png)
![Extensions list](/assets/images/help/codespaces/dotnet-extensions.png)
`,
},
{
@@ -217,9 +228,10 @@ const article: PlaygroundArticleT = {
1. Run your application by pressing \`F5\` or entering \`dotnet watch run\` in your terminal.
2. When your project starts, you should see a message in the bottom right corner with a prompt to connect to the port your project uses.
2. When your project starts, you should see a message in the bottom right corner with a prompt to connect to the port your project uses.
![Port forwarding toast](/assets/images/help/codespaces/python-port-forwarding.png)
![Port forwarding toast](/assets/images/help/codespaces/python-port-forwarding.png)
`,
},
{

View File

@@ -51,15 +51,23 @@ const article: PlaygroundArticleT = {
To set up your repository to use a custom dev container, you will need to create one or more \`devcontainer.json\` files. You can add these either from a template, in Visual Studio Code, or you can write your own. For more information on dev container configurations, see "[Introduction to dev containers](/codespaces/setting-up-your-codespace/configuring-codespaces-for-your-project)".
1. Access the Command Palette (\`Shift + Command + P\` / \`Ctrl + Shift + P\`), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the command palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
1. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the Command Palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
2. For this example, click **Java**. In practice, you could select any container that's specific to Java or a combination of tools such as Java and Azure Functions.
![Select Java option from the list](/assets/images/help/codespaces/add-java-prebuilt-container.png)
![Select Java option from the list](/assets/images/help/codespaces/add-java-prebuilt-container.png)
3. Click the recommended version of Java.
![Java version selection](/assets/images/help/codespaces/add-java-version.png)
![Java version selection](/assets/images/help/codespaces/add-java-version.png)
4. Select any additional features to install and click **OK**.
5. Access the command palette (\`Shift + Command + P\`/ \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
5. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
`,
},
{
@@ -185,9 +193,11 @@ const article: PlaygroundArticleT = {
For more information about \`devcontainer.json\` properties, see the Visual Studio Code documentation: "[devcontainer.json reference](https://code.visualstudio.com/docs/remote/devcontainerjson-reference)."
4. Access the command palette (\`Shift + Command + P\`/ \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
4. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
`,
},
{

View File

@@ -29,7 +29,7 @@ const article: PlaygroundArticleT = {
content: dedent`
1. Under the repository name, use the **Code** drop-down menu, and in the **Codespaces** tab, click **Create codespace on BRANCH**.
![New codespace button](/assets/images/help/codespaces/new-codespace-button.png)
![New codespace button](/assets/images/help/codespaces/new-codespace-button.png)
If you don't see this option, GitHub Codespaces isn't available for your project. See [Access to GitHub Codespaces](/codespaces/developing-in-codespaces/creating-a-codespace#access-to-codespaces) for more information.
@@ -51,15 +51,23 @@ const article: PlaygroundArticleT = {
To set up your repository to use a custom dev container, you will need to create one or more \`devcontainer.json\` files. You can add these either from a template, in Visual Studio Code, or you can write your own. For more information on dev container configurations, see "[Introduction to dev containers](/codespaces/setting-up-your-codespace/configuring-codespaces-for-your-project)".
1. Access the Command Palette (\`Shift + Command + P\` / \`Ctrl + Shift + P\`), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the command palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
1. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the Command Palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
2. For this example, click **Node.js**. If you need additional features, you can select any container that's specific to Node or a combination of tools such as Node and MongoDB.
![Select Node option from the list](/assets/images/help/codespaces/add-node-prebuilt-container.png)
![Select Node option from the list](/assets/images/help/codespaces/add-node-prebuilt-container.png)
3. Click the recommended version of Node.js.
![Node.js version selection](/assets/images/help/codespaces/add-node-version.png)
![Node.js version selection](/assets/images/help/codespaces/add-node-version.png)
4. Select any additional features to install and click **OK**.
5. Access the command palette (\`Shift + Command + P\`/ \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
5. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
`,
},
{
@@ -173,7 +181,9 @@ const article: PlaygroundArticleT = {
With your dev container configuration added and a basic understanding of what everything does, you can now make changes to customize your environment further. In this example, you'll add properties to install npm when your codespace launches and make a list of ports inside the container available locally.
1. In the Explorer, select the \`devcontainer.json\` file from the tree to open it. You might have to expand the \`.devcontainer\` folder to see it.
![devcontainer.json file in the Explorer](/assets/images/help/codespaces/devcontainers-options.png)
![devcontainer.json file in the Explorer](/assets/images/help/codespaces/devcontainers-options.png)
2. Add the following lines to your \`devcontainer.json\` file after \`extensions\`:
\`\`\`js{:copy}
@@ -182,10 +192,11 @@ const article: PlaygroundArticleT = {
\`\`\`
For more information about \`devcontainer.json\` properties, see the Visual Studio Code documentation: "[devcontainer.json reference](https://code.visualstudio.com/docs/remote/devcontainerjson-reference)."
1. Access the command palette (\`Shift + Command + P\`/ \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
3. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
Rebuilding inside your codespace ensures your changes work as expected before you commit the changes to the repository. If something does result in a failure, you'll be placed in a codespace with a recovery container that you can rebuild from to keep adjusting your container.
`,
},
{

View File

@@ -29,9 +29,9 @@ const article: PlaygroundArticleT = {
content: dedent`
1. Under the repository name, use the **Code** drop-down menu, and in the **Codespaces** tab, click **Create codespace on BRANCH**.
![New codespace button](/assets/images/help/codespaces/new-codespace-button.png)
![New codespace button](/assets/images/help/codespaces/new-codespace-button.png)
If you don't see this option, GitHub Codespaces isn't available for your project. See [Access to GitHub Codespaces](/codespaces/developing-in-codespaces/creating-a-codespace#access-to-codespaces) for more information.
If you don't see this option, GitHub Codespaces isn't available for your project. See [Access to GitHub Codespaces](/codespaces/developing-in-codespaces/creating-a-codespace#access-to-codespaces) for more information.
When you create a codespace, your project is created on a remote VM that is dedicated to you. By default, the container for your codespace has many languages and runtimes including Python, pip, and Miniconda. It also includes a common set of tools like git, wget, rsync, openssh, and nano.
@@ -53,17 +53,27 @@ const article: PlaygroundArticleT = {
To set up your repository to use a custom dev container, you will need to create one or more \`devcontainer.json\` files. You can add these either from a template, in Visual Studio Code, or you can write your own. For more information on dev container configurations, see "[Introduction to dev containers](/codespaces/setting-up-your-codespace/configuring-codespaces-for-your-project)".
1. Access the command palette (\`Shift + Command + P\` / \`Ctrl + Shift + P\`), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the command palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
1. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "dev container". Select **Codespaces: Add Development Container Configuration Files...**.
!["Codespaces: Add Development Container Configuration Files..." in the Command Palette](/assets/images/help/codespaces/add-prebuilt-container-command.png)
2. For this example, click **Python 3**. If you need additional features, you can select any container that's specific to Python or a combination of tools such as Python 3 and PostgreSQL.
![Select Python option from the list](/assets/images/help/codespaces/add-python-prebuilt-container.png)
![Select Python option from the list](/assets/images/help/codespaces/add-python-prebuilt-container.png)
3. Click the recommended version of Python.
![Python version selection](/assets/images/help/codespaces/add-python-version.png)
![Python version selection](/assets/images/help/codespaces/add-python-version.png)
4. Accept the default option to add Node.js to your customization.
![Add Node.js selection](/assets/images/help/codespaces/add-nodejs-selection.png)
![Add Node.js selection](/assets/images/help/codespaces/add-nodejs-selection.png)
5. Select any additional features to install and click **OK**.
6. Access the command palette (\`Shift + Command + P\` / \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
6. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)
`,
},
{
@@ -197,7 +207,7 @@ const article: PlaygroundArticleT = {
"postCreateCommand": "pip3 install --user -r requirements.txt",
\`\`\`
4. Access the command palette (\`Shift + Command + P\` / \`Ctrl + Shift + P\`), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
4. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd>), then start typing "rebuild". Select **Codespaces: Rebuild Container**.
![Rebuild container option](/assets/images/help/codespaces/codespaces-rebuild.png)

View File

@@ -13,6 +13,6 @@
table-layout: fixed !important;
}
.codeBlock code {
.codeBlock code:not(td *) {
word-break: break-all;
}

View File

@@ -1,4 +1,3 @@
import React from 'react'
import { Heading, NavList } from '@primer/react'
import cx from 'classnames'

View File

@@ -227,7 +227,7 @@ defaultPlatform: linux
### `defaultTool`
- Purpose: Override the initial tool selection for a page, where tool refers to the application the reader is using to work with GitHub (such as GitHub.com's web UI, the GitHub CLI, or GitHub Desktop) or the GitHub APIs (such as cURL or the GitHub CLI). For more information about the tool selector, see [Markup reference for GitHub Docs](../contributing/content-markup-reference.md#tool-tags). If this frontmatter is omitted, then the tool-specific content matching the GitHub web UI is shown by default. If a user has indicated a tool preference (by clicking on a tool tab), then the user's preference will be applied instead of the default value.
- Purpose: Override the initial tool selection for a page, where the tool refers to the application the reader is using to work with GitHub (such as GitHub.com's web UI, the GitHub CLI, or GitHub Desktop) or the GitHub APIs (such as cURL or the GitHub CLI). For more information about the tool selector, see [Markup reference for GitHub Docs](../contributing/content-markup-reference.md#tool-tags). If this frontmatter is omitted, then the tool-specific content matching the GitHub web UI is shown by default. If a user has indicated a tool preference (by clicking on a tool tab), then the user's preference will be applied instead of the default value.
- Type: `String`, one of: `webui`, `cli`, `desktop`, `curl`, `codespaces`, `vscode`, `importer_cli`, `graphql`, `powershell`, `bash`, `javascript`.
- Optional.
@@ -263,9 +263,9 @@ includeGuides:
- Optional.
### `topics`
- Purpose: Indicate the topics covered by the article. The topics are used to filter guides on some landing pages. For example, the guides at the bottom of [this page](https://docs.github.com/en/actions/guides) can be filtered by topics and the topics are listed under the guide intro. Topics are also added to all search records that get created for each page. The search records contain a `topics` property that is used to filter search results by topics. For more information, see the [Search](/contributing/search.md) contributing guide. Refer to the content models for more details around adding topics. A full list of existing topics is located in the [allowed topics file](/data/allowed-topics.js). If topics in article frontmatter and the allow-topics list become out of sync, the [topics CI test](/tests/unit/search/topics.js) will fail.
- Purpose: Indicate the topics covered by the article. The topics are used to filter guides on some landing pages. For example, the guides at the bottom of [this page](https://docs.github.com/en/actions/guides) can be filtered by topics, and the topics are listed under the guide intro. Topics are also added to all search records that get created for each page. The search records contain a `topics` property that is used to filter search results by topics. For more information, see the [Search](/contributing/search.md) contributing guide. Refer to the content models for more details about adding topics. A full list of existing topics is located in the [allowed topics file](/data/allowed-topics.js). If topics in article frontmatter and the allow-topics list become out of sync, the [topics CI test](/tests/unit/search/topics.js) will fail.
- Type: Array of `String`s
- Optional: Topics are preferred for each article, but, there may be cases where existing articles don't yet have topics or adding a topic to a new article may not add value.
- Optional: Topics are preferred for each article, but there may be cases where existing articles don't yet have topics, or adding a topic to a new article may not add value.
### `contributor`
- Purpose: Indicate an article is contributed and maintained by a third-party organization, typically a GitHub Technology Partner.
@@ -294,7 +294,7 @@ contributor:
If you see two single quotes in a row (`''`) in YML frontmatter where you might expect to see one (`'`), this is the YML-preferred way to escape a single quote. From [the YAML spec](https://yaml.org/spec/history/2001-12-10.html):
> In single quoted leaves, a single quote character needs to be escaped. This is done by repeating the character.
> In single-quoted leaves, a single quote character needs to be escaped. This is done by repeating the character.
As an alternative, you can change the single quotes surrounding the frontmatter field to double quotes and leave interior single quotes unescaped.
@@ -314,7 +314,7 @@ Make sure not to add hardcoded "In this article" sections in the Markdown source
A content file can have **two** types of versioning:
* [`versions`](#versions) frontmatter (**required**)
* Determines in which the versions the page is available. See [contributing/permalinks](../contributing/permalinks.md) for more info.
* Determines in which versions the page is available. See [contributing/permalinks](../contributing/permalinks.md) for more info.
* Liquid statements in content (**optional**)
* Conditionally render content depending on the current version being viewed. See [contributing/liquid-helpers](../contributing/liquid-helpers.md) for more info. Note Liquid conditionals can also appear in `data` and `include` files.
@@ -358,7 +358,7 @@ and when viewed on GitHub Enterprise Server docs, the version is included as wel
### Preventing transformations
Sometimes you want to link to a Dotcom-only article in Enterprise content and you don't want the link to be Enterprise-ified. To prevent the transformation, include the preferred version in the path.
Sometimes you want to link to a Dotcom-only article in Enterprise content and you don't want the link to be Enterprise-ified. To prevent the transformation, you should include the preferred version in the path.
```markdown
"[GitHub's Terms of Service](/free-pro-team@latest/github/site-policy/github-terms-of-service)"
@@ -388,8 +388,8 @@ The homepage is the main Table of Contents file for the docs site. The homepage
To create a product guides page (e.g. [Actions' Guide page](https://docs.github.com/en/actions/guides)), create or modify an existing markdown file with these specific frontmatter values:
1. Use the product guides page template by referencing it `layout: product-guides`
2. (optional) Include the learning tracks in [`learningTracks`](#learningTracks)
1. Use the product guides page template by referencing `layout: product-guides`.
2. (optional) Include the learning tracks in [`learningTracks`](#learningTracks).
3. (optional) Define which articles to include with [`includeGuides`](#includeGuides).
If using learning tracks, they need to be defined in [`data/learning-tracks/*.yml`](../data/learning-tracks/README.md).

View File

@@ -25,6 +25,7 @@ children:
- /managing-the-default-branch-name-for-your-repositories
- /managing-security-and-analysis-settings-for-your-personal-account
- /managing-access-to-your-personal-accounts-project-boards
- /managing-your-cookie-preferences-for-githubs-enterprise-marketing-pages
- /integrating-jira-with-your-personal-projects
- /what-does-the-available-for-hire-checkbox-do
shortTitle: Personal account settings

View File

@@ -0,0 +1,33 @@
---
title: Managing your cookie preferences for GitHub's enterprise marketing pages
intro: "You can control how {% data variables.product.company_short %} uses information from non-essential tracking cookies for enterprise marketing pages."
versions:
fpt: '*'
ghes: '*'
ghae: '*'
ghec: '*'
topics:
- Accounts
shortTitle: Manage cookie preferences
---
## About cookie preferences on enterprise marketing pages
{% data variables.product.company_short %} may use non-essential cookies on some enterprise marketing pages. You can customize how these cookies behave. For more information about how {% data variables.product.company_short %} uses cookies, see "[{% data variables.product.company_short %} Privacy Statement](/free-pro-team@latest/site-policy/privacy-policies/github-privacy-statement)."
## Changing your cookie preferences
You can customize how non-essential cookies behave on any {% data variables.product.company_short %} enterprise marketing page.
1. Navigate to the {% data variables.product.company_short %} enterprise marketing page where you'd like to change your cookie preferences. For example, navigate to [{% data variables.product.company_short %} Resources](https://resources.github.com/).
1. Scroll to the bottom of the page, then click **Manage Cookies**.
![Screenshot of button to manage cookie settings.](/assets/images/help/settings/cookie-settings-manage.png)
1. Under "Manage cookie preferences," to accept or reject each non-essential cookie, click **Accept** or **Reject**.
![Screenshot of radio buttons to choose "Accept" or "Reject" for non-essential cookies.](/assets/images/help/settings/cookie-settings-accept-or-reject.png)
1. Click **Save changes**.
![Screenshot of button to save changes.](/assets/images/help/settings/cookie-settings-save.png)

View File

@@ -145,11 +145,11 @@ With OIDC, a {% data variables.product.prodname_actions %} workflow requires a t
Audience and Subject claims are typically used in combination when setting conditions on cloud roles and resources, to scope their access to specific GitHub workflows.
- **Audience**: By default, this value uses the URL of the organization or repository owner. This can be used to set a condition that only the workflows in the specific organization can access the cloud role.
- **Subject**: Has a predefined format and is a concatenation of some of the key metadata about the workflow, such as the {% data variables.product.prodname_dotcom %} organization, repository, branch, or associated [`job`](/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idenvironment) environment. See "[Example subject claims](#example-subject-claims)" to see how the subject claim is assembled from concatenated metadata.
- **Subject**: By default, has a predefined format and is a concatenation of some of the key metadata about the workflow, such as the {% data variables.product.prodname_dotcom %} organization, repository, branch, or associated [`job`](/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idenvironment) environment. See "[Example subject claims](#example-subject-claims)" to see how the subject claim is assembled from concatenated metadata.
There are also many additional claims supported in the OIDC token that can also be used for setting these conditions.
If you need more granular trust conditions, you can customize the issuer (`iss`) and subject (`sub`) claims that are included with the JWT. For more information, see "[Customizing the token claims](#customizing-the-token-claims)".
In addition, your cloud provider could allow you to assign a role to the access tokens, letting you specify even more granular permissions.
There are also many additional claims supported in the OIDC token that can be used for setting these conditions. In addition, your cloud provider could allow you to assign a role to the access tokens, letting you specify even more granular permissions.
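For orientation, the relevant slice of a decoded OIDC token payload looks roughly like this (the claim names are real, the values are illustrative, and this is far from the complete claim set):

```js
// Illustrative subset of the claims in a GitHub Actions OIDC token.
const claims = {
  iss: 'https://token.actions.githubusercontent.com',
  // Audience: defaults to the URL of the organization or repository owner.
  aud: 'https://github.com/octo-org',
  // Subject: predefined concatenation of key workflow metadata.
  sub: 'repo:octo-org/octo-repo:environment:prod',
  repository: 'octo-org/octo-repo',
  repository_visibility: 'private',
  job_workflow_ref: 'octo-org/octo-automation/.github/workflows/oidc.yml@refs/heads/main',
}
```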
{% note %}
@@ -243,9 +243,13 @@ curl -H "Authorization: bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" "$ACTIONS_ID_TOK
{% ifversion actions-oidc-hardening-config %}
## Customizing the token claims
You can security harden your OIDC configuration by customizing the claims that are included with the JWT. This allows your cloud provider to apply more granular trust conditions when determining whether to grant access to its resources. For example, {% ifversion ghec %}you can customize the issuer (`iss`) claim to only allow access from a specific enterprise URL, and {% endif %}you can customize the subject (`sub`) value to require that requests originate from a specific repository, reusable workflow, or other source.
You can security harden your OIDC configuration by customizing the claims that are included with the JWT. These customizations allow you to define more granular trust conditions on your cloud roles when allowing your workflows to access resources hosted in the cloud:
To configure the claim conditions on {% data variables.product.prodname_dotcom %}, you can use the REST API endpoints described in the following sections.
{% ifversion ghec %} - For an additional layer of security, you can append the `issuer` url with your enterprise slug. This lets you set conditions on the issuer (`iss`) claim, configuring it to only accept JWT tokens from a unique `issuer` URL that must include your enterprise slug.{% endif %}
- You can standardize your OIDC configuration by setting conditions on the subject (`sub`) claim that require JWT tokens to originate from a specific repository, reusable workflow, or other source.
- You can define granular OIDC policies by using additional OIDC token claims, such as `repository_id` and `repo_visibility`. For more information, see "[Understanding the OIDC token](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#understanding-the-oidc-token)".
To customize these claim formats, organization and repository admins can use the REST API endpoints described in the following sections.
{% ifversion ghec %}
@@ -280,19 +284,21 @@ After this setting is applied, the JWT will contain the updated `iss` value. In
To configure organization-wide security, compliance, and standardization, you can customize the standard claims to suit your required access conditions. If your cloud provider supports conditions on subject claims, you can create a condition that checks whether the `sub` value matches the path of the reusable workflow, such as `"job_workflow_ref: "octo-org/octo-automation/.github/workflows/oidc.yml@refs/heads/main""`. The exact format will vary depending on your cloud provider's OIDC configuration. To configure the matching condition on {% data variables.product.prodname_dotcom %}, you can use the REST API to require that the `sub` claim must always include a specific custom claim, such as `job_workflow_ref`. For more information, see "[Set the customization template for an OIDC subject claim for an organization](/rest/actions/oidc#set-the-customization-template-for-an-oidc-subject-claim-for-an-organization)."
Customizing the claims results in a new format for the entire `sub` claim, which replaces the default predefined `sub` format in the token described in "[Example subject claims](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#example-subject-claims)."
The following example templates demonstrate various ways to customize the subject claim. To configure these settings on {% data variables.product.prodname_dotcom %}, organization admins use the REST API to specify a list of claims that must be included in the subject (`sub`) claim. {% data reusables.actions.use-request-body-api %}
To customize your subject claims, you should first create a matching condition in your cloud provider's OIDC configuration, before adding the configuration using the REST API. Once the configuration is completed, each time a new job runs, the OIDC token generated during that job will follow the new customization template. If the matching condition doesn't exist in the cloud provider's OIDC configuration before the job runs, the generated token might not be accepted by the cloud provider, since the cloud conditions may not be synchronized.
To customize your subject claims, you should first create a matching condition in your cloud provider's OIDC configuration, before customizing the configuration using the REST API. Once the configuration is completed, each time a new job runs, the OIDC token generated during that job will follow the new customization template. If the matching condition doesn't exist in the cloud provider's OIDC configuration before the job runs, the generated token might not be accepted by the cloud provider, since the cloud conditions may not be synchronized.
{% note %}
**Note**: When the organization template is applied, it will not affect any existing repositories that already use OIDC. For new repositories that are created after the template has been applied, the repository owner will need to opt-in to receive this configuration. For more information, see "[Set the opt-in flag of an OIDC subject claim customization for a repository](/rest/actions/oidc#set-the-opt-in-flag-of-an-oidc-subject-claim-customization-for-a-repository)."
**Note**: When the organization template is applied, it will not affect any existing repositories that already use OIDC. For existing repositories, as well as any new repositories that are created after the template has been applied, the repository owner will need to opt-in to receive this configuration. For more information, see "[Set the opt-in flag of an OIDC subject claim customization for a repository](/rest/actions/oidc#set-the-opt-in-flag-of-an-oidc-subject-claim-customization-for-a-repository)."
{% endnote %}
#### Example: Allowing repository based on visibility and owner
This example template enables cloud access based on repository visibility and owner, letting you restrict cloud role access to only private repositories within an organization or enterprise. {% data reusables.actions.use-request-body-api %}
This example template allows the `sub` claim to have a new format, using `repository_owner` and `repository_visibility`:
```json
{
@@ -303,11 +309,11 @@ This example template enables cloud access based on repository visibility and ow
}
```
In your cloud provider's OIDC configuration, configure the `sub` condition to require that claims must include specific values for `repository_owner` and `repository_visibility`. For example: `"repository_owner: "monalisa":repository_visibility:private"`.
In your cloud provider's OIDC configuration, configure the `sub` condition to require that claims must include specific values for `repository_owner` and `repository_visibility`. For example: `"repository_owner: "monalisa":repository_visibility:private"`. This approach lets you restrict cloud role access to only private repositories within an organization or enterprise.
#### Example: Allowing access to all repositories with a specific owner
This example template grants access to all repositories with a specified `repository_owner`. {% data reusables.actions.use-request-body-api %}
This example template enables the `sub` claim to have a new format with only the value of `repository_owner`. {% data reusables.actions.use-request-body-api %}
```json
{
@@ -322,7 +328,9 @@ In your cloud provider's OIDC configuration, configure the `sub` condition to re
#### Example: Requiring a reusable workflow
This example template requires a specific reusable workflow in a claim, letting an enterprise enforce consistent deployments across its enterprise, organizations, and repositories. {% data reusables.actions.use-request-body-api %}
This example template allows the `sub` claim to have a new format that contains the value of the `job_workflow_ref` claim. This enables an enterprise to use [reusable workflows](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#example-subject-claims) to enforce consistent deployments across its organizations and repositories.
{% data reusables.actions.use-request-body-api %}
```json
{
@@ -336,7 +344,9 @@ In your cloud provider's OIDC configuration, configure the `sub` condition to re
#### Example: Requiring a reusable workflow and other claims
This example template combines the requirement of a specific reusable workflow with additional claims. {% data reusables.actions.use-request-body-api %}
The following example template combines the requirement of a specific reusable workflow with additional claims. {% data reusables.actions.use-request-body-api %}
This example also demonstrates how to use `"context"` to define your conditions. This is the part that follows the repository in the [default `sub` format](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#example-subject-claims). For example, when the job references an environment, the context contains: `environment:<environmentName>`.
```json
{
@@ -350,6 +360,9 @@ This example template combines the requirement of a specific reusable workflow w
In your cloud provider's OIDC configuration, configure the `sub` condition to require that claims must include specific values for `repo`, `context`, and `job_workflow_ref`.
This customization template requires that the `sub` uses the following format: `repo:<orgName/repoName>:environment:<environmentName>:job_workflow_ref:<reusableWorkflowPath>`.
For example: `"sub": "repo:octo-org/octo-repo:environment:prod:job_workflow_ref:octo-org/octo-automation/.github/workflows/oidc.yml@refs/heads/main"`
#### Example: Granting access to a specific repository
This example template lets you grant cloud access to all the workflows in a specific repository, across all branches/tags and environments. To help improve security, combine this template with the custom issuer URL described in "[Customizing the token URL for an enterprise](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#customizing-the-token-url-for-an-enterprise)."
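To tie the examples together, here is a sketch of the two REST calls involved: setting an organization-level subject claim template, then opting an individual repository in. It uses the endpoints named above; the `octo-org` values are hypothetical.

```js
import { Octokit } from '@octokit/core'

const octokit = new Octokit({ auth: 'YOUR-TOKEN' })

// Set the organization template (the repo + context + job_workflow_ref example above).
await octokit.request('PUT /orgs/{org}/actions/oidc/customization/sub', {
  org: 'octo-org',
  include_claim_keys: ['repo', 'context', 'job_workflow_ref'],
})

// Opt a repository created after the template was applied in to the configuration.
await octokit.request('PUT /repos/{owner}/{repo}/actions/oidc/customization/sub', {
  owner: 'octo-org',
  repo: 'octo-repo',
  use_default: false,
})
```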

View File

@@ -23,11 +23,18 @@ topics:
Rather than copying and pasting deployment jobs from one workflow to another, you can create a reusable workflow that performs the deployment steps. A reusable workflow can be used by another workflow if it meets one of the access requirements described in "[Reusing workflows](/actions/learn-github-actions/reusing-workflows#access-to-reusable-workflows)."
When combined with OpenID Connect (OIDC), reusable workflows let you enforce consistent deployments across your repository, organization, or enterprise. You can do this by defining trust conditions on cloud roles based on reusable workflows.
You should be familiar with the concepts described in "[Reusing workflows](/actions/learn-github-actions/reusing-workflows)" and "[About security hardening with OpenID Connect](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect)."
In order to create trust conditions based on reusable workflows, your cloud provider must support custom claims for `job_workflow_ref`. This allows your cloud provider to identify which repository the job originally came from. If your cloud provider only supports the standard claims (_audience_ and _subject_), it will not be able to determine that the job originated from the reusable workflow repository. Cloud providers that support `job_workflow_ref` include Google Cloud Platform and HashiCorp Vault.
## Defining the trust conditions
Before proceeding, you should be familiar with the concepts of [reusable workflows](/actions/learn-github-actions/reusing-workflows) and [OpenID Connect](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect).
When combined with OpenID Connect (OIDC), reusable workflows let you enforce consistent deployments across your repository, organization, or enterprise. You can do this by defining trust conditions on cloud roles based on reusable workflows. The available options will vary depending on your cloud provider:
- **Using `job_workflow_ref`**:
- To create trust conditions based on reusable workflows, your cloud provider must support custom claims for `job_workflow_ref`. This allows your cloud provider to identify which repository the job originally came from.
- For clouds that only support the standard claims (audience (`aud`) and subject (`sub`)), you can use the API to customize the `sub` claim to include `job_workflow_ref`. For more information, see "[Customizing the token claims](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#customizing-the-token-claims)". Support for custom claims is currently available for Google Cloud Platform and HashiCorp Vault.
- **Customizing the token claims**:
- You can configure more granular trust conditions by customizing the issuer (`iss`) and subject (`sub`) claims included with the JWT. For more information, see "[Customizing the token claims](/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#customizing-the-token-claims)".
## How the token works with reusable workflows

View File

@@ -71,7 +71,7 @@ You can add self-hosted runners at the organization level, where they can be use
{% data reusables.organizations.navigate-to-org %}
{% data reusables.organizations.org_settings %}
{% data reusables.organizations.settings-sidebar-actions-runners %}
1. Click **New runner**.
{% ifversion actions-hosted-runners %}1. Click **New runner**, then click **New self-hosted runner**.{% else %}1. Click **New runner**.{% endif %}
{% data reusables.actions.self-hosted-runner-configure %}
{% elsif ghae or ghes < 3.4 %}
{% data reusables.organizations.navigate-to-org %}

View File

@@ -18,15 +18,24 @@ shortTitle: Run runner app on startup
{% capture service_first_step %}1. Stop the self-hosted runner application if it is currently running.{% endcapture %}
{% capture service_non_windows_intro_shell %}On the runner machine, open a shell in the directory where you installed the self-hosted runner application. Use the commands below to install and manage the self-hosted runner service.{% endcapture %}
{% capture service_nonwindows_intro %}You must add a runner to {% data variables.product.product_name %} before you can configure the self-hosted runner application as a service. For more information, see "[Adding self-hosted runners](/github/automating-your-workflow-with-github-actions/adding-self-hosted-runners)."{% endcapture %}
{% capture service_win_name %}actions.runner.*{% endcapture %}
{% capture service_nonwindows_intro %}
{% note %}
**Note:** You must add a runner to {% data variables.product.product_name %} before you can configure the self-hosted runner application as a service.
For more information, see "[Adding self-hosted runners](/github/automating-your-workflow-with-github-actions/adding-self-hosted-runners)."
{% endnote %}
{% endcapture %}
{% capture service_win_name %}actions.runner.*{% endcapture %}
{% linux %}
{{ service_nonwindows_intro }}
For Linux systems that use `systemd`, you can use the `svc.sh` script distributed with the self-hosted runner application to install and manage using the application as a service.
For Linux systems that use `systemd`, you can use the `svc.sh` script that is created after successfully adding the runner to install and manage using the application as a service.
{{ service_non_windows_intro_shell }}

View File

@@ -9,216 +9,65 @@ versions:
ghae: '*'
ghec: '*'
type: tutorial
shortTitle: Manage access to runners
shortTitle: Using runner groups
---
{% data reusables.actions.enterprise-beta %}
{% data reusables.actions.enterprise-github-hosted-runners %}
## About self-hosted runner groups
## About runner groups
{% ifversion fpt %}
{% note %}
**Note:** All organizations have a single default self-hosted runner group. Only enterprise accounts and organizations owned by enterprise accounts can create and manage additional self-hosted runner groups.
{% endnote %}
Self-hosted runner groups are used to control access to self-hosted runners. Organization admins can configure access policies that control which repositories in an organization have access to the runner group.
If you use {% data variables.product.prodname_ghe_cloud %}, you can create additional runner groups; enterprise admins can configure access policies that control which organizations in an enterprise have access to the runner group; and organization admins can assign additional granular repository access policies to the enterprise runner group. For more information, see the [{% data variables.product.prodname_ghe_cloud %} documentation](/enterprise-cloud@latest/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups).
{% endif %}
{% data reusables.actions.about-runner-groups %} {% ifversion fpt %}For more information, see the [{% data variables.product.prodname_ghe_cloud %} documentation](/enterprise-cloud@latest/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups).{% endif %}
{% ifversion ghec or ghes or ghae %}
Self-hosted runner groups are used to control access to self-hosted runners at the organization and enterprise level. Enterprise owners can configure access policies that control which organizations {% ifversion restrict-groups-to-workflows %}and workflows {% endif %}in an enterprise have access to the runner group. Organization owners can configure access policies that control which repositories{% ifversion restrict-groups-to-workflows %} and workflows{% endif %} in an organization have access to the runner group.
When an enterprise owner grants an organization access to a runner group, organization owners can see the runner group listed in the organization's self-hosted runner settings. The organization owners can then assign additional granular repository{% ifversion restrict-groups-to-workflows %} and workflow{% endif %} access policies to the enterprise runner group.
When new runners are created, they are automatically assigned to the default group. Runners can only be in one group at a time. You can move runners from the default group to another group. For more information, see "[Moving a self-hosted runner to a group](#moving-a-self-hosted-runner-to-a-group)."
## Creating a self-hosted runner group for an organization
All organizations have a single default self-hosted runner group. Organizations within an enterprise account can create additional self-hosted groups. Organization admins can allow individual repositories access to a runner group. For information about how to create a self-hosted runner group with the REST API, see "[Self-hosted runner groups](/rest/reference/actions#self-hosted-runner-groups)."
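As a sketch of that REST call (hypothetical names; see the runner groups REST reference just cited):

```js
import { Octokit } from '@octokit/core'

const octokit = new Octokit({ auth: 'YOUR-TOKEN' })

// Create an organization runner group restricted to selected repositories.
await octokit.request('POST /orgs/{org}/actions/runner-groups', {
  org: 'octo-org',
  name: 'private-deploy-runners',
  visibility: 'selected', // or 'all', or 'private'
  selected_repository_ids: [123456], // repository IDs allowed to use the group
})
```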
{%- ifversion ghec or ghes %}
Self-hosted runners are automatically assigned to the default group when created, and can only be members of one group at a time. You can move a runner from the default group to any group you create.
{% data reusables.actions.self-hosted-runner-security-admonition %}
When creating a group, you must choose a policy that defines which repositories{% ifversion restrict-groups-to-workflows %} and workflows{% endif %} have access to the runner group.
{%- endif %}
{% ifversion ghec or ghes > 3.3 or ghae-issue-5091 %}
{% data reusables.organizations.navigate-to-org %}
{% data reusables.organizations.org_settings %}
{% data reusables.organizations.settings-sidebar-actions-runner-groups %}
1. In the "Runner groups" section, click **New runner group**.
1. Enter a name for your runner group.
{% data reusables.actions.runner-group-assign-policy-repo %}
{% warning %}
**Warning**: {% indented_data_reference reusables.actions.self-hosted-runner-security spaces=3 %}
For more information, see "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners#self-hosted-runner-security-with-public-repositories)."
{% endwarning %}
{% data reusables.actions.runner-group-assign-policy-workflow %}{%- ifversion restrict-groups-to-workflows %} Organization-owned runner groups cannot access workflows from a different organization in the enterprise; instead, you must create an enterprise-owned runner group.{% endif %}
{% data reusables.actions.self-hosted-runner-create-group %}
{% elsif ghae or ghes < 3.4 %}
{% data reusables.organizations.navigate-to-org %}
{% data reusables.organizations.org_settings %}
{% data reusables.organizations.settings-sidebar-actions-runner-groups %}
1. Under {% ifversion ghes or ghae %}"Runners"{% endif %}, click **Add new**, and then **New group**.
![Add runner group](/assets/images/help/settings/actions-org-add-runner-group.png)
1. Enter a name for your runner group, and assign a policy for repository access.
You can configure a runner group to be accessible to a specific list of repositories, or to all repositories in the organization.{% ifversion ghec or ghes %} By default, only private repositories can access runners in a runner group, but you can override this. This setting can't be overridden if configuring an organization's runner group that was shared by an enterprise.{% endif %}
{%- ifversion ghes %}
{% warning %}
**Warning**:
{% indented_data_reference reusables.actions.self-hosted-runner-security spaces=3 %}
For more information, see "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners#self-hosted-runner-security-with-public-repositories)."
{% endwarning %}
{%- endif %}
![Add runner group options](/assets/images/help/settings/actions-org-add-runner-group-options.png)
1. Click **Save group** to create the group and apply the policy.
{% endif %}
{% data reusables.actions.creating-a-runner-group-for-an-organization %}
## Creating a self-hosted runner group for an enterprise
Enterprises can add their self-hosted runners to groups for access management. Enterprises can create groups of self-hosted runners that are accessible to specific organizations in the enterprise account{% ifversion restrict-groups-to-workflows %} or to specific workflows{% endif %}. Organization owners can then assign additional granular repository{% ifversion restrict-groups-to-workflows %} or workflow{% endif %} access policies to the enterprise runner groups. For information about how to create a self-hosted runner group with the REST API, see the enterprise endpoints in the [{% data variables.product.prodname_actions %} REST API](/rest/reference/actions#self-hosted-runner-groups).
{%- ifversion ghec or ghes %}
Self-hosted runners are automatically assigned to the default group when created, and can only be members of one group at a time. You can assign the runner to a specific group during the registration process, or you can later move the runner from the default group to a custom group.
{% data reusables.actions.self-hosted-runner-security-admonition %}
When creating a group, you must choose a policy that defines which organizations have access to the runner group.
{%- endif %}
{% data reusables.actions.self-hosted-runner-groups-add-to-enterprise-first-steps %}
1. To choose a policy for organization access, select the **Organization access** drop-down, and click a policy. You can configure a runner group to be accessible to a specific list of organizations, or all organizations in the enterprise.{% ifversion ghes %} By default, only private repositories can access runners in a runner group, but you can override this.{% endif %}
{%- ifversion ghec or ghes %}
{% warning %}
**Warning**:
{% indented_data_reference reusables.actions.self-hosted-runner-security spaces=3 %}
For more information, see "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners#self-hosted-runner-security-with-public-repositories)."
{% endwarning %}
{%- endif %}
{%- ifversion ghec or ghes %}
![Add runner group options](/assets/images/help/settings/actions-enterprise-account-add-runner-group-options.png)
{%- elsif ghae %}
![Add runner group options](/assets/images/help/settings/actions-enterprise-account-add-runner-group-options-ae.png)
{%- endif %}
{% data reusables.actions.runner-group-assign-policy-workflow %}
1. Click **Save group** to create the group and apply the policy.
{% data reusables.actions.creating-a-runner-group-for-an-enterprise %}
{% endif %}
## Changing the access policy of a self-hosted runner group
For runner groups in an enterprise, you can change what organizations in the enterprise can access a runner group{% ifversion restrict-groups-to-workflows %} or restrict what workflows a runner group can run{% endif %}. For runner groups in an organization, you can change what repositories in the organization can access a runner group{% ifversion restrict-groups-to-workflows %} or restrict what workflows a runner group can run{% endif %}.
{%- ifversion fpt or ghec or ghes %}
### Changing what organizations or repositories can access a runner group
{% data reusables.actions.self-hosted-runner-security-admonition %}
{% ifversion fpt or ghec or ghes > 3.3 or ghae-issue-5091 %}
{% data reusables.actions.self-hosted-runner-groups-navigate-to-repo-org-enterprise %}
{% data reusables.actions.settings-sidebar-actions-runner-groups-selection %}
1. For runner groups in an enterprise, under **Organization access**, modify what organizations can access the runner group. For runner groups in an organization, under **Repository access**, modify what repositories can access the runner group.
{%- endif %}
{%- ifversion fpt or ghec or ghes %}
{% warning %}
**Warning**:
{% indented_data_reference reusables.actions.self-hosted-runner-security spaces=3 %}
For more information, see "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners#self-hosted-runner-security-with-public-repositories)."
{% endwarning %}
{%- endif %}
{% elsif ghae or ghes < 3.4 %}
{% data reusables.actions.self-hosted-runner-configure-runner-group-access %}
{% endif %}
{% ifversion restrict-groups-to-workflows %}
### Changing what workflows can access a runner group
You can configure a self-hosted runner group to run either selected workflows or all workflows. For example, you might use this setting to protect secrets that are stored on self-hosted runners or to standardize deployment workflows by restricting a runner group to run only a specific reusable workflow. This setting cannot be overridden if you are configuring an organization's runner group that was shared by an enterprise.
{% data reusables.actions.self-hosted-runner-groups-navigate-to-repo-org-enterprise %}
{% data reusables.actions.settings-sidebar-actions-runner-groups-selection %}
1. Under **Workflow access**, select the dropdown menu and click **Selected workflows**.
1. Click {% octicon "gear" aria-label="the gear icon" %}.
1. Enter a comma separated list of the workflows that can access the runner group. Use the full path, including the repository name and owner. Pin the workflow to a branch, tag, or full SHA. For example: `octo-org/octo-repo/.github/workflows/build.yml@v2, octo-org/octo-repo/.github/workflows/deploy.yml@d6dc6c96df4f32fa27b039f2084f576ed2c5c2a5, monalisa/octo-test/.github/workflows/test.yml@main`.
Only jobs directly defined within the selected workflows will have access to the runner group.
Organization-owned runner groups cannot access workflows from a different organization in the enterprise; instead, you must create an enterprise-owned runner group.
1. Click **Save**.
{% endif %}
{% data reusables.actions.changing-the-access-policy-of-a-runner-group %}
## Changing the name of a runner group
{% ifversion fpt or ghec or ghes > 3.3 or ghae-issue-5091 %}
{% data reusables.actions.self-hosted-runner-groups-navigate-to-repo-org-enterprise %}
{% data reusables.actions.settings-sidebar-actions-runner-groups-selection %}
1. Change the runner group name.
{% elsif ghae or ghes < 3.4 %}
{% data reusables.actions.self-hosted-runner-configure-runner-group %}
1. Change the runner group name.
{% endif %}
{% data reusables.actions.changing-the-name-of-a-runner-group %}
{% ifversion ghec or ghes or ghae %}
## Automatically adding a self-hosted runner to a group
You can use the configuration script to automatically add a new self-hosted runner to a group. For example, this command registers a new self-hosted runner and uses the `--runnergroup` parameter to add it to a group named `rg-runnergroup`.
```sh
./config.sh --url $org_or_enterprise_url --token $token --runnergroup rg-runnergroup
```
The command will fail if the runner group doesn't exist:
```
Could not find any self-hosted runner group named "rg-runnergroup".
```
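As a concrete sketch, registering a runner at the organization level into an existing group might look like the following. The URL, token, and group name are placeholders; substitute your own values:
```sh
./config.sh \
  --url https://github.com/octo-org \
  --token ABCDEFGHIJKLMNOP \
  --runnergroup rg-runnergroup
```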
{% data reusables.actions.automatically-adding-a-runner-to-a-group %}
## Moving a self-hosted runner to a group
If you don't specify a runner group during the registration process, your new self-hosted runners are automatically assigned to the default group, and can then be moved to another group.
{% data reusables.actions.self-hosted-runner-navigate-to-org-enterprise %}
{% ifversion ghec or ghes > 3.3 or ghae-issue-5091 %}
1. In the "Runners" list, click the runner that you want to configure.
2. Select the **Runner group** drop-down.
3. In "Move runner to group", choose a destination group for the runner.
{% elsif ghae or ghes < 3.4 %}
1. In the {% ifversion ghes or ghae %}"Runner groups"{% endif %} section of the settings page, locate the current group of the runner you want to move and expand the list of group members.
![View runner group members](/assets/images/help/settings/actions-org-runner-group-members.png)
2. Select the checkbox next to the self-hosted runner, and then click **Move to group** to see the available destinations.
![Runner group member move](/assets/images/help/settings/actions-org-runner-group-member-move.png)
3. To move the runner, click on the destination group.
![Runner group member move](/assets/images/help/settings/actions-org-runner-group-member-move-destination.png)
{% endif %}
{% data reusables.actions.moving-a-runner-to-a-group %}
## Removing a self-hosted runner group
Self-hosted runners are automatically returned to the default group when their group is removed.
{% ifversion ghes or ghae or ghec %}
{% data reusables.actions.self-hosted-runner-groups-navigate-to-repo-org-enterprise %}
1. In the list of groups, to the right of the group you want to delete, click {% octicon "kebab-horizontal" aria-label="The horizontal kebab icon" %}.
2. To remove the group, click **Remove group**.
3. Review the confirmation prompts, and click **Remove this runner group**.
{% data reusables.actions.removing-a-runner-group %}
{% endif %}
{% endif %}

View File

@@ -2,6 +2,7 @@
title: Understanding GitHub Actions
shortTitle: Understanding GitHub Actions
intro: 'Learn the basics of {% data variables.product.prodname_actions %}, including core concepts and essential terminology.'
miniTocMaxHeadingLevel: 3
redirect_from:
- /github/automating-your-workflow-with-github-actions/core-concepts-for-github-actions
- /actions/automating-your-workflow-with-github-actions/core-concepts-for-github-actions
@@ -82,7 +83,7 @@ For more information, see "[Creating actions](/actions/creating-actions)."
### Runners
{% data reusables.actions.about-runners %} Each runner can run a single job at a time. {% ifversion ghes or ghae %} You must host your own runners for {% data variables.product.product_name %}. {% elsif fpt or ghec %}{% data variables.product.company_short %} provides Ubuntu Linux, Microsoft Windows, and macOS runners to run your workflows; each workflow run executes in a fresh, newly-provisioned virtual machine. {% ifversion actions-hosted-runners %} {% data variables.product.prodname_dotcom %} also offers {% data variables.actions.hosted_runner %}s, which are available in larger configurations. For more information, see "[Using {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/using-larger-runners)." {% endif %}If you need a different operating system or require a specific hardware configuration, you can host your own runners.{% endif %} For more information{% ifversion fpt or ghec %} about self-hosted runners{% endif %}, see "[Hosting your own runners](/actions/hosting-your-own-runners)."
{% data reusables.actions.workflow-basic-example-and-explanation %}

View File

@@ -98,9 +98,9 @@ Each time you create a new release, you can trigger a workflow to publish your p
### Configuring the destination repository
Linking your package to {% data variables.product.prodname_registry %} using the `repository` key is optional. If you choose not to provide the `repository` key in your *package.json* file, then {% data variables.product.prodname_registry %} publishes a package in the {% data variables.product.prodname_dotcom %} repository you specify in the `name` field of the *package.json* file. For example, a package named `@my-org/test` is published to the `my-org/test` {% data variables.product.prodname_dotcom %} repository. If the `url` specified in the `repository` key is invalid, your package may still be published; however, it won't be linked to the repository source as intended.
If you do provide the `repository` key in your *package.json* file, then the repository in that key is used as the destination npm registry for {% data variables.product.prodname_registry %}. For example, publishing the below *package.json* results in a package named `my-amazing-package` published to the `octocat/my-other-repo` {% data variables.product.prodname_dotcom %} repository. Once published, only the repository source is updated, and the package doesn't inherit any permissions from the destination repository.
```json
{

View File

@@ -86,6 +86,15 @@ While the job runs, the logs and output can be viewed in the {% data variables.p
## Supported runners and hardware resources
{% ifversion actions-hosted-runners %}
{% note %}
**Note**: {% data variables.product.prodname_dotcom %} also offers {% data variables.actions.hosted_runner %}s, which are available in larger configurations. For more information, see "[Using {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/using-larger-runners)."
{% endnote %}
{% endif %}
Hardware specification for Windows and Linux virtual machines:
- 2-core CPU (x86_64)
- 7 GB of RAM

View File

@@ -0,0 +1,50 @@
---
title: Controlling access to larger runners
intro: You can use policies to limit access to {% data variables.actions.hosted_runner %}s that have been added to an organization or enterprise.
product: '{% data reusables.gated-features.hosted-runners %}'
versions:
feature: 'actions-hosted-runners'
type: tutorial
shortTitle: Controlling access to {% data variables.actions.hosted_runner %}s
---
{% data reusables.actions.enterprise-beta %}
{% data reusables.actions.enterprise-github-hosted-runners %}
## About runner groups
{% data reusables.actions.about-runner-groups %} {% ifversion fpt %}For more information, see the [{% data variables.product.prodname_ghe_cloud %} documentation](/enterprise-cloud@latest/actions/using-github-hosted-runners/controlling-access-to-larger-runners).{% endif %}
{% ifversion ghec or ghes or ghae %}
## Creating a runner group for an organization
{% data reusables.actions.hosted-runner-security-admonition %}
{% data reusables.actions.creating-a-runner-group-for-an-organization %}
## Creating a runner group for an enterprise
{% data reusables.actions.hosted-runner-security-admonition %}
{% data reusables.actions.creating-a-runner-group-for-an-enterprise %}
{% endif %}
## Changing the access policy of a runner group
{% data reusables.actions.hosted-runner-security-admonition %}
{% data reusables.actions.changing-the-access-policy-of-a-runner-group %}
## Changing the name of a runner group
{% data reusables.actions.changing-the-name-of-a-runner-group %}
{% ifversion ghec or ghes or ghae %}
## Moving a runner to a group
{% data reusables.actions.moving-a-runner-to-a-group %}
## Removing a runner group
{% data reusables.actions.removing-a-runner-group %}
{% endif %}

View File

@@ -7,6 +7,8 @@ versions:
ghes: '*'
children:
- /about-github-hosted-runners
- /using-larger-runners
- /controlling-access-to-larger-runners
- /monitoring-your-current-jobs
- /customizing-github-hosted-runners
- /connecting-to-a-private-network

View File

@@ -0,0 +1,135 @@
---
title: Using larger runners
intro: '{% data variables.product.prodname_dotcom %} offers larger runners with more RAM and CPU.'
miniTocMaxHeadingLevel: 3
product: '{% data reusables.gated-features.hosted-runners %}'
versions:
feature: 'actions-hosted-runners'
shortTitle: Using {% data variables.actions.hosted_runner %}s
---
## Overview of {% data variables.actions.hosted_runner %}s
In addition to the [standard {% data variables.product.prodname_dotcom %}-hosted runners](/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources), {% data variables.product.prodname_dotcom %} also offers customers on {% data variables.product.prodname_team %} and {% data variables.product.prodname_ghe_cloud %} plans a range of {% data variables.actions.hosted_runner %}s with more RAM and CPU. These runners are hosted by {% data variables.product.prodname_dotcom %} and have the runner application and other tools preinstalled.
When you add a {% data variables.actions.hosted_runner %} to an organization, you are defining a type of machine from a selection of available hardware specifications and operating system images. {% data variables.product.prodname_dotcom %} will then create multiple instances of this runner that scale up and down to match the job demands of your organization, based on the autoscaling limits you define.
## Architectural overview of {% data variables.actions.hosted_runner %}s
The {% data variables.actions.hosted_runner %}s are managed at the organization level, where they are arranged into groups that can contain multiple instances of the runner. They can also be created at the enterprise level and shared with organizations in the hierarchy. Once you've created a group, you can then add a runner to the group and update your workflows to target the label assigned to the {% data variables.actions.hosted_runner %}. You can also control which repositories are permitted to send jobs to the group for processing. For more information about groups, see "[Controlling access to {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/controlling-access-to-larger-runners)."
In the following diagram, a class of hosted runner named `ubuntu-20.04-16core` has been defined with customized hardware and operating system configuration.
![Diagram explaining {% data variables.actions.hosted_runner %}](/assets/images/hosted-runner.png)
1. Instances of this runner are automatically created and added to a group called `ubuntu-20.04-16core`.
2. The runners have been assigned the label `ubuntu-20.04-16core`.
3. Workflow jobs use the `ubuntu-20.04-16core` label in their `runs-on` key to indicate the type of runner they need to execute the job.
4. {% data variables.product.prodname_actions %} checks the runner group to see if your repository is authorized to send jobs to the runner.
5. The job runs on the next available instance of the `ubuntu-20.04-16core` runner.
## Autoscaling {% data variables.actions.hosted_runner %}s
Your {% data variables.actions.hosted_runner %}s can be configured to automatically scale to suit your needs. When jobs are submitted for processing, more machines can be automatically provisioned to run the jobs, until reaching a pre-defined maximum limit. Each machine only handles one job at a time, so these settings effectively determine the number of jobs that can be run concurrently.
During the runner deployment process, you can configure the _Max_ option, which allows you to control your costs by setting the maximum number of machines that can run in parallel in this set. A higher value can help avoid workflows being blocked while they wait for an available machine.
## Networking for {% data variables.actions.hosted_runner %}s
By default, {% data variables.actions.hosted_runner %}s receive a dynamic IP address that changes for each job run. Optionally, {% data variables.product.prodname_ghe_cloud %} customers can configure their {% data variables.actions.hosted_runner %}s to receive a static IP address from {% data variables.product.prodname_dotcom %}'s IP address pool. When enabled, instances of the {% data variables.actions.hosted_runner %} will receive an address from a range that is unique to the runner, allowing you to use this range to configure a firewall allowlist. You can use up to 10 static IP address ranges in total across all your {% data variables.actions.hosted_runner %}s.
{% note %}
**Note**: If runners are unused for more than 30 days, their IP address ranges are automatically removed and cannot be recovered.
{% endnote %}
## Planning for {% data variables.actions.hosted_runner %}s
### Create a runner group
Runner groups are used to collect sets of virtual machines and create a security boundary around them. You can then decide which organizations or repositories are permitted to run jobs on those sets of machines. During the {% data variables.actions.hosted_runner %} deployment process, the runner can be added to an existing group; otherwise, it joins a default group. You can create a group by following the steps in "[Controlling access to {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/controlling-access-to-larger-runners)."
### Understanding billing
Compared to standard {% data variables.product.prodname_dotcom %}-hosted runners, {% data variables.actions.hosted_runner %}s are billed differently. For more information, see "[Per-minute rates](/billing/managing-billing-for-github-actions/about-billing-for-github-actions#per-minute-rates)".
## Adding a {% data variables.actions.hosted_runner %} to an enterprise
You can add {% data variables.actions.hosted_runner %}s to an enterprise, where they can be assigned to multiple organizations. The organization admins can then control which repositories can use the runners. To add a {% data variables.actions.hosted_runner %} to an enterprise, you must be an enterprise owner.
{% data reusables.actions.add-hosted-runner-overview %}
{% data reusables.enterprise-accounts.access-enterprise %}
{% data reusables.enterprise-accounts.policies-tab %}
{% data reusables.enterprise-accounts.actions-tab %}
{% data reusables.enterprise-accounts.actions-runners-tab %}
{% data reusables.actions.add-hosted-runner %}
1. To allow organizations to access your {% data variables.actions.hosted_runner %}s, specify the list of organizations that can use them. For more information, see "[Managing access to your runners](#managing-access-to-your-runners)."
## Adding a {% data variables.actions.hosted_runner %} to an organization
You can add a {% data variables.actions.hosted_runner %} to an organization, where the organization admins can control which repositories can use it.
{% data reusables.actions.add-hosted-runner-overview %}
{% data reusables.organizations.navigate-to-org %}
{% data reusables.organizations.org_settings %}
{% data reusables.organizations.settings-sidebar-actions-runners %}
{% data reusables.actions.add-hosted-runner %}
1. To allow repositories to access your {% data variables.actions.hosted_runner %}s, add the repositories to the list of those that can use them. For more information, see "[Managing access to your runners](#managing-access-to-your-runners)."
## Running jobs on your runner
Once your runner type has been defined, you can update your workflows to send jobs to the runner instances for processing. In this example, a runner group is populated with Ubuntu 16-core runners, which have been assigned the label `ubuntu-20.04-16core`. If you have a runner matching this label, the `check-bats-version` job uses the `runs-on` key to target that runner whenever the job is run:
```yaml
name: learn-github-actions
on: [push]
jobs:
  check-bats-version:
    runs-on: ubuntu-20.04-16core
    steps:
      - uses: {% data reusables.actions.action-checkout %}
      - uses: {% data reusables.actions.action-setup-node %}
        with:
          node-version: '14'
      - run: npm install -g bats
      - run: bats -v
```
## Managing access to your runners
{% note %}
**Note**: Before your workflows can send jobs to {% data variables.actions.hosted_runner %}s, you must first configure permissions for the runner group. See the following sections for more information.
{% endnote %}
Runner groups are used to control which repositories can run jobs on your {% data variables.actions.hosted_runner %}s. You must grant access to the group from each level of the management hierarchy, depending on where you've defined the {% data variables.actions.hosted_runner %}:
- **Runners at the enterprise level**: Configure the runner group to grant access to all the required organizations. In addition, for each organization, you must configure the group to specify which repositories are allowed access.
- **Runners at the organization level**: Configure the runner group by specifying which repositories are allowed access.
For example, the following diagram has a runner group named `grp-ubuntu-20.04-16core` at the enterprise level. Before the repository named `octo-repo` can use the runners in the group, you must first configure the group at the enterprise level to allow access from the `octo-org` organization; you must then configure the group at the organization level to allow access from `octo-repo`:
![Diagram explaining {% data variables.actions.hosted_runner %} groups](/assets/images/hosted-runner-mgmt.png)
### Allowing repositories to access a runner group
This procedure demonstrates how to configure group permissions at the enterprise and organization levels:
{% data reusables.actions.runner-groups-navigate-to-repo-org-enterprise %}
{% data reusables.actions.settings-sidebar-actions-runner-groups-selection %}
- For runner groups in an enterprise: under **Organization access**, modify which organizations can access the runner group.
- For runner groups in an organization: under **Repository access**, modify which repositories can access the runner group.
{% warning %}
**Warning**:
{% data reusables.actions.hosted-runner-security %}
For more information, see "[Controlling access to {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/controlling-access-to-larger-runners)."
{% endwarning %}

View File

@@ -51,7 +51,7 @@ For more information on workflow run artifacts, see "[Persisting workflow data u
A workflow can access and restore a cache created in the current branch, the base branch (including base branches of forked repositories), or the default branch (usually `main`). For example, a cache created on the default branch would be accessible from any pull request. Also, if the branch `feature-b` has the base branch `feature-a`, a workflow triggered on `feature-b` would have access to caches created in the default branch (`main`), `feature-a`, and `feature-b`.
Access restrictions provide cache isolation and security by creating a logical boundary between different branches or tags. For example, a cache created for the branch `feature-a` (with the base `main`) would not be accessible to a pull request for the branch `feature-c` (with the base `main`). Similarly, a cache created for the tag `release-a` (from the base `main`) would not be accessible to a workflow triggered for the tag `release-b` (with the base `main`).
Multiple workflows within a repository share cache entries. A cache created for a branch within a workflow can be accessed and restored from another workflow for the same repository and branch.
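As a rough illustration (the action version, path, and key naming below are illustrative, not prescriptive), the `key` is what determines whether two workflows restore the same entry, subject to the branch restrictions described above:
{% raw %}
```yaml
- name: Cache npm dependencies
  uses: actions/cache@v3
  with:
    path: ~/.npm
    key: ${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }}
    restore-keys: |
      ${{ runner.os }}-npm-
```
{% endraw %}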

View File

@@ -23,7 +23,7 @@ topics:
Rather than copying and pasting from one workflow to another, you can make workflows reusable. You and anyone with access to the reusable workflow can then call the reusable workflow from another workflow.
Reusing workflows avoids duplication. This makes workflows easier to maintain and allows you to create new workflows more quickly by building on the work of others, just as you do with actions. Workflow reuse also promotes best practice by helping you to use workflows that are well designed, have already been tested, and have been proven to be effective. Your organization can build up a library of reusable workflows that can be centrally maintained.
The diagram below shows three build jobs on the left. After each of these jobs completes successfully, a dependent job called "Deploy" runs. This job calls a reusable workflow that contains three jobs: "Staging", "Review", and "Production." The "Production" deployment job only runs after the "Staging" job has completed successfully. Using a reusable workflow to run deployment jobs allows you to run those jobs for each build without duplicating code in workflows.
@@ -127,7 +127,7 @@ You can define inputs and secrets, which can be passed from the caller workflow
runs-on: ubuntu-latest
environment: production
steps:
- uses: octo-org/my-action@v1
with:
username: ${{ inputs.username }}
token: ${{ secrets.envPAT }}
@@ -168,12 +168,13 @@ jobs:
name: Pass input and secrets to my-action
runs-on: ubuntu-latest
steps:
- uses: octo-org/my-action@v1
with:
username: ${{ inputs.username }}
token: ${{ secrets.token }}
```
{% endraw %}
{% ifversion actions-reusable-workflow-matrix %}
## Using a matrix strategy with a reusable workflow

View File

@@ -16,7 +16,9 @@ topics:
---
If you configure a hostname instead of a hard-coded IP address, you will be able to change the physical hardware that {% data variables.product.product_location %} runs on without affecting users or client software.
The hostname setting in the {% data variables.enterprise.management_console %} should be set to an appropriate fully qualified domain name (FQDN) which is resolvable on the internet or within your internal network. For example, your hostname setting could be `github.companyname.com`. Web and API requests will automatically redirect to the hostname configured in the {% data variables.enterprise.management_console %}. Note that `localhost` is not a valid hostname setting.
Hostnames must be less than 63 characters in length per [Section 2.3.4 of the Domain Names Specification RFC](https://datatracker.ietf.org/doc/html/rfc1035#section-2.3.4).
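As a quick sanity check before applying the setting (assuming the `dig` utility is available on your workstation), confirm that the FQDN resolves:
```shell
$ dig +short github.companyname.com
# Prints the IP address of your instance; empty output means the name doesn't resolve yet
```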
After you configure a hostname, you can enable subdomain isolation to further increase the security of {% data variables.product.product_location %}. For more information, see "[Enabling subdomain isolation](/enterprise/admin/guides/installation/enabling-subdomain-isolation/)."

View File

@@ -90,6 +90,14 @@ settings to allow incoming emails](#configuring-dns-and-firewall-settings-to-all
You can enforce TLS encryption for all incoming SMTP connections, which can help satisfy an ISO-27017 certification requirement.
{%- ifversion ghes = 3.6 %}
{% note %}
**Note**: Enforcement of TLS for SMTP connections is unavailable in {% data variables.product.product_name %} 3.6.0. The feature will be available in an upcoming release.
{% endnote %}
{%- endif %}
{% data reusables.enterprise_site_admin_settings.email-settings %}
1. Under "Authentication," select **Enforce TLS auth (recommended)**.

View File

@@ -131,46 +131,49 @@ Key | Description
{% ifversion ghes %}
## Indexing
GitHub's search features are powered by Elasticsearch. This section of the site admin dashboard shows you the current status of your Elasticsearch cluster and provides you with several tools to control search and index behavior.
For more information about code search, see "[Searching for information on {% data variables.product.prodname_dotcom %}](/search-github)." For more information about Elasticsearch, see the [Elasticsearch website](https://elastic.co).
{% note %}
**Note**: In normal use, site administrators do not need to create new indices or schedule repair jobs. For troubleshooting or other support purposes, {% data variables.contact.github_support %} may instruct you to run a repair job.
{% endnote %}
### Index management
{% data variables.product.product_name %} reconciles the state of the search index with the following data on the instance automatically and regularly.
- Issues, pull requests, repositories, and users in the database
- Git repositories (source code) on disk
Your instance uses repair jobs to reconcile the data, and schedules a repair job in the background when the following events occur.
- A new search index is created.
- Missing data needs to be backfilled.
- Old search data needs to be updated.
You can create a new index, or click an existing index in the list to manage it. You can perform the following operations on an index.
- Make the index searchable.
- Make the index writable.
- Update the index.
- Delete the index.
- Reset the index repair state.
- Start a new index repair job.
- Enable or disable index repair jobs.
A progress bar shows the current status of a repair job across background workers, expressed as the percentage difference between the repair offset and the highest record ID in the database. You can ignore the value shown after a repair job has completed: because it reflects the difference between the repair offset and the highest record ID, it will decrease as more repositories are added to {% data variables.product.product_location %} even though those repositories are actually indexed.
To minimize the effects on I/O performance and reduce the chances of operations timing out, run the repair job during off-peak hours. As the job reconciles the search index with database and Git repository data, one CPU will be used. Monitor your system's load averages and CPU usage with a utility like `top`. If you don't notice any significant increase in resource consumption, it should also be safe to run an index repair job during peak hours.
Repair jobs use a "repair offset" for parallelization. This is an offset into the database table for the record being reconciled. Multiple background jobs can synchronize work based on this offset.
{% endif %}
## Reserved logins

View File

@@ -83,7 +83,7 @@ You can create a runner group to manage access to the runner that you added to y
{% data variables.product.product_name %} adds all new runners to a group. Runners can be in one group at a time. By default, {% data variables.product.product_name %} adds new runners to the "Default" group.
{% data reusables.actions.runner-groups-add-to-enterprise-first-steps %}
1. To choose a policy for organization access, under "Organization access", select the **Organization access** drop-down, and click **Selected organizations**.
1. To the right of the drop-down with the organization access policy, click {% octicon "gear" aria-label="The Gear icon" %}.
1. Select the organizations you'd like to grant access to the runner group.
@@ -100,7 +100,7 @@ You can create a runner group to manage access to the runner that you added to y
{% endwarning %}
{%- endif %}
{% data reusables.actions.create-runner-group %}
{%- ifversion ghec or ghes > 3.3 or ghae-issue-5091 %}
1. Click the "Runners" tab.
1. In the list of runners, click the runner that you deployed in the previous section.

View File

@@ -44,7 +44,8 @@ You set up the audit log stream on {% data variables.product.product_name %} by
- [Amazon S3](#setting-up-streaming-to-amazon-s3)
- [Azure Blob Storage](#setting-up-streaming-to-azure-blob-storage)
- [Azure Event Hubs](#setting-up-streaming-to-azure-event-hubs){% ifversion streaming-datadog %}
- [Datadog](#setting-up-streaming-to-datadog){% endif %}
- [Google Cloud Storage](#setting-up-streaming-to-google-cloud-storage)
- [Splunk](#setting-up-streaming-to-splunk)
@@ -60,7 +61,7 @@ You can set up streaming to S3 with access keys or, to avoid storing long-lived
#### Setting up streaming to S3 with access keys
{% endif %}
To stream audit logs to Amazon's S3 endpoint, you must have a bucket and access keys. For more information, see [Creating, configuring, and working with Amazon S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) in the AWS documentation. Make sure to block public access to the bucket to protect your audit log information.
To set up audit log streaming from {% data variables.product.prodname_dotcom %} you will need:
* The name of your Amazon S3 bucket
@@ -231,6 +232,32 @@ You need two pieces of information about your event hub: its instance name and t
{% data reusables.enterprise.verify-audit-log-streaming-endpoint %}
{% ifversion streaming-datadog %}
### Setting up streaming to Datadog
To set up streaming to Datadog, you must create a client token or an API key in Datadog, then configure audit log streaming in {% data variables.product.product_name %} using the token for authentication. You do not need to create a bucket or other storage container in Datadog.
After you set up streaming to Datadog, you can see your audit log data by filtering by "github.audit.streaming." For more information, see [Log Management](https://docs.datadoghq.com/logs/).
1. If you don't already have a Datadog account, create one.
1. In Datadog, generate a client token or an API key, then click **Copy key**. For more information, see [API and Application Keys](https://docs.datadoghq.com/account_management/api-app-keys/) in Datadog Docs.
{% data reusables.enterprise.navigate-to-log-streaming-tab %}
1. Select the **Configure stream** dropdown menu and click **Datadog**.
![Screenshot of the "Configure stream" dropdown menu with "Datadog" highlighted](/assets/images/help/enterprises/audit-stream-choice-datadog.png)
1. Under "Token", paste the token you copied earlier.
![Screenshot of the "Token" field](/assets/images/help/enterprises/audit-stream-datadog-token.png)
1. Select the "Site" dropdown menu and click your Datadog site. To determine your Datadog site, compare your Datadog URL to the table in [Datadog sites](https://docs.datadoghq.com/getting_started/site/) in Datadog Docs.
![Screenshot of the "Site" dropdown menu](/assets/images/help/enterprises/audit-stream-datadog-site.png)
1. To verify that {% data variables.product.prodname_dotcom %} can connect and write to the Datadog endpoint, click **Check endpoint**.
![Check the endpoint](/assets/images/help/enterprises/audit-stream-check.png)
{% data reusables.enterprise.verify-audit-log-streaming-endpoint %}
1. After a few minutes, confirm that audit log data is appearing on the **Logs** tab in Datadog. If audit log data is not appearing, confirm that your token and site are correct in {% data variables.product.prodname_dotcom %}.
{% endif %}
### Setting up streaming to Google Cloud Storage
To set up streaming to Google Cloud Storage, you must create a service account in Google Cloud with the appropriate credentials and permissions, then configure audit log streaming in {% data variables.product.product_name %} using the service account's credentials for authentication.
@@ -292,6 +319,10 @@ To stream audit logs to Splunk's HTTP Event Collector (HEC) endpoint you must ma
Pausing the stream allows you to perform maintenance on the receiving application without losing audit data. Audit logs are stored for up to seven days on {% data variables.product.product_location %} and are then exported when you unpause the stream.
{% ifversion streaming-datadog %}
Datadog only accepts logs from up to 18 hours in the past. If you pause a stream to a Datadog endpoint for more than 18 hours, you risk losing logs that Datadog won't accept after you resume streaming.
{% endif %}
{% data reusables.enterprise.navigate-to-log-streaming-tab %}
1. Click **Pause stream**.

View File

@@ -121,7 +121,7 @@ For more information about the audit log REST API, see "[Enterprise administrati
The query below searches for audit log events created on Jan 1st, 2022 in the `avocado-corp` enterprise, and returns the first page with a maximum of 100 items per page using [REST API pagination](/rest/overview/resources-in-the-rest-api#pagination):
```shell
curl -H "Authorization: token <em>TOKEN</em>" \
curl -H "Authorization: Bearer <em>TOKEN</em>" \
--request GET \
"https://api.github.com/enterprises/avocado-corp/audit-log?phrase=created:2022-01-01&page=1&per_page=100"
```
@@ -133,7 +133,7 @@ You can specify multiple search phrases, such as `created` and `actor`, by separ
The query below searches for audit log events for pull requests, where the event occurred on or after Jan 1st, 2022 in the `avocado-corp` enterprise, and the action was performed by the `octocat` user:
```shell
curl -H "Authorization: token <em>TOKEN</em>" \
curl -H "Authorization: Bearer <em>TOKEN</em>" \
--request GET \
"https://api.github.com/enterprises/avocado-corp/audit-log?phrase=action:pull_request+created:>=2022-01-01+actor:octocat"
```

View File

@@ -81,6 +81,8 @@ You can also configure allowed IP addresses for an individual organization. For
{% data reusables.identity-and-permissions.about-adding-ip-allow-list-entries %}
{% data reusables.identity-and-permissions.ipv6-allow-lists %}
{% data reusables.enterprise-accounts.access-enterprise %}
{% data reusables.enterprise-accounts.settings-tab %}
{% data reusables.enterprise-accounts.security-tab %}

View File

@@ -44,7 +44,7 @@ The Migrations API is currently in a preview period, which means that the endpoi
* Your access token for authentication.
* A [list of the repositories](/free-pro-team@latest/rest/repos#list-organization-repositories) you want to migrate:
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" \
-X POST \
-H "Accept: application/vnd.github+json" \
-d'{"lock_repositories":true,"repositories":["<em>orgname</em>/<em>reponame</em>", "<em>orgname</em>/<em>reponame</em>"]}' \
@@ -59,7 +59,7 @@ The Migrations API is currently in a preview period, which means that the endpoi
* Your access token for authentication.
* The unique `id` of the migration:
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" \
-H "Accept: application/vnd.github+json" \
https://api.github.com/orgs/<em>orgname</em>/migrations/<em>id</em>
```
@@ -74,7 +74,7 @@ The Migrations API is currently in a preview period, which means that the endpoi
* Your access token for authentication.
* The unique `id` of the migration:
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" \
-H "Accept: application/vnd.github+json" \
-L -o migration_archive.tar.gz \
https://api.github.com/orgs/<em>orgname</em>/migrations/<em>id</em>/archive
@@ -84,7 +84,7 @@ The Migrations API is currently in a preview period, which means that the endpoi
* Your access token for authentication.
* The unique `id` of the migration:
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" \
-X DELETE \
-H "Accept: application/vnd.github+json" \
https://api.github.com/orgs/<em>orgname</em>/migrations/<em>id</em>/archive

View File

@@ -133,7 +133,7 @@ To unlock the repositories on a {% data variables.product.prodname_dotcom_the_we
* The unique `id` of the migration
* The name of the repository to unlock
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
-H "Accept: application/vnd.github.wyandotte-preview+json" \
https://api.github.com/orgs/<em>orgname</em>/migrations/<em>id</em>/repos/<em>repo_name</em>/lock
```
@@ -142,7 +142,7 @@ curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
After unlocking the {% data variables.product.prodname_dotcom_the_website %} organization's repositories, you should delete every repository you previously migrated using [the repository delete endpoint](/rest/repos/#delete-a-repository). You'll need your access token for authentication:
```shell
curl -H "Authorization: token <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
curl -H "Authorization: Bearer <em>GITHUB_ACCESS_TOKEN</em>" -X DELETE \
https://api.github.com/repos/<em>orgname</em>/<em>repo_name</em>
```

View File

@@ -28,11 +28,11 @@ topics:
{% data reusables.command_line.open_the_multi_os_terminal %}
3. Generate a GPG key pair. Since there are multiple versions of GPG, you may need to consult the relevant [_man page_](https://en.wikipedia.org/wiki/Man_page) to find the appropriate key generation command. Your key must use RSA.
- If you are on version 2.1.17 or greater, paste the text below to generate a GPG key pair.
```shell{:copy}
$ gpg --full-generate-key
```
- If you are not on version 2.1.17 or greater, the `gpg --full-generate-key` command doesn't work. Paste the text below and skip to step 6.
```shell{:copy}
$ gpg --default-new-key-algo rsa4096 --gen-key
```
4. At the prompt, specify the kind of key you want, or press `Enter` to accept the default.
@@ -51,10 +51,10 @@ topics:
{% data reusables.gpg.list-keys-with-note %}
{% data reusables.gpg.copy-gpg-key-id %}
10. Paste the text below, substituting in the GPG key ID you'd like to use. In this example, the GPG key ID is `3AA5C34371567BD2`:
```shell{:copy}
$ gpg --armor --export 3AA5C34371567BD2
# Prints the GPG key ID, in ASCII armor format
```
11. Copy your GPG key, beginning with `-----BEGIN PGP PUBLIC KEY BLOCK-----` and ending with `-----END PGP PUBLIC KEY BLOCK-----`.
12. [Add the GPG key to your GitHub account](/articles/adding-a-gpg-key-to-your-github-account).

View File

@@ -1,6 +1,7 @@
---
title: About billing for GitHub Actions
intro: 'If you want to use {% data variables.product.prodname_actions %} beyond the storage or minutes included in your account, you will be billed for additional usage.'
miniTocMaxHeadingLevel: 3
redirect_from:
- /github/setting-up-and-managing-billing-and-payments-on-github/about-billing-for-github-actions
- /github/setting-up-and-managing-billing-and-payments-on-github/managing-billing-for-github-actions/about-billing-for-github-actions
@@ -27,6 +28,14 @@ Minutes reset every month, while storage usage does not.
### Included storage and minutes
{% ifversion actions-hosted-runners %}
{% note %}
**Note**: Entitlement minutes cannot be used for Windows and Ubuntu runners with more than 2 cores. These runners always incur charges, including in public repos. For more information, see "[Per-minute rates for runners](/billing/managing-billing-for-github-actions/about-billing-for-github-actions#per-minute-rates)."
{% endnote %}
{% endif %}
|Product | Storage | Minutes (per month)|
|------- | ------- | ---------|
| {% data variables.product.prodname_free_user %} | 500 MB | 2,000 |
@@ -57,15 +66,15 @@ The storage used by a repository is the total storage used by {% data variables.
### Per-minute rates
{% data reusables.billing.billing-standard-runners %}
{% ifversion actions-hosted-runners %}{% data reusables.billing.billing-hosted-runners %}{% endif %}
- The number of jobs you can run concurrently across all repositories in your user or organization account depends on your GitHub plan. For more information, see "[Usage limits and billing](/actions/reference/usage-limits-billing-and-administration)" for {% data variables.product.prodname_dotcom %}-hosted runners and "[About self-hosted runners](/actions/hosting-your-own-runners/about-self-hosted-runners/#usage-limits)" for self-hosted runner usage limits.
- {% data reusables.user-settings.context_switcher %}
{% ifversion actions-hosted-runners %}
- For {% data variables.actions.hosted_runner %}s, there is no additional cost for configurations that assign public static IP addresses to a {% data variables.actions.hosted_runner %}. For more information on {% data variables.actions.hosted_runner %}s, see "[Using {% data variables.actions.hosted_runner %}s](/actions/using-github-hosted-runners/using-larger-runners)."
- Entitlement minutes cannot be used for {% data variables.actions.hosted_runner %}s.
{% endif %}
## Calculating minute and storage spending

View File

@@ -359,7 +359,7 @@ In the following example, the `+` symbol ensures that the specified additional {
## Using a custom configuration file
A custom configuration file is an alternative way to specify additional {% ifversion codeql-packs %}packs and {% endif %}queries to run. You can also use the file to disable the default queries{% ifversion code-scanning-exclude-queries-from-analysis %}, exclude or include specific queries,{% endif %} and to specify which directories to scan during analysis.
In the workflow file, use the `config-file` parameter of the `init` action to specify the path to the configuration file you want to use. This example loads the configuration file _./.github/codeql/codeql-config.yml_.
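For example, a minimal sketch of that step in a workflow (the action version tag is illustrative):
```yaml
- uses: github/codeql-action/init@v2
  with:
    config-file: ./.github/codeql/codeql-config.yml
```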
@@ -442,6 +442,41 @@ Optionally, you can give each array element a name, as shown in the example conf
If you only want to run custom queries, you can disable the default security queries by using `disable-default-queries: true`.
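As a brief sketch (the suite path and names here are hypothetical), a configuration file that runs only your own queries might look like this:
```yaml
name: "Custom queries only"
disable-default-queries: true
queries:
  - name: My custom queries
    uses: ./my-queries/custom-suite.qls
```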
{% ifversion code-scanning-exclude-queries-from-analysis %}
### Excluding specific queries from analysis
You can add `exclude` and `include` filters to your custom configuration file, to specify the queries you want to exclude or include in the analysis.
This is useful if you want to exclude, for example:
- Specific queries from the default suites (`security`, `security-extended` and `security-and-quality`).
- Specific queries whose results do not interest you.
- All the queries that generate warnings and recommendations.
You can use `exclude` filters similar to those in the configuration file below to remove queries from the default analysis. In the example configuration file below, both the `js/redundant-assignment` and the `js/useless-assignment-to-local` queries are excluded from analysis.
```yaml
query-filters:
  - exclude:
      id: js/redundant-assignment
  - exclude:
      id: js/useless-assignment-to-local
```
To find the id of a query, you can click the alert in the list of alerts in the Security tab. This opens the alert details page. The `Rule ID` field contains the query id. For more information about the alert details page, see "[About {% data variables.product.prodname_code_scanning %} alerts](/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/about-code-scanning-alerts#about-alert-details)."
{% tip %}
**Tips:**
- The order of the filters is important. The first filter instruction that appears after the instructions about the queries and query packs determines whether the queries are included or excluded by default.
- Subsequent instructions are executed in order and the instructions that appear later in the file take precedence over the earlier instructions.
{% endtip %}
You can find another example illustrating the use of these filters in the "[Example configuration files](#example-configuration-files)" section.
For more information about using `exclude` and `include` filters in your custom configuration file, see "[Creating {% data variables.product.prodname_codeql %} query suites](https://codeql.github.com/docs/codeql-cli/creating-codeql-query-suites/#filtering-the-queries-in-a-query-suite)." For information on the query metadata you can filter on, see "[Metadata for CodeQL queries](https://codeql.github.com/docs/writing-codeql-queries/metadata-for-codeql-queries/)."
{% endif %}
### Specifying directories to scan
For the interpreted languages that {% data variables.product.prodname_codeql %} supports (Python{% ifversion fpt or ghes > 3.3 or ghae-issue-5017 %}, Ruby{% endif %} and JavaScript/TypeScript), you can restrict {% data variables.product.prodname_code_scanning %} to files in specific directories by adding a `paths` array to the configuration file. You can exclude the files in specific directories from analysis by adding a `paths-ignore` array.

View File

@@ -43,8 +43,8 @@ By default, the code scanning alerts page is filtered to show alerts for the def
![Summary of alerts](/assets/images/help/repository/code-scanning-click-alert.png)
{% ifversion fpt or ghec or ghes > 3.4 or ghae-issue-6249 %}
{% data reusables.code-scanning.alert-default-branch %}
![The "Affected branches" section in an alert](/assets/images/help/repository/code-scanning-affected-branches.png){% endif %}
1. Optionally, if the alert highlights a problem with data flow, click **Show paths** to display the path from the data source to the sink where it's used.
{% ifversion fpt or ghec or ghes > 3.4 or ghae-issue-6249 %}
![The "Show paths" link on an alert](/assets/images/help/repository/code-scanning-show-paths.png)

View File

@@ -151,25 +151,29 @@ The names of the {% data variables.product.prodname_code_scanning %} analysis ch
![{% data variables.product.prodname_code_scanning %} pull request checks](/assets/images/help/repository/code-scanning-pr-checks.png)
When the {% data variables.product.prodname_code_scanning %} jobs complete, {% data variables.product.prodname_dotcom %} works out whether any alerts were added by the pull request and adds the "{% data variables.product.prodname_code_scanning_capc %} results / TOOL NAME" entry to the list of checks. After {% data variables.product.prodname_code_scanning %} has been performed at least once, you can click **Details** to view the results of the analysis.
{% ifversion fpt or ghec or ghes > 3.4 or ghae-issue-7095 %}
<!--Troubleshooting section no longer relevant-->
{% elsif ghes < 3.5 or ghae %}
If you used a pull request to add {% data variables.product.prodname_code_scanning %} to the repository, you will initially see {% ifversion ghes > 3.2 or ghae %}an "Analysis not found"{% elsif ghes = 3.2 %}a "Missing analysis"{% endif %} message when you click **Details** on the "{% data variables.product.prodname_code_scanning_capc %} results / TOOL NAME" check.
{% ifversion ghes > 3.2 or ghae %}
![Analysis not found for commit message](/assets/images/enterprise/3.4/repository/code-scanning-analysis-not-found.png)
The table lists one or more categories. Each category relates to specific analyses, for the same tool and commit, performed on a different language or a different part of the code. For each category, the table shows the two analyses that {% data variables.product.prodname_code_scanning %} attempted to compare to determine which alerts were introduced or fixed in the pull request.
For example, in the screenshot above, {% data variables.product.prodname_code_scanning %} found an analysis for the merge commit of the pull request, but no analysis for the head of the main branch.
### Reasons for the "Analysis not found" message
{% elsif ghes = 3.2 %}
![Missing analysis for commit message](/assets/images/enterprise/3.2/repository/code-scanning-missing-analysis.png)
### Reasons for the "Missing analysis" message
{% endif %}
After {% data variables.product.prodname_code_scanning %} has analyzed the code in a pull request, it needs to compare the analysis of the topic branch (the branch you used to create the pull request) with the analysis of the base branch (the branch into which you want to merge the pull request). This allows {% data variables.product.prodname_code_scanning %} to compute which alerts are newly introduced by the pull request, which alerts were already present in the base branch, and whether any existing alerts are fixed by the changes in the pull request. Initially, if you use a pull request to add {% data variables.product.prodname_code_scanning %} to a repository, the base branch has not yet been analyzed, so it's not possible to compute these details. In this case, when you click through from the results check on the pull request you will see the {% ifversion ghes > 3.2 or ghae %}"Analysis not found"{% elsif ghes = 3.2 %}"Missing analysis for base commit SHA-HASH"{% endif %} message.
There are other situations where there may be no analysis for the latest commit to the base branch for a pull request. These include:
@@ -177,7 +181,7 @@ There are other situations where there may be no analysis for the latest commit
To check whether a branch has been scanned, go to the {% data variables.product.prodname_code_scanning_capc %} page, click the **Branch** drop-down and select the relevant branch.
![Choose a branch from the Branch drop-down menu](/assets/images/help/repository/code-scanning-branch-dropdown.png)
The solution in this situation is to add the name of the base branch to the `on:push` and `on:pull_request` specification in the {% data variables.product.prodname_code_scanning %} workflow on that branch and then make a change that updates the open pull request that you want to scan.
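As a sketch, assuming the base branch is `main`, the trigger section of the workflow would include:

```yaml
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
```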
@@ -189,6 +193,8 @@ There are other situations where there may be no analysis for the latest commit
Merge a trivial change into the base branch to trigger {% data variables.product.prodname_code_scanning %} on this latest commit, then push a change to the pull request to retrigger {% data variables.product.prodname_code_scanning %}.
{% endif %}
## Next steps
After setting up {% data variables.product.prodname_code_scanning %}, and allowing its actions to complete, you can:

View File

@@ -33,8 +33,6 @@ In repositories where {% data variables.product.prodname_code_scanning %} is con
{% ifversion code-scanning-pr-conversations-tab %}
- The **Conversation** tab of the pull request, as part of a pull request review
{% endif %}
- The **Files changed** tab of the pull request
If you have write permission for the repository, you can see any existing {% data variables.product.prodname_code_scanning %} alerts on the **Security** tab. For information about repository alerts, see "[Managing {% data variables.product.prodname_code_scanning %} alerts for your repository](/code-security/secure-coding/managing-code-scanning-alerts-for-your-repository)."
{% ifversion fpt or ghes > 3.2 or ghae or ghec %}

View File

@@ -45,9 +45,15 @@ If you upload a second SARIF file for a commit with the same category and from t
If you're new to SARIF and want to learn more, see Microsoft's [`SARIF tutorials`](https://github.com/microsoft/sarif-tutorials) repository.
## Providing data to track {% data variables.product.prodname_code_scanning %} alerts across runs
Each time the results of a new code scan are uploaded, the results are processed and alerts are added to the repository. To prevent duplicate alerts for the same problem, {% data variables.product.prodname_code_scanning %} uses fingerprints to match results across various runs so they only appear once in the latest run for the selected branch. This makes it possible to match alerts to the correct line of code when files are edited. The `ruleId` for a result must be the same across analysis runs.
### Reporting consistent filepaths
The filepath must be consistent across runs to enable computation of a stable fingerprint. If the filepaths differ for the same result, each new analysis creates a new alert and closes the old one, leaving multiple alerts for the same result.
### Including data for fingerprint generation
{% data variables.product.prodname_dotcom %} uses the `partialFingerprints` property in the OASIS standard to detect when two results are logically identical. For more information, see the "[partialFingerprints property](https://docs.oasis-open.org/sarif/sarif/v2.1.0/cs01/sarif-v2.1.0-cs01.html#_Toc16012611)" entry in the OASIS documentation.
@@ -77,6 +83,12 @@ You can check a SARIF file is compatible with {% data variables.product.prodname
If you use a code analysis engine other than {% data variables.product.prodname_codeql %}, you can review the supported SARIF properties to optimize how your analysis results will appear on {% data variables.product.prodname_dotcom %}.
{% note %}
**Note:** You must supply an explicit value for any property marked as "required". The empty string is not supported for required properties.
{% endnote %}
Any valid SARIF 2.1.0 output file can be uploaded; however, {% data variables.product.prodname_code_scanning %} will only use the following supported properties.
### `sarifLog` object
@@ -138,7 +150,7 @@ Each `result` object contains details for one alert in the codebase. Within the
| `level`| **Optional.** The severity of the result. This level overrides the default severity defined by the rule. {% data variables.product.prodname_code_scanning_capc %} uses the level to filter results by severity on {% data variables.product.prodname_dotcom %}.
| `message.text`| **Required.** A message that describes the result. {% data variables.product.prodname_code_scanning_capc %} displays the message text as the title of the result. Only the first sentence of the message will be displayed when visible space is limited.
| `locations[]`| **Required.** The set of locations where the result was detected up to a maximum of 10. Only one location should be included unless the problem can only be corrected by making a change at every specified location. **Note:** At least one location is required for {% data variables.product.prodname_code_scanning %} to display a result. {% data variables.product.prodname_code_scanning_capc %} will use this property to decide which file to annotate with the result. Only the first value of this array is used. All other values are ignored.
| `partialFingerprints`| **Required.** A set of strings used to track the unique identity of the result. {% data variables.product.prodname_code_scanning_capc %} uses `partialFingerprints` to accurately identify which results are the same across commits and branches. {% data variables.product.prodname_code_scanning_capc %} will attempt to use `partialFingerprints` if they exist. If you are uploading third-party SARIF files with the `upload-action`, the action will create `partialFingerprints` for you when they are not included in the SARIF file. For more information, see "[Providing data to track code scanning alerts across runs](#providing-data-to-track-code-scanning-alerts-across-runs)." **Note:** {% data variables.product.prodname_code_scanning_capc %} only uses the `primaryLocationLineHash`.
| `codeFlows[].threadFlows[].locations[]`| **Optional.** An array of `location` objects for a `threadFlow` object, which describes the progress of a program through a thread of execution. A `codeFlow` object describes a pattern of code execution used to detect a result. If code flows are provided, {% data variables.product.prodname_code_scanning %} will expand code flows on {% data variables.product.prodname_dotcom %} for the relevant result. For more information, see the [`location` object](#location-object).
| `relatedLocations[]`| A set of locations relevant to this result. {% data variables.product.prodname_code_scanning_capc %} will link to related locations when they are embedded in the result message. For more information, see the [`location` object](#location-object).
@@ -204,7 +216,7 @@ These example SARIF output files show supported properties and example values.
### Example with minimum required properties
This SARIF output file has example values to show the minimum required properties for {% data variables.product.prodname_code_scanning %} results to work as expected. If you remove any properties, omit values, or use an empty string, this data will not be displayed correctly or sync on {% data variables.product.prodname_dotcom %}.
```json
{

View File

@@ -58,7 +58,7 @@ For more information see the [`upload-sarif` action](https://github.com/github/c
The `upload-sarif` action can be configured to run when the `push` and `scheduled` event occur. For more information about {% data variables.product.prodname_actions %} events, see "[Events that trigger workflows](/actions/reference/events-that-trigger-workflows)."
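For instance, a workflow that runs the action on every push and on a weekly schedule could use triggers like these (a sketch; the cron expression is a placeholder):

```yaml
on:
  push:
  schedule:
    - cron: '30 1 * * 0'
```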
If your SARIF file doesn't include `partialFingerprints`, the `upload-sarif` action will calculate the `partialFingerprints` field for you and attempt to prevent duplicate alerts. {% data variables.product.prodname_dotcom %} can only create `partialFingerprints` when the repository contains both the SARIF file and the source code used in the static analysis. For more information about preventing duplicate alerts, see "[About SARIF support for code scanning](/code-security/secure-coding/sarif-support-for-code-scanning#providing-data-to-track-code-scanning-alerts-across-runs)."
{% data reusables.code-scanning.upload-sarif-alert-limit %}

View File

@@ -78,7 +78,8 @@ You can display the command-line help for any command using the <nobr>`--help`</
| <nobr>`--command`</nobr> | | Recommended. Use to specify the build command or script that invokes the build process for the codebase. Commands are run from the current folder or, where it is defined, from <nobr>`--source-root`</nobr>. Not needed for Python and JavaScript/TypeScript analysis. |
| <nobr>`--db-cluster`</nobr> | | Optional. Use in multi-language codebases to generate one database for each language specified by <nobr>`--language`</nobr>.
| <nobr>`--no-run-unnecessary-builds`</nobr> | | Recommended. Use to suppress the build command for languages where the {% data variables.product.prodname_codeql_cli %} does not need to monitor the build (for example, Python and JavaScript/TypeScript).
| <nobr>`--source-root`</nobr> | | Optional. Use if you run the CLI outside the checkout root of the repository. By default, the `database create` command assumes that the current directory is the root directory for the source files; use this option to specify a different location. |{% ifversion fpt or ghec or ghes > 3.2 or ghae %}
| <nobr>`--codescanning-config`</nobr> | | Optional (Advanced). Use if you have a configuration file that specifies how to create the {% data variables.product.prodname_codeql %} databases and what queries to run in later steps. For more information, see "[Using a custom configuration file](/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-a-custom-configuration-file)" and "[database create](https://codeql.github.com/docs/codeql-cli/manual/database-create/#cmdoption-codeql-database-create-codescanning-config)." |{% endif %}
For more information, see [Creating {% data variables.product.prodname_codeql %} databases](https://codeql.github.com/docs/codeql-cli/creating-codeql-databases/) in the documentation for the {% data variables.product.prodname_codeql_cli %}.

View File

@@ -417,7 +417,7 @@ updates:
### `open-pull-requests-limit`
By default, {% data variables.product.prodname_dependabot %} opens a maximum of five pull requests for version updates. Once there are five open pull requests from {% data variables.product.prodname_dependabot %}, {% data variables.product.prodname_dependabot %} will not open any new requests until some of those open requests are merged or closed. Use `open-pull-requests-limit` to change this limit. This also provides a simple way to temporarily disable version updates for a package manager.
This option has no impact on security updates, which have a separate, internal limit of ten open pull requests.
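For example, a `dependabot.yml` entry like the following (a minimal sketch; the ecosystem and schedule are placeholders) raises the limit to ten pull requests for npm version updates:

```yaml
version: 2
updates:
  - package-ecosystem: "npm"
    directory: "/"
    schedule:
      interval: "daily"
    open-pull-requests-limit: 10
```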

View File

@@ -12,25 +12,25 @@ shortTitle: Private image registry
## About private image registries and {% data variables.product.prodname_github_codespaces %}
A registry is a secure space for storing, managing, and fetching private container images. You may use one to store one or more images. There are many examples of registries, such as {% data variables.product.prodname_container_registry %}, {% data variables.product.prodname_npm_registry %}, Azure Container Registry, or DockerHub.
{% data variables.product.prodname_ghcr_and_npm_registry %} can be configured to allow container images to be pulled seamlessly into {% data variables.product.prodname_github_codespaces %} during codespace creation, without having to provide any authentication credentials. For other image registries, you must create secrets in {% data variables.product.prodname_dotcom %} to store the access details, which will allow {% data variables.product.prodname_codespaces %} to access images stored in that registry.
## Accessing images stored in {% data variables.product.prodname_ghcr_and_npm_registry %}
{% data variables.product.prodname_ghcr_and_npm_registry %} provide the easiest way for {% data variables.product.prodname_codespaces %} to consume dev container images.
For more information, see "[Working with the Container registry](/packages/working-with-a-github-packages-registry/working-with-the-container-registry)" and "[Working with the npm registry](/packages/working-with-a-github-packages-registry/working-with-the-npm-registry)".
### Accessing an image published to the same repository as the codespace
If you publish a container image to {% data variables.product.prodname_ghcr_or_npm_registry %} in the same repository that the codespace is being launched in, you will automatically be able to fetch that image on codespace creation. You won't have to provide any additional credentials, unless the **Inherit access from repo** option was unselected when the container image was published.
#### Inheriting access from the repository from which an image was published
By default, when you publish a container image to {% data variables.product.prodname_ghcr_or_npm_registry %}, the image inherits the access setting of the repository from which the image was published. For example, if the repository is public, the image is also public. If the repository is private, the image is also private, but is accessible from the repository.
This behavior is controlled by the **Inherit access from repo** option. **Inherit access from repo** is selected by default when publishing via {% data variables.product.prodname_actions %}, but not when publishing directly to {% data variables.product.prodname_ghcr_or_npm_registry %} using a Personal Access Token (PAT).
If the **Inherit access from repo** option was not selected when the image was published, you can manually add the repository to the published container image's access controls. For more information, see "[Configuring a package's access control and visibility](/packages/learn-github-packages/configuring-a-packages-access-control-and-visibility#inheriting-access-for-a-container-image-from-a-repository)."
@@ -46,13 +46,13 @@ If you want to allow a subset of an organization's repositories to access a cont
### Publishing a container image from a codespace
Seamless access from a codespace to {% data variables.product.prodname_ghcr_or_npm_registry %} is limited to pulling container images. If you want to publish a container image from inside a codespace, you must use a personal access token (PAT) with the `write:packages` scope.
We recommend publishing images via {% data variables.product.prodname_actions %}. For more information, see "[Publishing Docker images](/actions/publishing-packages/publishing-docker-images)" and "[Publishing Node.js packages](/actions/publishing-packages/publishing-nodejs-packages)."
## Accessing images stored in other container registries
If you are accessing a container image from a registry that isn't {% data variables.product.prodname_ghcr_or_npm_registry %}, {% data variables.product.prodname_codespaces %} checks for the presence of three secrets, which define the server name, username, and personal access token (PAT) for a container registry. If these secrets are found, {% data variables.product.prodname_github_codespaces %} will make the registry available inside your codespace.
- `<*>_CONTAINER_REGISTRY_SERVER`
- `<*>_CONTAINER_REGISTRY_USER`

View File

@@ -17,7 +17,7 @@ redirect_from:
## About the {% data variables.product.prodname_vscode_command_palette %}
The Command Palette is one of the focal features of {% data variables.product.prodname_vscode %} and is available for you to use in {% data variables.product.prodname_github_codespaces %}. The Command Palette allows you to access many commands for {% data variables.product.prodname_codespaces %} and {% data variables.product.prodname_vscode_shortname %}. For more information on using the {% data variables.product.prodname_vscode_command_palette_shortname %}, see "[User Interface](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette)" in the {% data variables.product.prodname_vscode_shortname %} documentation.
## Accessing the {% data variables.product.prodname_vscode_command_palette_shortname %}

View File

@@ -0,0 +1,120 @@
---
title: Getting started with GitHub Codespaces for machine learning
shortTitle: Machine learning
intro: 'Learn about working on machine learning projects with {% data variables.product.prodname_github_codespaces %} and its out-of-the-box tools.'
product: '{% data reusables.gated-features.codespaces %}'
versions:
fpt: '*'
ghec: '*'
type: tutorial
topics:
- Codespaces
- Developer
---
## Introduction
This guide introduces you to machine learning with {% data variables.product.prodname_github_codespaces %}. You'll build a simple image classifier, learn about some of the tools that come preinstalled in {% data variables.product.prodname_github_codespaces %}, configure your development environment for NVIDIA CUDA, and use {% data variables.product.prodname_cli %} to open your codespace in JupyterLab.
## Prerequisite
You have access to {% data variables.product.prodname_github_codespaces %}. For more information, see "[Creating a codespace](/codespaces/developing-in-codespaces/creating-a-codespace#access-to-github-codespaces)."
## Build a simple image classifier
We'll use a Jupyter notebook to build a simple image classifier.
Jupyter notebooks are sets of cells that you can execute one after another. The notebook we'll use includes a number of cells that build an image classifier using [PyTorch](https://pytorch.org/). Each cell is a different phase of that process: download a dataset, set up a neural network, train a model, and then test that model.
We'll run all of the cells, in sequence, to perform all phases of building the image classifier. When we do this, Jupyter saves the output back into the notebook so that you can examine the results.
### Creating a repository and a codespace
1. Go to the [github/codespaces-getting-started-ml](https://github.com/github/codespaces-getting-started-ml) template repository and click **Use this template**.
{% data reusables.codespaces.open-codespace-from-template-repo %}
By default, a codespace for this repository opens in a web-based version of {% data variables.product.prodname_vscode %}.
### Open the image classifier notebook
The default container image that's used by {% data variables.product.prodname_github_codespaces %} includes a set of machine learning libraries that are preinstalled in your codespace. For example, Numpy, pandas, SciPy, Matplotlib, seaborn, scikit-learn, TensorFlow, Keras, PyTorch, Requests, and Plotly. For more information about the default image, see "[Introduction to dev containers](/codespaces/setting-up-your-project-for-codespaces/introduction-to-dev-containers#using-the-default-dev-container-configuration)" and [the `devcontainers/images` repository](https://github.com/devcontainers/images/tree/main/src/codespaces#github-codespaces-default-linux-universal).
1. In the {% data variables.product.prodname_vscode_shortname %} editor, close any "Get Started" tabs that are displayed.
1. Open the `image-classifier.ipynb` notebook file.
1. Click the Python kernel link at the top right of the editor.
![Screenshot of the Python kernel link](/assets/images/help/codespaces/jupyter-python-kernel-link.png)
1. In the drop-down menu, choose the kernel in the directory `/opt/python/latest/bin/python`.
![Screenshot of the Python kernel drop-down menu](/assets/images/help/codespaces/jupyter-python-kernel-dropdown.png)
### Build the image classifier
The image classifier notebook contains all the code you need to download a dataset, train a neural network, and evaluate its performance.
1. Click **Run All** to execute all of the notebook's cells.
![Screenshot of the Run All button](/assets/images/help/codespaces/jupyter-run-all.png)
1. Scroll down to view the output of each cell.
![Screenshot of Step 3 in the editor](/assets/images/help/codespaces/jupyter-notebook-step3.png)
## Configure NVIDIA CUDA for your codespace
Some software, such as TensorFlow, requires you to install NVIDIA CUDA to use your codespace's GPU. Where this is the case, you can create your own custom configuration by using a `devcontainer.json` file and specify that CUDA should be installed. For more information on creating a custom configuration, see "[Introduction to dev containers](/codespaces/setting-up-your-project-for-codespaces/introduction-to-dev-containers#creating-a-custom-dev-container-configuration)."
{% note %}
**Note**: For full details of the script that's run when you add the `nvidia-cuda` feature, see [the devcontainers/features repository](https://github.com/devcontainers/features/tree/main/src/nvidia-cuda).
{% endnote %}
1. Within a codespace, open the `.devcontainer/devcontainer.json` file in the editor.
1. Add a top-level `features` object with the following contents:
```json{:copy}
"features": {
"ghcr.io/devcontainers/features/nvidia-cuda:1": {
"installCudnn": true
}
}
```
For more information about the `features` object, see the [development containers specification](https://containers.dev/implementors/features/#devcontainer-json-properties).
If you are using the `devcontainer.json` file from the image classifier repository you created for this tutorial, your `devcontainer.json` file will now look like this:
```
{
"customizations": {
"vscode": {
"extensions": [
"ms-python.python",
"ms-toolsai.jupyter"
]
}
},
"features": {
"ghcr.io/devcontainers/features/nvidia-cuda:1": {
"installCudnn": true
}
}
}
```
1. Save the change.
{% data reusables.codespaces.rebuild-command %}
The codespace container will be rebuilt. This will take several minutes. When the rebuild is complete the codespace is automatically reopened.
1. Commit the change to the repository so that CUDA will be installed in any new codespaces you create from this repository in future.
## Open your codespace in JupyterLab
The default container image that's used by {% data variables.product.prodname_github_codespaces %} includes JupyterLab, the web-based Jupyter IDE. You can use {% data variables.product.prodname_cli %} to open your codespace in JupyterLab without having to install anything else on your codespace.
1. In the terminal, enter the {% data variables.product.prodname_cli %} command `gh cs jupyter`.
1. Choose the codespace you want to open.
![Screenshot of opening a codespace from the terminal](/assets/images/help/codespaces/open-codespace-in-jupyter.png)

View File

@@ -18,6 +18,7 @@ children:
- /forwarding-ports-in-your-codespace
- /default-environment-variables-for-your-codespace
- /connecting-to-a-private-network
- /getting-started-with-github-codespaces-for-machine-learning
- /using-github-codespaces-in-visual-studio-code
- /using-github-codespaces-with-github-cli
---

View File

@@ -73,7 +73,7 @@ As you develop in your codespace, it will save any changes to your files every f
### Closing or stopping your codespace
To stop your codespace you can [use the {% data variables.product.prodname_vscode_command_palette %}](/codespaces/codespaces-reference/using-the-vs-code-command-palette-in-codespaces#suspending-or-stopping-a-codespace) (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> (Mac) / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd> (Windows/Linux)). If you exit your codespace without running the stop command (for example, closing the browser tab), or if you leave the codespace running without interaction, the codespace and its running processes will continue until a window of inactivity occurs, after which the codespace will stop. By default, the window of inactivity is 30 minutes.
When you close or stop your codespace, all uncommitted changes are preserved until you connect to the codespace again.

View File

@@ -25,12 +25,7 @@ For more information on exactly how {% data variables.product.prodname_github_co
## Creating your codespace
1. Navigate to the [template repository](https://github.com/github/haikus-for-codespaces) and select **Use this template**.
1. Choose an owner for the new repository, enter a repository name, select your preferred privacy setting, and click **Create repository from template**.
{% data reusables.codespaces.open-codespace-from-template-repo %}
## Running the application

View File

@@ -155,9 +155,9 @@ You can add features to your predefined container configuration to customize whi
You can add some of the most common features by selecting them when configuring your predefined container. For more information on the available features, see the [script library](https://github.com/microsoft/vscode-dev-containers/tree/main/script-library#scripts) in the `vscode-dev-containers` repository.
1. Access the Command Palette (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> (Mac) / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd> (Windows/Linux)), then start typing "configure". Select **Codespaces: Configure Devcontainer Features**.
![The Configure Devcontainer Features command in the Command Palette](/assets/images/help/codespaces/codespaces-configure-features.png)
1. Update your feature selections, then click **OK**.
@@ -165,7 +165,7 @@ You can add some of the most common features by selecting them when configuring
1. To apply the changes, in the bottom right corner of the screen, click **Rebuild now**. For more information about rebuilding your container, see "[Applying changes to your configuration](#applying-configuration-changes-to-a-codespace)."
!["Codespaces: Rebuild Container" in the command palette](/assets/images/help/codespaces/rebuild-prompt.png)
!["Codespaces: Rebuild Container" in the Command Palette](/assets/images/help/codespaces/rebuild-prompt.png)
## Creating a custom dev container configuration

View File

@@ -28,7 +28,7 @@ These logs contain detailed information about the codespace, the container, the
{% webui %}
1. If you are using {% data variables.product.prodname_codespaces %} in the browser, ensure that you are connected to the codespace you want to debug.
1. Open the {% data variables.product.prodname_vscode_command_palette_shortname %} (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> (Mac) / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd> (Windows/Linux)) and type **Export logs**. Select **Codespaces: Export Logs** from the list to download the logs.
1. Define where to save the zip archive of logs then click **Save** (desktop) or click **OK** (web).
1. If you are using {% data variables.product.prodname_codespaces %} in the browser, right-click on the zip archive of logs from the Explorer view and select **Download…** to download them to your local machine.
@@ -36,7 +36,7 @@ These logs contain detailed information about the codespace, the container, the
{% vscode %}
1. Open the {% data variables.product.prodname_vscode_command_palette_shortname %} (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> (Mac) / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd> (Windows/Linux)) and type **Export logs**. Select **Codespaces: Export Logs** from the list to download the logs.
1. Define where to save the zip archive of logs then click **Save** (desktop) or click **OK** (web).
{% endvscode %}
@@ -55,7 +55,7 @@ These logs contain information about the container, dev container, and their con
{% webui %}
1. Connect to the codespace you want to debug.
2. Open the {% data variables.product.prodname_vscode_command_palette_shortname %} (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> (Mac) / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd> (Windows/Linux)) and type **Creation logs**. Select **Codespaces: View Creation Log** from the list to open the `creation.log` file.
If you want to share the log with support, you can copy the text from the creation log into a text editor and save the file locally.
@@ -63,7 +63,7 @@ If you want to share the log with support, you can copy the text from the creati
{% vscode %}
Open the {% data variables.product.prodname_vscode_command_palette_shortname %} (<kbd>Shift</kbd>+<kbd>Command</kbd>+<kbd>P</kbd> (Mac) / <kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>P</kbd> (Windows/Linux)) and type **Creation logs**. Select **Codespaces: View Creation Log** from the list to open the `creation.log` file.
If you want to share the log with support, you can copy the text from the creation log into a text editor and save the file locally.

View File

@@ -1,5 +1,5 @@
---
title: Troubleshooting creation and deletion of codespaces
intro: 'This article provides troubleshooting steps for common issues you may experience when creating or deleting a codespace, including storage and configuration issues.'
product: '{% data reusables.gated-features.codespaces %}'
versions:
@@ -16,6 +16,8 @@ shortTitle: Creation and deletion
### No access to create a codespace
{% data variables.product.prodname_codespaces %} are not available for all repositories. If the "Open with Codespaces" button is missing, {% data variables.product.prodname_github_codespaces %} may not be available for that repository. For more information, see "[Creating a codespace](/codespaces/developing-in-codespaces/creating-a-codespace#access-to-codespaces)."
You can't create a codespace for a private repository that is owned by an organization, unless you have write access to the repository or the organization has enabled forking for it.
If you believe your organization has [enabled {% data variables.product.prodname_codespaces %}](/codespaces/managing-codespaces-for-your-organization/enabling-codespaces-for-your-organization#about-enabling-codespaces-for-your-organization), make sure that an organization owner or billing manager has set the spending limit for {% data variables.product.prodname_codespaces %}. For more information, see "[Managing your spending limit for {% data variables.product.prodname_codespaces %}](/billing/managing-billing-for-github-codespaces/managing-spending-limits-for-codespaces)."
### Codespace does not open when created

View File

@@ -18,7 +18,7 @@ redirect_from:
Codespaces are set to stop after 30 minutes without any activity. If you try to interact with a codespace after it has stopped, you may see a `503 service unavailable` error.
- If a **Start** button is shown in {% data variables.product.prodname_vscode %} or in your browser window, click **Start** to reconnect to the codespace.
- Reset your codespace by reloading the window. From the [Command Palette](/codespaces/codespaces-reference/using-the-command-palette-in-codespaces#accessing-the-command-palette) in {% data variables.product.prodname_vscode %}, click **Developer: Reload Window**.
## Browser cannot connect

View File

@@ -149,13 +149,19 @@ To authenticate with an installation access token, include it in the Authorizati
```shell
$ curl -i \
-H "Authorization: token YOUR_INSTALLATION_ACCESS_TOKEN" \
-H "Authorization: Bearer YOUR_INSTALLATION_ACCESS_TOKEN" \
-H "Accept: application/vnd.github+json" \
{% data variables.product.api_url_pre %}/installation/repositories
```
`YOUR_INSTALLATION_ACCESS_TOKEN` is the value you must replace.
{% note %}
**Note:** {% data reusables.getting-started.bearer-vs-token %}
{% endnote %}
## Accessing API endpoints as an installation
For a list of REST API endpoints that are available for use by {% data variables.product.prodname_github_apps %} using an installation access token, see "[Available Endpoints](/rest/overview/endpoints-available-for-github-apps)."

View File

@@ -107,13 +107,13 @@ By default, the response takes the following form. The response parameters `expi
The user's access token allows the GitHub App to make requests to the API on behalf of a user.
Authorization: Bearer OAUTH-TOKEN
GET {% data variables.product.api_url_code %}/user
For example, in curl you can set the Authorization header like this:
```shell
curl -H "Authorization: token OAUTH-TOKEN" {% data variables.product.api_url_pre %}/user
curl -H "Authorization: Bearer OAUTH-TOKEN" {% data variables.product.api_url_pre %}/user
```
## Device flow
@@ -132,12 +132,12 @@ The device flow allows you to authorize users for a headless app, such as a CLI
Once you have an OAuth token for a user, you can check which installations that user can access.
Authorization: Bearer OAUTH-TOKEN
GET /user/installations
You can also check which repositories are accessible to a user for an installation.
Authorization: Bearer OAUTH-TOKEN
GET /user/installations/:installation_id/repositories
More details can be found in: [List app installations accessible to the user access token](/rest/apps#list-app-installations-accessible-to-the-user-access-token) and [List repositories accessible to the user access token](/rest/apps#list-repositories-accessible-to-the-user-access-token).

View File

@@ -23,6 +23,8 @@ When an organization has an allow list, third-party applications that connect vi
## Adding an IP address allow list to a {% data variables.product.prodname_github_app %}
{% data reusables.identity-and-permissions.ipv6-allow-lists %}
{% data reusables.apps.settings-step %}
{% data reusables.user-settings.developer_settings %}
{% data reusables.user-settings.github_apps %}

View File

@@ -47,7 +47,7 @@ Rate limits for {% data variables.product.prodname_github_apps %} and {% data va
{% ifversion fpt or ghec %}
{% data variables.product.prodname_github_apps %} that are installed on an organization within an enterprise on {% data variables.product.product_location %} are subject to a limit of 15,000 requests per hour per organization that has installed the app.
{% endif %}

View File

@@ -106,13 +106,13 @@ Accept: application/xml
The access token allows you to make requests to the API on behalf of a user.
Authorization: Bearer OAUTH-TOKEN
GET {% data variables.product.api_url_code %}/user
For example, in curl you can set the Authorization header like this:
```shell
curl -H "Authorization: token OAUTH-TOKEN" {% data variables.product.api_url_pre %}/user
curl -H "Authorization: Bearer OAUTH-TOKEN" {% data variables.product.api_url_pre %}/user
```
## Device flow

View File

@@ -27,7 +27,7 @@ If your {% data variables.product.prodname_oauth_app %} doesn't have access to a
Check headers to see what OAuth scopes you have, and what the API action accepts:
```shell
$ curl -H "Authorization: token OAUTH-TOKEN" {% data variables.product.api_url_pre %}/users/codertocat -I
$ curl -H "Authorization: Bearer OAUTH-TOKEN" {% data variables.product.api_url_pre %}/users/codertocat -I
HTTP/2 200
X-OAuth-Scopes: repo, user
X-Accepted-OAuth-Scopes: user

View File

@@ -37,12 +37,12 @@ $ export SECRET_TOKEN=<em>your_token</em>
## Validating payloads from GitHub
When your secret token is set, {% data variables.product.product_name %} uses it to create a hash signature with each payload. This hash signature is included with the headers of each request as `x-hub-signature-256`.
{% ifversion fpt or ghes or ghec %}
{% note %}
**Note:** For backward-compatibility, we also include the `x-hub-signature` header that is generated using the SHA-1 hash function. If possible, we recommend that you use the `x-hub-signature-256` header for improved security. The example below demonstrates using the `x-hub-signature-256` header.
{% endnote %}
{% endif %}

View File

@@ -24,23 +24,22 @@ To configure an LMS to connect to {% data variables.product.prodname_classroom %
## Supported LMSes
{% data variables.product.prodname_classroom %} supports import of roster data from LMSes that implement Learning Tools Interoperability (LTI) standards.
{% note %}
**Note:** {% data variables.product.prodname_classroom %} previously supported import of roster data from LMSes that implement Learning Tools Interoperability (LTI) versions 1.0 and 1.1. On June 30, 2022, the Instructional Management System (IMS) Global Learning Consortium [ended support for LTI versions 1.0 and 1.1](https://www.imsglobal.org/lti-security-announcement-and-deprecation-schedule). In the interest of keeping sensitive student information safe and secure, {% data variables.product.company_short %} has temporarily disabled importing roster data from LTI-compliant LMSes.<br><br>
Support for the latest version of Learning Tools Interoperability, [LTI 1.3](https://www.imsglobal.org/activity/learning-tools-interoperability), is currently being worked on and will be made available in {% data variables.product.prodname_classroom %} very soon.
{% endnote %}
LTI is an industry-standard protocol and GitHub Classroom's use of LTI is certified by the Instructional Management System (IMS) Global Learning Consortium. For more information, see [Learning Tools Interoperability](https://www.imsglobal.org/activity/learning-tools-interoperability) and [About IMS Global Learning Consortium](http://www.imsglobal.org/aboutims.html) on the IMS Global Learning Consortium website.
{% data variables.product.company_short %} has tested import of roster data from the following LMSes into {% data variables.product.prodname_classroom %}.
- Canvas
- Google Classroom
- Moodle
- Sakai
Currently, {% data variables.product.prodname_classroom %} doesn't support import of roster data from Blackboard or Brightspace.
## Connecting to Google Classroom
{% data reusables.classroom.sign-into-github-classroom %}
{% data reusables.classroom.click-classroom-in-list %}
@@ -48,90 +47,16 @@ Currently, {% data variables.product.prodname_classroom %} doesn't support impor
1. If your classroom already has a roster, you can either update the roster or delete the roster and create a new roster.
- For more information about deleting and creating a roster, see "[Deleting a roster for a classroom](/education/manage-coursework-with-github-classroom/manage-classrooms#deleting-a-roster-for-a-classroom)" and "[Creating a roster for your classroom](/education/manage-coursework-with-github-classroom/manage-classrooms#creating-a-roster-for-your-classroom)."
- For more information about updating a roster, see "[Adding students to the roster for your classroom](/education/manage-coursework-with-github-classroom/manage-classrooms#adding-students-to-the-roster-for-your-classroom)."
1. In the list of LMSes, click Google Classroom.
![Google Classroom](/assets/images/help/classroom/classroom-settings-click-google-classroom.png)
1. Sign in to Google, then select the Classroom to link to.
## Connecting to Canvas, Moodle, Sakai, and other LMSes
Connecting to other LMSes is temporarily unavailable as {% data variables.product.company_short %} updates to Learning Tools Interoperability (LTI) version 1.3. For more information, see "[Supported LMSes](#supported-lmses)."
## Configuring Canvas
You can configure {% data variables.product.prodname_classroom %} as an external app for Canvas to import roster data into your classroom. For more information about Canvas, see the [Canvas website](https://www.instructure.com/canvas/).
1. Sign into [Canvas](https://www.instructure.com/canvas/#login).
1. Select the Canvas course to integrate with {% data variables.product.prodname_classroom %}.
1. In the left sidebar, click **Settings**.
1. Click the **Apps** tab.
1. Click **View app configurations**.
1. Click **+App**.
1. Select the **Configuration Type** drop-down menu, and click **By URL**.
1. Paste the configuration credentials from {% data variables.product.prodname_classroom %}. For more information, see "[Generating configuration credentials for your classroom](#generating-configuration-credentials-for-your-classroom)."
| Field in Canvas app configuration | Value or setting |
| :- | :- |
| **Consumer Key** | Consumer key from {% data variables.product.prodname_classroom %} |
| **Shared Secret** | Shared secret from {% data variables.product.prodname_classroom %} |
| **Allow this tool to access the IMS Names and Role Provisioning Service** | Enabled |
| **Configuration URL** | Launch URL from {% data variables.product.prodname_classroom %} |
{% note %}
**Note**: If you don't see a checkbox in Canvas labeled "Allow this tool to access the IMS Names and Role Provisioning Service", then your Canvas administrator must contact Canvas support to enable membership service configuration for your Canvas account. Without enabling this feature, you won't be able to sync the roster from Canvas. For more information, see [How do I contact Canvas Support?](https://community.canvaslms.com/t5/Canvas-Basics-Guide/How-do-I-contact-Canvas-Support/ta-p/389767) on the Canvas website.
{% endnote %}
1. Click **Submit**.
1. In the left sidebar, click **Home**.
1. To prompt Canvas to send a confirmation email, in the left sidebar, click **GitHub Classroom**. Follow the instructions in the email to finish linking {% data variables.product.prodname_classroom %}.
## Configuring Moodle
You can configure {% data variables.product.prodname_classroom %} as an activity for Moodle to import roster data into your classroom. For more information about Moodle, see the [Moodle website](https://moodle.org).
You must be using Moodle version 3.0 or greater.
1. Sign in to [Moodle](https://moodle.org/login/).
1. Select the Moodle course to integrate with {% data variables.product.prodname_classroom %}.
1. Click **Turn editing on**.
1. Wherever you'd like {% data variables.product.prodname_classroom %} to be available in Moodle, click **Add an activity or resource**.
1. Choose **External tool** and click **Add**.
1. In the "Activity name" field, type "GitHub Classroom".
1. In the **Preconfigured tool** field, to the right of the drop-down menu, click **+**.
1. Under "External tool configuration", paste the configuration credentials from {% data variables.product.prodname_classroom %}. For more information, see "[Generating configuration credentials for your classroom](#generating-configuration-credentials-for-your-classroom)."
| Field in Moodle app configuration | Value or setting |
| :- | :- |
| **Tool name** | {% data variables.product.prodname_classroom %} - _YOUR CLASSROOM NAME_<br/><br/>**Note**: You can use any name, but we suggest this value for clarity. |
| **Tool URL** | Launch URL from {% data variables.product.prodname_classroom %} |
| **LTI version** | LTI 1.0/1.1 |
| **Default launch container** | New window |
| **Consumer key** | Consumer key from {% data variables.product.prodname_classroom %} |
| **Shared secret** | Shared secret from {% data variables.product.prodname_classroom %} |
1. Scroll to and click **Services**.
1. To the right of "IMS LTI Names and Role Provisioning", select the drop-down menu and click **Use this service to retrieve members' information as per privacy settings**.
1. Scroll to and click **Privacy**.
1. To the right of **Share launcher's name with tool** and **Share launcher's email with tool**, select the drop-down menus and click **Always**.
1. At the bottom of the page, click **Save changes**.
1. In the **Preconfigured tool** menu, click **GitHub Classroom - _YOUR CLASSROOM NAME_**.
1. Under "Common module settings", to the right of "Availability", select the drop-down menu and click **Hide from students**.
1. At the bottom of the page, click **Save and return to course**.
1. Navigate to anywhere you chose to display {% data variables.product.prodname_classroom %}, and click the {% data variables.product.prodname_classroom %} activity.
## Importing a roster from your LMS
For more information about importing the roster from your LMS into {% data variables.product.prodname_classroom %}, see "[Manage classrooms](/education/manage-coursework-with-github-classroom/manage-classrooms#creating-a-roster-for-your-classroom)."
In the meantime, you can enter your roster manually. For more information about manually creating a roster for your classroom, see "[Manage classrooms](/education/manage-coursework-with-github-classroom/manage-classrooms#creating-a-roster-for-your-classroom)."
## Disconnecting your LMS

View File

@@ -7,15 +7,11 @@ topics:
- Profile
---
{% note %}
**Note:** The ability to follow organizations is currently in public beta and subject to change.
{% endnote %}
{% data reusables.organizations.follow-organizations-beta %}
## About followers on {% data variables.product.product_name %}
When you follow organizations, you'll see their public activity on your personal dashboard. For more information, see "[About your personal dashboard](/account-and-profile/setting-up-and-managing-your-personal-account-on-github/managing-personal-account-settings/about-your-personal-dashboard#staying-updated-with-activity-from-the-community)."
{% data reusables.organizations.about-following-organizations %} For more information, see "[About your personal dashboard](/account-and-profile/setting-up-and-managing-your-personal-account-on-github/managing-personal-account-settings/about-your-personal-dashboard#staying-updated-with-activity-from-the-community)."
You can unfollow an organization if you do not wish to see their {% ifversion fpt or ghec %}public{% endif %} activity on {% data variables.product.product_name %}.

View File

@@ -58,11 +58,21 @@ If you have existing source code or repositories stored locally on your computer
![Create New Repository drop-down](/assets/images/help/repository/repo-create.png)
{% data reusables.command_line.open_the_multi_os_terminal %}
3. Change the current working directory to your local project.
4. Initialize the local directory as a Git repository.
```shell
$ git init -b main
```
4. Use the `init` command to initialize the local directory as a Git repository. By default, the initial branch is called `master`.
If you're using Git 2.28.0 or a later version, you can set the name of the default branch using `-b`.
```shell
$ git init -b main
```
If you're using Git 2.27.1 or an earlier version, you can set the name of the default branch by chaining `git branch -m` after `git init`.
```shell
$ git init && git branch -m main
```
5. Add the files in your new local repository. This stages them for the first commit.
```shell
$ git add .
# Adds the files in the local repository and stages them for commit. {% data reusables.git.unstage-codeblock %}
@@ -95,10 +105,19 @@ If you have existing source code or repositories stored locally on your computer
![Create New Repository drop-down](/assets/images/help/repository/repo-create.png)
{% data reusables.command_line.open_the_multi_os_terminal %}
3. Change the current working directory to your local project.
4. Initialize the local directory as a Git repository.
```shell
$ git init -b main
```
4. Use the `init` command to initialize the local directory as a Git repository. By default, the initial branch is called `master`.
If you're using Git 2.28.0 or a later version, you can set the name of the default branch using `-b`.
```shell
$ git init -b main
```
If you're using Git 2.27.1 or an earlier version, you can set the name of the default branch by chaining `git branch -m` after `git init`.
```shell
$ git init && git branch -m main
```
5. Add the files in your new local repository. This stages them for the first commit.
```shell
$ git add .
@@ -132,10 +151,19 @@ If you have existing source code or repositories stored locally on your computer
![Create New Repository drop-down](/assets/images/help/repository/repo-create.png)
{% data reusables.command_line.open_the_multi_os_terminal %}
3. Change the current working directory to your local project.
4. Initialize the local directory as a Git repository.
```shell
$ git init -b main
```
4. Use the `init` command to initialize the local directory as a Git repository. By default, the initial branch is called `master`.
If you're using Git 2.28.0 or a later version, you can set the name of the default branch using `-b`.
```shell
$ git init -b main
```
If you're using Git 2.27.1 or an earlier version, you can set the name of the default branch by chaining `git branch -m` after `git init`.
```shell
$ git init && git branch -m main
```
5. Add the files in your new local repository. This stages them for the first commit.
```shell
$ git add .

View File

@@ -57,7 +57,23 @@ From your dashboard, click the drop down menu of your username on the left side
![Switch account context dropdown](/assets/images/help/overview/dashboard-contextswitcher.png)
### Exploring other projects on {% data variables.product.prodname_dotcom %}
{% ifversion for-you-feed %}
## Following organizations
{% data reusables.organizations.follow-organizations-beta %}
{% data reusables.organizations.about-following-organizations %}
To follow an organization, in the header of the organization's page, click **Follow**.
![Screenshot of the organization header, with the follow button highlighted](/assets/images/help/profile/organization-profile-following.png)
For more information, see "[Following organizations](/get-started/exploring-projects-on-github/following-organizations)."
{% endif %}
## Exploring other projects on {% data variables.product.prodname_dotcom %}
You can discover new and interesting projects on {% data variables.product.prodname_dotcom %}'s Explore page. You can star interesting projects to make them easy to find again later. Visit your stars page to see all your starred projects. For more information about stars, see "[Saving repositories with stars](/get-started/exploring-projects-on-github/saving-repositories-with-stars)."

View File

@@ -118,7 +118,7 @@ Right now, you have a fork of the Spoon-Knife repository, but you do not have th
> Cloning into `Spoon-Knife`...
> remote: Counting objects: 10, done.
> remote: Compressing objects: 100% (8/8), done.
> remove: Total 10 (delta 1), reused 10 (delta 1)
> remote: Total 10 (delta 1), reused 10 (delta 1)
> Unpacking objects: 100% (10/10), done.
```

View File

@@ -32,7 +32,7 @@ Here is an example request using cURL:
```
$ curl \
-H "Authorization: token $GITHUB_TOKEN" \
-H "Authorization: Bearer $GITHUB_TOKEN" \
-H "X-Github-Next-Global-ID: 1" \
https://api.github.com/graphql \
-d '{ "query": "{ node(id: \"MDQ6VXNlcjM0MDczMDM=\") { id } }" }'

View File

@@ -70,7 +70,7 @@ You can find the node ID of an organization project if you know the organization
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"query{organization(login: \"<em>ORGANIZATION</em>\") {projectV2(number: <em>NUMBER</em>){id}}}"}'
```
{% endcurl %}
@@ -94,7 +94,7 @@ You can also find the node ID of all projects in your organization. The followin
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"{organization(login: \"<em>ORGANIZATION</em>\") {projectsV2(first: 20) {nodes {id title}}}}"}'
```
{% endcurl %}
@@ -125,7 +125,7 @@ You can find the node ID of a user project if you know the project number. Repla
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"query{user(login: \"<em>USER</em>\") {projectV2(number: <em>NUMBER</em>){id}}}"}'
```
{% endcurl %}
@@ -149,7 +149,7 @@ You can also find the node ID for all of your projects. The following example wi
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"{user(login: \"<em>USER</em>\") {projectsV2(first: 20) {nodes {id title}}}}"}'
```
{% endcurl %}
@@ -180,7 +180,7 @@ The following example will return the ID, name, settings, and configuration for
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"query{ node(id: \"<em>PROJECT_ID</em>\") { ... on ProjectV2 { fields(first: 20) { nodes { ... on ProjectV2Field { id name } ... on ProjectV2IterationField { id name configuration { iterations { startDate id }}} ... on ProjectV2SingleSelectField { id name options { id name }}}}}}}"}'
```
{% endcurl %}
@@ -284,7 +284,7 @@ If you just need the name and ID of a field, and do not need information about i
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"query{ node(id: \"<em>PROJECT_ID</em>\") { ... on ProjectV2 { fields(first: 20) { nodes { ... on ProjectV2FieldCommon { id name }}}}}}"}'
```
{% endcurl %}
@@ -354,7 +354,7 @@ The following example will return the first 20 issues, pull requests, and draft
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"query{ node(id: \"<em>PROJECT_ID</em>\") { ... on ProjectV2 { items(first: 20) { nodes{ id fieldValues(first: 8) { nodes{ ... on ProjectV2ItemFieldTextValue { text field { ... on ProjectV2FieldCommon { name }}} ... on ProjectV2ItemFieldDateValue { date field { ... on ProjectV2FieldCommon { name } } } ... on ProjectV2ItemFieldSingleSelectValue { name field { ... on ProjectV2FieldCommon { name }}}}} content{ ... on DraftIssue { title body } ...on Issue { title assignees(first: 10) { nodes{ login }}} ...on PullRequest { title assignees(first: 10) { nodes{ login }}}}}}}}}"}'
```
{% endcurl %}
@@ -446,7 +446,7 @@ The following example will add an issue or pull request to your project. Replace
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"mutation {addProjectV2ItemById(input: {projectId: \"<em>PROJECT_ID</em>\" contentId: \"<em>CONTENT_ID</em>\"}) {item {id}}}"}'
```
{% endcurl %}
@@ -488,8 +488,8 @@ The following example will add a draft issue to your project. Replace `PROJECT_I
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--data '{"query":"mutation {addProjectV2DraftIssue(input: {projectId: "<em>PROJECT_ID</em>" title: "<em>TITLE</em>" body: "<em>BODY</em>"}) {item {id}}}"}'
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"mutation {addProjectV2DraftIssue(input: {projectId: \"<em>PROJECT_ID</em>\" title: \"<em>TITLE</em>\" body: \"<em>BODY</em>\"}) {projectItem {id}}}"}'
```
{% endcurl %}
@@ -498,7 +498,7 @@ curl --request POST \
gh api graphql -f query='
mutation {
addProjectV2DraftIssue(input: {projectId: "<em>PROJECT_ID</em>" title: "<em>TITLE</em>" body: "<em>BODY</em>"}) {
item {
projectItem {
id
}
}
@@ -512,7 +512,7 @@ The response will contain the node ID of the newly created draft issue.
{
"data": {
"addProjectV2DraftIssue": {
"item": {
"projectItem": {
"id": "PVTI_lADOANN5s84ACbL0zgBbxFc"
}
}
@@ -528,7 +528,7 @@ The following example will update your project's settings. Replace `PROJECT_ID`
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"mutation { updateProjectV2(input: { projectId: \"<em>PROJECT_ID</em>\", title: \"Project title\", public: false, readme: \"# Project README\n\nA long description\", shortDescription: \"A short description\"}) { projectV2 { id, title, readme, shortDescription }}}"}'
```
{% endcurl %}
@@ -565,7 +565,7 @@ The following example will update the value of a text field for an item. Replace
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"mutation {updateProjectV2ItemFieldValue( input: { projectId: \"<em>PROJECT_ID</em>\" itemId: \"<em>ITEM_ID</em>\" fieldId: \"<em>FIELD_ID</em>\" value: { text: \"Updated text\" }}) { projectV2Item { id }}}"}'
```
{% endcurl %}
@@ -619,7 +619,7 @@ The following example will update the value of a single select field for an item
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"mutation {updateProjectV2ItemFieldValue( input: { projectId: \"<em>PROJECT_ID</em>\" itemId: \"<em>ITEM_ID</em>\" fieldId: \"<em>FIELD_ID</em>\" value: { singleSelectOptionId: \"<em>OPTION_ID</em>\" }}) { projectV2Item { id }}}"}'
```
{% endcurl %}
@@ -659,7 +659,7 @@ The following example will update the value of an iteration field for an item.
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"mutation {updateProjectV2ItemFieldValue( input: { projectId: \"<em>PROJECT_ID</em>\" itemId: \"<em>ITEM_ID</em>\" fieldId: \"<em>FIELD_ID</em>\" value: { iterationId: \"<em>ITERATION_ID</em>\" }}) { projectV2Item { id }}}"}'
```
{% endcurl %}
@@ -694,7 +694,7 @@ The following example will delete an item from a project. Replace `PROJECT_ID` w
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: token <em>TOKEN</em>' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"mutation {deleteProjectV2Item(input: {projectId: \"<em>PROJECT_ID</em>\" itemId: \"<em>ITEM_ID</em>\"}) {deletedItemId}}"}'
```
{% endcurl %}

View File

@@ -23,7 +23,7 @@ To transfer an open issue to another repository, you must have write access to t
{% endnote %}
When you transfer an issue, comments, labels and assignees are retained. The issue's milestones are not retained. This issue will stay on any user-owned or organization-wide project boards and be removed from any repository project boards. For more information, see "[About project boards](/articles/about-project-boards)."
When you transfer an issue, comments and assignees are retained. Labels and milestones are also retained if they're present in the target repository, with labels matching by name and milestones matching by both name and due date. This issue will stay on any user-owned or organization-wide project boards and be removed from any repository project boards. For more information, see "[About project boards](/articles/about-project-boards)."
People or teams who are mentioned in the issue will receive a notification letting them know that the issue has been transferred to a new repository. The original URL redirects to the new issue's URL. People who don't have read permissions in the new repository will see a banner letting them know that the issue has been transferred to a new repository that they can't access.
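If you prefer the command line, you can also transfer an issue with {% data variables.product.prodname_cli %}. A minimal sketch, assuming `gh` is installed and authenticated (the issue number and destination repository are placeholders):
```shell
# Transfers issue 123 in the current repository to octocat/another-repo.
gh issue transfer 123 octocat/another-repo
```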

View File

@@ -38,6 +38,8 @@ You can also configure allowed IP addresses for the organizations in an enterpri
{% data reusables.identity-and-permissions.about-adding-ip-allow-list-entries %}
{% data reusables.identity-and-permissions.ipv6-allow-lists %}
{% data reusables.profile.access_org %}
{% data reusables.profile.org_settings %}
{% data reusables.organizations.security %}

View File

@@ -1,6 +1,6 @@
---
title: Enabling or disabling GitHub Discussions for an organization
intro: 'You can use {% data variables.product.prodname_discussions %} in a organization as a place for your organization to have conversations that aren''t specific to a single repository within your organization.'
intro: 'You can use {% data variables.product.prodname_discussions %} in an organization as a place for your organization to have conversations that aren''t specific to a single repository within your organization.'
permissions: 'Organization owners can enable {% data variables.product.prodname_discussions %} for their organization.'
versions:
feature: discussions

View File

@@ -18,6 +18,7 @@ featuredLinks:
guideCards:
- '{% ifversion docker-ghcr-enterprise-migration %}/packages/working-with-a-github-packages-registry/migrating-to-the-container-registry-from-the-docker-registry{% endif %}'
- '{% ifversion fpt or ghec or ghes > 3.4 %}/packages/working-with-a-github-packages-registry/working-with-the-container-registry{% else %}/packages/working-with-a-github-packages-registry/working-with-the-docker-registry{% endif %}'
- '{% ifversion packages-npm-v2 %}/packages/working-with-a-github-packages-registry/working-with-the-npm-registry{% endif %}'
- /packages/working-with-a-github-packages-registry/working-with-the-rubygems-registry
changelog:
label: packages

View File

@@ -18,20 +18,22 @@ The permissions for packages are either repository-scoped or user/organization-s
A repository-scoped package inherits the permissions and visibility of the repository that owns the package. You can find a package scoped to a repository by going to the main page of the repository and clicking the **Packages** link to the right of the page. {% ifversion fpt or ghec %}For more information, see "[Connecting a repository to a package](/packages/learn-github-packages/connecting-a-repository-to-a-package)."{% endif %}
The {% data variables.product.prodname_registry %} registries below use repository-scoped permissions:
The {% data variables.product.prodname_registry %} registries below **only** use repository-scoped permissions:
{% ifversion not fpt or ghec %}- Docker registry (`docker.pkg.github.com`){% endif %}
- npm registry
{% ifversion packages-npm-v2 %}{% else %}- npm registry{% endif %}
- RubyGems registry
- Apache Maven registry
- NuGet registry
{% ifversion packages-npm-v2 %}For {% data variables.product.prodname_ghcr_and_npm_registry %}, you can choose to allow packages to be scoped to a user or an organization, or to be linked to a repository.{% endif %}
{% ifversion fpt or ghec %}
## Granular permissions for user/organization-scoped packages
Packages with granular permissions are scoped to a personal user or organization account. You can change the access control and visibility of the package separately from a repository that is connected (or linked) to a package.
Currently, only the {% data variables.product.prodname_container_registry %} offers granular permissions for your container image packages.
Currently, the {% data variables.product.prodname_ghcr_and_npm_registry %} offer granular permissions for your packages.
## Visibility and access permissions for container images

View File

@@ -15,7 +15,7 @@ shortTitle: Access control & visibility
Packages with granular permissions are scoped to a personal user or organization account. You can change the access control and visibility of a package separately from the repository that it is connected (or linked) to.
Currently, you can only use granular permissions with the {% data variables.product.prodname_container_registry %}. Granular permissions are not supported in our other package registries, such as the npm registry.{% ifversion docker-ghcr-enterprise-migration %} For more information about migration to the {% data variables.product.prodname_container_registry %}, see "[Migrating to the {% data variables.product.prodname_container_registry %} from the Docker registry](/packages/working-with-a-github-packages-registry/migrating-to-the-container-registry-from-the-docker-registry)."{% endif %}
Currently, you can only use granular permissions with the {% data variables.product.prodname_ghcr_and_npm_registry %}. Granular permissions are not supported in our other package registries, such as the RubyGems registry.{% ifversion docker-ghcr-enterprise-migration %} For more information about migration to the {% data variables.product.prodname_container_registry %}, see "[Migrating to the {% data variables.product.prodname_container_registry %} from the Docker registry](/packages/working-with-a-github-packages-registry/migrating-to-the-container-registry-from-the-docker-registry)."{% endif %}
For more information about permissions for repository-scoped packages, packages-related scopes for PATs, or managing permissions for your actions workflows, see "[About permissions for GitHub Packages](/packages/learn-github-packages/about-permissions-for-github-packages)."
@@ -105,7 +105,7 @@ To further customize access to your container image, see "[Configuring access to
{% ifversion fpt or ghec %}
## Ensuring {% data variables.product.prodname_codespaces %} access to your package
By default, a codespace can seamlessly access certain packages in the {% data variables.product.prodname_dotcom %} Container Registry, such as those published in the same repository with the **Inherit access** option selected. For more information on which access is automatically configured, see "[Accessing images stored in {% data variables.product.prodname_dotcom %} Container Registry](/codespaces/codespaces-reference/allowing-your-codespace-to-access-a-private-image-registry#accessing-images-stored-in-github-container-registry)."
By default, a codespace can seamlessly access certain packages in the {% data variables.product.prodname_ghcr_and_npm_registry %}, such as those published in the same repository with the **Inherit access** option selected. For more information on which access is automatically configured, see "[Allowing your codespace to access a private image registry](/codespaces/codespaces-reference/allowing-your-codespace-to-access-a-private-image-registry#accessing-images-stored-in-container-registry-and-npm-registry)."
Otherwise, to ensure that a codespace has access to your package, you must grant access to the repository where the codespace is being launched.

View File

@@ -23,12 +23,12 @@ On {% data variables.product.prodname_dotcom %} if you have the required access,
- an entire private package
- an entire public package, if no version of the package has more than 5,000 downloads
- a specific version of a private package
- a specific version of a public package, if the package version doesn't have more than 5000 downloads
- a specific version of a public package, if the package version doesn't have more than 5,000 downloads
{% note %}
**Note:**
- You cannot delete a public package if any version of the package has more than 5000 downloads. In this scenario, contact [GitHub support](https://support.github.com/contact?tags=docs-packages) for further assistance.
- You cannot delete a public package if any version of the package has more than 5,000 downloads. In this scenario, contact [GitHub support](https://support.github.com/contact?tags=docs-packages) for further assistance.
- When deleting public packages, be aware that you may break projects that depend on your package.
{% endnote %}
@@ -46,7 +46,7 @@ You can use the REST API to manage your packages. For more information, see the
{% endif %}
For packages that inherit their permissions and access from repositories, you can use GraphQL to delete a specific package version.{% ifversion fpt or ghec %} The {% data variables.product.prodname_registry %} GraphQL API does not support containers or Docker images that use the package namespace `https://ghcr.io/OWNER/PACKAGE-NAME`.{% endif %} For more information about GraphQL support, see "[Deleting a version of a repository-scoped package with GraphQL](#deleting-a-version-of-a-repository-scoped-package-with-graphql)."
For packages that inherit their permissions and access from repositories, you can use GraphQL to delete a specific package version.{% data reusables.package_registry.no-graphql-to-delete-packages %} For more information about GraphQL support, see "[Deleting a version of a repository-scoped package with GraphQL](#deleting-a-version-of-a-repository-scoped-package-with-graphql)."
{% endif %}
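For example, here is a minimal sketch of deleting one version of a user-owned container package with curl (the package name and version ID are placeholders):
```shell
# Requires a token with at least the read:packages and delete:packages scopes.
curl --request DELETE \
--url {% data variables.product.api_url_pre %}/user/packages/container/<em>PACKAGE_NAME</em>/versions/<em>PACKAGE_VERSION_ID</em> \
--header "Authorization: Bearer <em>TOKEN</em>"
```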
@@ -54,18 +54,19 @@ For packages that inherit their permissions and access from repositories, you ca
For packages that inherit their access permissions from repositories, you can delete a package if you have admin permissions to the repository.
Repository-scoped packages on {% data variables.product.prodname_registry %} include these packages:
- npm
- RubyGems
- maven
- Gradle
- NuGet
{% ifversion not fpt or ghec %}- Docker images at `docker.pkg.github.com/OWNER/REPOSITORY/IMAGE-NAME`{% endif %}
The {% data variables.product.prodname_registry %} registries below **only** use repository-scoped permissions:
{% ifversion not fpt or ghec %}- Docker images at `docker.pkg.github.com/OWNER/REPOSITORY/IMAGE-NAME`{% endif %}
{% ifversion packages-npm-v2 %}{% else %}- npm{% endif %}
- RubyGems registry
- Apache Maven registry
- NuGet registry
{% ifversion packages-npm-v2 %}For {% data variables.product.prodname_ghcr_and_npm_registry %}, you can choose to allow packages to be scoped to a user or an organization, or to be linked to a repository.{% endif %}
{% ifversion fpt or ghec %}
To delete a package that has granular permissions separate from a repository, such as container images stored at `https://ghcr.io/OWNER/PACKAGE-NAME`, you must have admin access to the package.
For more information, see "[About permissions for {% data variables.product.prodname_registry %}](/packages/learn-github-packages/about-permissions-for-github-packages)."
To delete a package that has granular permissions separate from a repository, such as packages stored at `https://ghcr.io/OWNER/PACKAGE-NAME` or `https://npm.pkg.github.com/OWNER/PACKAGE-NAME`, you must have admin access to the package. For more information, see "[About permissions for {% data variables.product.prodname_registry %}](/packages/learn-github-packages/about-permissions-for-github-packages)."
{% endif %}
@@ -89,9 +90,7 @@ To delete a version of a repository-scoped package, you must have admin permissi
For packages that inherit their permissions and access from repositories, you can use GraphQL to delete a specific package version.
{% ifversion fpt or ghec %}
For containers or Docker images at `ghcr.io`, GraphQL is not supported but you can use the REST API. For more information, see the "[{% data variables.product.prodname_registry %} API](/rest/reference/packages)."
{% endif %}
{% data reusables.package_registry.no-graphql-to-delete-packages %}{% ifversion fpt or ghec %} You can, however, use the REST API. For more information, see the "[{% data variables.product.prodname_registry %} API](/rest/reference/packages)."{% endif %}
Use the `deletePackageVersion` mutation in the GraphQL API. You must use a token with the `read:packages`, `delete:packages`, and `repo` scopes. For more information about tokens, see "[About {% data variables.product.prodname_registry %}](/packages/publishing-and-managing-packages/about-github-packages#authenticating-to-github-packages)."
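For example, here is a minimal sketch of calling the mutation with curl. At the time of writing, the mutation also required a preview `Accept` header:
```shell
# The package version ID is a placeholder; query it first via GraphQL.
curl --request POST \
--url https://api.github.com/graphql \
--header 'Accept: application/vnd.github.package-deletes-preview+json' \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"mutation { deletePackageVersion(input: {packageVersionId: \"<em>VERSION_ID</em>\"}) { success }}"}'
```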

View File

@@ -51,7 +51,7 @@ For more information about the configuration of {% data variables.product.prodna
| | |
|--------------------|--------------------|
| Permissions | {% ifversion fpt or ghec %}The permissions for a package are either inherited from the repository where the package is hosted or, for packages in the {% data variables.product.prodname_container_registry %}, they can be defined for specific user or organization accounts. For more information, see "[Configuring a package's access control and visibility](/packages/learn-github-packages/configuring-a-packages-access-control-and-visibility)." {% else %}Each package inherits the permissions of the repository where the package is hosted. <br> <br> For example, anyone with read permissions for a repository can install a package as a dependency in a project, and anyone with write permissions can publish a new package version.{% endif %} |
| Permissions | {% ifversion fpt or ghec %}The permissions for a package are either inherited from the repository where the package is hosted or, for packages in the {% data variables.product.prodname_ghcr_and_npm_registry %}, they can be defined for specific user or organization accounts. For more information, see "[Configuring a package's access control and visibility](/packages/learn-github-packages/configuring-a-packages-access-control-and-visibility)." {% else %}Each package inherits the permissions of the repository where the package is hosted. <br> <br> For example, anyone with read permissions for a repository can install a package as a dependency in a project, and anyone with write permissions can publish a new package version.{% endif %} |
| Visibility | {% data reusables.package_registry.public-or-private-packages %} |
For more information, see "[About permissions for {% data variables.product.prodname_registry %}](/packages/learn-github-packages/about-permissions-for-github-packages)."
@@ -101,7 +101,9 @@ For more information about Docker and the {% data variables.product.prodname_con
## Managing packages
{% ifversion fpt or ghec %}
You can delete a package in the {% ifversion ghae %}{% data variables.product.product_name %}{% else %}{% data variables.product.product_location %}{% endif %} user interface or using the REST API. For more information, see the "[{% data variables.product.prodname_registry %} API](/rest/reference/packages)."
You can delete a package in the {% ifversion ghae %}{% data variables.product.product_name %}{% else %}{% data variables.product.product_location %}{% endif %} user interface or using the REST API. For more information, see "[Deleting and restoring a package](/packages/learn-github-packages/deleting-and-restoring-a-package)" and the "[{% data variables.product.prodname_registry %} API](/rest/reference/packages)."
{% data reusables.package_registry.no-graphql-to-delete-packages %}
{% endif %}
{% ifversion ghes %}
@@ -112,7 +114,9 @@ You can delete a private or public package in the {% data variables.product.prod
You can delete a version of a package in the {% data variables.product.product_name %} user interface or using the GraphQL API.
{% endif %}
When you use the GraphQL API to query and delete private packages, you must use the same token you use to authenticate to {% data variables.product.prodname_registry %}. For more information, see "[Deleting and restoring a package](/packages/learn-github-packages/deleting-and-restoring-a-package)" and "[Forming calls with GraphQL](/graphql/guides/forming-calls-with-graphql)."
When you use the GraphQL API to query and delete private packages, you must use the same token you use to authenticate to {% data variables.product.prodname_registry %}.
For more information, see {% ifversion ghes or ghae %}"[Deleting and restoring a package](/packages/learn-github-packages/deleting-and-restoring-a-package)" and {% endif %}"[Forming calls with GraphQL](/graphql/guides/forming-calls-with-graphql)."
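For example, here is a minimal sketch of listing a repository's packages with that token (the owner and repository names are placeholders):
```shell
curl --request POST \
--url https://api.github.com/graphql \
--header 'Authorization: Bearer <em>TOKEN</em>' \
--data '{"query":"{ repository(owner: \"<em>OWNER</em>\", name: \"<em>REPO</em>\") { packages(first: 10) { nodes { name } } } }"}'
```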
You can configure webhooks to subscribe to package-related events, such as when a package is published or updated. For more information, see the "[`package` webhook event](/webhooks/event-payloads/#package)."

View File

@@ -21,17 +21,17 @@ versions:
## About package views
Your ability to view a package depends on several factors. By default, you can view all packages you have published.
Repository-scoped packages inherit their permissions and visibility from the repository that owns the package. The registries below use this type of permissions:{% ifversion not fpt or ghec %}
Repository-scoped packages inherit their permissions and visibility from the repository that owns the package. The registries below **only** use this type of permissions:{% ifversion not fpt or ghec %}
- Docker registry (`docker.pkg.github.com`){% endif %}
- npm registry
{% ifversion packages-npm-v2 %}{% else %}- npm registry{% endif %}
- RubyGems registry
- Apache Maven registry
- NuGet registry
{% ifversion fpt or ghec %}
The Container registry offers you the option of granular permissions and visibility settings that can be customized for each package owned by a personal user or organization account. You can choose to use granular permissions or connect the package to a repository and inherit its permissions. For more information, see "[Connecting a repository to a package](/packages/learn-github-packages/connecting-a-repository-to-a-package)."
The {% data variables.product.prodname_ghcr_and_npm_registry %} offer you the option of granular permissions and visibility settings that can be customized for each package owned by a personal user or organization account. You can choose to use granular permissions or connect the package to a repository and inherit its permissions. For more information, see "[Connecting a repository to a package](/packages/learn-github-packages/connecting-a-repository-to-a-package)."
{% endif %}
For more information, see "[About permissions for GitHub Packages](/packages/learn-github-packages/about-permissions-for-github-packages){% ifversion fpt or ghec %}" and "[Configuring a package's access control and visibility](/packages/learn-github-packages/configuring-a-packages-access-control-and-visibility){% endif %}."

View File

@@ -24,9 +24,9 @@ shortTitle: Publish & install with Actions
You can extend the CI and CD capabilities of your repository by publishing or installing packages as part of your workflow.
{% ifversion fpt or ghec %}
### Authenticating to the {% data variables.product.prodname_container_registry %}
### Authenticating to the {% data variables.product.prodname_ghcr_and_npm_registry %}
{% data reusables.package_registry.authenticate_with_pat_for_container_registry %}
{% data reusables.package_registry.authenticate_with_pat_for_v2_registry %}
{% endif %}
@@ -40,7 +40,7 @@ You can reference the `GITHUB_TOKEN` in your workflow file using the {% raw %}`{
{% note %}
**Note:** Repository-owned packages include RubyGems, npm, Apache Maven, NuGet, {% ifversion fpt or ghec %}and Gradle. {% else %}Gradle, and Docker packages that use the package namespace `docker.pkg.github.com`.{% endif %}
**Note:** Some registries, such as RubyGems, {% ifversion packages-npm-v2 %}{% else %}npm, {% endif %}Apache Maven, NuGet, {% ifversion fpt or ghec %}and Gradle{% else %}Gradle, and Docker packages that use the package namespace `docker.pkg.github.com`{% endif %}, only allow repository-owned packages. With {% data variables.product.prodname_ghcr_and_npm_registry_full %}, you can choose to allow packages to be owned by a user or an organization, or to be linked to a repository.
{% endnote %}
@@ -49,11 +49,11 @@ When you enable GitHub Actions, GitHub installs a GitHub App on your repository.
{% data variables.product.prodname_registry %} allows you to push and pull packages through the `GITHUB_TOKEN` available to a {% data variables.product.prodname_actions %} workflow.
{% ifversion fpt or ghec %}
## About permissions and package access for {% data variables.product.prodname_container_registry %}
## About permissions and package access for {% data variables.product.prodname_ghcr_and_npm_registry %}
The {% data variables.product.prodname_container_registry %} (`ghcr.io`) allows users to create and administer containers as free-standing resources at the organization level. Containers can be owned by an organization or personal account and you can customize access to each of your containers separately from repository permissions.
The {% data variables.product.prodname_ghcr_and_npm_registry_full %} allows users to create and administer packages as free-standing resources at the organization level. Packages can be owned by an organization or personal account and you can customize access to each of your packages separately from repository permissions.
All workflows accessing the {% data variables.product.prodname_container_registry %} should use the `GITHUB_TOKEN` instead of a personal access token. For more information about security best practices, see "[Security hardening for GitHub Actions](/actions/learn-github-actions/security-hardening-for-github-actions#using-secrets)."
All workflows accessing the {% data variables.product.prodname_ghcr_and_npm_registry %} should use the `GITHUB_TOKEN` instead of a personal access token. For more information about security best practices, see "[Security hardening for GitHub Actions](/actions/learn-github-actions/security-hardening-for-github-actions#using-secrets)."
## Default permissions and access settings for containers modified through workflows
@@ -484,9 +484,9 @@ Installing packages hosted by {% data variables.product.prodname_registry %} thr
{% data reusables.package_registry.actions-configuration %}
{% ifversion fpt or ghec %}
## Upgrading a workflow that accesses `ghcr.io`
## Upgrading a workflow that accesses a registry using a PAT
The {% data variables.product.prodname_container_registry %} supports the `GITHUB_TOKEN` for easy and secure authentication in your workflows. If your workflow is using a personal access token (PAT) to authenticate to `ghcr.io`, then we highly recommend you update your workflow to use the `GITHUB_TOKEN`.
The {% data variables.product.prodname_ghcr_and_npm_registry %} support the `GITHUB_TOKEN` for easy and secure authentication in your workflows. If your workflow is using a personal access token (PAT) to authenticate to the registry, then we highly recommend you update your workflow to use the `GITHUB_TOKEN`.
For more information about the `GITHUB_TOKEN`, see "[Authentication in a workflow](/actions/reference/authentication-in-a-workflow#using-the-github_token-in-a-workflow)."
@@ -504,9 +504,9 @@ Using the `GITHUB_TOKEN` instead of a PAT, which includes the `repo` scope, incr
{% endnote %}
1. Optionally, using the "role" drop-down menu, select the default access level that you'd like the repository to have to your container image.
![Permission access levels to give to repositories](/assets/images/help/package-registry/repository-permission-options-for-package-access-through-actions.png)
1. Open your workflow file. On the line where you log in to `ghcr.io`, replace your PAT with {% raw %}`${{ secrets.GITHUB_TOKEN }}`{% endraw %}.
1. Open your workflow file. On the line where you log in to the registry, replace your PAT with {% raw %}`${{ secrets.GITHUB_TOKEN }}`{% endraw %}.
For example, this workflow publishes a Docker image using {% raw %}`${{ secrets.GITHUB_TOKEN }}`{% endraw %} to authenticate.
For example, this workflow publishes a Docker image to the {% data variables.product.prodname_container_registry %} and uses {% raw %}`${{ secrets.GITHUB_TOKEN }}`{% endraw %} to authenticate.
```yaml{:copy}
name: Demo Push

View File

@@ -42,7 +42,9 @@ When installing or publishing a Docker image, the {% data variables.product.prod
## Authenticating to the {% data variables.product.prodname_container_registry %}
{% data reusables.package_registry.authenticate_with_pat_for_container_registry %}
{% ifversion fpt or ghec or ghes > 3.4 %}
To authenticate to the {% data variables.product.prodname_container_registry %} (`ghcr.io`) within a {% data variables.product.prodname_actions %} workflow, use the `GITHUB_TOKEN` for the best security and experience. {% data reusables.package_registry.authenticate_with_pat_for_v2_registry %}
{% endif %}
{% ifversion ghes %}Ensure that you replace `HOSTNAME` with the hostname or IP address of {% data variables.product.product_location_enterprise %} in the examples below.{% endif %}
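For example, here is a minimal sketch of authenticating with a personal access token from the command line (the username and token are placeholders):
```shell
# Save your token as an environment variable, then pipe it to docker login
# so that the token doesn't end up in your shell history.
$ export CR_PAT=<em>YOUR_TOKEN</em>
$ echo $CR_PAT | docker login ghcr.io -u <em>USERNAME</em> --password-stdin
> Login Succeeded
```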

View File

@@ -21,6 +21,8 @@ shortTitle: npm registry
{% data reusables.package_registry.admins-can-configure-package-types %}
{% ifversion packages-npm-v2 %}
{% else %}
## Limits for published npm versions
If you publish over 1,000 npm package versions to {% data variables.product.prodname_registry %}, you may experience performance issues and timeouts during usage.
@@ -28,12 +30,17 @@ If you publish over 1,000 npm package versions to {% data variables.product.prod
In the future, to improve performance of the service, you won't be able to publish more than 1,000 versions of a package on {% data variables.product.prodname_dotcom %}. Any versions published before hitting this limit will still be readable.
If you reach this limit, consider deleting package versions or contact Support for help. When this limit is enforced, our documentation will be updated with a way to work around this limit. For more information, see "[Deleting and restoring a package](/packages/learn-github-packages/deleting-and-restoring-a-package)" or "[Contacting Support](/packages/learn-github-packages/about-github-packages#contacting-support)."
{% endif %}
## Authenticating to {% data variables.product.prodname_registry %}
{% data reusables.package_registry.authenticate-packages %}
{% data reusables.package_registry.authenticate-packages-github-token %}
{% ifversion packages-npm-v2 %}
{% data reusables.package_registry.authenticate_with_pat_for_v2_registry %}
You can also choose to give access permissions to packages independently for {% data variables.product.prodname_codespaces %} and {% data variables.product.prodname_actions %}. For more information, see "[Ensuring Codespaces access to your package](/packages/learn-github-packages/configuring-a-packages-access-control-and-visibility#ensuring-codespaces-access-to-your-package)" and "[Ensuring workflow access to your package](/packages/learn-github-packages/configuring-a-packages-access-control-and-visibility#ensuring-workflow-access-to-your-package)."
{% endif %}
### Authenticating with a personal access token
@@ -94,12 +101,24 @@ $ npm login --scope=@<em>OWNER</em> --registry=https://<em>HOSTNAME</em>/_regist
{% endnote %}
{% ifversion packages-npm-v2 %}
The {% data variables.product.prodname_registry %} registry stores npm packages within your organization or personal account, and allows you to associate a package with a repository. You can choose whether to inherit permissions from a repository, or set granular permissions independently of a repository.
{% endif %}
By default, {% data variables.product.prodname_registry %} publishes a package in the {% data variables.product.prodname_dotcom %} repository you specify in the name field of the *package.json* file. For example, you would publish a package named `@my-org/test` to the `my-org/test` {% data variables.product.prodname_dotcom %} repository. If you're running [npm v8.5.3](https://github.com/npm/cli/releases/tag/v8.5.3) or later, you can add a summary for the package listing page by including a *README.md* file in your package directory. For more information, see "[Working with package.json](https://docs.npmjs.com/getting-started/using-a-package.json)" and "[How to create Node.js Modules](https://docs.npmjs.com/getting-started/creating-node-modules)" in the npm documentation.
You can publish multiple packages to the same {% data variables.product.prodname_dotcom %} repository by including a `URL` field in the *package.json* file. For more information, see "[Publishing multiple packages to the same repository](#publishing-multiple-packages-to-the-same-repository)."
{% ifversion fpt or ghec %}
When a package is published, it isn't automatically linked to a repository. You can, however, choose to link your published package to a repository using the user interface or command line. For more information, see "[Connecting a repository to a package](/packages/learn-github-packages/connecting-a-repository-to-a-package)."
{% endif %}
You can set up the scope mapping for your project using either a local *.npmrc* file in the project or using the `publishConfig` option in the *package.json*. {% data variables.product.prodname_registry %} only supports scoped npm packages. Scoped packages have names with the format of `@owner/name`. Scoped packages always begin with an `@` symbol. You may need to update the name in your *package.json* to use the scoped name. For example, `"name": "@codertocat/hello-world-npm"`.
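For example, here is a minimal sketch of adding the scope mapping through a local *.npmrc* file (the `@codertocat` scope is a placeholder):
```shell
# Route all packages under the @codertocat scope to {% data variables.product.prodname_registry %}.
echo "@codertocat:registry=https://npm.pkg.github.com" >> .npmrc
```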
{% ifversion packages-npm-v2 %}
When you first publish a package, the default visibility is private. When a package is linked to a repository, the package visibility is dependent on the repository's visibility. To change the visibility or set access permissions, see "[Configuring a package's access control and visibility](/packages/learn-github-packages/configuring-a-packages-access-control-and-visibility)."
{% endif %}
{% data reusables.package_registry.viewing-packages %}
### Publishing a package using a local *.npmrc* file

View File

@@ -47,7 +47,7 @@ To set up a `www` or custom subdomain, such as `www.example.com` or `blog.exampl
{% data reusables.pages.sidebar-pages %}
4. Under "Custom domain", type your custom domain, then click **Save**. If you are publishing your site from a branch, this will create a commit that adds a `CNAME` file to the root of your source branch. If you are publishing your site with a custom {% data variables.product.prodname_actions %} workflow, no `CNAME` file is created. For more information about your publishing source, see "[Configuring a publishing source for your GitHub Pages site](/pages/getting-started-with-github-pages/configuring-a-publishing-source-for-your-github-pages-site)."
![Save custom domain button](/assets/images/help/pages/save-custom-subdomain.png)
5. Navigate to your DNS provider and create a `CNAME` record that points your subdomain to the default domain for your site. For example, if you want to use the subdomain `www.example.com` for your user site, create a `CNAME` record that points `www.example.com` to `<user>.github.io`. If you want to use the subdomain `www.anotherexample.com` for your organization site, create a `CNAME` record that points `www.anotherexample.com` to `<organization>.github.io`. The `CNAME` record should always point to `<user>.github.io` or `<organization>.github.io`, excluding the repository name. {% data reusables.pages.contact-dns-provider %} {% data reusables.pages.default-domain-information %}
5. Navigate to your DNS provider and create a `CNAME` record that points your subdomain to the default domain for your site. For example, if you want to use the subdomain `www.example.com` for your user site, create a `CNAME` record that points `www.example.com` to `<user>.github.io`. If you want to use the subdomain `another.example.com` for your organization site, create a `CNAME` record that points `another.example.com` to `<organization>.github.io`. The `CNAME` record should always point to `<user>.github.io` or `<organization>.github.io`, excluding the repository name. {% data reusables.pages.contact-dns-provider %} {% data reusables.pages.default-domain-information %}
{% indented_data_reference reusables.pages.wildcard-dns-warning spaces=3 %}
{% data reusables.command_line.open_the_multi_os_terminal %}
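Optionally, to confirm that your DNS record resolves as expected, you can query it with the `dig` command. A sketch, assuming the `www.example.com` subdomain from the previous step:
```shell
$ dig <em>WWW.EXAMPLE.COM</em> +nostats +nocomments +nocmd
> ;<em>WWW.EXAMPLE.COM</em>.   IN      A
> <em>WWW.EXAMPLE.COM</em>.    3592    IN      CNAME   <em>YOUR-USERNAME</em>.github.io.
```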

View File

@@ -17,4 +17,4 @@ versions:
An _installation_ refers to any user or organization account that has installed the app. For information on how to authenticate as an installation and limit access to specific repositories, see "[Authenticating as an installation](/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-an-installation)."
To list all GitHub App installations for an organization, see "[List app installations for an organization](/rest/reference/orgs#list-app-installations-for-an-organization)."
To list all GitHub App installations for an organization, see "[List app installations for an organization](/rest/orgs/orgs#list-app-installations-for-an-organization)."
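For example, here is a minimal sketch of that call with curl (the organization name is a placeholder, and the token must have sufficient access to the organization):
```shell
curl --request GET \
--url {% data variables.product.api_url_pre %}/orgs/<em>ORG</em>/installations \
--header "Accept: application/vnd.github+json" \
--header "Authorization: Bearer <em>TOKEN</em>"
```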

View File

@@ -41,7 +41,7 @@ A check run is an individual test that is part of a check suite. Each run includ
![Check runs workflow](/assets/images/check_runs.png)
If a check run is in a incomplete state for more than 14 days, then the check run's `conclusion` becomes `stale` and appears on {% data variables.product.prodname_dotcom %} as stale with {% octicon "issue-reopened" aria-label="The issue-reopened icon" %}. Only {% data variables.product.prodname_dotcom %} can mark check runs as `stale`. For more information about possible conclusions of a check run, see the [`conclusion` parameter](/rest/reference/checks#create-a-check-run--parameters).
If a check run is in an incomplete state for more than 14 days, then the check run's `conclusion` becomes `stale` and appears on {% data variables.product.prodname_dotcom %} as stale with {% octicon "issue-reopened" aria-label="The issue-reopened icon" %}. Only {% data variables.product.prodname_dotcom %} can mark check runs as `stale`. For more information about possible conclusions of a check run, see the [`conclusion` parameter](/rest/reference/checks#create-a-check-run--parameters).
As soon as you receive the [`check_suite`](/webhooks/event-payloads/#check_suite) webhook, you can create the check run, even if the check is not complete. You can update the `status` of the check run as it completes with the values `queued`, `in_progress`, or `completed`, and you can update the `output` as more details become available. A check run can contain timestamps, a link to more details on your external site, detailed annotations for specific lines of code, and information about the analysis performed.
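For example, here is a minimal sketch of creating a queued check run with curl. The owner, repository, and commit SHA are placeholders, and a real integration would authenticate as a GitHub App installation:
```shell
curl --request POST \
--url {% data variables.product.api_url_pre %}/repos/<em>OWNER</em>/<em>REPO</em>/check-runs \
--header "Accept: application/vnd.github+json" \
--header "Authorization: Bearer <em>INSTALLATION_TOKEN</em>" \
--data '{"name": "my-check", "head_sha": "<em>SHA</em>", "status": "queued"}'
```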

View File

@@ -166,7 +166,7 @@ curl --request GET \
{% note %}
**Note:** In most cases, you can use `Authorization: Bearer` or `Authorization: token`. JSON web tokens (JWTs) will only work with `Authorization: Bearer`.
**Note:** {% data reusables.getting-started.bearer-vs-token %}
{% endnote %}
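For example, both of the following header forms work for a personal access token, while a JSON web token only works with `Bearer`:
```shell
$ curl -H "Authorization: Bearer <em>TOKEN</em>" {% data variables.product.api_url_pre %}/user
$ curl -H "Authorization: token <em>TOKEN</em>" {% data variables.product.api_url_pre %}/user
```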

View File

@@ -86,10 +86,16 @@ If you have two-factor authentication enabled, make sure you understand how to [
{% endnote %}
{% note %}
**Note:** {% data reusables.getting-started.bearer-vs-token %}
{% endnote %}
If you're using the API to access an organization that enforces [SAML SSO][saml-sso] for authentication, you'll need to create a personal access token (PAT) and [authorize the token][allowlist] for that organization. Visit the URL specified in `X-GitHub-SSO` to authorize the token for the organization.
```shell
$ curl -v -H "Authorization: token <em>TOKEN</em>" {% data variables.product.api_url_pre %}/repos/octodocs-test/test
$ curl -v -H "Authorization: Bearer <em>TOKEN</em>" {% data variables.product.api_url_pre %}/repos/octodocs-test/test
> X-GitHub-SSO: required; url=https://github.com/orgs/octodocs-test/sso?authorization_request=AZSCKtL4U8yX1H3sCQIVnVgmjmon5fWxks5YrqhJgah0b2tlbl9pZM4EuMz4
{
@@ -101,7 +107,7 @@ $ curl -v -H "Authorization: token <em>TOKEN</em>" {% data variables.product.api
When requesting data that could come from multiple organizations (for example, [requesting a list of issues created by the user][user-issues]), the `X-GitHub-SSO` header indicates which organizations require you to authorize your personal access token:
```shell
$ curl -v -H "Authorization: token <em>TOKEN</em>" {% data variables.product.api_url_pre %}/user/issues
$ curl -v -H "Authorization: Bearer <em>TOKEN</em>" {% data variables.product.api_url_pre %}/user/issues
> X-GitHub-SSO: partial-results; organizations=21955855,20582480
```

Some files were not shown because too many files have changed in this diff.