Remove assorted azure refs from root directory (#54168)
This commit is contained in:
112
Dockerfile.azure
112
Dockerfile.azure
@@ -1,112 +0,0 @@
|
||||
# This Dockerfile is used for docker-based deployments to Azure for both preview environments and production

# --------------------------------------------------------------------------------
# BASE IMAGE
# --------------------------------------------------------------------------------
# To update the sha, run `docker pull node:$VERSION-alpine`
# look for something like: `Digest: sha256:0123456789abcdef`
FROM node:22-alpine@sha256:c13b26e7e602ef2f1074aef304ce6e9b7dd284c419b35d89fcf3cc8e44a8def9 AS base

# This directory is owned by the node user
ARG APP_HOME=/home/node/app

# Make sure we don't run anything as the root user
USER node

WORKDIR $APP_HOME


# ---------------
# ALL DEPS
# ---------------
FROM base AS all_deps

COPY --chown=node:node package.json package-lock.json ./

RUN npm ci --no-optional --registry https://registry.npmjs.org/

# For Next.js v12+
# This is the necessary extra for node:VERSION-alpine
# Other options are https://www.npmjs.com/search?q=%40next%2Fswc
RUN npm i @next/swc-linux-x64-musl --no-save || npm i @next/swc-linux-arm64-musl --no-save


# ---------------
# PROD DEPS
# ---------------
FROM all_deps AS prod_deps

RUN npm prune --production


# ---------------
# BUILDER
# ---------------
FROM all_deps AS builder

COPY src ./src
# The star is because it's an optional directory
COPY .remotejson-cache* ./.remotejson-cache
# The star is because it's an optional file
COPY .pageinfo-cache.json.br* ./.pageinfo-cache.json.br
# Certain content is necessary for being able to build
COPY content/index.md ./content/index.md
COPY content/rest ./content/rest
COPY data ./data

COPY next.config.js ./next.config.js
COPY tsconfig.json ./tsconfig.json

RUN npm run build

# --------------------------------------------------------------------------------
# PREVIEW IMAGE - no translations
# --------------------------------------------------------------------------------

FROM base AS preview

# Copy just prod dependencies
COPY --chown=node:node --from=prod_deps $APP_HOME/node_modules $APP_HOME/node_modules

# Copy our front-end code
COPY --chown=node:node --from=builder $APP_HOME/.next $APP_HOME/.next

# We should always be running in production mode
ENV NODE_ENV=production

# Preferred port for server.js
ENV PORT=4000

ENV ENABLED_LANGUAGES="en"

# This makes it possible to set `--build-arg BUILD_SHA=abc123`
# and it then becomes available as an environment variable in the docker run.
ARG BUILD_SHA
ENV BUILD_SHA=$BUILD_SHA

# Copy only what's needed to run the server
COPY --chown=node:node package.json ./
COPY --chown=node:node assets ./assets
COPY --chown=node:node content ./content
COPY --chown=node:node src ./src
COPY --chown=node:node .remotejson-cache* ./.remotejson-cache
COPY --chown=node:node .pageinfo-cache.json.br* ./.pageinfo-cache.json.br
COPY --chown=node:node data ./data
COPY --chown=node:node next.config.js ./
COPY --chown=node:node tsconfig.json ./

EXPOSE $PORT

CMD ["node_modules/.bin/tsx", "src/frame/server.ts"]

# --------------------------------------------------------------------------------
# PRODUCTION IMAGE - includes all translations
# --------------------------------------------------------------------------------
FROM preview AS production

# Override what was set for previews
# Make this match the default of `Object.keys(languages)` in src/languages/lib/languages.js
# Use the `ENV key=value` form: the legacy space-separated `ENV key value` form
# is deprecated and inconsistent with the rest of this file.
ENV ENABLED_LANGUAGES="en,zh,es,pt,ru,ja,fr,de,ko"

# Copy in all translations
COPY --chown=node:node translations ./translations
@@ -4,12 +4,7 @@ Staging and production deployments are automated by a deployer service created a
|
||||
|
||||
### Preview deployments
|
||||
|
||||
When a pull request is **opened**, **reopened**, or **synchronized** (i.e., has new commits), it is automatically deployed to a unique preview URL.
|
||||
|
||||
If a preview deployment fails, you can trigger a new deployment in a few ways:
|
||||
- close and re-open the pull request
|
||||
- push another commit to the branch
|
||||
- click **Update Branch** on the pull request page on github.com, if it's clickable
|
||||
When a pull request contains only content changes, it can be previewed without a deployment. Code changes will require a deployment. GitHub Staff can deploy such a PR to a staging environment.
|
||||
|
||||
### Production deployments
|
||||
|
||||
|
||||
@@ -21,7 +21,6 @@
|
||||
"analyze-comment": "tsx src/events/scripts/analyze-comment-cli.ts",
|
||||
"archive-version": "tsx --max-old-space-size=16384 src/ghes-releases/scripts/archive-version.ts",
|
||||
"build": "next build",
|
||||
"check-canary-slots": "tsx src/workflows/check-canary-slots.ts",
|
||||
"check-content-type": "tsx src/workflows/check-content-type.ts",
|
||||
"check-github-github-links": "tsx src/links/scripts/check-github-github-links.ts",
|
||||
"close-dangling-prs": "tsx src/workflows/close-dangling-prs.ts",
|
||||
@@ -29,7 +28,6 @@
|
||||
"content-changes-table-comment": "tsx src/workflows/content-changes-table-comment.ts",
|
||||
"copy-fixture-data": "tsx src/tests/scripts/copy-fixture-data.js",
|
||||
"count-translation-corruptions": "tsx src/languages/scripts/count-translation-corruptions.ts",
|
||||
"create-acr-token": "tsx src/workflows/acr-create-token.ts",
|
||||
"create-enterprise-issue": "tsx src/ghes-releases/scripts/create-enterprise-issue.js",
|
||||
"debug": "cross-env NODE_ENV=development ENABLED_LANGUAGES=en nodemon --inspect src/frame/server.ts",
|
||||
"delete-orphan-translation-files": "tsx src/workflows/delete-orphan-translation-files.ts",
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
import { execSync } from 'child_process'
|
||||
import * as core from '@actions/core'
|
||||
import dotenv from 'dotenv'
|
||||
|
||||
// Marker alias: an ISO 8601 timestamp string (YYYY-MM-DDTHH:mm:ssZ).
type IsoDateString = string

// For local testing set environment variables in the .env file
dotenv.config()

// Name of the Azure Container Registry token to create.
const acrTokenName = process.env.ACR_TOKEN_NAME
// ACR registry server — presumably the *.azurecr.io hostname; TODO confirm against the workflow env.
const acrServer = process.env.CONTAINER_REGISTRY_SERVER
// "owner/repo" slug; set automatically by GitHub Actions.
const repo = process.env.GITHUB_REPOSITORY
||||
|
||||
function main() {
|
||||
// Get the current time and add 30 minutes to it
|
||||
// Convert Date format from YYYY-MM-DDTHH:mm:ss.sssZ to
|
||||
// YYYY-MM-DDTHH:mm:ssZ (remove .sss)
|
||||
const expirationDate: IsoDateString =
|
||||
new Date(Date.now() + 30 * 60 * 1000).toISOString().split('.')[0] + 'Z'
|
||||
|
||||
let resp
|
||||
try {
|
||||
const cmd = `az acr token create \
|
||||
--name ${acrTokenName} \
|
||||
--registry ${acrServer} \
|
||||
--repository ${repo} \
|
||||
content/write \
|
||||
content/read \
|
||||
--expiration ${expirationDate} \
|
||||
--output json`
|
||||
|
||||
console.log('Executing az acr token create command.')
|
||||
resp = JSON.parse(execSync(cmd, { encoding: 'utf8' }))
|
||||
} catch (error) {
|
||||
console.error('An error occurred while creating ACR token with the Azure CLI')
|
||||
throw error
|
||||
}
|
||||
|
||||
const acrTokenValue = resp?.credentials?.passwords[0]?.value
|
||||
if (!acrTokenValue) {
|
||||
throw new Error(
|
||||
'The response from the Azure CLI was not in the expected format: \n' +
|
||||
JSON.stringify(resp, null, 2),
|
||||
)
|
||||
}
|
||||
|
||||
// Set the ACR_TOKEN_VALUE environment variable so
|
||||
// that it can be used in the subsequent steps
|
||||
core.exportVariable('ACR_TOKEN_VALUE', acrTokenValue)
|
||||
execSync(`echo $ACR_TOKEN_VALUE`)
|
||||
}
|
||||
|
||||
main()
|
||||
@@ -1,85 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
import { execSync } from 'child_process'
|
||||
import yaml from 'js-yaml'
|
||||
|
||||
// Deployment-slot coordinates, injected by the calling workflow's environment.
const slotName = process.env.SLOT_NAME || ''
const appServiceName = process.env.APP_SERVICE_NAME || ''
const resourceGroupName = process.env.RESOURCE_GROUP_NAME || ''
// The commit SHA the slot is expected to be running.
const expectedSHA = process.env.EXPECTED_SHA || ''
// Milliseconds between polls. parseInt('') yields NaN, so `|| 10000` supplies the default.
const waitDuration = parseInt(process.env.CHECK_INTERVAL || '', 10) || 10000
// NOTE(review): despite the "Seconds" suffix, this value is milliseconds
// (10 * 60 * 1000) and is compared against `attempts * waitDuration`, which is
// also milliseconds — the name is misleading but the arithmetic is consistent.
const maxWaitingTimeSeconds = parseInt(process.env.MAX_WAITING_TIME || '', 10) || 10 * 60 * 1000
|
||||
|
||||
function getBuildSha(slot: string, appService: string, resourceGroup: string) {
|
||||
console.log('Getting Canary App Service Docker config')
|
||||
const t0 = Date.now()
|
||||
let config
|
||||
try {
|
||||
config = JSON.parse(
|
||||
execSync(
|
||||
`az webapp config container show --show-multicontainer-config --slot ${slot} -n ${appService} -g ${resourceGroup}`,
|
||||
{ encoding: 'utf8' },
|
||||
),
|
||||
)
|
||||
} catch {
|
||||
console.log('Error getting the Canary App Service Slot config')
|
||||
return null
|
||||
}
|
||||
|
||||
// The config is an array of objects. One of the objects
|
||||
// contains a copy of the Docker compose configuration file
|
||||
// pushed to the slot (see src/workflows/docker-compose.prod.tmpl.yaml).
|
||||
// The value key contains the stringified YAML file, so we
|
||||
// need to parse it to JSON to extract the image sha.
|
||||
const dockerComposeYaml = config.find(
|
||||
(obj: Record<string, any>) => obj.name === 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
|
||||
).value
|
||||
|
||||
let dockerComposeConfig
|
||||
try {
|
||||
dockerComposeConfig = yaml.load(dockerComposeYaml) as Record<string, any>
|
||||
} catch {
|
||||
console.log('Error loading the YAML configuration data from the Canary App Service Slot config')
|
||||
return null
|
||||
}
|
||||
// The image key looks like this:
|
||||
// `ghdocsprod.azurecr.io/github/docs-internal:d7ee70f225a0f10f293ffdd2d43931acf02c6751`
|
||||
const sha = dockerComposeConfig.services['ghdocs-prod'].image.split(':')[1]
|
||||
console.log(`Fetched Canary App Service Slot configuration}. Took ${Date.now() - t0}ms`)
|
||||
return sha
|
||||
}
|
||||
|
||||
function getStatesForSlot(slot: string, appService: string, resourceGroup: string) {
|
||||
return JSON.parse(
|
||||
execSync(
|
||||
`az webapp list-instances --slot ${slot} --query "[].state" -n ${appService} -g ${resourceGroup}`,
|
||||
{ encoding: 'utf8' },
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// Number of polls performed so far; shared across the setTimeout-driven retries.
let attempts = 0

// Polls the canary slot until it reports both the expected build SHA and all
// instances READY, re-scheduling itself every `waitDuration` ms, and throwing
// once the cumulative waiting time exceeds the configured maximum.
async function doCheck() {
  attempts++
  console.log('Attempt:', attempts)
  // getBuildSha returns null when fetching/parsing the slot config fails.
  const buildSha = getBuildSha(slotName, appServiceName, resourceGroupName)
  console.log('Canary build SHA:', buildSha || '*unknown/failed*', 'Expected SHA:', expectedSHA)

  const states = getStatesForSlot(slotName, appServiceName, resourceGroupName)
  console.log('Instance states:', states)

  // Every instance must report the exact state string 'READY'.
  const isAllReady = states.every((s: string) => s === 'READY')

  if (buildSha === expectedSHA && isAllReady) {
    console.log('Got the expected build SHA and all slots are ready! 🚀')
    return
  }

  // NOTE(review): `maxWaitingTimeSeconds` holds milliseconds despite its name,
  // so this compares ms to ms; the error message converts to seconds for display.
  if (attempts * waitDuration > maxWaitingTimeSeconds) {
    throw new Error(`Giving up after a total of ${(attempts * waitDuration) / 1000} seconds`)
  }

  console.log(`checking again in ${waitDuration}ms`)
  // Re-schedule rather than await. A throw inside a later attempt surfaces as
  // an unhandled promise rejection, which terminates the process with a
  // non-zero exit — presumably the intended CI failure mode; TODO confirm.
  setTimeout(doCheck, waitDuration)
}

doCheck()
||||
Reference in New Issue
Block a user