Move delay for Heroku Preboot to before marking deployment successful (#21736)
* Move delay for Heroku Preboot from post-deploy to pre-success
* Shorten arbitrary delay before first Fastly purge
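In effect, the preboot wait now happens inside deployToProduction, before the deployment is reported successful, so purgeEdgeCache no longer needs its own preboot delay. A minimal sketch of the resulting call order, assuming a caller that simply awaits the two steps in sequence (the import paths and the octokit/sourceBlobUrl/runId values are illustrative and not part of this diff):

// Hypothetical caller; the actual workflow wiring is outside this diff.
import deployToProduction from './deploy-to-production.js'
import purgeEdgeCache from './purge-edge-cache.js'

// Deploys, then sleeps for DELAY_FOR_PREBOOT_SWAP (2:15) while Heroku Preboot
// swaps in the new dynos, and only then reports success.
await deployToProduction({ octokit, includeDelayForPreboot: true, sourceBlobUrl, runId })

// By now the new dynos should be serving, so only the shortened 30-second
// warm-up delay remains before the first Fastly purge.
await purgeEdgeCache()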
@@ -6,6 +6,7 @@ import { setOutput } from '@actions/core'
 
 const SLEEP_INTERVAL = 5000
 const HEROKU_LOG_LINES_TO_SHOW = 25
+const DELAY_FOR_PREBOOT_SWAP = 135000 // 2:15
 
 // Allow for a few 404 (Not Found) or 429 (Too Many Requests) responses from the
 // semi-unreliable Heroku API when we're polling for status updates
@@ -13,6 +14,7 @@ const ALLOWED_MISSING_RESPONSE_COUNT = 5
 
 export default async function deployToProduction({
   octokit,
+  includeDelayForPreboot = true,
   // These parameters will only be set by Actions
   sourceBlobUrl = null,
   runId = null,
@@ -332,11 +334,20 @@ export default async function deployToProduction({
     )} seconds.`
   )
 
-  //
-  // TODO:
-  // Should we consider adding an explicit 2-minute pause here to allow for
-  // Heroku Preboot to actually swap in the new dynos?
-  //
+  // IMPORTANT:
+  // If Heroku Preboot is enabled, then there is an additional delay of at
+  // least 2 minutes before the new dynos are swapped into active serving.
+  // If we move off Heroku in the future, this should be revisited and
+  // updated/removed as relevant to align with the new hosting platform.
+  if (includeDelayForPreboot) {
+    console.log(`Waiting for Heroku Preboot to swap dynos (${DELAY_FOR_PREBOOT_SWAP} ms)...`)
+    await sleep(DELAY_FOR_PREBOOT_SWAP)
+
+    // TODO:
+    // Is there a faster alternative than this arbitrary delay? For example,
+    // is there some Heroku API we can query to see when this release is
+    // considered to be the live one, or when the old dynos are shut down?
+  }
 
   // Report success!
   const successMessage = `Deployment succeeded after ${Math.round(
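The TODO above asks whether the fixed DELAY_FOR_PREBOOT_SWAP sleep could be replaced by polling Heroku directly. A rough sketch of one possibility, not something this commit implements: poll the Heroku Platform API's dyno list (GET /apps/{app}/dynos, which reports each dyno's type, state, and release version) until every web dyno is up on the expected release, giving up after the same 2:15 budget. The appName, herokuToken, and expectedReleaseVersion parameters are assumptions about what the deploy step would have available.

import got from 'got'
import sleep from 'await-sleep'

// Hypothetical alternative to the fixed sleep: poll the Heroku Platform API
// until every web dyno reports state "up" on the release we just deployed,
// or give up once the usual preboot budget is exhausted.
async function waitForPrebootSwap({
  herokuToken,
  appName,
  expectedReleaseVersion,
  timeoutMs = 135000, // same budget as DELAY_FOR_PREBOOT_SWAP above
  pollIntervalMs = 5000, // same cadence as SLEEP_INTERVAL above
}) {
  const deadline = Date.now() + timeoutMs

  while (Date.now() < deadline) {
    // GET /apps/{app}/dynos returns each dyno's type, state, and release version
    const response = await got(`https://api.heroku.com/apps/${appName}/dynos`, {
      headers: {
        Accept: 'application/vnd.heroku+json; version=3',
        Authorization: `Bearer ${herokuToken}`,
      },
    })
    const dynos = JSON.parse(response.body)

    const webDynos = dynos.filter((dyno) => dyno.type === 'web')
    const swapped =
      webDynos.length > 0 &&
      webDynos.every(
        (dyno) => dyno.state === 'up' && dyno.release.version === expectedReleaseVersion
      )
    if (swapped) return true

    await sleep(pollIntervalMs)
  }

  return false // fall back to treating the timeout like the fixed delay
}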
@@ -2,7 +2,6 @@ import sleep from 'await-sleep'
 import got from 'got'
 
 const ONE_SECOND = 1000
-const ONE_MINUTE = 60 * ONE_SECOND
 
 async function purgeFastlyBySurrogateKey({ apiToken, serviceId, surrogateKey }) {
   const key = surrogateKey
@@ -17,17 +16,10 @@ async function purgeFastlyBySurrogateKey({ apiToken, serviceId, surrogateKey })
   return got.post(requestPath, { headers, json: true })
 }
 
-// This delay (includeDelayForPreboot) can potentially be removed in the
-// future if the deployment workflow is updated to include a delay to offset
-// Heroku Preboot before this script runs.
-export default async function purgeEdgeCache({ includeDelayForPreboot = true } = {}) {
-  // If Heroku Preboot is enabled, then there is an additional delay of at
-  // least 2 minutes before the new dynos are swapped into active serving.
-  const delayForPrebootSwap = 2 * ONE_MINUTE + 30 * ONE_SECOND
-
+export default async function purgeEdgeCache() {
   // Give the app some extra time to wake up before the thundering herd of
   // Fastly requests.
-  const delayBeforeFirstPurge = ONE_MINUTE
+  const delayBeforeFirstPurge = 30 * ONE_SECOND
 
   // Evidence has shown that it's necessary to purge twice to ensure all
   // customers see fresh content.
@@ -47,11 +39,6 @@ export default async function purgeEdgeCache({ includeDelayForPreboot = true } =
     surrogateKey: FASTLY_SURROGATE_KEY,
   }
 
-  if (includeDelayForPreboot) {
-    console.log('Waiting for Heroku Preboot to swap dynos...')
-    await sleep(delayForPrebootSwap)
-  }
-
  console.log('Waiting extra time to prevent a Thundering Herd problem...')
  await sleep(delayBeforeFirstPurge)
 
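For reference, the purge itself (the requestPath and headers lines fall outside this diff's context) is a POST against Fastly's purge-by-surrogate-key endpoint. A minimal sketch of what such a call generally looks like, based on Fastly's public API rather than the omitted lines of this file; the function name is illustrative and the soft-purge header is optional:

import got from 'got'

// Sketch of a purge-by-surrogate-key request against the Fastly API:
// POST /service/{serviceId}/purge/{surrogateKey}, authenticated with Fastly-Key.
async function purgeBySurrogateKeySketch({ apiToken, serviceId, surrogateKey }) {
  const requestPath = `https://api.fastly.com/service/${serviceId}/purge/${surrogateKey}`
  const headers = {
    'Fastly-Key': apiToken,
    Accept: 'application/json',
    // 'Fastly-Soft-Purge': '1', // mark objects stale instead of evicting them outright
  }
  return got.post(requestPath, { headers })
}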