diff --git a/.github/workflows/sync-openapi.yml b/.github/workflows/sync-openapi.yml index bbf7a83119..fda313da70 100644 --- a/.github/workflows/sync-openapi.yml +++ b/.github/workflows/sync-openapi.yml @@ -42,6 +42,13 @@ jobs: path: rest-api-description ref: ${{ inputs.SOURCE_BRANCH }} + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + # By default, only the most recent commit of the `main` branch + # will be checked out + repository: github/models-gateway + path: models-gateway + - uses: ./.github/actions/node-npm-setup - name: Sync the REST, Webhooks, and GitHub Apps schemas @@ -49,7 +56,9 @@ jobs: # Needed for gh GITHUB_TOKEN: ${{ secrets.DOCS_BOT_PAT_BASE }} run: | - npm run sync-rest -- --source-repo rest-api-description --output rest github-apps webhooks rest-redirects + npm run sync-rest -- \ + --source-repos rest-api-description models-gateway \ + --output rest github-apps webhooks rest-redirects git status echo "Deleting the cloned github/rest-api-description repo..." rm -rf rest-api-description diff --git a/content/rest/index.md b/content/rest/index.md index 9ff5868af1..0b23678327 100644 --- a/content/rest/index.md +++ b/content/rest/index.md @@ -73,6 +73,7 @@ children: - /meta - /metrics - /migrations + - /models - /oauth-authorizations - /orgs - /packages diff --git a/content/rest/models/catalog.md b/content/rest/models/catalog.md new file mode 100644 index 0000000000..55ae0944c7 --- /dev/null +++ b/content/rest/models/catalog.md @@ -0,0 +1,17 @@ +--- +title: REST API endpoints for models catalog +shortTitle: Catalog +intro: Use the REST API to get a list of models available for use, including details like ID, supported input/output modalities, and rate limits. +versions: # DO NOT MANUALLY EDIT. CHANGES WILL BE OVERWRITTEN BY A 🤖 + fpt: '*' +topics: + - API +autogenerated: rest +allowTitleToDifferFromFilename: true +--- + +## About {% data variables.product.prodname_github_models %} catalog + +You can use the REST API to explore available models in the {% data variables.product.prodname_github_models %} catalog. + + diff --git a/content/rest/models/index.md b/content/rest/models/index.md new file mode 100644 index 0000000000..4016dde81a --- /dev/null +++ b/content/rest/models/index.md @@ -0,0 +1,12 @@ +--- +title: Models +topics: + - API +autogenerated: rest +allowTitleToDifferFromFilename: true +children: + - /catalog + - /inference +versions: + fpt: '*' +--- diff --git a/content/rest/models/inference.md b/content/rest/models/inference.md new file mode 100644 index 0000000000..fd7d570368 --- /dev/null +++ b/content/rest/models/inference.md @@ -0,0 +1,24 @@ +--- +title: REST API endpoints for models inference +shortTitle: Inference +intro: Use the REST API to submit a chat completion request to a specified model, with or without organizational attribution. +versions: # DO NOT MANUALLY EDIT. CHANGES WILL BE OVERWRITTEN BY A 🤖 + fpt: '*' +topics: + - API +autogenerated: rest +allowTitleToDifferFromFilename: true +--- + +## About {% data variables.product.prodname_github_models %} inference + +You can use the REST API to run inference requests using the {% data variables.product.prodname_github_models %} platform. + +The API supports: + +* Accessing top models from OpenAI, DeepSeek, Microsoft, Llama, and more. +* Running chat-based inference requests with full control over sampling and response parameters. +* Streaming or non-streaming completions. +* Organizational attribution and usage tracking.
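+
+For example, a minimal non-streaming chat completion request looks like the following. This is an illustrative sketch: the endpoint, `Content-Type` header, and request body are taken from the schema in this pull request, while the use of `GITHUB_TOKEN` as the bearer token source is an assumption.
+
+```javascript
+// Send a chat completion request to the GitHub Models inference endpoint
+const response = await fetch('https://models.github.ai/inference/chat/completions', {
+  method: 'POST',
+  headers: {
+    Authorization: `Bearer ${process.env.GITHUB_TOKEN}`, // assumed token source
+    'Content-Type': 'application/json', // required by this endpoint
+  },
+  body: JSON.stringify({
+    model: 'openai/gpt-4.1',
+    messages: [{ role: 'user', content: 'What is the capital of France?' }],
+  }),
+})
+
+// The non-streaming response carries the completion in choices[].message
+const data = await response.json()
+console.log(data.choices[0].message.content)
+```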
+ + diff --git a/package-lock.json b/package-lock.json index 531ca6e3ac..d1cd2faef7 100644 --- a/package-lock.json +++ b/package-lock.json @@ -32,6 +32,7 @@ "cookie-parser": "^1.4.7", "cuss": "2.2.0", "dayjs": "^1.11.13", + "dereference-json-schema": "^0.2.1", "dotenv": "^16.4.7", "escape-string-regexp": "5.0.0", "express": "4.21.2", @@ -6713,6 +6714,12 @@ "node": ">=6" } }, + "node_modules/dereference-json-schema": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/dereference-json-schema/-/dereference-json-schema-0.2.1.tgz", + "integrity": "sha512-uzJsrg225owJyRQ8FNTPHIuBOdSzIZlHhss9u6W8mp7jJldHqGuLv9cULagP/E26QVJDnjtG8U7Dw139mM1ydA==", + "license": "MIT" + }, "node_modules/destroy": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", diff --git a/package.json b/package.json index aa728bcc00..7b8b238e5d 100644 --- a/package.json +++ b/package.json @@ -266,6 +266,7 @@ "cookie-parser": "^1.4.7", "cuss": "2.2.0", "dayjs": "^1.11.13", + "dereference-json-schema": "^0.2.1", "dotenv": "^16.4.7", "escape-string-regexp": "5.0.0", "express": "4.21.2", diff --git a/src/rest/README.md b/src/rest/README.md index 9078acdea5..8ec595d73b 100644 --- a/src/rest/README.md +++ b/src/rest/README.md @@ -36,7 +36,7 @@ To run the REST pipeline locally: 1. Clone the [`github/rest-api-description`](https://github.com/github/rest-api-description) repository inside your local `docs-internal` repository. 1. Set a `GITHUB_TOKEN` in your `.env` with (classic) `repo` scopes & enable SSO for the github org. -1. Run `npm run sync-rest -- -s rest-api-description -o rest`. Note, by default `-o rest` is specified, so you can omit it. +1. Run `npm run sync-rest -- -s rest-api-description models-gateway -o rest`. Note, by default `-o rest` is specified, so you can omit it. ## About this directory diff --git a/src/rest/components/RestCodeSamples.tsx b/src/rest/components/RestCodeSamples.tsx index 46ea76f516..5f3fb0ae89 100644 --- a/src/rest/components/RestCodeSamples.tsx +++ b/src/rest/components/RestCodeSamples.tsx @@ -76,8 +76,13 @@ export function RestCodeSamples({ operation, slug, heading }: Props) { // Menu options for the language selector const languageSelectOptions: CodeSampleKeys[] = [CodeSampleKeys.curl] - // Management Console and GHES Manage API operations are not supported by Octokit - if (operation.subcategory !== 'management-console' && operation.subcategory !== 'manage-ghes') { + // Management Console, GHES Manage API, and GitHub Models + // operations are not supported by Octokit + if ( + operation.category !== 'models' && + operation.subcategory !== 'management-console' && + operation.subcategory !== 'manage-ghes' + ) { languageSelectOptions.push(CodeSampleKeys.javascript) // Not all examples support the GH CLI language option. If any of diff --git a/src/rest/components/RestOperation.tsx b/src/rest/components/RestOperation.tsx index 6d710d03b6..8f580cdf67 100644 --- a/src/rest/components/RestOperation.tsx +++ b/src/rest/components/RestOperation.tsx @@ -26,6 +26,13 @@ const DEFAULT_ACCEPT_HEADER = { isRequired: false, } +const REQUIRED_CONTENT_TYPE_HEADER = { + name: 'content-type', + type: 'string', + description: `
Setting to application/json is required.
Get a list of models available for use, including details like supported input/output modalities,\npublisher, and rate limits.
", + "statusCodes": [ + { + "httpStatusCode": "200", + "description": "OK
" + } + ] + } + ], + "inference": [ + { + "serverUrl": "https://models.github.ai", + "verb": "post", + "requestPath": "/orgs/{org}/inference/chat/completions", + "title": "Run an inference request attributed to an organization", + "category": "models", + "subcategory": "inference", + "parameters": [ + { + "in": "query", + "required": false, + "name": "api-version", + "description": "The API version to use. Optional, but required for some features.
", + "schema": { + "type": "string" + }, + "example": "2024-05-01-preview" + }, + { + "in": "path", + "name": "org", + "required": true, + "description": "The organization login associated with the organization to which the request is to be attributed.
", + "schema": { + "type": "string" + } + } + ], + "bodyParameters": [ + { + "type": "string", + "name": "model", + "in": "body", + "description": "ID of the specific model to use for the request. The model ID should be in the format of {publisher}/{model_name} where \"openai/gpt-4.1\" is an example of a model ID. You can find supported models in the catalog/models endpoint.
", + "isRequired": true + }, + { + "type": "array of objects", + "name": "messages", + "in": "body", + "description": "The collection of context messages associated with this chat completion request. Typical usage begins with a chat message for the System role that provides instructions for the behavior of the assistant, followed by alternating messages between the User and Assistant roles.
", + "isRequired": true, + "childParamsGroups": [ + { + "type": "string", + "name": "role", + "description": "The chat role associated with this message
", + "isRequired": true, + "enum": [ + "assistant", + "developer", + "system", + "user" + ] + }, + { + "type": "string", + "name": "content", + "description": "The content of the message
", + "isRequired": true + } + ] + }, + { + "type": "number", + "name": "frequency_penalty", + "in": "body", + "description": "A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their frequency increases and decrease the likelihood of the model repeating the same statements verbatim. Supported range is [-2, 2].
" + }, + { + "type": "integer", + "name": "max_tokens", + "in": "body", + "description": "The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model's context length. For example, if your prompt is 100 tokens and you set max_tokens to 50, the API will return a completion with a maximum of 50 tokens.
" + }, + { + "type": "array of strings", + "name": "modalities", + "in": "body", + "description": "The modalities that the model is allowed to use for the chat completions response. The default modality is text. Indicating an unsupported modality combination results in a 422 error.\nSupported values are: text, audio
" + }, + { + "type": "number", + "name": "presence_penalty", + "in": "body", + "description": "A value that influences the probability of generated tokens appearing based on their existing presence in generated text. Positive values will make tokens less likely to appear when they already exist and increase the model's likelihood to output new tokens. Supported range is [-2, 2].
" + }, + { + "type": "object", + "name": "response_format", + "in": "body", + "description": "The desired format for the response.
", + "childParamsGroups": [ + { + "type": "object", + "name": "Object", + "description": "", + "childParamsGroups": [ + { + "type": "string", + "name": "type", + "description": "", + "enum": [ + "text", + "json_object" + ] + } + ] + }, + { + "type": "object", + "name": "Schema for structured JSON response", + "description": "", + "isRequired": [ + "type", + "json_schema" + ], + "childParamsGroups": [ + { + "type": "string", + "name": "type", + "description": "The type of the response.
", + "isRequired": true, + "enum": [ + "json_schema" + ] + }, + { + "type": "object", + "name": "json_schema", + "description": "The JSON schema for the response.
", + "isRequired": true + } + ] + } + ], + "oneOfObject": true + }, + { + "type": "integer", + "name": "seed", + "in": "body", + "description": "If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
" + }, + { + "type": "boolean", + "name": "stream", + "in": "body", + "description": "A value indicating whether chat completions should be streamed for this request.
", + "default": false + }, + { + "type": "object", + "name": "stream_options", + "in": "body", + "description": "Whether to include usage information in the response. Requires stream to be set to true.
", + "childParamsGroups": [ + { + "type": "boolean", + "name": "include_usage", + "description": "Whether to include usage information in the response.
", + "default": false + } + ] + }, + { + "type": "array of strings", + "name": "stop", + "in": "body", + "description": "A collection of textual sequences that will end completion generation.
" + }, + { + "type": "number", + "name": "temperature", + "in": "body", + "description": "The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completion request as the interaction of these two settings is difficult to predict. Supported range is [0, 1]. Decimal values are supported.
" + }, + { + "type": "string", + "name": "tool_choice", + "in": "body", + "description": "If specified, the model will configure which of the provided tools it can use for the chat completions response.
", + "enum": [ + "auto", + "required", + "none" + ] + }, + { + "type": "array of objects", + "name": "tools", + "in": "body", + "description": "A list of tools the model may request to call. Currently, only functions are supported as a tool. The model may respond with a function call request and provide the input arguments in JSON format for that function.
", + "childParamsGroups": [ + { + "type": "object", + "name": "function", + "description": "", + "childParamsGroups": [ + { + "type": "string", + "name": "name", + "description": "The name of the function to be called.
" + }, + { + "type": "string", + "name": "description", + "description": "A description of what the function does. The model will use this description when selecting the function and interpreting its parameters.
" + }, + { + "type": "", + "name": "parameters", + "description": "The parameters the function accepts, described as a JSON Schema object.
" + } + ] + }, + { + "type": "string", + "name": "type", + "description": "", + "enum": [ + "function" + ] + } + ] + }, + { + "type": "number", + "name": "top_p", + "in": "body", + "description": "An alternative to sampling with temperature called nucleus sampling. This value causes the model to consider the results of tokens with the provided probability mass. As an example, a value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same request as the interaction of these two settings is difficult to predict. Supported range is [0, 1]. Decimal values are supported.
" + } + ], + "codeExamples": [ + { + "key": "default", + "request": { + "contentType": "application/json", + "description": "Example", + "acceptHeader": "application/vnd.github.v3+json", + "bodyParameters": { + "model": "openai/gpt-4.1", + "messages": [ + { + "role": "user", + "content": "What is the capital of France?" + } + ] + }, + "parameters": { + "org": "ORG" + } + }, + "response": { + "statusCode": "200", + "contentType": "application/json", + "description": "", + "example": { + "choices": [ + { + "message": { + "content": "The capital of France is Paris.", + "role": "assistant" + } + } + ] + }, + "schema": { + "type": "object", + "oneOf": [ + { + "title": "Non Streaming Response", + "description": "A non-streaming response for the inference request.", + "type": "object", + "properties": { + "choices": { + "type": "array", + "items": { + "type": "object", + "properties": { + "message": { + "description": "The message associated with the completion.", + "type": "object", + "properties": { + "content": { + "description": "The content of the message.", + "type": "string" + }, + "role": { + "description": "The role of the message.", + "type": "string" + } + } + } + } + } + } + } + }, + { + "title": "Streaming Response", + "description": "A streaming response for the inference request", + "type": "object", + "properties": { + "data": { + "description": "Some details about the response.", + "type": "object", + "properties": { + "choices": { + "type": "array", + "items": { + "type": "object", + "properties": { + "delta": { + "description": "Container for the content of the streamed response.", + "type": "object", + "properties": { + "content": { + "description": "The content of the streamed response.", + "type": "string" + } + } + } + } + } + } + } + } + } + } + ] + } + } + } + ], + "previews": [], + "descriptionHTML": "This endpoint allows you to run an inference request attributed to a specific organization. You must be a member of the organization to use this endpoint.\nThe request body should contain the model ID and the messages for the chat completion request.\nThe response will include either a non-streaming or streaming response based on the request parameters.
", + "statusCodes": [ + { + "httpStatusCode": "200", + "description": "OK
" + } + ] + }, + { + "serverUrl": "https://models.github.ai", + "verb": "post", + "requestPath": "/inference/chat/completions", + "title": "Run an inference request", + "category": "models", + "subcategory": "inference", + "parameters": [ + { + "in": "query", + "required": false, + "name": "api-version", + "description": "The API version to use. Optional, but required for some features.
", + "schema": { + "type": "string" + }, + "example": "2024-05-01-preview" + } + ], + "bodyParameters": [ + { + "type": "string", + "name": "model", + "in": "body", + "description": "ID of the specific model to use for the request. The model ID should be in the format of {publisher}/{model_name} where \"openai/gpt-4.1\" is an example of a model ID. You can find supported models in the catalog/models endpoint.
", + "isRequired": true + }, + { + "type": "array of objects", + "name": "messages", + "in": "body", + "description": "The collection of context messages associated with this chat completion request. Typical usage begins with a chat message for the System role that provides instructions for the behavior of the assistant, followed by alternating messages between the User and Assistant roles.
", + "isRequired": true, + "childParamsGroups": [ + { + "type": "string", + "name": "role", + "description": "The chat role associated with this message
", + "isRequired": true, + "enum": [ + "assistant", + "developer", + "system", + "user" + ] + }, + { + "type": "string", + "name": "content", + "description": "The content of the message
", + "isRequired": true + } + ] + }, + { + "type": "number", + "name": "frequency_penalty", + "in": "body", + "description": "A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated text. Positive values will make tokens less likely to appear as their frequency increases and decrease the likelihood of the model repeating the same statements verbatim. Supported range is [-2, 2].
" + }, + { + "type": "integer", + "name": "max_tokens", + "in": "body", + "description": "The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model's context length. For example, if your prompt is 100 tokens and you set max_tokens to 50, the API will return a completion with a maximum of 50 tokens.
" + }, + { + "type": "array of strings", + "name": "modalities", + "in": "body", + "description": "The modalities that the model is allowed to use for the chat completions response. The default modality is text. Indicating an unsupported modality combination results in a 422 error.\nSupported values are: text, audio
" + }, + { + "type": "number", + "name": "presence_penalty", + "in": "body", + "description": "A value that influences the probability of generated tokens appearing based on their existing presence in generated text. Positive values will make tokens less likely to appear when they already exist and increase the model's likelihood to output new tokens. Supported range is [-2, 2].
" + }, + { + "type": "object", + "name": "response_format", + "in": "body", + "description": "The desired format for the response.
", + "childParamsGroups": [ + { + "type": "object", + "name": "Object", + "description": "", + "childParamsGroups": [ + { + "type": "string", + "name": "type", + "description": "", + "enum": [ + "text", + "json_object" + ] + } + ] + }, + { + "type": "object", + "name": "Schema for structured JSON response", + "description": "", + "isRequired": [ + "type", + "json_schema" + ], + "childParamsGroups": [ + { + "type": "string", + "name": "type", + "description": "The type of the response.
", + "isRequired": true, + "enum": [ + "json_schema" + ] + }, + { + "type": "object", + "name": "json_schema", + "description": "The JSON schema for the response.
", + "isRequired": true + } + ] + } + ], + "oneOfObject": true + }, + { + "type": "integer", + "name": "seed", + "in": "body", + "description": "If specified, the system will make a best effort to sample deterministically such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed.
" + }, + { + "type": "boolean", + "name": "stream", + "in": "body", + "description": "A value indicating whether chat completions should be streamed for this request.
", + "default": false + }, + { + "type": "object", + "name": "stream_options", + "in": "body", + "description": "Whether to include usage information in the response. Requires stream to be set to true.
", + "childParamsGroups": [ + { + "type": "boolean", + "name": "include_usage", + "description": "Whether to include usage information in the response.
", + "default": false + } + ] + }, + { + "type": "array of strings", + "name": "stop", + "in": "body", + "description": "A collection of textual sequences that will end completion generation.
" + }, + { + "type": "number", + "name": "temperature", + "in": "body", + "description": "The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completion request as the interaction of these two settings is difficult to predict. Supported range is [0, 1]. Decimal values are supported.
" + }, + { + "type": "string", + "name": "tool_choice", + "in": "body", + "description": "If specified, the model will configure which of the provided tools it can use for the chat completions response.
", + "enum": [ + "auto", + "required", + "none" + ] + }, + { + "type": "array of objects", + "name": "tools", + "in": "body", + "description": "A list of tools the model may request to call. Currently, only functions are supported as a tool. The model may respond with a function call request and provide the input arguments in JSON format for that function.
", + "childParamsGroups": [ + { + "type": "object", + "name": "function", + "description": "", + "childParamsGroups": [ + { + "type": "string", + "name": "name", + "description": "The name of the function to be called.
" + }, + { + "type": "string", + "name": "description", + "description": "A description of what the function does. The model will use this description when selecting the function and interpreting its parameters.
" + }, + { + "type": "", + "name": "parameters", + "description": "The parameters the function accepts, described as a JSON Schema object.
" + } + ] + }, + { + "type": "string", + "name": "type", + "description": "", + "enum": [ + "function" + ] + } + ] + }, + { + "type": "number", + "name": "top_p", + "in": "body", + "description": "An alternative to sampling with temperature called nucleus sampling. This value causes the model to consider the results of tokens with the provided probability mass. As an example, a value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify temperature and top_p for the same request as the interaction of these two settings is difficult to predict. Supported range is [0, 1]. Decimal values are supported.
" + } + ], + "codeExamples": [ + { + "key": "default", + "request": { + "contentType": "application/json", + "description": "Example", + "acceptHeader": "application/vnd.github.v3+json", + "bodyParameters": { + "model": "openai/gpt-4.1", + "messages": [ + { + "role": "user", + "content": "What is the capital of France?" + } + ] + } + }, + "response": { + "statusCode": "200", + "contentType": "application/json", + "description": "", + "example": { + "choices": [ + { + "message": { + "content": "The capital of France is Paris.", + "role": "assistant" + } + } + ] + }, + "schema": { + "type": "object", + "oneOf": [ + { + "title": "Non Streaming Response", + "description": "A non-streaming response for the inference request.", + "type": "object", + "properties": { + "choices": { + "type": "array", + "items": { + "type": "object", + "properties": { + "message": { + "description": "The message associated with the completion.", + "type": "object", + "properties": { + "content": { + "description": "The content of the message.", + "type": "string" + }, + "role": { + "description": "The role of the message.", + "type": "string" + } + } + } + } + } + } + } + }, + { + "title": "Streaming Response", + "description": "A streaming response for the inference request", + "type": "object", + "properties": { + "data": { + "description": "Some details about the response.", + "type": "object", + "properties": { + "choices": { + "type": "array", + "items": { + "type": "object", + "properties": { + "delta": { + "description": "Container for the content of the streamed response.", + "type": "object", + "properties": { + "content": { + "description": "The content of the streamed response.", + "type": "string" + } + } + } + } + } + } + } + } + } + } + ] + } + } + } + ], + "previews": [], + "descriptionHTML": "This endpoint allows you to run an inference request.
", + "statusCodes": [ + { + "httpStatusCode": "200", + "description": "OK
" + } + ] + } + ] + }, "orgs": { "orgs": [ { @@ -497689,8 +498476,8 @@ } } ], - "descriptionHTML": "", "previews": [], + "descriptionHTML": "", "statusCodes": [ { "httpStatusCode": "204", @@ -498071,8 +498858,8 @@ } } ], - "descriptionHTML": "", "previews": [], + "descriptionHTML": "", "statusCodes": [ { "httpStatusCode": "200", @@ -516103,8 +516890,8 @@ } } ], - "descriptionHTML": "", "previews": [], + "descriptionHTML": "", "statusCodes": [ { "httpStatusCode": "200", @@ -516528,8 +517315,8 @@ } } ], - "descriptionHTML": "", "previews": [], + "descriptionHTML": "", "statusCodes": [ { "httpStatusCode": "200", @@ -516638,8 +517425,8 @@ } } ], - "descriptionHTML": "", "previews": [], + "descriptionHTML": "", "statusCodes": [ { "httpStatusCode": "200", @@ -536484,8 +537271,8 @@ } } ], - "descriptionHTML": "", "previews": [], + "descriptionHTML": "", "statusCodes": [ { "httpStatusCode": "200", @@ -621744,8 +622531,8 @@ } } ], - "descriptionHTML": "", "previews": [], + "descriptionHTML": "", "statusCodes": [ { "httpStatusCode": "204", @@ -622494,8 +623281,8 @@ } } ], - "descriptionHTML": "", "previews": [], + "descriptionHTML": "", "statusCodes": [ { "httpStatusCode": "204", diff --git a/src/rest/scripts/update-files.ts b/src/rest/scripts/update-files.ts index 5d0ea3643e..67d20cd6bb 100755 --- a/src/rest/scripts/update-files.ts +++ b/src/rest/scripts/update-files.ts @@ -21,6 +21,7 @@ import { allVersions } from '@/versions/lib/all-versions' import { syncWebhookData } from '../../webhooks/scripts/sync' import { syncGitHubAppsData } from '../../github-apps/scripts/sync' import { syncRestRedirects } from './utils/get-redirects' +import { MODELS_GATEWAY_ROOT, injectModelsSchema } from './utils/inject-models-schema' const __dirname = path.dirname(fileURLToPath(import.meta.url)) const TEMP_OPENAPI_DIR = path.join(__dirname, '../../../rest-api-description/openApiTemp') @@ -45,10 +46,10 @@ program ) .addOption( new Option( - '-s, --source-repo