diff --git a/desktop/src/app/components/auth/auth.i18n.yml b/desktop/src/app/components/auth/auth.i18n.yml
index f40a44f391..61beaf0d28 100644
--- a/desktop/src/app/components/auth/auth.i18n.yml
+++ b/desktop/src/app/components/auth/auth.i18n.yml
@@ -6,3 +6,6 @@ auth:
label: Sign in
cancel-button:
label: Cancel
+ encryption-unavailable:
+ title: Secure Storage Unavailable
+ message: Secure credential storage is unavailable. You will need to sign in each time you start the application. On Linux, install libsecret to enable credential storage.
diff --git a/desktop/src/app/components/welcome/welcome.component.ts b/desktop/src/app/components/welcome/welcome.component.ts
index 134cb3a420..3c743cc6d1 100644
--- a/desktop/src/app/components/welcome/welcome.component.ts
+++ b/desktop/src/app/components/welcome/welcome.component.ts
@@ -1,5 +1,5 @@
-import { Component } from "@angular/core";
-import { AuthService, NavigatorService } from "app/services";
+import { Component, OnInit } from "@angular/core";
+import { AuthService, NavigatorService, SafeStorageService } from "app/services";
import { autobind } from "@batch-flask/core";
import { first } from "rxjs/operators";
@@ -7,16 +7,29 @@ import { first } from "rxjs/operators";
selector: "be-welcome",
templateUrl: "./welcome.html",
})
-export class WelcomeComponent {
+export class WelcomeComponent implements OnInit {
+ public encryptionUnavailable = false;
+
public static breadcrumb() {
return { name: "Home" };
}
constructor(
private authService: AuthService,
- private navigationService: NavigatorService
+ private navigationService: NavigatorService,
+ private safeStorage: SafeStorageService
) {}
+ public async ngOnInit() {
+ // Check if encryption is available
+ try {
+ this.encryptionUnavailable = !(await this.safeStorage.isEncryptionAvailable());
+ } catch (error) {
+ console.warn("Failed to check encryption availability:", error);
+ this.encryptionUnavailable = true;
+ }
+ }
+
@autobind()
public async signIn() {
await this.authService.login();
diff --git a/desktop/src/app/components/welcome/welcome.html b/desktop/src/app/components/welcome/welcome.html
index 64bcc2feee..959cef9e44 100644
--- a/desktop/src/app/components/welcome/welcome.html
+++ b/desktop/src/app/components/welcome/welcome.html
@@ -1,6 +1,12 @@
{{"common.welcome" | i18n}}
+
+
+
{{"auth.encryption-unavailable.title" | i18n}}
+
{{"auth.encryption-unavailable.message" | i18n}}
+
+
diff --git a/desktop/src/app/services/azure-batch/certificate/certificate.service.ts b/desktop/src/app/services/azure-batch/certificate/certificate.service.ts
index 927919775c..79a6f927f0 100644
--- a/desktop/src/app/services/azure-batch/certificate/certificate.service.ts
+++ b/desktop/src/app/services/azure-batch/certificate/certificate.service.ts
@@ -210,7 +210,7 @@ export class CertificateService {
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore start is not in the @types/node-forge
md.start();
- md.update(certDer);
+ md.update(certDer); // CodeQL [SM01510] Used only to generate identification thumbprint, not for security purposes
const digest = md.digest();
return digest.toHex();
}
diff --git a/desktop/src/app/services/index.ts b/desktop/src/app/services/index.ts
index baaca3fed9..7f0fea8e69 100644
--- a/desktop/src/app/services/index.ts
+++ b/desktop/src/app/services/index.ts
@@ -33,3 +33,4 @@ export * from "./themes";
export * from "./network";
export * from "./user-configuration";
export * from "./version";
+export * from "./safe-storage.service";
diff --git a/desktop/src/app/services/safe-storage.service.spec.ts b/desktop/src/app/services/safe-storage.service.spec.ts
new file mode 100644
index 0000000000..40456821cb
--- /dev/null
+++ b/desktop/src/app/services/safe-storage.service.spec.ts
@@ -0,0 +1,55 @@
+import { SafeStorageService } from "./safe-storage.service";
+import { IpcEvent } from "common/constants";
+
+describe("SafeStorageService", () => {
+ let service: SafeStorageService;
+ let ipcSpy;
+
+ beforeEach(() => {
+ ipcSpy = {
+ send: jasmine.createSpy("send").and.returnValue(Promise.resolve())
+ };
+
+ service = new SafeStorageService(ipcSpy);
+ });
+
+ it("should call IPC send for setPassword", async () => {
+ await service.setPassword("testService", "testAccount", "testPassword");
+
+ expect(ipcSpy.send).toHaveBeenCalledWith(IpcEvent.safeStorage.setPassword, {
+ key: "testService:testAccount",
+ password: "testPassword"
+ });
+ });
+
+ it("should call IPC send for getPassword", async () => {
+ ipcSpy.send.and.returnValue(Promise.resolve("retrievedPassword"));
+
+ const result = await service.getPassword("testService", "testAccount");
+
+ expect(ipcSpy.send).toHaveBeenCalledWith(IpcEvent.safeStorage.getPassword, {
+ key: "testService:testAccount"
+ });
+ expect(result).toBe("retrievedPassword");
+ });
+
+ it("should call IPC send for deletePassword", async () => {
+ ipcSpy.send.and.returnValue(Promise.resolve(true));
+
+ const result = await service.deletePassword("testService", "testAccount");
+
+ expect(ipcSpy.send).toHaveBeenCalledWith(IpcEvent.safeStorage.deletePassword, {
+ key: "testService:testAccount"
+ });
+ expect(result).toBe(true);
+ });
+
+ it("should call IPC send for isEncryptionAvailable", async () => {
+ ipcSpy.send.and.returnValue(Promise.resolve(true));
+
+ const result = await service.isEncryptionAvailable();
+
+ expect(ipcSpy.send).toHaveBeenCalledWith(IpcEvent.safeStorage.isEncryptionAvailable);
+ expect(result).toBe(true);
+ });
+});
\ No newline at end of file
diff --git a/desktop/src/app/services/safe-storage.service.ts b/desktop/src/app/services/safe-storage.service.ts
new file mode 100644
index 0000000000..25c68cdec6
--- /dev/null
+++ b/desktop/src/app/services/safe-storage.service.ts
@@ -0,0 +1,52 @@
+import { Injectable } from "@angular/core";
+import { IpcService } from "@batch-flask/electron";
+import { IpcEvent } from "common/constants";
+
+/**
+ * Service wrapping Electron's safeStorage API for secure credential storage.
+ * This service is for use in the renderer process and communicates with the main process via IPC.
+ */
+@Injectable({ providedIn: "root" })
+export class SafeStorageService {
+ constructor(private ipc: IpcService) {}
+
+ /**
+ * Store a password securely using Electron's safeStorage API
+ * @param service Service name (equivalent to keytar service)
+ * @param account Account name (equivalent to keytar account)
+ * @param password Password to store
+ */
+ public async setPassword(service: string, account: string, password: string): Promise<void> {
+ const key = `${service}:${account}`;
+ return this.ipc.send(IpcEvent.safeStorage.setPassword, { key, password });
+ }
+
+ /**
+ * Retrieve a password securely using Electron's safeStorage API
+ * @param service Service name (equivalent to keytar service)
+ * @param account Account name (equivalent to keytar account)
+ * @returns Promise resolving to password or null if not found
+ */
+ public async getPassword(service: string, account: string): Promise<string | null> {
+ const key = `${service}:${account}`;
+ return this.ipc.send(IpcEvent.safeStorage.getPassword, { key });
+ }
+
+ /**
+ * Delete a password from secure storage
+ * @param service Service name
+ * @param account Account name
+ */
+ public async deletePassword(service: string, account: string): Promise<boolean> {
+ const key = `${service}:${account}`;
+ return this.ipc.send(IpcEvent.safeStorage.deletePassword, { key });
+ }
+
+ /**
+ * Check if encryption is available
+ * Important on Linux where safeStorage may fall back to unencrypted storage
+ */
+ public async isEncryptionAvailable(): Promise<boolean> {
+ return this.ipc.send(IpcEvent.safeStorage.isEncryptionAvailable);
+ }
+}
diff --git a/desktop/src/app/utils/storage-utils.ts b/desktop/src/app/utils/storage-utils.ts
index 56f4a7f74a..15c28a38e2 100644
--- a/desktop/src/app/utils/storage-utils.ts
+++ b/desktop/src/app/utils/storage-utils.ts
@@ -107,7 +107,7 @@ export class StorageUtils {
private static async _getJobIdHash(jobId: string): Promise<string> {
const jobIdBytes = new TextEncoder().encode(jobId);
- const hash = await crypto.subtle.digest("SHA-1", jobIdBytes);
+ const hash = await crypto.subtle.digest("SHA-1", jobIdBytes); // CodeQL [SM04514] SHA-1 is only used for generating unique identifiers, not for security purposes
return this._hex(hash);
}
diff --git a/desktop/src/client/client.module.ts b/desktop/src/client/client.module.ts
index 5575bb31f8..fd08438260 100644
--- a/desktop/src/client/client.module.ts
+++ b/desktop/src/client/client.module.ts
@@ -24,6 +24,7 @@ import { LocalDataStore } from "./core/local-data-store";
import { BatchExplorerProperties } from "./core/properties";
import { ClientTelemetryModule } from "./core/telemetry";
import { TerminalService } from "./core/terminal";
+import { SafeStorageMainService } from "./core/secure-data-store/safe-storage-main.service";
import { MenuModule } from "./menu/menu.module";
import { ProxySettingsManager } from "./proxy";
@@ -32,6 +33,7 @@ import { ProxySettingsManager } from "./proxy";
*/
const servicesToInitialize = [
TerminalService,
+ SafeStorageMainService,
];
// make sure that the services are created on app start
diff --git a/desktop/src/client/core/batch-explorer-application.ts b/desktop/src/client/core/batch-explorer-application.ts
index 54cd9292a3..a609919fda 100644
--- a/desktop/src/client/core/batch-explorer-application.ts
+++ b/desktop/src/client/core/batch-explorer-application.ts
@@ -24,6 +24,8 @@ import { AADService, AuthenticationWindow } from "./aad";
import { BatchExplorerInitializer } from "./batch-explorer-initializer";
import { MainWindowManager } from "./main-window-manager";
import { StorageBlobAdapter } from "./storage";
+import { SafeStorageMainService } from "./secure-data-store/safe-storage-main.service";
+import { SecureDataStore } from "./secure-data-store/secure-data-store";
import { filter, first, map } from "rxjs/operators";
const osName = `${os.platform()}-${os.arch()}/${os.release()}`;
@@ -61,6 +63,7 @@ export class BatchExplorerApplication {
private telemetryService: TelemetryService,
private telemetryManager: TelemetryManager,
private storageBlobAdapter: StorageBlobAdapter,
+ private safeStorageMainService: SafeStorageMainService,
configurationStore: UserConfigurationService
) {
this.windows = new MainWindowManager(this, this.telemetryManager);
@@ -81,7 +84,10 @@ export class BatchExplorerApplication {
this.proxySettings = this.injector.get(ProxySettingsManager);
this.ipcMain.init();
+ this.safeStorageMainService.init();
await this.aadService.init();
+
+ await this._checkForLegacyCredentials();
this._registerProtocol();
this._setupProcessEvents();
this._registerFileProtocol();
@@ -365,4 +371,12 @@ export class BatchExplorerApplication {
callback({ cancel: false, requestHeaders: details.requestHeaders });
});
}
-}
+
+ private async _checkForLegacyCredentials() {
+ const secureDataStore = this.injector.get(SecureDataStore);
+ if (await secureDataStore.legacyDataDetected()) {
+ log.warn("Logging out to clear legacy encrypted credentials.");
+ await this.aadService.logout();
+ }
+ }
+}
\ No newline at end of file
diff --git a/desktop/src/client/core/secure-data-store/crypto-service.spec.ts b/desktop/src/client/core/secure-data-store/crypto-service.spec.ts
index 3e6eda1434..7601106b41 100644
--- a/desktop/src/client/core/secure-data-store/crypto-service.spec.ts
+++ b/desktop/src/client/core/secure-data-store/crypto-service.spec.ts
@@ -1,37 +1,43 @@
-import { CryptoService } from "./crypto-service";
+import { CryptoService, UnsupportedEncryptionVersionError } from "./crypto-service";
describe("CryptoService", () => {
let service: CryptoService;
let keytarSpy;
- let masterKey: string | null = null;
+ let masterKeys: { [key: string]: string | null } = {};
beforeEach(() => {
- // Fake testing key needs to be 32 characters long
- masterKey = "------fake-key-for-testing------";
+ // Current key is 64 chars (32 bytes hex)
+ masterKeys = {
+ "BatchExplorer:master-v2": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
+ };
+
keytarSpy = {
- setPassword: jasmine.createSpy("setPassword").and.callFake((x) => {
- masterKey = x;
- return Promise.resolve();
+ setPassword: jasmine.createSpy("setPassword").and.callFake(async (service, account, password) => {
+ const key = `${service}:${account}`;
+ masterKeys[key] = password;
+ }),
+ getPassword: jasmine.createSpy("getPassword").and.callFake(async (service, account) => {
+ const key = `${service}:${account}`;
+ return masterKeys[key] || null;
}),
- getPassword: jasmine.createSpy("getPassword").and.callFake(() => Promise.resolve(masterKey)),
};
service = new CryptoService(keytarSpy);
});
- it("sets a new master password if not is found", async () => {
- masterKey = null;
+ it("sets a new master password if not found", async () => {
+ masterKeys["BatchExplorer:master-v2"] = null;
service = new CryptoService(keytarSpy);
- const key = await (service as any)._masterKey;
+ const key = await (service as any)._ensureMasterKey();
expect(key).not.toBeFalsy();
- expect(key!.length).toBe(32);
+ expect(key.length).toBe(64);
});
it("retrieve the master password", async () => {
- const key = await (service as any)._masterKey;
+ const key = await (service as any)._ensureMasterKey();
expect(key).not.toBeFalsy();
- expect(key!.length).toBe(32);
+ expect(key.length).toBe(64);
});
it("encrypt and decrypt a string correctly", async () => {
@@ -41,4 +47,23 @@ describe("CryptoService", () => {
const decrypted = await service.decrypt(encrypted);
expect(decrypted).toEqual(message);
});
+
+ it("rejects legacy encrypted data", async () => {
+ // Simulate legacy format: no version header, just [iv(16)] + [encrypted_data]
+ const crypto = require("crypto");
+ const legacyKeyHex = "0123456789abcdef0123456789abcdef"; // 32-char hex string
+
+ // Create legacy encrypted data that's long enough to pass length check
+ const message = "legacy message that needs to be long enough for the minimum length";
+ let iv: Buffer;
+ do {
+ iv = crypto.randomBytes(16);
+ } while (iv[0] <= 10);
+
+ const cipher = crypto.createCipheriv("aes-256-ctr", legacyKeyHex, iv);
+ const legacyEncrypted = Buffer.concat([iv, cipher.update(Buffer.from(message)), cipher.final()]);
+
+ await expectAsync(service.decrypt(legacyEncrypted.toString("base64")))
+ .toBeRejectedWithError(/Unsupported encryption version.*Please re-authenticate/);
+ });
});
diff --git a/desktop/src/client/core/secure-data-store/crypto-service.ts b/desktop/src/client/core/secure-data-store/crypto-service.ts
index fb5fc6fa40..4ecda859b1 100644
--- a/desktop/src/client/core/secure-data-store/crypto-service.ts
+++ b/desktop/src/client/core/secure-data-store/crypto-service.ts
@@ -6,7 +6,7 @@ import { KeytarService } from "./keytar.service";
* Keytar service and key to save the master key
*/
const BATCH_APPLICATION = "BatchExplorer";
-const KEYTAR_KEY = "master";
+const KEYTAR_KEY = "master-v2";
/**
* Length of the initialization vector
@@ -14,19 +14,49 @@ const KEYTAR_KEY = "master";
const IV_BYTES = 16;
/**
- * Algorithm to use when encrypting
+ * Current encryption algorithm: AES-256-GCM with 32-byte key
*/
-const ENCRYPT_ALGORITHM = "aes-256-ctr";
+const ALGORITHM = "aes-256-gcm";
+const ALGORITHM_VERSION = 2;
+
+/**
+ * GCM tag length for authenticated encryption
+ */
+const GCM_TAG_LENGTH = 16;
+
+/**
+ * Version header length (1 byte)
+ */
+const VERSION_HEADER_LENGTH = 1;
+
+/**
+ * Key size in bytes (32 bytes = 256 bits)
+ */
+const KEY_BYTES = 32;
// What encoding to use when converting buffer to string
const DEFAULT_STRING_ENCODING = "base64";
+export class UnsupportedEncryptionVersionError extends Error {
+ constructor(version: number) {
+ super(`Unsupported encryption version: ${version}. Please re-authenticate.`);
+ this.name = "UnsupportedEncryptionVersionError";
+ }
+}
+
@Injectable({ providedIn: "root" })
export class CryptoService {
- private _masterKey: Promise<string>;
+ private _masterKey: Promise<string> | null = null;
constructor(private keytar: KeytarService) {
- this._masterKey = this._loadMasterKey();
+ // Don't load keys in constructor - defer until first use to avoid IPC initialization issues
+ }
+
+ private _ensureMasterKey(): Promise<string> {
+ if (!this._masterKey) {
+ this._masterKey = this._loadMasterKey();
+ }
+ return this._masterKey;
}
public async encrypt(content: Buffer): Promise<Buffer>;
@@ -52,33 +82,56 @@ export class CryptoService {
}
private async _encryptBuffer(content: Buffer): Promise<Buffer> {
- const key = await this._masterKey;
+ const keyHex = await this._ensureMasterKey();
+ const key = Buffer.from(keyHex, "hex") as crypto.CipherKey;
const iv = this._getIV();
- const cipher = crypto.createCipheriv(ENCRYPT_ALGORITHM, key, iv);
- return Buffer.concat([iv, cipher.update(content), cipher.final()]);
+
+ const cipher = crypto.createCipheriv(ALGORITHM as any, key, iv as crypto.BinaryLike);
+ const encrypted = cipher.update(content as crypto.BinaryLike);
+ cipher.final();
+ const tag = cipher.getAuthTag();
+
+ // Format: [version(1)] + [iv(16)] + [tag(16)] + [encrypted_data]
+ return Buffer.concat([
+ new Uint8Array([ALGORITHM_VERSION]),
+ iv,
+ tag,
+ encrypted
+ ]);
}
private async _decryptBuffer(content: Buffer): Promise<Buffer> {
- const key = await this._masterKey;
- const iv = content.slice(0, IV_BYTES);
- const decipher = crypto.createDecipheriv(ENCRYPT_ALGORITHM, key, iv);
- return Buffer.concat([decipher.update(content.slice(16)), decipher.final()]);
+ // Verify minimum length and version header
+ if (content.length < VERSION_HEADER_LENGTH + IV_BYTES + GCM_TAG_LENGTH) {
+ throw new Error("Invalid encrypted data: content too short");
+ }
+
+ const version = content[0];
+ if (version !== ALGORITHM_VERSION) {
+ throw new UnsupportedEncryptionVersionError(version);
+ }
+
+ const keyHex = await this._ensureMasterKey();
+ const key = Buffer.from(keyHex, "hex") as crypto.CipherKey;
+ const iv = content.slice(VERSION_HEADER_LENGTH, VERSION_HEADER_LENGTH + IV_BYTES);
+ const tag = content.slice(VERSION_HEADER_LENGTH + IV_BYTES, VERSION_HEADER_LENGTH + IV_BYTES + GCM_TAG_LENGTH);
+ const encrypted = content.slice(VERSION_HEADER_LENGTH + IV_BYTES + GCM_TAG_LENGTH);
+
+ const decipher = crypto.createDecipheriv(ALGORITHM as any, key, iv as crypto.BinaryLike);
+ decipher.setAuthTag(tag);
+ return Buffer.concat([decipher.update(encrypted), decipher.final()]);
}
private async _loadMasterKey(): Promise<string> {
let masterKey = await this.keytar.getPassword(BATCH_APPLICATION, KEYTAR_KEY);
if (!masterKey) {
- masterKey = this._generateMasterKey();
+ masterKey = crypto.randomBytes(KEY_BYTES).toString("hex");
await this.keytar.setPassword(BATCH_APPLICATION, KEYTAR_KEY, masterKey);
}
return masterKey;
}
- private _generateMasterKey() {
- return crypto.randomBytes(16).toString("hex");
- }
-
- private _getIV() {
+ private _getIV(): Buffer {
return crypto.randomBytes(IV_BYTES);
}
}
diff --git a/desktop/src/client/core/secure-data-store/index.ts b/desktop/src/client/core/secure-data-store/index.ts
index 905bfa25c6..436ba85d61 100644
--- a/desktop/src/client/core/secure-data-store/index.ts
+++ b/desktop/src/client/core/secure-data-store/index.ts
@@ -1,2 +1,4 @@
-export * from "./secure-data-store";
export * from "./crypto-service";
+export * from "./keytar.service";
+export * from "./safe-storage-main.service";
+export * from "./secure-data-store";
diff --git a/desktop/src/client/core/secure-data-store/keytar.service.spec.ts b/desktop/src/client/core/secure-data-store/keytar.service.spec.ts
new file mode 100644
index 0000000000..a2312cb5ce
--- /dev/null
+++ b/desktop/src/client/core/secure-data-store/keytar.service.spec.ts
@@ -0,0 +1,87 @@
+import { KeytarService } from "./keytar.service";
+
+describe("KeytarService", () => {
+ let service: KeytarService;
+ let safeStorageSpy;
+
+ beforeEach(() => {
+ safeStorageSpy = {
+ setPassword: jasmine.createSpy("setPassword").and.returnValue(Promise.resolve()),
+ getPassword: jasmine.createSpy("getPassword").and.returnValue(Promise.resolve(null))
+ };
+
+ service = new KeytarService(safeStorageSpy);
+ });
+
+ describe("setPassword", () => {
+ it("should call safeStorage.setPassword with correct arguments", async () => {
+ await service.setPassword("testService", "testAccount", "testPassword");
+
+ expect(safeStorageSpy.setPassword).toHaveBeenCalledWith(
+ "testService:testAccount",
+ "testPassword"
+ );
+ });
+
+ it("should not throw when safeStorage.setPassword succeeds", async () => {
+ await expectAsync(service.setPassword("testService", "testAccount", "testPassword"))
+ .toBeResolved();
+ });
+
+ it("should catch and log errors when safeStorage.setPassword fails", async () => {
+ const consoleWarnSpy = spyOn(console, "warn");
+ const error = new Error("Encryption not available");
+ safeStorageSpy.setPassword.and.returnValue(Promise.reject(error));
+
+ await service.setPassword("testService", "testAccount", "testPassword");
+
+ expect(consoleWarnSpy).toHaveBeenCalledWith(
+ "Failed to store credentials securely for testService:testAccount:",
+ "Encryption not available"
+ );
+ });
+
+ it("should not throw when safeStorage.setPassword fails", async () => {
+ safeStorageSpy.setPassword.and.returnValue(
+ Promise.reject(new Error("Encryption not available"))
+ );
+
+ await expectAsync(service.setPassword("testService", "testAccount", "testPassword"))
+ .toBeResolved();
+ });
+ });
+
+ describe("getPassword", () => {
+ it("should call safeStorage.getPassword with correct arguments", async () => {
+ await service.getPassword("testService", "testAccount");
+
+ expect(safeStorageSpy.getPassword).toHaveBeenCalledWith(
+ "testService:testAccount"
+ );
+ });
+
+ it("should return the password from safeStorage.getPassword", async () => {
+ safeStorageSpy.getPassword.and.returnValue(Promise.resolve("retrievedPassword"));
+
+ const result = await service.getPassword("testService", "testAccount");
+
+ expect(result).toBe("retrievedPassword");
+ });
+
+ it("should return null when password is not found", async () => {
+ safeStorageSpy.getPassword.and.returnValue(Promise.resolve(null));
+
+ const result = await service.getPassword("testService", "testAccount");
+
+ expect(result).toBeNull();
+ });
+
+ it("should propagate errors from safeStorage.getPassword", async () => {
+ const error = new Error("Storage error");
+ safeStorageSpy.getPassword.and.returnValue(Promise.reject(error));
+
+ await expectAsync(service.getPassword("testService", "testAccount"))
+ .toBeRejectedWith(error);
+ });
+ });
+});
diff --git a/desktop/src/client/core/secure-data-store/keytar.service.ts b/desktop/src/client/core/secure-data-store/keytar.service.ts
index ea436da6d1..959095d635 100644
--- a/desktop/src/client/core/secure-data-store/keytar.service.ts
+++ b/desktop/src/client/core/secure-data-store/keytar.service.ts
@@ -1,19 +1,28 @@
import { Injectable } from "@angular/core";
+import { SafeStorageMainService } from "./safe-storage-main.service";
/**
- * Service wrapping keytar to make it easier to mock
+ * Service wrapping credential storage. Uses Electron's safeStorage API.
+ * This is for use in the main process.
*/
@Injectable({ providedIn: "root" })
export class KeytarService {
- private keytar: any;
- constructor() {
- this.keytar = require("keytar");
- }
+ constructor(private safeStorage: SafeStorageMainService) {}
+
public async setPassword(service: string, account: string, password: string) {
- return this.keytar.setPassword(service, account, password);
+ try {
+ const key = `${service}:${account}`;
+ return await this.safeStorage.setPassword(key, password);
+ } catch (error) {
+ // If encryption is not available, log the error but allow the app
+ // to try memory-only auth token storage
+ console.warn(`Failed to store credentials securely for ${service}:${account}:`,
+ error.message);
+ }
}
public async getPassword(service: string, account: string): Promise<string | null> {
- return this.keytar.getPassword(service, account);
+ const key = `${service}:${account}`;
+ return this.safeStorage.getPassword(key);
}
}
diff --git a/desktop/src/client/core/secure-data-store/safe-storage-main.service.ts b/desktop/src/client/core/secure-data-store/safe-storage-main.service.ts
new file mode 100644
index 0000000000..21d5c9cff2
--- /dev/null
+++ b/desktop/src/client/core/secure-data-store/safe-storage-main.service.ts
@@ -0,0 +1,105 @@
+import { Injectable } from "@angular/core";
+import { BlIpcMain } from "../bl-ipc-main";
+import { safeStorage } from "electron";
+import { GlobalStorage } from "@batch-flask/core";
+import { IpcEvent } from "common/constants";
+
+/**
+ * Handles safeStorage operations using Electron's safeStorage API.
+ */
+@Injectable({ providedIn: "root" })
+export class SafeStorageMainService {
+ private _storageKey = "safeStorageData";
+
+ constructor(
+ private ipcMain: BlIpcMain,
+ private _storage: GlobalStorage
+ ) {
+ }
+
+ public init() {
+ this._setupIpcHandlers();
+ }
+
+ private _setupIpcHandlers() {
+ this.ipcMain.on(IpcEvent.safeStorage.setPassword, async (data) => {
+ return this.setPassword(data.key, data.password);
+ });
+
+ this.ipcMain.on(IpcEvent.safeStorage.getPassword, async (data) => {
+ return this.getPassword(data.key);
+ });
+
+ this.ipcMain.on(IpcEvent.safeStorage.deletePassword, async (data) => {
+ return this.deletePassword(data.key);
+ });
+
+ this.ipcMain.on(IpcEvent.safeStorage.isEncryptionAvailable, async () => {
+ return safeStorage.isEncryptionAvailable();
+ });
+ }
+
+ public async setPassword(key: string, password: string): Promise<void> {
+ if (!safeStorage.isEncryptionAvailable()) {
+ throw new Error("Encryption is not available on this platform. Cannot store credentials securely.");
+ }
+
+ // Get existing data or create new storage object
+ const storageData = await this._getStorageData();
+
+ // Encrypt the password
+ const encrypted = safeStorage.encryptString(password);
+
+ // Store the encrypted data with base64 encoding for JSON serialization
+ storageData[key] = encrypted.toString("base64");
+
+ // Save back to storage
+ await this._saveStorageData(storageData);
+ }
+
+ public async getPassword(key: string): Promise<string | null> {
+ if (!safeStorage.isEncryptionAvailable()) {
+ return null;
+ }
+
+ const storageData = await this._getStorageData();
+ const encryptedBase64 = storageData[key];
+
+ if (!encryptedBase64) {
+ return null;
+ }
+
+ try {
+ const encryptedBuffer = Buffer.from(encryptedBase64, "base64");
+ return safeStorage.decryptString(encryptedBuffer);
+ } catch (error) {
+ console.warn(`Failed to decrypt password for key ${key}:`, error);
+ return null;
+ }
+ }
+
+ public async deletePassword(key: string): Promise<boolean> {
+ const storageData = await this._getStorageData();
+
+ if (storageData[key]) {
+ delete storageData[key];
+ await this._saveStorageData(storageData);
+ return true;
+ }
+
+ return false;
+ }
+
+ private async _getStorageData(): Promise<{ [key: string]: string }> {
+ try {
+ const data = await this._storage.get(this._storageKey);
+ return data || {};
+ } catch (error) {
+ return {};
+ }
+ }
+
+ private async _saveStorageData(data: { [key: string]: string }): Promise<void> {
+ await this._storage.set(this._storageKey, data);
+ }
+}
\ No newline at end of file
diff --git a/desktop/src/client/core/secure-data-store/secure-data-store.spec.ts b/desktop/src/client/core/secure-data-store/secure-data-store.spec.ts
index 47be7fd5da..1d06ddde48 100644
--- a/desktop/src/client/core/secure-data-store/secure-data-store.spec.ts
+++ b/desktop/src/client/core/secure-data-store/secure-data-store.spec.ts
@@ -1,5 +1,6 @@
import * as path from "path";
import { SecureDataStore } from "./secure-data-store";
+import { UnsupportedEncryptionVersionError } from "./crypto-service";
describe("SecureDataStore", () => {
let store: SecureDataStore;
@@ -25,6 +26,7 @@ describe("SecureDataStore", () => {
fileContent = x;
return Promise.resolve();
}),
+ delete: jasmine.createSpy("delete").and.returnValue(Promise.resolve()),
};
cryptoSpy = {
@@ -73,4 +75,24 @@ describe("SecureDataStore", () => {
expect(fileContent).toEqual("==encryptedtext==");
});
});
+
+ describe("When file has legacy encrypted data", () => {
+ beforeEach(async () => {
+ fileContent = "legacy-encrypted-content";
+ cryptoSpy.decrypt = jasmine.createSpy("decrypt").and.returnValue(
+ Promise.reject(new UnsupportedEncryptionVersionError(1))
+ );
+
+ store = new SecureDataStore(fsSpy, cryptoSpy);
+ });
+
+ it("sets legacyDataDetected flag to true", async () => {
+ expect(await store.legacyDataDetected()).toBe(true);
+ });
+
+ it("starts with empty store", async () => {
+ const value = await store.getItem("key1");
+ expect(value).toBeUndefined();
+ });
+ });
});
diff --git a/desktop/src/client/core/secure-data-store/secure-data-store.ts b/desktop/src/client/core/secure-data-store/secure-data-store.ts
index 4618af68b1..734b2b2e49 100644
--- a/desktop/src/client/core/secure-data-store/secure-data-store.ts
+++ b/desktop/src/client/core/secure-data-store/secure-data-store.ts
@@ -3,7 +3,7 @@ import { DataStore, InMemoryDataStore } from "@batch-flask/core";
import { FileSystemService } from "@batch-flask/electron";
import { log } from "@batch-flask/utils";
import * as path from "path";
-import { CryptoService } from "./crypto-service";
+import { CryptoService, UnsupportedEncryptionVersionError } from "./crypto-service";
const SECRET_DATA_FILE = "data/secure.enc";
@@ -13,12 +13,18 @@ const SECRET_DATA_FILE = "data/secure.enc";
@Injectable({ providedIn: "root" })
export class SecureDataStore extends InMemoryDataStore implements DataStore {
private _loadPromise: Promise<void>;
+ private _legacyDataDetected = false;
constructor(private fs: FileSystemService, private crypto: CryptoService) {
super();
this._loadPromise = this._load();
}
+ public async legacyDataDetected(): Promise<boolean> {
+ await this._loadPromise;
+ return this._legacyDataDetected;
+ }
+
public async setItem<T>(key: string, value: T) {
await this._loadPromise;
await super.setItem(key, value);
@@ -56,13 +62,18 @@ export class SecureDataStore extends InMemoryDataStore implements DataStore {
if (!encryptedContent) {
return;
}
- const content = await this.crypto.decrypt(encryptedContent);
try {
+ const content = await this.crypto.decrypt(encryptedContent);
const data = JSON.parse(content);
this._data = new Map(Object.entries(data));
} catch (e) {
- log.error("Invalid JSON in the secret store file. This could be because we lost the master key.");
+ if (e instanceof UnsupportedEncryptionVersionError) {
+ log.warn("Legacy credential encryption detected.");
+ this._legacyDataDetected = true;
+ } else {
+ log.error("Invalid JSON in the secret store file. This could be because we lost the master key.", e);
+ }
}
}
diff --git a/desktop/src/client/main-window/main-window.ts b/desktop/src/client/main-window/main-window.ts
index 28296e926c..9c4ab7eb61 100644
--- a/desktop/src/client/main-window/main-window.ts
+++ b/desktop/src/client/main-window/main-window.ts
@@ -76,7 +76,6 @@ export class MainWindow extends GenericWindow {
show: false, // Don't show the window until it is ready
titleBarStyle: process.platform === "darwin" ? "hidden" : "default",
webPreferences: {
- webSecurity: false,
allowRunningInsecureContent: false,
nodeIntegration: true,
contextIsolation: false,
diff --git a/desktop/src/common/constants/constants.ts b/desktop/src/common/constants/constants.ts
index 7db10e3ce6..40068d5ef2 100644
--- a/desktop/src/common/constants/constants.ts
+++ b/desktop/src/common/constants/constants.ts
@@ -228,6 +228,12 @@ export const IpcEvent = {
downloadBlob: "STORAGE_BLOB_DOWNLOAD_BLOB",
uploadFile: "STORAGE_BLOB_UPLOAD_FILE",
deleteBlob: "STORAGE_BLOB_DELETE_BLOB",
+ },
+ safeStorage: {
+ setPassword: "SafeStorage:setPassword",
+ getPassword: "SafeStorage:getPassword",
+ deletePassword: "SafeStorage:deletePassword",
+ isEncryptionAvailable: "SafeStorage:isEncryptionAvailable",
}
};
diff --git a/eng/emitter-package.json b/eng/emitter-package.json
index 07c7e775a7..b41ee60d52 100644
--- a/eng/emitter-package.json
+++ b/eng/emitter-package.json
@@ -2,15 +2,15 @@
"name": "typescript-emitter-package",
"main": "dist/src/index.js",
"dependencies": {
- "@azure-tools/typespec-ts": "0.33.0",
- "@azure-tools/typespec-azure-core": "0.45.0",
- "@azure-tools/typespec-autorest": "0.45.0",
- "@azure-tools/typespec-client-generator-core": "0.45.4",
- "@azure-tools/typespec-azure-resource-manager": "0.45.0",
- "@azure-tools/typespec-azure-rulesets": "0.45.0",
- "@typespec/compiler": "0.59.1",
- "@typespec/http": "0.59.1",
- "@typespec/rest": "0.59.1",
- "@typespec/versioning": "0.59.0"
+ "@azure-tools/typespec-ts": "0.45.1",
+ "@azure-tools/typespec-azure-core": "0.61.0",
+ "@azure-tools/typespec-autorest": "0.61.0",
+ "@azure-tools/typespec-client-generator-core": "0.61.0",
+ "@azure-tools/typespec-azure-resource-manager": "0.61.0",
+ "@azure-tools/typespec-azure-rulesets": "0.61.0",
+ "@typespec/compiler": "1.5.0",
+ "@typespec/http": "1.5.0",
+ "@typespec/rest": "0.75.0",
+ "@typespec/versioning": "0.75.0"
}
}
diff --git a/package-lock.json b/package-lock.json
index 3a6508d103..112fd2bfcb 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -395,13 +395,10 @@
}
},
"node_modules/@babel/runtime": {
- "version": "7.22.15",
- "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.15.tgz",
- "integrity": "sha512-T0O+aa+4w0u06iNmapipJXMV4HoUir03hpx3/YqXXhu9xim3w+dVphjFWl1OH8NbZHw5Lbm9k45drDkgq2VNNA==",
+ "version": "7.28.4",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz",
+ "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==",
"dev": true,
- "dependencies": {
- "regenerator-runtime": "^0.14.0"
- },
"engines": {
"node": ">=6.9.0"
}
@@ -699,6 +696,49 @@
"node": ">=6.9.0"
}
},
+ "node_modules/@inquirer/external-editor": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/@inquirer/external-editor/-/external-editor-1.0.3.tgz",
+ "integrity": "sha512-RWbSrDiYmO4LbejWY7ttpxczuwQyZLBUyygsA9Nsv95hpzUWwnNTVQmAq3xuh7vNwCp07UTmE5i11XAEExx4RA==",
+ "dev": true,
+ "dependencies": {
+ "chardet": "^2.1.1",
+ "iconv-lite": "^0.7.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@types/node": ">=18"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@inquirer/external-editor/node_modules/chardet": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/chardet/-/chardet-2.1.1.tgz",
+ "integrity": "sha512-PsezH1rqdV9VvyNhxxOW32/d75r01NY7TQCmOqomRo15ZSOKbpTFVsfjghxo6JloQUCGnH4k1LGu0R4yCLlWQQ==",
+ "dev": true
+ },
+ "node_modules/@inquirer/external-editor/node_modules/iconv-lite": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.1.tgz",
+ "integrity": "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw==",
+ "dev": true,
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
"node_modules/@isaacs/cliui": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
@@ -717,9 +757,9 @@
}
},
"node_modules/@isaacs/cliui/node_modules/ansi-regex": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
- "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
+ "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
"dev": true,
"engines": {
"node": ">=12"
@@ -729,9 +769,9 @@
}
},
"node_modules/@isaacs/cliui/node_modules/ansi-styles": {
- "version": "6.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
- "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
+ "version": "6.2.3",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
+ "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
"dev": true,
"engines": {
"node": ">=12"
@@ -764,9 +804,9 @@
}
},
"node_modules/@isaacs/cliui/node_modules/strip-ansi": {
- "version": "7.1.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
- "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
+ "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
"dev": true,
"dependencies": {
"ansi-regex": "^6.0.1"
@@ -917,24 +957,6 @@
"node": "^14.17.0 || >=16.0.0"
}
},
- "node_modules/@lerna/create/node_modules/@nx/devkit": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/devkit/-/devkit-16.7.4.tgz",
- "integrity": "sha512-SLito+/TAeDYR+d7IIpp/sBJm41WM+nIevILv0TSQW4Pq0ylUy1nUvV8Pe7l1ohZccDrQuebMUWPwGO0hv8SeQ==",
- "dev": true,
- "dependencies": {
- "@nrwl/devkit": "16.7.4",
- "ejs": "^3.1.7",
- "enquirer": "~2.3.6",
- "ignore": "^5.0.4",
- "semver": "7.5.3",
- "tmp": "~0.2.1",
- "tslib": "^2.3.0"
- },
- "peerDependencies": {
- "nx": ">= 15 <= 17"
- }
- },
"node_modules/@lerna/create/node_modules/chalk": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
@@ -951,39 +973,6 @@
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
- "node_modules/@lerna/create/node_modules/fast-glob": {
- "version": "3.2.7",
- "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.7.tgz",
- "integrity": "sha512-rYGMRwip6lUMvYD3BTScMwT1HtAs2d71SMv66Vrxs0IekGZEjhM0pcMfjQPnknBt2zeCwQMEupiN02ZP4DiT1Q==",
- "dev": true,
- "dependencies": {
- "@nodelib/fs.stat": "^2.0.2",
- "@nodelib/fs.walk": "^1.2.3",
- "glob-parent": "^5.1.2",
- "merge2": "^1.3.0",
- "micromatch": "^4.0.4"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/@lerna/create/node_modules/glob": {
- "version": "7.1.4",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.4.tgz",
- "integrity": "sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==",
- "dev": true,
- "dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.0.4",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
- },
- "engines": {
- "node": "*"
- }
- },
"node_modules/@lerna/create/node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
@@ -1017,118 +1006,6 @@
"node": ">=8"
}
},
- "node_modules/@lerna/create/node_modules/nx": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/nx/-/nx-16.7.4.tgz",
- "integrity": "sha512-L0Cbikk5kO+IBH0UQ2BOAut5ndeHXBlACKzjOPOCluY8WYh2sxWYt9/N/juFBN3XXRX7ionTr1PhWUzNE0Mzqw==",
- "dev": true,
- "hasInstallScript": true,
- "dependencies": {
- "@nrwl/tao": "16.7.4",
- "@parcel/watcher": "2.0.4",
- "@yarnpkg/lockfile": "^1.1.0",
- "@yarnpkg/parsers": "3.0.0-rc.46",
- "@zkochan/js-yaml": "0.0.6",
- "axios": "^1.0.0",
- "chalk": "^4.1.0",
- "cli-cursor": "3.1.0",
- "cli-spinners": "2.6.1",
- "cliui": "^7.0.2",
- "dotenv": "~16.3.1",
- "enquirer": "~2.3.6",
- "fast-glob": "3.2.7",
- "figures": "3.2.0",
- "flat": "^5.0.2",
- "fs-extra": "^11.1.0",
- "glob": "7.1.4",
- "ignore": "^5.0.4",
- "js-yaml": "4.1.0",
- "jsonc-parser": "3.2.0",
- "lines-and-columns": "~2.0.3",
- "minimatch": "3.0.5",
- "node-machine-id": "1.1.12",
- "npm-run-path": "^4.0.1",
- "open": "^8.4.0",
- "semver": "7.5.3",
- "string-width": "^4.2.3",
- "strong-log-transformer": "^2.1.0",
- "tar-stream": "~2.2.0",
- "tmp": "~0.2.1",
- "tsconfig-paths": "^4.1.2",
- "tslib": "^2.3.0",
- "v8-compile-cache": "2.3.0",
- "yargs": "^17.6.2",
- "yargs-parser": "21.1.1"
- },
- "bin": {
- "nx": "bin/nx.js"
- },
- "optionalDependencies": {
- "@nx/nx-darwin-arm64": "16.7.4",
- "@nx/nx-darwin-x64": "16.7.4",
- "@nx/nx-freebsd-x64": "16.7.4",
- "@nx/nx-linux-arm-gnueabihf": "16.7.4",
- "@nx/nx-linux-arm64-gnu": "16.7.4",
- "@nx/nx-linux-arm64-musl": "16.7.4",
- "@nx/nx-linux-x64-gnu": "16.7.4",
- "@nx/nx-linux-x64-musl": "16.7.4",
- "@nx/nx-win32-arm64-msvc": "16.7.4",
- "@nx/nx-win32-x64-msvc": "16.7.4"
- },
- "peerDependencies": {
- "@swc-node/register": "^1.4.2",
- "@swc/core": "^1.2.173"
- },
- "peerDependenciesMeta": {
- "@swc-node/register": {
- "optional": true
- },
- "@swc/core": {
- "optional": true
- }
- }
- },
- "node_modules/@lerna/create/node_modules/nx/node_modules/yargs": {
- "version": "17.7.2",
- "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
- "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
- "dev": true,
- "dependencies": {
- "cliui": "^8.0.1",
- "escalade": "^3.1.1",
- "get-caller-file": "^2.0.5",
- "require-directory": "^2.1.1",
- "string-width": "^4.2.3",
- "y18n": "^5.0.5",
- "yargs-parser": "^21.1.1"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@lerna/create/node_modules/nx/node_modules/yargs-parser": {
- "version": "21.1.1",
- "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
- "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
- "dev": true,
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@lerna/create/node_modules/nx/node_modules/yargs/node_modules/cliui": {
- "version": "8.0.1",
- "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
- "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
- "dev": true,
- "dependencies": {
- "string-width": "^4.2.0",
- "strip-ansi": "^6.0.1",
- "wrap-ansi": "^7.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
"node_modules/@lerna/create/node_modules/resolve-from": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz",
@@ -1157,9 +1034,9 @@
}
},
"node_modules/@lerna/create/node_modules/rimraf/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
@@ -1272,39 +1149,6 @@
"node": "^14.17.0 || >=16.0.0"
}
},
- "node_modules/@lerna/legacy-package-management/node_modules/@nx/devkit": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/devkit/-/devkit-16.7.4.tgz",
- "integrity": "sha512-SLito+/TAeDYR+d7IIpp/sBJm41WM+nIevILv0TSQW4Pq0ylUy1nUvV8Pe7l1ohZccDrQuebMUWPwGO0hv8SeQ==",
- "dev": true,
- "dependencies": {
- "@nrwl/devkit": "16.7.4",
- "ejs": "^3.1.7",
- "enquirer": "~2.3.6",
- "ignore": "^5.0.4",
- "semver": "7.5.3",
- "tmp": "~0.2.1",
- "tslib": "^2.3.0"
- },
- "peerDependencies": {
- "nx": ">= 15 <= 17"
- }
- },
- "node_modules/@lerna/legacy-package-management/node_modules/@nx/devkit/node_modules/semver": {
- "version": "7.5.3",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
- "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==",
- "dev": true,
- "dependencies": {
- "lru-cache": "^6.0.0"
- },
- "bin": {
- "semver": "bin/semver.js"
- },
- "engines": {
- "node": ">=10"
- }
- },
"node_modules/@lerna/legacy-package-management/node_modules/chalk": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
@@ -1321,41 +1165,6 @@
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
- "node_modules/@lerna/legacy-package-management/node_modules/fast-glob": {
- "version": "3.2.7",
- "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.7.tgz",
- "integrity": "sha512-rYGMRwip6lUMvYD3BTScMwT1HtAs2d71SMv66Vrxs0IekGZEjhM0pcMfjQPnknBt2zeCwQMEupiN02ZP4DiT1Q==",
- "dev": true,
- "peer": true,
- "dependencies": {
- "@nodelib/fs.stat": "^2.0.2",
- "@nodelib/fs.walk": "^1.2.3",
- "glob-parent": "^5.1.2",
- "merge2": "^1.3.0",
- "micromatch": "^4.0.4"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/@lerna/legacy-package-management/node_modules/glob": {
- "version": "7.1.4",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.4.tgz",
- "integrity": "sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==",
- "dev": true,
- "peer": true,
- "dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.0.4",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
- },
- "engines": {
- "node": "*"
- }
- },
"node_modules/@lerna/legacy-package-management/node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
@@ -1431,132 +1240,10 @@
"node": ">=8"
}
},
- "node_modules/@lerna/legacy-package-management/node_modules/nx": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/nx/-/nx-16.7.4.tgz",
- "integrity": "sha512-L0Cbikk5kO+IBH0UQ2BOAut5ndeHXBlACKzjOPOCluY8WYh2sxWYt9/N/juFBN3XXRX7ionTr1PhWUzNE0Mzqw==",
- "dev": true,
- "hasInstallScript": true,
- "peer": true,
- "dependencies": {
- "@nrwl/tao": "16.7.4",
- "@parcel/watcher": "2.0.4",
- "@yarnpkg/lockfile": "^1.1.0",
- "@yarnpkg/parsers": "3.0.0-rc.46",
- "@zkochan/js-yaml": "0.0.6",
- "axios": "^1.0.0",
- "chalk": "^4.1.0",
- "cli-cursor": "3.1.0",
- "cli-spinners": "2.6.1",
- "cliui": "^7.0.2",
- "dotenv": "~16.3.1",
- "enquirer": "~2.3.6",
- "fast-glob": "3.2.7",
- "figures": "3.2.0",
- "flat": "^5.0.2",
- "fs-extra": "^11.1.0",
- "glob": "7.1.4",
- "ignore": "^5.0.4",
- "js-yaml": "4.1.0",
- "jsonc-parser": "3.2.0",
- "lines-and-columns": "~2.0.3",
- "minimatch": "3.0.5",
- "node-machine-id": "1.1.12",
- "npm-run-path": "^4.0.1",
- "open": "^8.4.0",
- "semver": "7.5.3",
- "string-width": "^4.2.3",
- "strong-log-transformer": "^2.1.0",
- "tar-stream": "~2.2.0",
- "tmp": "~0.2.1",
- "tsconfig-paths": "^4.1.2",
- "tslib": "^2.3.0",
- "v8-compile-cache": "2.3.0",
- "yargs": "^17.6.2",
- "yargs-parser": "21.1.1"
- },
- "bin": {
- "nx": "bin/nx.js"
- },
- "optionalDependencies": {
- "@nx/nx-darwin-arm64": "16.7.4",
- "@nx/nx-darwin-x64": "16.7.4",
- "@nx/nx-freebsd-x64": "16.7.4",
- "@nx/nx-linux-arm-gnueabihf": "16.7.4",
- "@nx/nx-linux-arm64-gnu": "16.7.4",
- "@nx/nx-linux-arm64-musl": "16.7.4",
- "@nx/nx-linux-x64-gnu": "16.7.4",
- "@nx/nx-linux-x64-musl": "16.7.4",
- "@nx/nx-win32-arm64-msvc": "16.7.4",
- "@nx/nx-win32-x64-msvc": "16.7.4"
- },
- "peerDependencies": {
- "@swc-node/register": "^1.4.2",
- "@swc/core": "^1.2.173"
- },
- "peerDependenciesMeta": {
- "@swc-node/register": {
- "optional": true
- },
- "@swc/core": {
- "optional": true
- }
- }
- },
- "node_modules/@lerna/legacy-package-management/node_modules/nx/node_modules/semver": {
- "version": "7.5.3",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
- "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==",
- "dev": true,
- "peer": true,
- "dependencies": {
- "lru-cache": "^6.0.0"
- },
- "bin": {
- "semver": "bin/semver.js"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/@lerna/legacy-package-management/node_modules/nx/node_modules/yargs": {
- "version": "17.7.2",
- "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
- "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
- "dev": true,
- "peer": true,
- "dependencies": {
- "cliui": "^8.0.1",
- "escalade": "^3.1.1",
- "get-caller-file": "^2.0.5",
- "require-directory": "^2.1.1",
- "string-width": "^4.2.3",
- "y18n": "^5.0.5",
- "yargs-parser": "^21.1.1"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@lerna/legacy-package-management/node_modules/nx/node_modules/yargs/node_modules/cliui": {
- "version": "8.0.1",
- "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
- "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
- "dev": true,
- "peer": true,
- "dependencies": {
- "string-width": "^4.2.0",
- "strip-ansi": "^6.0.1",
- "wrap-ansi": "^7.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@lerna/legacy-package-management/node_modules/pretty-format": {
- "version": "29.4.3",
- "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.4.3.tgz",
- "integrity": "sha512-cvpcHTc42lcsvOOAzd3XuNWTcvk1Jmnzqeu+WsOuiPmxUJTnkbAcFNsRKvEpBEUFVUgy/GTZLulZDcDEi+CIlA==",
+ "node_modules/@lerna/legacy-package-management/node_modules/pretty-format": {
+ "version": "29.4.3",
+ "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.4.3.tgz",
+ "integrity": "sha512-cvpcHTc42lcsvOOAzd3XuNWTcvk1Jmnzqeu+WsOuiPmxUJTnkbAcFNsRKvEpBEUFVUgy/GTZLulZDcDEi+CIlA==",
"dev": true,
"dependencies": {
"@jest/schemas": "^29.4.3",
@@ -1613,9 +1300,9 @@
}
},
"node_modules/@lerna/legacy-package-management/node_modules/rimraf/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
@@ -1669,16 +1356,6 @@
"node": ">=10"
}
},
- "node_modules/@lerna/legacy-package-management/node_modules/yargs-parser": {
- "version": "21.1.1",
- "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
- "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
- "dev": true,
- "peer": true,
- "engines": {
- "node": ">=12"
- }
- },
"node_modules/@microsoft/load-themed-styles": {
"version": "1.10.295",
"resolved": "https://registry.npmjs.org/@microsoft/load-themed-styles/-/load-themed-styles-1.10.295.tgz",
@@ -1789,9 +1466,9 @@
}
},
"node_modules/@npmcli/arborist/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
@@ -2087,40 +1764,38 @@
}
},
"node_modules/@npmcli/map-workspaces/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/@npmcli/map-workspaces/node_modules/glob": {
- "version": "10.3.3",
- "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.3.tgz",
- "integrity": "sha512-92vPiMb/iqpmEgsOoIDvTjc50wf9CCCvMzsi6W0JLPeUKE8TWP1a73PgqSrqy7iAZxaSD1YdzU7QZR5LF51MJw==",
+ "version": "10.5.0",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz",
+ "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==",
"dev": true,
"dependencies": {
"foreground-child": "^3.1.0",
- "jackspeak": "^2.0.3",
- "minimatch": "^9.0.1",
- "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0",
- "path-scurry": "^1.10.1"
+ "jackspeak": "^3.1.2",
+ "minimatch": "^9.0.4",
+ "minipass": "^7.1.2",
+ "package-json-from-dist": "^1.0.0",
+ "path-scurry": "^1.11.1"
},
"bin": {
- "glob": "dist/cjs/src/bin.js"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
+ "glob": "dist/esm/bin.mjs"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/@npmcli/map-workspaces/node_modules/minimatch": {
- "version": "9.0.3",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz",
- "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==",
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
+ "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"dev": true,
"dependencies": {
"brace-expansion": "^2.0.1"
@@ -2132,6 +1807,15 @@
"url": "https://github.com/sponsors/isaacs"
}
},
+ "node_modules/@npmcli/map-workspaces/node_modules/minipass": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
+ "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
+ "dev": true,
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ }
+ },
"node_modules/@npmcli/metavuln-calculator": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/@npmcli/metavuln-calculator/-/metavuln-calculator-5.0.1.tgz",
@@ -2192,31 +1876,29 @@
}
},
"node_modules/@npmcli/package-json/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/@npmcli/package-json/node_modules/glob": {
- "version": "10.3.3",
- "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.3.tgz",
- "integrity": "sha512-92vPiMb/iqpmEgsOoIDvTjc50wf9CCCvMzsi6W0JLPeUKE8TWP1a73PgqSrqy7iAZxaSD1YdzU7QZR5LF51MJw==",
+ "version": "10.5.0",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz",
+ "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==",
"dev": true,
"dependencies": {
"foreground-child": "^3.1.0",
- "jackspeak": "^2.0.3",
- "minimatch": "^9.0.1",
- "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0",
- "path-scurry": "^1.10.1"
+ "jackspeak": "^3.1.2",
+ "minimatch": "^9.0.4",
+ "minipass": "^7.1.2",
+ "package-json-from-dist": "^1.0.0",
+ "path-scurry": "^1.11.1"
},
"bin": {
- "glob": "dist/cjs/src/bin.js"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
+ "glob": "dist/esm/bin.mjs"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
@@ -2253,504 +1935,167 @@
}
},
"node_modules/@npmcli/package-json/node_modules/minimatch": {
- "version": "9.0.3",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz",
- "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==",
- "dev": true,
- "dependencies": {
- "brace-expansion": "^2.0.1"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/@npmcli/package-json/node_modules/normalize-package-data": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-5.0.0.tgz",
- "integrity": "sha512-h9iPVIfrVZ9wVYQnxFgtw1ugSvGEMOlyPWWtm8BMJhnwyEL/FLbYbTY3V3PpjI/BUK67n9PEWDu6eHzu1fB15Q==",
- "dev": true,
- "dependencies": {
- "hosted-git-info": "^6.0.0",
- "is-core-module": "^2.8.1",
- "semver": "^7.3.5",
- "validate-npm-package-license": "^3.0.4"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/@npmcli/package-json/node_modules/npm-normalize-package-bin": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.1.tgz",
- "integrity": "sha512-dMxCf+zZ+3zeQZXKxmyuCKlIDPGuv8EF940xbkC4kQVDTtqoh6rJFO+JTKSA6/Rwi0getWmtuy4Itup0AMcaDQ==",
- "dev": true,
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/@npmcli/promise-spawn": {
- "version": "6.0.2",
- "resolved": "https://registry.npmjs.org/@npmcli/promise-spawn/-/promise-spawn-6.0.2.tgz",
- "integrity": "sha512-gGq0NJkIGSwdbUt4yhdF8ZrmkGKVz9vAdVzpOfnom+V8PLSmSOVhZwbNvZZS1EYcJN5hzzKBxmmVVAInM6HQLg==",
- "dev": true,
- "dependencies": {
- "which": "^3.0.0"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/@npmcli/promise-spawn/node_modules/which": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/which/-/which-3.0.1.tgz",
- "integrity": "sha512-XA1b62dzQzLfaEOSQFTCOd5KFf/1VSzZo7/7TUjnya6u0vGGKzU96UQBZTAThCb2j4/xjBAyii1OhRLJEivHvg==",
- "dev": true,
- "dependencies": {
- "isexe": "^2.0.0"
- },
- "bin": {
- "node-which": "bin/which.js"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/@npmcli/query": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/@npmcli/query/-/query-3.0.0.tgz",
- "integrity": "sha512-MFNDSJNgsLZIEBVZ0Q9w9K7o07j5N4o4yjtdz2uEpuCZlXGMuPENiRaFYk0vRqAA64qVuUQwC05g27fRtfUgnA==",
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
+ "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"dev": true,
"dependencies": {
- "postcss-selector-parser": "^6.0.10"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/@npmcli/run-script": {
- "version": "6.0.2",
- "resolved": "https://registry.npmjs.org/@npmcli/run-script/-/run-script-6.0.2.tgz",
- "integrity": "sha512-NCcr1uQo1k5U+SYlnIrbAh3cxy+OQT1VtqiAbxdymSlptbzBb62AjH2xXgjNCoP073hoa1CfCAcwoZ8k96C4nA==",
- "dev": true,
- "dependencies": {
- "@npmcli/node-gyp": "^3.0.0",
- "@npmcli/promise-spawn": "^6.0.0",
- "node-gyp": "^9.0.0",
- "read-package-json-fast": "^3.0.0",
- "which": "^3.0.0"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/@npmcli/run-script/node_modules/which": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/which/-/which-3.0.1.tgz",
- "integrity": "sha512-XA1b62dzQzLfaEOSQFTCOd5KFf/1VSzZo7/7TUjnya6u0vGGKzU96UQBZTAThCb2j4/xjBAyii1OhRLJEivHvg==",
- "dev": true,
- "dependencies": {
- "isexe": "^2.0.0"
- },
- "bin": {
- "node-which": "bin/which.js"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/@nrwl/devkit": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nrwl/devkit/-/devkit-16.7.4.tgz",
- "integrity": "sha512-Gt2q3cqDWzGP1woavGIo4bl8g9YaXic/Xfsl7qPq0LHJedLj49p1vXetB0wawkavSE2MTyo7yDh6YDK/38XoLw==",
- "dev": true,
- "dependencies": {
- "@nx/devkit": "16.7.4"
- }
- },
- "node_modules/@nrwl/devkit/node_modules/@nx/devkit": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/devkit/-/devkit-16.7.4.tgz",
- "integrity": "sha512-SLito+/TAeDYR+d7IIpp/sBJm41WM+nIevILv0TSQW4Pq0ylUy1nUvV8Pe7l1ohZccDrQuebMUWPwGO0hv8SeQ==",
- "dev": true,
- "dependencies": {
- "@nrwl/devkit": "16.7.4",
- "ejs": "^3.1.7",
- "enquirer": "~2.3.6",
- "ignore": "^5.0.4",
- "semver": "7.5.3",
- "tmp": "~0.2.1",
- "tslib": "^2.3.0"
- },
- "peerDependencies": {
- "nx": ">= 15 <= 17"
- }
- },
- "node_modules/@nrwl/devkit/node_modules/fast-glob": {
- "version": "3.2.7",
- "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.7.tgz",
- "integrity": "sha512-rYGMRwip6lUMvYD3BTScMwT1HtAs2d71SMv66Vrxs0IekGZEjhM0pcMfjQPnknBt2zeCwQMEupiN02ZP4DiT1Q==",
- "dev": true,
- "peer": true,
- "dependencies": {
- "@nodelib/fs.stat": "^2.0.2",
- "@nodelib/fs.walk": "^1.2.3",
- "glob-parent": "^5.1.2",
- "merge2": "^1.3.0",
- "micromatch": "^4.0.4"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/@nrwl/devkit/node_modules/glob": {
- "version": "7.1.4",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.4.tgz",
- "integrity": "sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==",
- "dev": true,
- "peer": true,
- "dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.0.4",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
- },
- "engines": {
- "node": "*"
- }
- },
- "node_modules/@nrwl/devkit/node_modules/glob-parent": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
- "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
- "dev": true,
- "peer": true,
- "dependencies": {
- "is-glob": "^4.0.1"
- },
- "engines": {
- "node": ">= 6"
- }
- },
- "node_modules/@nrwl/devkit/node_modules/minimatch": {
- "version": "3.0.5",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.5.tgz",
- "integrity": "sha512-tUpxzX0VAzJHjLu0xUfFv1gwVp9ba3IOuRAVH2EGuRW8a5emA2FlACLqiT/lDVtS1W+TGNwqz3sWaNyLgDJWuw==",
- "dev": true,
- "peer": true,
- "dependencies": {
- "brace-expansion": "^1.1.7"
- },
- "engines": {
- "node": "*"
- }
- },
- "node_modules/@nrwl/devkit/node_modules/nx": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/nx/-/nx-16.7.4.tgz",
- "integrity": "sha512-L0Cbikk5kO+IBH0UQ2BOAut5ndeHXBlACKzjOPOCluY8WYh2sxWYt9/N/juFBN3XXRX7ionTr1PhWUzNE0Mzqw==",
- "dev": true,
- "hasInstallScript": true,
- "peer": true,
- "dependencies": {
- "@nrwl/tao": "16.7.4",
- "@parcel/watcher": "2.0.4",
- "@yarnpkg/lockfile": "^1.1.0",
- "@yarnpkg/parsers": "3.0.0-rc.46",
- "@zkochan/js-yaml": "0.0.6",
- "axios": "^1.0.0",
- "chalk": "^4.1.0",
- "cli-cursor": "3.1.0",
- "cli-spinners": "2.6.1",
- "cliui": "^7.0.2",
- "dotenv": "~16.3.1",
- "enquirer": "~2.3.6",
- "fast-glob": "3.2.7",
- "figures": "3.2.0",
- "flat": "^5.0.2",
- "fs-extra": "^11.1.0",
- "glob": "7.1.4",
- "ignore": "^5.0.4",
- "js-yaml": "4.1.0",
- "jsonc-parser": "3.2.0",
- "lines-and-columns": "~2.0.3",
- "minimatch": "3.0.5",
- "node-machine-id": "1.1.12",
- "npm-run-path": "^4.0.1",
- "open": "^8.4.0",
- "semver": "7.5.3",
- "string-width": "^4.2.3",
- "strong-log-transformer": "^2.1.0",
- "tar-stream": "~2.2.0",
- "tmp": "~0.2.1",
- "tsconfig-paths": "^4.1.2",
- "tslib": "^2.3.0",
- "v8-compile-cache": "2.3.0",
- "yargs": "^17.6.2",
- "yargs-parser": "21.1.1"
- },
- "bin": {
- "nx": "bin/nx.js"
- },
- "optionalDependencies": {
- "@nx/nx-darwin-arm64": "16.7.4",
- "@nx/nx-darwin-x64": "16.7.4",
- "@nx/nx-freebsd-x64": "16.7.4",
- "@nx/nx-linux-arm-gnueabihf": "16.7.4",
- "@nx/nx-linux-arm64-gnu": "16.7.4",
- "@nx/nx-linux-arm64-musl": "16.7.4",
- "@nx/nx-linux-x64-gnu": "16.7.4",
- "@nx/nx-linux-x64-musl": "16.7.4",
- "@nx/nx-win32-arm64-msvc": "16.7.4",
- "@nx/nx-win32-x64-msvc": "16.7.4"
- },
- "peerDependencies": {
- "@swc-node/register": "^1.4.2",
- "@swc/core": "^1.2.173"
- },
- "peerDependenciesMeta": {
- "@swc-node/register": {
- "optional": true
- },
- "@swc/core": {
- "optional": true
- }
- }
- },
- "node_modules/@nrwl/devkit/node_modules/yargs": {
- "version": "17.7.2",
- "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
- "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
- "dev": true,
- "peer": true,
- "dependencies": {
- "cliui": "^8.0.1",
- "escalade": "^3.1.1",
- "get-caller-file": "^2.0.5",
- "require-directory": "^2.1.1",
- "string-width": "^4.2.3",
- "y18n": "^5.0.5",
- "yargs-parser": "^21.1.1"
+ "brace-expansion": "^2.0.1"
},
"engines": {
- "node": ">=12"
+ "node": ">=16 || 14 >=14.17"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
}
},
- "node_modules/@nrwl/devkit/node_modules/yargs-parser": {
- "version": "21.1.1",
- "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
- "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "node_modules/@npmcli/package-json/node_modules/minipass": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
+ "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
"dev": true,
- "peer": true,
"engines": {
- "node": ">=12"
+ "node": ">=16 || 14 >=14.17"
}
},
- "node_modules/@nrwl/devkit/node_modules/yargs/node_modules/cliui": {
- "version": "8.0.1",
- "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
- "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "node_modules/@npmcli/package-json/node_modules/normalize-package-data": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-5.0.0.tgz",
+ "integrity": "sha512-h9iPVIfrVZ9wVYQnxFgtw1ugSvGEMOlyPWWtm8BMJhnwyEL/FLbYbTY3V3PpjI/BUK67n9PEWDu6eHzu1fB15Q==",
"dev": true,
- "peer": true,
"dependencies": {
- "string-width": "^4.2.0",
- "strip-ansi": "^6.0.1",
- "wrap-ansi": "^7.0.0"
+ "hosted-git-info": "^6.0.0",
+ "is-core-module": "^2.8.1",
+ "semver": "^7.3.5",
+ "validate-npm-package-license": "^3.0.4"
},
"engines": {
- "node": ">=12"
+ "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
- "node_modules/@nrwl/tao": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nrwl/tao/-/tao-16.7.4.tgz",
- "integrity": "sha512-hH03oF+yVmaf19UZfyLDSuVEh0KasU5YfYezuNsdRkXNdTU/WmpDrk4qoo0j6fVoMPrqbbPOn1YMRtulP2WyYA==",
+ "node_modules/@npmcli/package-json/node_modules/npm-normalize-package-bin": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.1.tgz",
+ "integrity": "sha512-dMxCf+zZ+3zeQZXKxmyuCKlIDPGuv8EF940xbkC4kQVDTtqoh6rJFO+JTKSA6/Rwi0getWmtuy4Itup0AMcaDQ==",
"dev": true,
- "dependencies": {
- "nx": "16.7.4",
- "tslib": "^2.3.0"
- },
- "bin": {
- "tao": "index.js"
+ "engines": {
+ "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
- "node_modules/@nrwl/tao/node_modules/fast-glob": {
- "version": "3.2.7",
- "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.7.tgz",
- "integrity": "sha512-rYGMRwip6lUMvYD3BTScMwT1HtAs2d71SMv66Vrxs0IekGZEjhM0pcMfjQPnknBt2zeCwQMEupiN02ZP4DiT1Q==",
+ "node_modules/@npmcli/promise-spawn": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/@npmcli/promise-spawn/-/promise-spawn-6.0.2.tgz",
+ "integrity": "sha512-gGq0NJkIGSwdbUt4yhdF8ZrmkGKVz9vAdVzpOfnom+V8PLSmSOVhZwbNvZZS1EYcJN5hzzKBxmmVVAInM6HQLg==",
"dev": true,
"dependencies": {
- "@nodelib/fs.stat": "^2.0.2",
- "@nodelib/fs.walk": "^1.2.3",
- "glob-parent": "^5.1.2",
- "merge2": "^1.3.0",
- "micromatch": "^4.0.4"
+ "which": "^3.0.0"
},
"engines": {
- "node": ">=8"
+ "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
- "node_modules/@nrwl/tao/node_modules/glob": {
- "version": "7.1.4",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.4.tgz",
- "integrity": "sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==",
+ "node_modules/@npmcli/promise-spawn/node_modules/which": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-3.0.1.tgz",
+ "integrity": "sha512-XA1b62dzQzLfaEOSQFTCOd5KFf/1VSzZo7/7TUjnya6u0vGGKzU96UQBZTAThCb2j4/xjBAyii1OhRLJEivHvg==",
"dev": true,
"dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.0.4",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/which.js"
},
"engines": {
- "node": "*"
+ "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
- "node_modules/@nrwl/tao/node_modules/glob-parent": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
- "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "node_modules/@npmcli/query": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@npmcli/query/-/query-3.0.0.tgz",
+ "integrity": "sha512-MFNDSJNgsLZIEBVZ0Q9w9K7o07j5N4o4yjtdz2uEpuCZlXGMuPENiRaFYk0vRqAA64qVuUQwC05g27fRtfUgnA==",
"dev": true,
"dependencies": {
- "is-glob": "^4.0.1"
+ "postcss-selector-parser": "^6.0.10"
},
"engines": {
- "node": ">= 6"
+ "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
- "node_modules/@nrwl/tao/node_modules/minimatch": {
- "version": "3.0.5",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.5.tgz",
- "integrity": "sha512-tUpxzX0VAzJHjLu0xUfFv1gwVp9ba3IOuRAVH2EGuRW8a5emA2FlACLqiT/lDVtS1W+TGNwqz3sWaNyLgDJWuw==",
+ "node_modules/@npmcli/run-script": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/@npmcli/run-script/-/run-script-6.0.2.tgz",
+ "integrity": "sha512-NCcr1uQo1k5U+SYlnIrbAh3cxy+OQT1VtqiAbxdymSlptbzBb62AjH2xXgjNCoP073hoa1CfCAcwoZ8k96C4nA==",
"dev": true,
"dependencies": {
- "brace-expansion": "^1.1.7"
+ "@npmcli/node-gyp": "^3.0.0",
+ "@npmcli/promise-spawn": "^6.0.0",
+ "node-gyp": "^9.0.0",
+ "read-package-json-fast": "^3.0.0",
+ "which": "^3.0.0"
},
"engines": {
- "node": "*"
+ "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
- "node_modules/@nrwl/tao/node_modules/nx": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/nx/-/nx-16.7.4.tgz",
- "integrity": "sha512-L0Cbikk5kO+IBH0UQ2BOAut5ndeHXBlACKzjOPOCluY8WYh2sxWYt9/N/juFBN3XXRX7ionTr1PhWUzNE0Mzqw==",
+ "node_modules/@npmcli/run-script/node_modules/which": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-3.0.1.tgz",
+ "integrity": "sha512-XA1b62dzQzLfaEOSQFTCOd5KFf/1VSzZo7/7TUjnya6u0vGGKzU96UQBZTAThCb2j4/xjBAyii1OhRLJEivHvg==",
"dev": true,
- "hasInstallScript": true,
"dependencies": {
- "@nrwl/tao": "16.7.4",
- "@parcel/watcher": "2.0.4",
- "@yarnpkg/lockfile": "^1.1.0",
- "@yarnpkg/parsers": "3.0.0-rc.46",
- "@zkochan/js-yaml": "0.0.6",
- "axios": "^1.0.0",
- "chalk": "^4.1.0",
- "cli-cursor": "3.1.0",
- "cli-spinners": "2.6.1",
- "cliui": "^7.0.2",
- "dotenv": "~16.3.1",
- "enquirer": "~2.3.6",
- "fast-glob": "3.2.7",
- "figures": "3.2.0",
- "flat": "^5.0.2",
- "fs-extra": "^11.1.0",
- "glob": "7.1.4",
- "ignore": "^5.0.4",
- "js-yaml": "4.1.0",
- "jsonc-parser": "3.2.0",
- "lines-and-columns": "~2.0.3",
- "minimatch": "3.0.5",
- "node-machine-id": "1.1.12",
- "npm-run-path": "^4.0.1",
- "open": "^8.4.0",
- "semver": "7.5.3",
- "string-width": "^4.2.3",
- "strong-log-transformer": "^2.1.0",
- "tar-stream": "~2.2.0",
- "tmp": "~0.2.1",
- "tsconfig-paths": "^4.1.2",
- "tslib": "^2.3.0",
- "v8-compile-cache": "2.3.0",
- "yargs": "^17.6.2",
- "yargs-parser": "21.1.1"
+ "isexe": "^2.0.0"
},
"bin": {
- "nx": "bin/nx.js"
- },
- "optionalDependencies": {
- "@nx/nx-darwin-arm64": "16.7.4",
- "@nx/nx-darwin-x64": "16.7.4",
- "@nx/nx-freebsd-x64": "16.7.4",
- "@nx/nx-linux-arm-gnueabihf": "16.7.4",
- "@nx/nx-linux-arm64-gnu": "16.7.4",
- "@nx/nx-linux-arm64-musl": "16.7.4",
- "@nx/nx-linux-x64-gnu": "16.7.4",
- "@nx/nx-linux-x64-musl": "16.7.4",
- "@nx/nx-win32-arm64-msvc": "16.7.4",
- "@nx/nx-win32-x64-msvc": "16.7.4"
- },
- "peerDependencies": {
- "@swc-node/register": "^1.4.2",
- "@swc/core": "^1.2.173"
+ "node-which": "bin/which.js"
},
- "peerDependenciesMeta": {
- "@swc-node/register": {
- "optional": true
- },
- "@swc/core": {
- "optional": true
- }
+ "engines": {
+ "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
}
},
- "node_modules/@nrwl/tao/node_modules/yargs": {
- "version": "17.7.2",
- "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
- "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
+ "node_modules/@nrwl/devkit": {
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nrwl/devkit/-/devkit-16.10.0.tgz",
+ "integrity": "sha512-fRloARtsDQoQgQ7HKEy0RJiusg/HSygnmg4gX/0n/Z+SUS+4KoZzvHjXc6T5ZdEiSjvLypJ+HBM8dQzIcVACPQ==",
"dev": true,
"dependencies": {
- "cliui": "^8.0.1",
- "escalade": "^3.1.1",
- "get-caller-file": "^2.0.5",
- "require-directory": "^2.1.1",
- "string-width": "^4.2.3",
- "y18n": "^5.0.5",
- "yargs-parser": "^21.1.1"
- },
- "engines": {
- "node": ">=12"
+ "@nx/devkit": "16.10.0"
}
},
- "node_modules/@nrwl/tao/node_modules/yargs-parser": {
- "version": "21.1.1",
- "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
- "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "node_modules/@nrwl/tao": {
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nrwl/tao/-/tao-16.10.0.tgz",
+ "integrity": "sha512-QNAanpINbr+Pod6e1xNgFbzK1x5wmZl+jMocgiEFXZ67KHvmbD6MAQQr0MMz+GPhIu7EE4QCTLTyCEMlAG+K5Q==",
"dev": true,
- "engines": {
- "node": ">=12"
+ "dependencies": {
+ "nx": "16.10.0",
+ "tslib": "^2.3.0"
+ },
+ "bin": {
+ "tao": "index.js"
}
},
- "node_modules/@nrwl/tao/node_modules/yargs/node_modules/cliui": {
- "version": "8.0.1",
- "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
- "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "node_modules/@nx/devkit": {
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/devkit/-/devkit-16.10.0.tgz",
+ "integrity": "sha512-IvKQqRJFDDiaj33SPfGd3ckNHhHi6ceEoqCbAP4UuMXOPPVOX6H0KVk+9tknkPb48B7jWIw6/AgOeWkBxPRO5w==",
"dev": true,
"dependencies": {
- "string-width": "^4.2.0",
- "strip-ansi": "^6.0.1",
- "wrap-ansi": "^7.0.0"
+ "@nrwl/devkit": "16.10.0",
+ "ejs": "^3.1.7",
+ "enquirer": "~2.3.6",
+ "ignore": "^5.0.4",
+ "semver": "7.5.3",
+ "tmp": "~0.2.1",
+ "tslib": "^2.3.0"
},
- "engines": {
- "node": ">=12"
+ "peerDependencies": {
+ "nx": ">= 15 <= 17"
}
},
"node_modules/@nx/nx-darwin-arm64": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/nx-darwin-arm64/-/nx-darwin-arm64-16.7.4.tgz",
- "integrity": "sha512-pRNjxn6KlcR6iGkU1j/1pzcogwXFv97pYiZaibpF7UV0vfdEUA3EETpDcs+hbNAcKMvVtn/TgN857/5LQ/lGUg==",
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/nx-darwin-arm64/-/nx-darwin-arm64-16.10.0.tgz",
+ "integrity": "sha512-YF+MIpeuwFkyvM5OwgY/rTNRpgVAI/YiR0yTYCZR+X3AAvP775IVlusNgQ3oedTBRUzyRnI4Tknj1WniENFsvQ==",
"cpu": [
"arm64"
],
@@ -2764,9 +2109,9 @@
}
},
"node_modules/@nx/nx-darwin-x64": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/nx-darwin-x64/-/nx-darwin-x64-16.7.4.tgz",
- "integrity": "sha512-GANXeabAAWRoF85WDla2ZPxtr8vnqvXjwyCIhRCda8hlKiVCpM98GemucN25z97G5H6MgyV9Dd9t9jrr2Fn0Og==",
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/nx-darwin-x64/-/nx-darwin-x64-16.10.0.tgz",
+ "integrity": "sha512-ypi6YxwXgb0kg2ixKXE3pwf5myVNUgWf1CsV5OzVccCM8NzheMO51KDXTDmEpXdzUsfT0AkO1sk5GZeCjhVONg==",
"cpu": [
"x64"
],
@@ -2780,9 +2125,9 @@
}
},
"node_modules/@nx/nx-freebsd-x64": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/nx-freebsd-x64/-/nx-freebsd-x64-16.7.4.tgz",
- "integrity": "sha512-zmBBDYjPaHhIHx1YASUJJIy+oz7mCrj5f0f3kOzfMraQOjkQZ0xYgNNUzBqmnYu1855yiphu94MkAMYJnbk0jw==",
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/nx-freebsd-x64/-/nx-freebsd-x64-16.10.0.tgz",
+ "integrity": "sha512-UeEYFDmdbbDkTQamqvtU8ibgu5jQLgFF1ruNb/U4Ywvwutw2d4ruOMl2e0u9hiNja9NFFAnDbvzrDcMo7jYqYw==",
"cpu": [
"x64"
],
@@ -2796,9 +2141,9 @@
}
},
"node_modules/@nx/nx-linux-arm-gnueabihf": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/nx-linux-arm-gnueabihf/-/nx-linux-arm-gnueabihf-16.7.4.tgz",
- "integrity": "sha512-d3Cmz/vdtoSasTUANoh4ZYLJESNA3+PCP/HnXNqmrr6AEHo+T8DcI+qsamO3rmYUSFxTMAeMyoihZMU8OKGZ1A==",
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/nx-linux-arm-gnueabihf/-/nx-linux-arm-gnueabihf-16.10.0.tgz",
+ "integrity": "sha512-WV3XUC2DB6/+bz1sx+d1Ai9q2Cdr+kTZRN50SOkfmZUQyEBaF6DRYpx/a4ahhxH3ktpNfyY8Maa9OEYxGCBkQA==",
"cpu": [
"arm"
],
@@ -2812,9 +2157,9 @@
}
},
"node_modules/@nx/nx-linux-arm64-gnu": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/nx-linux-arm64-gnu/-/nx-linux-arm64-gnu-16.7.4.tgz",
- "integrity": "sha512-W1u4O78lTHCwvUP0vakeKWFXeSZ13nYzbd6FARICnImY2my8vz41rLm6aU9TYWaiOGEGL2xKpHKSgiNwbLjhFw==",
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/nx-linux-arm64-gnu/-/nx-linux-arm64-gnu-16.10.0.tgz",
+ "integrity": "sha512-aWIkOUw995V3ItfpAi5FuxQ+1e9EWLS1cjWM1jmeuo+5WtaKToJn5itgQOkvSlPz+HSLgM3VfXMvOFALNk125g==",
"cpu": [
"arm64"
],
@@ -2828,9 +2173,9 @@
}
},
"node_modules/@nx/nx-linux-arm64-musl": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/nx-linux-arm64-musl/-/nx-linux-arm64-musl-16.7.4.tgz",
- "integrity": "sha512-Dc8IQFvhfH/Z3GmhBBNNxGd2Ehw6Y5SePEgJj1c2JyPdoVtc2OjGzkUaZkT4z5z77VKtju6Yi10T6Enps+y+kw==",
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/nx-linux-arm64-musl/-/nx-linux-arm64-musl-16.10.0.tgz",
+ "integrity": "sha512-uO6Gg+irqpVcCKMcEPIQcTFZ+tDI02AZkqkP7koQAjniLEappd8DnUBSQdcn53T086pHpdc264X/ZEpXFfrKWQ==",
"cpu": [
"arm64"
],
@@ -2844,9 +2189,9 @@
}
},
"node_modules/@nx/nx-linux-x64-gnu": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/nx-linux-x64-gnu/-/nx-linux-x64-gnu-16.7.4.tgz",
- "integrity": "sha512-4B58C/pXeuovSznBOeicsxNieBApbGMoi2du8jR6Is1gYFPv4l8fFHQHHGAa1l5XJC5JuGJqFywS4elInWprNw==",
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/nx-linux-x64-gnu/-/nx-linux-x64-gnu-16.10.0.tgz",
+ "integrity": "sha512-134PW/u/arNFAQKpqMJniC7irbChMPz+W+qtyKPAUXE0XFKPa7c1GtlI/wK2dvP9qJDZ6bKf0KtA0U/m2HMUOA==",
"cpu": [
"x64"
],
@@ -2860,9 +2205,9 @@
}
},
"node_modules/@nx/nx-linux-x64-musl": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/nx-linux-x64-musl/-/nx-linux-x64-musl-16.7.4.tgz",
- "integrity": "sha512-spqqvEdGSSeV2ByJHkex5m8MRQfM6lQlnon25XgVBdPR47lKMWSikUsaWCiE7bVAFU9BFyWY2L4HfZ4+LiNY7A==",
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/nx-linux-x64-musl/-/nx-linux-x64-musl-16.10.0.tgz",
+ "integrity": "sha512-q8sINYLdIJxK/iUx9vRk5jWAWb/2O0PAbOJFwv4qkxBv4rLoN7y+otgCZ5v0xfx/zztFgk/oNY4lg5xYjIso2Q==",
"cpu": [
"x64"
],
@@ -2876,9 +2221,9 @@
}
},
"node_modules/@nx/nx-win32-arm64-msvc": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/nx-win32-arm64-msvc/-/nx-win32-arm64-msvc-16.7.4.tgz",
- "integrity": "sha512-etNnbuCcSqAYOeDcS6si6qw0WR/IS87ovTzLS17ETKpdHcHN5nM4l02CQyupKiD58ShxrXHxXmvgBfbXxoN5Ew==",
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/nx-win32-arm64-msvc/-/nx-win32-arm64-msvc-16.10.0.tgz",
+ "integrity": "sha512-moJkL9kcqxUdJSRpG7dET3UeLIciwrfP08mzBQ12ewo8K8FzxU8ZUsTIVVdNrwt01CXOdXoweGfdQLjJ4qTURA==",
"cpu": [
"arm64"
],
@@ -2892,9 +2237,9 @@
}
},
"node_modules/@nx/nx-win32-x64-msvc": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/nx-win32-x64-msvc/-/nx-win32-x64-msvc-16.7.4.tgz",
- "integrity": "sha512-y6pugK6ino1wvo2FbgtXG2cVbEm3LzJwOSBKBRBXSWhUgjP7T92uGfOt6KVQKpaqDvS9lA9TO/2DcygcLHXh7A==",
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/@nx/nx-win32-x64-msvc/-/nx-win32-x64-msvc-16.10.0.tgz",
+ "integrity": "sha512-5iV2NKZnzxJwZZ4DM5JVbRG/nkhAbzEskKaLBB82PmYGKzaDHuMHP1lcPoD/rtYMlowZgNA/RQndfKvPBPwmXA==",
"cpu": [
"x64"
],
@@ -3465,9 +2810,9 @@
}
},
"node_modules/@tufjs/models/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
@@ -3920,9 +3265,9 @@
}
},
"node_modules/@yarnpkg/parsers/node_modules/js-yaml": {
- "version": "3.14.1",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
- "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
+ "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
"dev": true,
"dependencies": {
"argparse": "^1.0.7",
@@ -4220,13 +3565,13 @@
}
},
"node_modules/axios": {
- "version": "1.7.5",
- "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.5.tgz",
- "integrity": "sha512-fZu86yCo+svH3uqJ/yTdQ0QHpQu5oL+/QE+QPSv6BZSkDAoky9vytxp7u5qk83OJFS3kEBcesWni9WTZAv3tSw==",
+ "version": "1.13.2",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz",
+ "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==",
"dev": true,
"dependencies": {
"follow-redirects": "^1.15.6",
- "form-data": "^4.0.0",
+ "form-data": "^4.0.4",
"proxy-from-env": "^1.1.0"
}
},
@@ -4298,9 +3643,9 @@
}
},
"node_modules/brace-expansion": {
- "version": "1.1.11",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
- "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0",
@@ -4391,31 +3736,29 @@
}
},
"node_modules/cacache/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/cacache/node_modules/glob": {
- "version": "10.3.3",
- "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.3.tgz",
- "integrity": "sha512-92vPiMb/iqpmEgsOoIDvTjc50wf9CCCvMzsi6W0JLPeUKE8TWP1a73PgqSrqy7iAZxaSD1YdzU7QZR5LF51MJw==",
+ "version": "10.5.0",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz",
+ "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==",
"dev": true,
"dependencies": {
"foreground-child": "^3.1.0",
- "jackspeak": "^2.0.3",
- "minimatch": "^9.0.1",
- "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0",
- "path-scurry": "^1.10.1"
+ "jackspeak": "^3.1.2",
+ "minimatch": "^9.0.4",
+ "minipass": "^7.1.2",
+ "package-json-from-dist": "^1.0.0",
+ "path-scurry": "^1.11.1"
},
"bin": {
- "glob": "dist/cjs/src/bin.js"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
+ "glob": "dist/esm/bin.mjs"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
@@ -4431,9 +3774,9 @@
}
},
"node_modules/cacache/node_modules/minimatch": {
- "version": "9.0.3",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz",
- "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==",
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
+ "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"dev": true,
"dependencies": {
"brace-expansion": "^2.0.1"
@@ -4446,9 +3789,9 @@
}
},
"node_modules/cacache/node_modules/minipass": {
- "version": "7.0.3",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.3.tgz",
- "integrity": "sha512-LhbbwCfz3vsb12j/WkWQPZfKTsgqIe1Nf/ti1pKjYESGLHIVjWU96G9/ljLH4F9mWNVhlQOm0VySdAWzf05dpg==",
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
+ "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
"dev": true,
"engines": {
"node": ">=16 || 14 >=14.17"
@@ -4479,6 +3822,19 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "dev": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
@@ -4963,9 +4319,9 @@
}
},
"node_modules/cross-spawn": {
- "version": "7.0.3",
- "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
- "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
"dev": true,
"dependencies": {
"path-key": "^3.1.0",
@@ -5280,6 +4636,29 @@
"url": "https://github.com/motdotla/dotenv?sponsor=1"
}
},
+ "node_modules/dotenv-expand": {
+ "version": "10.0.0",
+ "resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-10.0.0.tgz",
+ "integrity": "sha512-GopVGCpVS1UKH75VKHGuQFqS1Gusej0z4FyQkPdwjil2gNIv+LNsqBlboOzpJFZKVT95GkCyWJbBSdFEFUWI2A==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "dev": true,
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/duplexer": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz",
@@ -5439,6 +4818,24 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/es-get-iterator": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.3.tgz",
@@ -5465,6 +4862,33 @@
"integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==",
"dev": true
},
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "dev": true,
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "dev": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/es-to-primitive": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
@@ -5942,9 +5366,9 @@
}
},
"node_modules/filelist/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
@@ -6076,13 +5500,15 @@
}
},
"node_modules/form-data": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
- "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
+ "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
"dev": true,
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
"mime-types": "^2.1.12"
},
"engines": {
@@ -6137,10 +5563,13 @@
"dev": true
},
"node_modules/function-bind": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
- "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==",
- "dev": true
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
},
"node_modules/function.prototype.name": {
"version": "1.1.5",
@@ -6204,15 +5633,24 @@
}
},
"node_modules/get-intrinsic": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz",
- "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==",
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
"dev": true,
"dependencies": {
- "function-bind": "^1.1.1",
- "has": "^1.0.3",
- "has-proto": "^1.0.1",
- "has-symbols": "^1.0.3"
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
@@ -6248,6 +5686,19 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "dev": true,
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/get-stream": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.0.tgz",
@@ -6427,12 +5878,12 @@
}
},
"node_modules/gopd": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
- "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
"dev": true,
- "dependencies": {
- "get-intrinsic": "^1.1.3"
+ "engines": {
+ "node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
@@ -6522,22 +5973,10 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/has-proto": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz",
- "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==",
- "dev": true,
- "engines": {
- "node": ">= 0.4"
- },
- "funding": {
- "url": "https://github.com/sponsors/ljharb"
- }
- },
"node_modules/has-symbols": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
- "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
"dev": true,
"engines": {
"node": ">= 0.4"
@@ -6547,12 +5986,12 @@
}
},
"node_modules/has-tostringtag": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz",
- "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==",
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
"dev": true,
"dependencies": {
- "has-symbols": "^1.0.2"
+ "has-symbols": "^1.0.3"
},
"engines": {
"node": ">= 0.4"
@@ -6567,6 +6006,18 @@
"integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==",
"dev": true
},
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "dev": true,
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/header-case": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/header-case/-/header-case-2.0.4.tgz",
@@ -6694,9 +6145,9 @@
}
},
"node_modules/ignore-walk/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
@@ -6844,16 +6295,16 @@
}
},
"node_modules/inquirer": {
- "version": "8.2.5",
- "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.5.tgz",
- "integrity": "sha512-QAgPDQMEgrDssk1XiwwHoOGYF9BAbUcc1+j+FhEvaOt8/cKRqyLn0U5qA6F74fGhTMGxf92pOvPBeh29jQJDTQ==",
+ "version": "8.2.7",
+ "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.7.tgz",
+ "integrity": "sha512-UjOaSel/iddGZJ5xP/Eixh6dY1XghiBw4XK13rCCIJcJfyhhoul/7KhLLUGtebEj6GDYM6Vnx/mVsjx2L/mFIA==",
"dev": true,
"dependencies": {
+ "@inquirer/external-editor": "^1.0.0",
"ansi-escapes": "^4.2.1",
"chalk": "^4.1.1",
"cli-cursor": "^3.1.0",
"cli-width": "^3.0.0",
- "external-editor": "^3.0.3",
"figures": "^3.0.0",
"lodash": "^4.17.21",
"mute-stream": "0.0.8",
@@ -6863,12 +6314,26 @@
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0",
"through": "^2.3.6",
- "wrap-ansi": "^7.0.0"
+ "wrap-ansi": "^6.0.1"
},
"engines": {
"node": ">=12.0.0"
}
},
+ "node_modules/inquirer/node_modules/wrap-ansi": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz",
+ "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
"node_modules/internal-slot": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.5.tgz",
@@ -6883,11 +6348,14 @@
"node": ">= 0.4"
}
},
- "node_modules/ip": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/ip/-/ip-2.0.1.tgz",
- "integrity": "sha512-lJUL9imLTNi1ZfXT+DU6rBBdbiKGBuay9B6xGSPVjUeQwaH1RIGqef8RZkUtHioLmSNpPR5M4HVKJGm1j8FWVQ==",
- "dev": true
+ "node_modules/ip-address": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.1.0.tgz",
+ "integrity": "sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==",
+ "dev": true,
+ "engines": {
+ "node": ">= 12"
+ }
},
"node_modules/is-arguments": {
"version": "1.1.1",
@@ -7346,16 +6814,13 @@
}
},
"node_modules/jackspeak": {
- "version": "2.3.0",
- "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.0.tgz",
- "integrity": "sha512-uKmsITSsF4rUWQHzqaRUuyAir3fZfW3f202Ee34lz/gZCi970CPZwyQXLGNgWJvvZbvFyzeyGq0+4fcG/mBKZg==",
+ "version": "3.4.3",
+ "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz",
+ "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==",
"dev": true,
"dependencies": {
"@isaacs/cliui": "^8.0.2"
},
- "engines": {
- "node": ">=14"
- },
"funding": {
"url": "https://github.com/sponsors/isaacs"
},
@@ -7602,279 +7067,86 @@
"p-map": "4.0.0",
"p-map-series": "2.1.0",
"p-pipe": "3.1.0",
- "p-queue": "6.6.2",
- "p-reduce": "2.1.0",
- "p-waterfall": "2.1.1",
- "pacote": "^15.2.0",
- "pify": "5.0.0",
- "read-cmd-shim": "4.0.0",
- "read-package-json": "6.0.4",
- "resolve-from": "5.0.0",
- "rimraf": "^4.4.1",
- "semver": "^7.3.8",
- "signal-exit": "3.0.7",
- "slash": "3.0.0",
- "ssri": "^9.0.1",
- "strong-log-transformer": "2.1.0",
- "tar": "6.1.11",
- "temp-dir": "1.0.0",
- "typescript": ">=3 < 6",
- "upath": "2.0.1",
- "uuid": "^9.0.0",
- "validate-npm-package-license": "3.0.4",
- "validate-npm-package-name": "5.0.0",
- "write-file-atomic": "5.0.1",
- "write-pkg": "4.0.0",
- "yargs": "16.2.0",
- "yargs-parser": "20.2.4"
- },
- "bin": {
- "lerna": "dist/cli.js"
- },
- "engines": {
- "node": "^14.17.0 || >=16.0.0"
- }
- },
- "node_modules/lerna/node_modules/@nx/devkit": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/@nx/devkit/-/devkit-16.7.4.tgz",
- "integrity": "sha512-SLito+/TAeDYR+d7IIpp/sBJm41WM+nIevILv0TSQW4Pq0ylUy1nUvV8Pe7l1ohZccDrQuebMUWPwGO0hv8SeQ==",
- "dev": true,
- "dependencies": {
- "@nrwl/devkit": "16.7.4",
- "ejs": "^3.1.7",
- "enquirer": "~2.3.6",
- "ignore": "^5.0.4",
- "semver": "7.5.3",
- "tmp": "~0.2.1",
- "tslib": "^2.3.0"
- },
- "peerDependencies": {
- "nx": ">= 15 <= 17"
- }
- },
- "node_modules/lerna/node_modules/@nx/devkit/node_modules/semver": {
- "version": "7.5.3",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
- "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==",
- "dev": true,
- "dependencies": {
- "lru-cache": "^6.0.0"
- },
- "bin": {
- "semver": "bin/semver.js"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/lerna/node_modules/chalk": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
- "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
- "dev": true,
- "dependencies": {
- "ansi-styles": "^4.1.0",
- "supports-color": "^7.1.0"
- },
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/chalk/chalk?sponsor=1"
- }
- },
- "node_modules/lerna/node_modules/fast-glob": {
- "version": "3.2.7",
- "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.7.tgz",
- "integrity": "sha512-rYGMRwip6lUMvYD3BTScMwT1HtAs2d71SMv66Vrxs0IekGZEjhM0pcMfjQPnknBt2zeCwQMEupiN02ZP4DiT1Q==",
- "dev": true,
- "dependencies": {
- "@nodelib/fs.stat": "^2.0.2",
- "@nodelib/fs.walk": "^1.2.3",
- "glob-parent": "^5.1.2",
- "merge2": "^1.3.0",
- "micromatch": "^4.0.4"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/lerna/node_modules/glob": {
- "version": "7.1.4",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.4.tgz",
- "integrity": "sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==",
- "dev": true,
- "dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.0.4",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
- },
- "engines": {
- "node": "*"
- }
- },
- "node_modules/lerna/node_modules/glob-parent": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
- "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
- "dev": true,
- "dependencies": {
- "is-glob": "^4.0.1"
- },
- "engines": {
- "node": ">= 6"
- }
- },
- "node_modules/lerna/node_modules/minimatch": {
- "version": "3.0.5",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.5.tgz",
- "integrity": "sha512-tUpxzX0VAzJHjLu0xUfFv1gwVp9ba3IOuRAVH2EGuRW8a5emA2FlACLqiT/lDVtS1W+TGNwqz3sWaNyLgDJWuw==",
- "dev": true,
- "dependencies": {
- "brace-expansion": "^1.1.7"
- },
- "engines": {
- "node": "*"
- }
- },
- "node_modules/lerna/node_modules/minipass": {
- "version": "4.2.8",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-4.2.8.tgz",
- "integrity": "sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ==",
- "dev": true,
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/lerna/node_modules/nx": {
- "version": "16.7.4",
- "resolved": "https://registry.npmjs.org/nx/-/nx-16.7.4.tgz",
- "integrity": "sha512-L0Cbikk5kO+IBH0UQ2BOAut5ndeHXBlACKzjOPOCluY8WYh2sxWYt9/N/juFBN3XXRX7ionTr1PhWUzNE0Mzqw==",
- "dev": true,
- "hasInstallScript": true,
- "dependencies": {
- "@nrwl/tao": "16.7.4",
- "@parcel/watcher": "2.0.4",
- "@yarnpkg/lockfile": "^1.1.0",
- "@yarnpkg/parsers": "3.0.0-rc.46",
- "@zkochan/js-yaml": "0.0.6",
- "axios": "^1.0.0",
- "chalk": "^4.1.0",
- "cli-cursor": "3.1.0",
- "cli-spinners": "2.6.1",
- "cliui": "^7.0.2",
- "dotenv": "~16.3.1",
- "enquirer": "~2.3.6",
- "fast-glob": "3.2.7",
- "figures": "3.2.0",
- "flat": "^5.0.2",
- "fs-extra": "^11.1.0",
- "glob": "7.1.4",
- "ignore": "^5.0.4",
- "js-yaml": "4.1.0",
- "jsonc-parser": "3.2.0",
- "lines-and-columns": "~2.0.3",
- "minimatch": "3.0.5",
- "node-machine-id": "1.1.12",
- "npm-run-path": "^4.0.1",
- "open": "^8.4.0",
- "semver": "7.5.3",
- "string-width": "^4.2.3",
- "strong-log-transformer": "^2.1.0",
- "tar-stream": "~2.2.0",
- "tmp": "~0.2.1",
- "tsconfig-paths": "^4.1.2",
- "tslib": "^2.3.0",
- "v8-compile-cache": "2.3.0",
- "yargs": "^17.6.2",
- "yargs-parser": "21.1.1"
+ "p-queue": "6.6.2",
+ "p-reduce": "2.1.0",
+ "p-waterfall": "2.1.1",
+ "pacote": "^15.2.0",
+ "pify": "5.0.0",
+ "read-cmd-shim": "4.0.0",
+ "read-package-json": "6.0.4",
+ "resolve-from": "5.0.0",
+ "rimraf": "^4.4.1",
+ "semver": "^7.3.8",
+ "signal-exit": "3.0.7",
+ "slash": "3.0.0",
+ "ssri": "^9.0.1",
+ "strong-log-transformer": "2.1.0",
+ "tar": "6.1.11",
+ "temp-dir": "1.0.0",
+ "typescript": ">=3 < 6",
+ "upath": "2.0.1",
+ "uuid": "^9.0.0",
+ "validate-npm-package-license": "3.0.4",
+ "validate-npm-package-name": "5.0.0",
+ "write-file-atomic": "5.0.1",
+ "write-pkg": "4.0.0",
+ "yargs": "16.2.0",
+ "yargs-parser": "20.2.4"
},
"bin": {
- "nx": "bin/nx.js"
- },
- "optionalDependencies": {
- "@nx/nx-darwin-arm64": "16.7.4",
- "@nx/nx-darwin-x64": "16.7.4",
- "@nx/nx-freebsd-x64": "16.7.4",
- "@nx/nx-linux-arm-gnueabihf": "16.7.4",
- "@nx/nx-linux-arm64-gnu": "16.7.4",
- "@nx/nx-linux-arm64-musl": "16.7.4",
- "@nx/nx-linux-x64-gnu": "16.7.4",
- "@nx/nx-linux-x64-musl": "16.7.4",
- "@nx/nx-win32-arm64-msvc": "16.7.4",
- "@nx/nx-win32-x64-msvc": "16.7.4"
- },
- "peerDependencies": {
- "@swc-node/register": "^1.4.2",
- "@swc/core": "^1.2.173"
+ "lerna": "dist/cli.js"
},
- "peerDependenciesMeta": {
- "@swc-node/register": {
- "optional": true
- },
- "@swc/core": {
- "optional": true
- }
+ "engines": {
+ "node": "^14.17.0 || >=16.0.0"
}
},
- "node_modules/lerna/node_modules/nx/node_modules/semver": {
- "version": "7.5.3",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz",
- "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==",
+ "node_modules/lerna/node_modules/chalk": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz",
+ "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==",
"dev": true,
"dependencies": {
- "lru-cache": "^6.0.0"
- },
- "bin": {
- "semver": "bin/semver.js"
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
}
},
- "node_modules/lerna/node_modules/nx/node_modules/yargs": {
- "version": "17.7.2",
- "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
- "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
+ "node_modules/lerna/node_modules/glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"dependencies": {
- "cliui": "^8.0.1",
- "escalade": "^3.1.1",
- "get-caller-file": "^2.0.5",
- "require-directory": "^2.1.1",
- "string-width": "^4.2.3",
- "y18n": "^5.0.5",
- "yargs-parser": "^21.1.1"
+ "is-glob": "^4.0.1"
},
"engines": {
- "node": ">=12"
+ "node": ">= 6"
}
},
- "node_modules/lerna/node_modules/nx/node_modules/yargs-parser": {
- "version": "21.1.1",
- "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
- "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "node_modules/lerna/node_modules/minimatch": {
+ "version": "3.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.5.tgz",
+ "integrity": "sha512-tUpxzX0VAzJHjLu0xUfFv1gwVp9ba3IOuRAVH2EGuRW8a5emA2FlACLqiT/lDVtS1W+TGNwqz3sWaNyLgDJWuw==",
"dev": true,
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
"engines": {
- "node": ">=12"
+ "node": "*"
}
},
- "node_modules/lerna/node_modules/nx/node_modules/yargs/node_modules/cliui": {
- "version": "8.0.1",
- "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
- "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "node_modules/lerna/node_modules/minipass": {
+ "version": "4.2.8",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-4.2.8.tgz",
+ "integrity": "sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ==",
"dev": true,
- "dependencies": {
- "string-width": "^4.2.0",
- "strip-ansi": "^6.0.1",
- "wrap-ansi": "^7.0.0"
- },
"engines": {
- "node": ">=12"
+ "node": ">=8"
}
},
"node_modules/lerna/node_modules/resolve-from": {
@@ -7905,9 +7177,9 @@
}
},
"node_modules/lerna/node_modules/rimraf/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
@@ -8473,6 +7745,15 @@
"integrity": "sha512-oEacRUVeTJ5D5hW1UYd2qExYI0oELdYK72k1TKGvIeYJIbqQWAz476NAc7LNixSySUhcNl++d02DvX0ccDk9/w==",
"dev": true
},
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/mdurl": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz",
@@ -9283,9 +8564,9 @@
}
},
"node_modules/npm-packlist/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
@@ -9503,9 +8784,9 @@
"dev": true
},
"node_modules/npm-run-all/node_modules/cross-spawn": {
- "version": "6.0.5",
- "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz",
- "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==",
+ "version": "6.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.6.tgz",
+ "integrity": "sha512-VqCUuhcd1iB+dsv8gxPttb5iZh/D0iubSP21g36KXdEuf6I5JiioesUVjpCdHV9MZRUfVFlvwtIUyPfxo5trtw==",
"dev": true,
"dependencies": {
"nice-try": "^1.0.4",
@@ -9626,6 +8907,149 @@
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
+ "node_modules/nx": {
+ "version": "16.10.0",
+ "resolved": "https://registry.npmjs.org/nx/-/nx-16.10.0.tgz",
+ "integrity": "sha512-gZl4iCC0Hx0Qe1VWmO4Bkeul2nttuXdPpfnlcDKSACGu3ZIo+uySqwOF8yBAxSTIf8xe2JRhgzJN1aFkuezEBg==",
+ "dev": true,
+ "hasInstallScript": true,
+ "dependencies": {
+ "@nrwl/tao": "16.10.0",
+ "@parcel/watcher": "2.0.4",
+ "@yarnpkg/lockfile": "^1.1.0",
+ "@yarnpkg/parsers": "3.0.0-rc.46",
+ "@zkochan/js-yaml": "0.0.6",
+ "axios": "^1.0.0",
+ "chalk": "^4.1.0",
+ "cli-cursor": "3.1.0",
+ "cli-spinners": "2.6.1",
+ "cliui": "^8.0.1",
+ "dotenv": "~16.3.1",
+ "dotenv-expand": "~10.0.0",
+ "enquirer": "~2.3.6",
+ "figures": "3.2.0",
+ "flat": "^5.0.2",
+ "fs-extra": "^11.1.0",
+ "glob": "7.1.4",
+ "ignore": "^5.0.4",
+ "jest-diff": "^29.4.1",
+ "js-yaml": "4.1.0",
+ "jsonc-parser": "3.2.0",
+ "lines-and-columns": "~2.0.3",
+ "minimatch": "3.0.5",
+ "node-machine-id": "1.1.12",
+ "npm-run-path": "^4.0.1",
+ "open": "^8.4.0",
+ "semver": "7.5.3",
+ "string-width": "^4.2.3",
+ "strong-log-transformer": "^2.1.0",
+ "tar-stream": "~2.2.0",
+ "tmp": "~0.2.1",
+ "tsconfig-paths": "^4.1.2",
+ "tslib": "^2.3.0",
+ "v8-compile-cache": "2.3.0",
+ "yargs": "^17.6.2",
+ "yargs-parser": "21.1.1"
+ },
+ "bin": {
+ "nx": "bin/nx.js"
+ },
+ "optionalDependencies": {
+ "@nx/nx-darwin-arm64": "16.10.0",
+ "@nx/nx-darwin-x64": "16.10.0",
+ "@nx/nx-freebsd-x64": "16.10.0",
+ "@nx/nx-linux-arm-gnueabihf": "16.10.0",
+ "@nx/nx-linux-arm64-gnu": "16.10.0",
+ "@nx/nx-linux-arm64-musl": "16.10.0",
+ "@nx/nx-linux-x64-gnu": "16.10.0",
+ "@nx/nx-linux-x64-musl": "16.10.0",
+ "@nx/nx-win32-arm64-msvc": "16.10.0",
+ "@nx/nx-win32-x64-msvc": "16.10.0"
+ },
+ "peerDependencies": {
+ "@swc-node/register": "^1.6.7",
+ "@swc/core": "^1.3.85"
+ },
+ "peerDependenciesMeta": {
+ "@swc-node/register": {
+ "optional": true
+ },
+ "@swc/core": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/nx/node_modules/cliui": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
+ "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "dev": true,
+ "dependencies": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.1",
+ "wrap-ansi": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/nx/node_modules/glob": {
+ "version": "7.1.4",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.4.tgz",
+ "integrity": "sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==",
+ "deprecated": "Glob versions prior to v9 are no longer supported",
+ "dev": true,
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/nx/node_modules/minimatch": {
+ "version": "3.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.5.tgz",
+ "integrity": "sha512-tUpxzX0VAzJHjLu0xUfFv1gwVp9ba3IOuRAVH2EGuRW8a5emA2FlACLqiT/lDVtS1W+TGNwqz3sWaNyLgDJWuw==",
+ "dev": true,
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/nx/node_modules/yargs": {
+ "version": "17.7.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
+ "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
+ "dev": true,
+ "dependencies": {
+ "cliui": "^8.0.1",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.3",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^21.1.1"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/nx/node_modules/yargs-parser": {
+ "version": "21.1.1",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
+ "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ }
+ },
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
@@ -9912,6 +9336,12 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/package-json-from-dist": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz",
+ "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==",
+ "dev": true
+ },
"node_modules/pacote": {
"version": "15.2.0",
"resolved": "https://registry.npmjs.org/pacote/-/pacote-15.2.0.tgz",
@@ -9945,9 +9375,9 @@
}
},
"node_modules/pacote/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
@@ -10190,29 +9620,26 @@
"dev": true
},
"node_modules/path-scurry": {
- "version": "1.10.1",
- "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.1.tgz",
- "integrity": "sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==",
+ "version": "1.11.1",
+ "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz",
+ "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==",
"dev": true,
"dependencies": {
- "lru-cache": "^9.1.1 || ^10.0.0",
+ "lru-cache": "^10.2.0",
"minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
},
"engines": {
- "node": ">=16 || 14 >=14.17"
+ "node": ">=16 || 14 >=14.18"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/path-scurry/node_modules/lru-cache": {
- "version": "10.0.1",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.0.1.tgz",
- "integrity": "sha512-IJ4uwUTi2qCccrioU6g9g/5rvvVl13bsdczUUcqbciD9iLr095yj8DQKdObriEvuNSx325N1rV1O0sJFszx75g==",
- "dev": true,
- "engines": {
- "node": "14 || >=16.14"
- }
+ "version": "10.4.3",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz",
+ "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==",
+ "dev": true
},
"node_modules/path-type": {
"version": "4.0.0",
@@ -10673,31 +10100,29 @@
}
},
"node_modules/read-package-json/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
+ "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/read-package-json/node_modules/glob": {
- "version": "10.3.3",
- "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.3.tgz",
- "integrity": "sha512-92vPiMb/iqpmEgsOoIDvTjc50wf9CCCvMzsi6W0JLPeUKE8TWP1a73PgqSrqy7iAZxaSD1YdzU7QZR5LF51MJw==",
+ "version": "10.5.0",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz",
+ "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==",
"dev": true,
"dependencies": {
"foreground-child": "^3.1.0",
- "jackspeak": "^2.0.3",
- "minimatch": "^9.0.1",
- "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0",
- "path-scurry": "^1.10.1"
+ "jackspeak": "^3.1.2",
+ "minimatch": "^9.0.4",
+ "minipass": "^7.1.2",
+ "package-json-from-dist": "^1.0.0",
+ "path-scurry": "^1.11.1"
},
"bin": {
- "glob": "dist/cjs/src/bin.js"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
+ "glob": "dist/esm/bin.mjs"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
@@ -10734,9 +10159,9 @@
}
},
"node_modules/read-package-json/node_modules/minimatch": {
- "version": "9.0.3",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz",
- "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==",
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
+ "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"dev": true,
"dependencies": {
"brace-expansion": "^2.0.1"
@@ -10748,6 +10173,15 @@
"url": "https://github.com/sponsors/isaacs"
}
},
+ "node_modules/read-package-json/node_modules/minipass": {
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
+ "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
+ "dev": true,
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ }
+ },
"node_modules/read-package-json/node_modules/normalize-package-data": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-5.0.0.tgz",
@@ -10987,12 +10421,6 @@
"node": ">=8"
}
},
- "node_modules/regenerator-runtime": {
- "version": "0.14.0",
- "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz",
- "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==",
- "dev": true
- },
"node_modules/regexp-tree": {
"version": "0.1.24",
"resolved": "https://registry.npmjs.org/regexp-tree/-/regexp-tree-0.1.24.tgz",
@@ -11389,16 +10817,16 @@
}
},
"node_modules/socks": {
- "version": "2.7.1",
- "resolved": "https://registry.npmjs.org/socks/-/socks-2.7.1.tgz",
- "integrity": "sha512-7maUZy1N7uo6+WVEX6psASxtNlKaNVMlGQKkG/63nEDdLOWNbiUMoLK7X4uYoLhQstau72mLgfEWcXcwsaHbYQ==",
+ "version": "2.8.7",
+ "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.7.tgz",
+ "integrity": "sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==",
"dev": true,
"dependencies": {
- "ip": "^2.0.0",
+ "ip-address": "^10.0.1",
"smart-buffer": "^4.2.0"
},
"engines": {
- "node": ">= 10.13.0",
+ "node": ">= 10.0.0",
"npm": ">= 3.0.0"
}
},
@@ -11891,15 +11319,12 @@
}
},
"node_modules/tmp": {
- "version": "0.2.1",
- "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz",
- "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==",
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.5.tgz",
+ "integrity": "sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow==",
"dev": true,
- "dependencies": {
- "rimraf": "^3.0.0"
- },
"engines": {
- "node": ">=8.17.0"
+ "node": ">=14.14"
}
},
"node_modules/to-regex-range": {
diff --git a/packages/react/src/pool/update-node-comms-action.tsx b/packages/react/src/pool/update-node-comms-action.tsx
index 9db9040e0d..c817e4e5d0 100644
--- a/packages/react/src/pool/update-node-comms-action.tsx
+++ b/packages/react/src/pool/update-node-comms-action.tsx
@@ -5,8 +5,8 @@ import {
ValidationStatus,
} from "@azure/bonito-core/lib/form";
import {
+ LegacyPoolOutput,
NodeCommunicationMode,
- PoolOutput,
PoolService,
} from "@batch/ui-service/lib/pool";
import { Link } from "@fluentui/react/lib/Link";
@@ -36,10 +36,10 @@ export class UpdateNodeCommsAction extends AbstractAction {
- this._pool = await this._poolService.get(this._poolResourceId, {
+ this._pool = await this._poolService.getLegacy(this._poolResourceId, {
commandName: "UpdateNodeCommsAction/GetPool",
});
@@ -135,7 +135,7 @@ export class UpdateNodeCommsAction extends AbstractAction {
- await this._poolService.patch(
+ await this._poolService.patchLegacy(
this._poolResourceId,
{
id: this._poolResourceId,
diff --git a/packages/service/generate-client.ps1 b/packages/service/generate-client.ps1
index 52849c9c1a..ebcfc29ddf 100644
--- a/packages/service/generate-client.ps1
+++ b/packages/service/generate-client.ps1
@@ -8,6 +8,10 @@ $options = (
"generateTest=false; "
)
+#Sample commands to generate client from a local PR branch which is not merged to azure-rest-api-specs repo main branch yet
+#npx tsp-client sync --local-spec-repo C:\git\azure-rest-api-specs\specification\batch\Azure.Batch --output-dir "./src/internal/batch-rest/generated"
+#npx tsp-client generate --emitter-options $options --output-dir "./src/internal/batch-rest/generated"
+
npx tsp-client update `
--emitter-options $options `
--output-dir "./src/internal/batch-rest/generated"
diff --git a/packages/service/readme.md b/packages/service/readme.md
index 825da26c14..8788f9a096 100644
--- a/packages/service/readme.md
+++ b/packages/service/readme.md
@@ -32,6 +32,13 @@ This will execute `generate-client.ps1` which runs `tsp-client` command to fetch
- `src/internal/batch-rest/generated/src` - This holds the autogenerated code such as obtaining the client and Batch specific modeless interfaces. These files should generally not be modified manually.
+To generate the Batch Data Plane RLC on a PR branch for an API version that is not yet merged to the azure-rest-api-specs repo's main branch, run the tsp-client "sync" and "generate" commands separately against a local PR branch of the azure-rest-api-specs repo that contains the new API version. Sample commands (also noted in generate-client.ps1) are shown below:
+
+```shell
+npx tsp-client sync --local-spec-repo C:\git\azure-rest-api-specs\specification\batch\Azure.Batch --output-dir "./src/internal/batch-rest/generated"
+npx tsp-client generate --emitter-options $options --output-dir "./src/internal/batch-rest/generated"
+```
+
## Generating the Batch Management RLC
Batch Management RLC is generated based on swagger files by autorest. First make sure `input-file` in `swagger/README.md` is pointing to the latest API version. Then then run the following from the root of the `service` package:
@@ -42,4 +49,9 @@ npm run generate:arm-client
This will parse the command options from the README and regenerate the SDK code
+To generate the Batch Management RLC on a PR branch for an API version that is not yet merged to the azure-rest-api-specs repo's main branch, you can specify the PR branch name in input-file as shown below:
+
+- `https://raw.githubusercontent.com/Azure/azure-rest-api-specs/Batch-MagementPlane-2025-06-01/specification/batch/resource-manager/Microsoft.Batch/stable/2025-06-01/BatchManagement.json`
+- `https://raw.githubusercontent.com/Azure/azure-rest-api-specs/Batch-MagementPlane-2025-06-01/specification/batch/resource-manager/Microsoft.Batch/stable/2025-06-01/NetworkSecurityPerimeter.json`
+
- `src/internal/arm-batch-rest/generated` - This holds the swagger autogenerated code such as obtaining the client and Batch specific modeless interfaces. These files should generally not be modified manually.
diff --git a/packages/service/src/account/__tests__/fake-account-service.spec.ts b/packages/service/src/account/__tests__/fake-account-service.spec.ts
index 65a1dfa55d..4521ef32d7 100644
--- a/packages/service/src/account/__tests__/fake-account-service.spec.ts
+++ b/packages/service/src/account/__tests__/fake-account-service.spec.ts
@@ -26,4 +26,30 @@ describe("FakeAccountService", () => {
);
expect(account).toBeUndefined();
});
+
+ test("should patch account by ARM resource ID", async () => {
+ const account = await service.patch(hoboAcctResId, {
+ properties: {
+ publicNetworkAccess: "SecuredByPerimeter",
+ },
+ tags: { foo: "bar" },
+ });
+
+ expect(account?.name).toEqual("hobo");
+ expect(account?.properties?.publicNetworkAccess).toEqual(
+ "SecuredByPerimeter"
+ );
+ expect(account?.tags).toEqual({ foo: "bar" });
+ });
+
+ test("should list network security perimeter configurations", async () => {
+ const config =
+ await service.listNetworkSecurityPerimeterConfigurations(
+ hoboAcctResId
+ );
+ expect(config?.value?.length).toEqual(1);
+ expect(config?.value?.[0].name).toEqual(
+ "00000000-0000-0000-0000-000000000000.resourceAssociationName"
+ );
+ });
});
diff --git a/packages/service/src/account/__tests__/live-account-service.spec.ts b/packages/service/src/account/__tests__/live-account-service.spec.ts
index 16ac5675eb..92c4a89858 100644
--- a/packages/service/src/account/__tests__/live-account-service.spec.ts
+++ b/packages/service/src/account/__tests__/live-account-service.spec.ts
@@ -96,4 +96,102 @@ describe("LiveAccountService", () => {
const account = await service.get(hoboAcctResId);
expect(account).toBeUndefined();
});
+
+ test("Patch account successfully", async () => {
+ const oriAccount = fakeSet.getBatchAccount(hoboAcctResId);
+ const patchBody = { tags: { key123: "value123" } };
+ const mockResponse = {
+ ...oriAccount,
+ ...patchBody,
+ };
+
+ httpClient.addExpected(
+ new MockHttpResponse(
+ `${getArmUrl()}${hoboAcctResId}?api-version=${BatchApiVersion.arm}`,
+ {
+ status: 200,
+ body: JSON.stringify(mockResponse),
+ }
+ ),
+ {
+ method: "PATCH",
+ body: JSON.stringify(patchBody),
+ }
+ );
+
+ const result = await service.patch(hoboAcctResId, patchBody);
+ expect(result).toBeDefined();
+ expect(result?.tags).toEqual({ key123: "value123" });
+ });
+
+ test("Patch account returns undefined if not found", async () => {
+ const patchBody = { tags: { key: "value" } };
+
+ httpClient.addExpected(
+ new MockHttpResponse(
+ `${getArmUrl()}${hoboAcctResId}?api-version=${BatchApiVersion.arm}`,
+ {
+ status: 404,
+ body: "Not found",
+ }
+ ),
+ {
+ method: "PATCH",
+ body: JSON.stringify(patchBody),
+ }
+ );
+
+ const result = await service.patch(hoboAcctResId, patchBody);
+ expect(result).toBeUndefined();
+ });
+
+ test("Patch account throws error on unexpected status", async () => {
+ const patchBody = { tags: { key: "value" } };
+
+ httpClient.addExpected(
+ new MockHttpResponse(
+ `${getArmUrl()}${hoboAcctResId}?api-version=${BatchApiVersion.arm}`,
+ {
+ status: 500,
+ body: "Internal Server Error",
+ }
+ ),
+ {
+ method: "PATCH",
+ body: JSON.stringify(patchBody),
+ }
+ );
+
+ await expect(() => service.patch(hoboAcctResId, patchBody)).rejects
+ .toMatchInlineSnapshot(`
+ [Error: The Batch management plane returned an unexpected status code [unexpected 500 status]
+ Response body:
+ "Internal Server Error"]
+ `);
+ });
+
+ test("List network security perimeter configurations", async () => {
+ const mockConfig =
+ fakeSet.listNetworkSecurityPerimeterConfigurations(hoboAcctResId);
+
+ httpClient.addExpected(
+ new MockHttpResponse(
+ `${getArmUrl()}${hoboAcctResId}/networkSecurityPerimeterConfigurations?api-version=${BatchApiVersion.arm}`,
+ {
+ status: 200,
+ body: JSON.stringify(mockConfig),
+ }
+ )
+ );
+
+ const config =
+ await service.listNetworkSecurityPerimeterConfigurations(
+ hoboAcctResId
+ );
+ expect(config?.value?.length).toEqual(1);
+
+ expect(config?.value?.[0].name).toEqual(
+ "00000000-0000-0000-0000-000000000000.resourceAssociationName"
+ );
+ });
});
diff --git a/packages/service/src/account/account-service.ts b/packages/service/src/account/account-service.ts
index d65868783e..1f096054cf 100644
--- a/packages/service/src/account/account-service.ts
+++ b/packages/service/src/account/account-service.ts
@@ -1,5 +1,9 @@
import { OperationOptions } from "@azure/bonito-core";
-import { BatchAccountOutput } from "../arm-batch-models";
+import {
+ AccountBatchUpdateParameters,
+ BatchAccountOutput,
+ NetworkSecurityPerimeterConfigurationListResultOutput,
+} from "../arm-batch-models";
export interface AccountService {
/**
@@ -11,4 +15,26 @@ export interface AccountService {
accountResouceId: string,
opts?: OperationOptions
): Promise;
+
+ /**
+ * Update a batch account, returning undefined if the account is not found
+ * @param accountResouceId The resource id of the account
+ * @param parameters The parameters to update the account with
+ * @param opts
+ */
+ patch(
+ accountResouceId: string,
+ parameters: AccountBatchUpdateParameters,
+ opts?: OperationOptions
+ ): Promise;
+
+ /**
+ * List the network security perimeter configurations of a batch account
+ * @param accountResouceId The resource id of the account
+ * @param opts
+ */
+ listNetworkSecurityPerimeterConfigurations(
+ accountResouceId: string,
+ opts?: OperationOptions
+ ): Promise;
}
diff --git a/packages/service/src/account/fake-account-service.ts b/packages/service/src/account/fake-account-service.ts
index bb2faa18c1..17c47bcfd1 100644
--- a/packages/service/src/account/fake-account-service.ts
+++ b/packages/service/src/account/fake-account-service.ts
@@ -1,5 +1,9 @@
import { OperationOptions } from "@azure/bonito-core";
-import { BatchAccountOutput } from "../arm-batch-models";
+import {
+ AccountBatchUpdateParameters,
+ BatchAccountOutput,
+ NetworkSecurityPerimeterConfigurationListResultOutput,
+} from "../arm-batch-models";
import { AccountService } from "./account-service";
import { BatchFakeSet, BasicBatchFakeSet } from "../test-util/fakes";
@@ -17,4 +21,27 @@ export class FakeAccountService implements AccountService {
const result = this.fakeSet.getBatchAccount(accountResouceId);
return result;
}
+
+ async patch(
+ accountResouceId: string,
+ parameters: AccountBatchUpdateParameters,
+ opts?: OperationOptions
+ ): Promise {
+ const result = this.fakeSet.patchBatchAccount(
+ accountResouceId,
+ parameters
+ );
+ return result;
+ }
+
+ async listNetworkSecurityPerimeterConfigurations(
+ accountResouceId: string,
+ opts?: OperationOptions | undefined
+ ): Promise {
+ const result =
+ this.fakeSet.listNetworkSecurityPerimeterConfigurations(
+ accountResouceId
+ );
+ return result;
+ }
}
diff --git a/packages/service/src/account/live-account-service.ts b/packages/service/src/account/live-account-service.ts
index a6cff35449..513c1d09a4 100644
--- a/packages/service/src/account/live-account-service.ts
+++ b/packages/service/src/account/live-account-service.ts
@@ -4,7 +4,11 @@ import {
getArmUrl,
getCacheManager,
} from "@azure/bonito-core";
-import { BatchAccountOutput } from "../arm-batch-models";
+import {
+ AccountBatchUpdateParameters,
+ BatchAccountOutput,
+ NetworkSecurityPerimeterConfigurationListResultOutput,
+} from "../arm-batch-models";
import { AccountService } from "./account-service";
import { createARMBatchClient, isUnexpected } from "../internal/arm-batch-rest";
import {
@@ -53,4 +57,72 @@ export class LiveAccountService implements AccountService {
};
return cacheManager.getOrAdd(cacheKey, _get, { bypassCache });
}
+
+ async patch(
+ accountResouceId: string,
+ body: AccountBatchUpdateParameters,
+ opts?: OperationOptions
+ ): Promise {
+ const { subscriptionId, resourceGroupName, batchAccountName } =
+ parseBatchAccountIdInfo(accountResouceId);
+ const armBatchClient = createARMBatchClient({
+ baseUrl: getArmUrl(),
+ });
+
+ const res = await armBatchClient
+ .path(
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}",
+ subscriptionId,
+ resourceGroupName,
+ batchAccountName
+ )
+ .patch({
+ body,
+ headers: {
+ [CustomHttpHeaders.CommandName]:
+ opts?.commandName ?? "PatchAccount",
+ },
+ });
+
+ if (isUnexpected(res)) {
+ if (res.status === "404") {
+ return undefined;
+ }
+ throw createArmUnexpectedStatusCodeError(res);
+ }
+
+ return res.body;
+ }
+
+ async listNetworkSecurityPerimeterConfigurations(
+ accountResouceId: string,
+ opts?: OperationOptions
+ ): Promise {
+ const { subscriptionId, resourceGroupName, batchAccountName } =
+ parseBatchAccountIdInfo(accountResouceId);
+ const armBatchClient = createARMBatchClient({
+ baseUrl: getArmUrl(),
+ });
+
+ const res = await armBatchClient
+ .path(
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/networkSecurityPerimeterConfigurations",
+ subscriptionId,
+ resourceGroupName,
+ batchAccountName
+ )
+ .get({
+ headers: {
+ [CustomHttpHeaders.CommandName]:
+ opts?.commandName ??
+ "ListNetworkSecurityPerimeterConfigurations",
+ },
+ });
+
+ if (isUnexpected(res)) {
+ throw createArmUnexpectedStatusCodeError(res);
+ }
+
+ return res.body;
+ }
}
diff --git a/packages/service/src/arm-batch-models.ts b/packages/service/src/arm-batch-models.ts
index bae0d6077c..9463966e39 100644
--- a/packages/service/src/arm-batch-models.ts
+++ b/packages/service/src/arm-batch-models.ts
@@ -1 +1,2 @@
export * from "./internal/arm-batch-rest/generated/outputModels";
+export * from "./internal/arm-batch-rest/generated/models";
diff --git a/packages/service/src/batch-models.ts b/packages/service/src/batch-models.ts
index 989730a1a4..aa5a075fa9 100644
--- a/packages/service/src/batch-models.ts
+++ b/packages/service/src/batch-models.ts
@@ -1 +1,2 @@
export * from "./internal/batch-rest/generated/src/outputModels";
+export * from "./internal/batch-rest/generated/src/models";
diff --git a/packages/service/src/constants.ts b/packages/service/src/constants.ts
index b14a8ea56d..0e7b028181 100644
--- a/packages/service/src/constants.ts
+++ b/packages/service/src/constants.ts
@@ -1,4 +1,4 @@
export const BatchApiVersion = {
- arm: `2024-07-01`,
- data: `2024-07-01.20.0`,
+ arm: `2025-06-01`,
+ data: `2025-06-01`,
};
diff --git a/packages/service/src/internal/arm-batch-rest/generated/batchManagementClient.ts b/packages/service/src/internal/arm-batch-rest/generated/batchManagementClient.ts
index bc1adc0e7a..1d98f102db 100644
--- a/packages/service/src/internal/arm-batch-rest/generated/batchManagementClient.ts
+++ b/packages/service/src/internal/arm-batch-rest/generated/batchManagementClient.ts
@@ -14,7 +14,7 @@ export default function createClient(
options: ClientOptions = {}
): BatchManagementClient {
const baseUrl = options.baseUrl ?? `https://management.azure.com`;
- options.apiVersion = options.apiVersion ?? "2024-07-01";
+ options.apiVersion = options.apiVersion ?? "2025-06-01";
options = {
...options,
credentials: {
diff --git a/packages/service/src/internal/arm-batch-rest/generated/clientDefinitions.ts b/packages/service/src/internal/arm-batch-rest/generated/clientDefinitions.ts
index 3979826183..6cc908a886 100644
--- a/packages/service/src/internal/arm-batch-rest/generated/clientDefinitions.ts
+++ b/packages/service/src/internal/arm-batch-rest/generated/clientDefinitions.ts
@@ -28,12 +28,6 @@ import {
LocationListSupportedVirtualMachineSkusParameters,
LocationCheckNameAvailabilityParameters,
OperationsListParameters,
- CertificateListByBatchAccountParameters,
- CertificateCreateParameters,
- CertificateUpdateParameters,
- CertificateDeleteParameters,
- CertificateGetParameters,
- CertificateCancelDeletionParameters,
PrivateLinkResourceListByBatchAccountParameters,
PrivateLinkResourceGetParameters,
PrivateEndpointConnectionListByBatchAccountParameters,
@@ -46,7 +40,10 @@ import {
PoolDeleteParameters,
PoolGetParameters,
PoolDisableAutoScaleParameters,
- PoolStopResizeParameters
+ PoolStopResizeParameters,
+ NetworkSecurityPerimeterListConfigurationsParameters,
+ NetworkSecurityPerimeterGetConfigurationParameters,
+ NetworkSecurityPerimeterReconcileConfigurationParameters
} from "./parameters";
import {
BatchAccountCreate200Response,
@@ -106,20 +103,6 @@ import {
LocationCheckNameAvailabilityDefaultResponse,
OperationsList200Response,
OperationsListDefaultResponse,
- CertificateListByBatchAccount200Response,
- CertificateListByBatchAccountDefaultResponse,
- CertificateCreate200Response,
- CertificateCreateDefaultResponse,
- CertificateUpdate200Response,
- CertificateUpdateDefaultResponse,
- CertificateDelete200Response,
- CertificateDelete202Response,
- CertificateDelete204Response,
- CertificateDeleteDefaultResponse,
- CertificateGet200Response,
- CertificateGetDefaultResponse,
- CertificateCancelDeletion200Response,
- CertificateCancelDeletionDefaultResponse,
PrivateLinkResourceListByBatchAccount200Response,
PrivateLinkResourceListByBatchAccountDefaultResponse,
PrivateLinkResourceGet200Response,
@@ -149,7 +132,13 @@ import {
PoolDisableAutoScale200Response,
PoolDisableAutoScaleDefaultResponse,
PoolStopResize200Response,
- PoolStopResizeDefaultResponse
+ PoolStopResizeDefaultResponse,
+ NetworkSecurityPerimeterListConfigurations200Response,
+ NetworkSecurityPerimeterListConfigurationsDefaultResponse,
+ NetworkSecurityPerimeterGetConfiguration200Response,
+ NetworkSecurityPerimeterGetConfigurationDefaultResponse,
+ NetworkSecurityPerimeterReconcileConfiguration202Response,
+ NetworkSecurityPerimeterReconcileConfigurationDefaultResponse
} from "./responses";
import { Client, StreamableMethod } from "@azure-rest/core-client";
@@ -253,7 +242,7 @@ export interface BatchAccountGetDetector {
}
export interface BatchAccountListOutboundNetworkDependenciesEndpoints {
- /** Lists the endpoints that a Batch Compute Node under this Batch Account may call as part of Batch service administration. If you are deploying a Pool inside of a virtual network that you specify, you must make sure your network allows outbound access to these endpoints. Failure to allow access to these endpoints may cause Batch to mark the affected nodes as unusable. For more information about creating a pool inside of a virtual network, see https://docs.microsoft.com/en-us/azure/batch/batch-virtual-network. */
+ /** Lists the endpoints that a Batch Compute Node under this Batch Account may call as part of Batch service administration. If you are deploying a Pool inside of a virtual network that you specify, you must make sure your network allows outbound access to these endpoints. Failure to allow access to these endpoints may cause Batch to mark the affected nodes as unusable. For more information about creating a pool inside of a virtual network, see https://learn.microsoft.com/azure/batch/batch-virtual-network. */
get(
options?: BatchAccountListOutboundNetworkDependenciesEndpointsParameters
): StreamableMethod<
@@ -381,60 +370,6 @@ export interface OperationsList {
>;
}
-export interface CertificateListByBatchAccount {
- /** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
- get(
- options?: CertificateListByBatchAccountParameters
- ): StreamableMethod<
- | CertificateListByBatchAccount200Response
- | CertificateListByBatchAccountDefaultResponse
- >;
-}
-
-export interface CertificateCreate {
- /** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
- put(
- options: CertificateCreateParameters
- ): StreamableMethod<
- CertificateCreate200Response | CertificateCreateDefaultResponse
- >;
- /** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
- patch(
- options: CertificateUpdateParameters
- ): StreamableMethod<
- CertificateUpdate200Response | CertificateUpdateDefaultResponse
- >;
- /** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
- delete(
- options?: CertificateDeleteParameters
- ): StreamableMethod<
- | CertificateDelete200Response
- | CertificateDelete202Response
- | CertificateDelete204Response
- | CertificateDeleteDefaultResponse
- >;
- /** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
- get(
- options?: CertificateGetParameters
- ): StreamableMethod<
- CertificateGet200Response | CertificateGetDefaultResponse
- >;
-}
-
-export interface CertificateCancelDeletion {
- /**
- * If you try to delete a certificate that is being used by a pool or compute node, the status of the certificate changes to deleteFailed. If you decide that you want to continue using the certificate, you can use this operation to set the status of the certificate back to active. If you intend to delete the certificate, you do not need to run this operation after the deletion failed. You must make sure that the certificate is not being used by any resources, and then you can try again to delete the certificate.
- *
- * Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead.
- */
- post(
- options?: CertificateCancelDeletionParameters
- ): StreamableMethod<
- | CertificateCancelDeletion200Response
- | CertificateCancelDeletionDefaultResponse
- >;
-}
-
export interface PrivateLinkResourceListByBatchAccount {
/** Lists all of the private link resources in the specified account. */
get(
@@ -541,6 +476,36 @@ export interface PoolStopResize {
>;
}
+export interface NetworkSecurityPerimeterListConfigurations {
+ /** Lists all of the NSP configurations in the specified account. */
+ get(
+ options?: NetworkSecurityPerimeterListConfigurationsParameters
+ ): StreamableMethod<
+ | NetworkSecurityPerimeterListConfigurations200Response
+ | NetworkSecurityPerimeterListConfigurationsDefaultResponse
+ >;
+}
+
+export interface NetworkSecurityPerimeterGetConfiguration {
+ /** Gets information about the specified NSP configuration. */
+ get(
+ options?: NetworkSecurityPerimeterGetConfigurationParameters
+ ): StreamableMethod<
+ | NetworkSecurityPerimeterGetConfiguration200Response
+ | NetworkSecurityPerimeterGetConfigurationDefaultResponse
+ >;
+}
+
+export interface NetworkSecurityPerimeterReconcileConfiguration {
+ /** Reconciles the specified NSP configuration. */
+ post(
+ options?: NetworkSecurityPerimeterReconcileConfigurationParameters
+ ): StreamableMethod<
+ | NetworkSecurityPerimeterReconcileConfiguration202Response
+ | NetworkSecurityPerimeterReconcileConfigurationDefaultResponse
+ >;
+}
+
export interface Routes {
/** Resource for '/subscriptions/\{subscriptionId\}/resourceGroups/\{resourceGroupName\}/providers/Microsoft.Batch/batchAccounts/\{accountName\}' has methods for the following verbs: put, patch, delete, get */
(
@@ -664,29 +629,6 @@ export interface Routes {
): LocationCheckNameAvailability;
/** Resource for '/providers/Microsoft.Batch/operations' has methods for the following verbs: get */
(path: "/providers/Microsoft.Batch/operations"): OperationsList;
- /** Resource for '/subscriptions/\{subscriptionId\}/resourceGroups/\{resourceGroupName\}/providers/Microsoft.Batch/batchAccounts/\{accountName\}/certificates' has methods for the following verbs: get */
- (
- path: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates",
- subscriptionId: string,
- resourceGroupName: string,
- accountName: string
- ): CertificateListByBatchAccount;
- /** Resource for '/subscriptions/\{subscriptionId\}/resourceGroups/\{resourceGroupName\}/providers/Microsoft.Batch/batchAccounts/\{accountName\}/certificates/\{certificateName\}' has methods for the following verbs: put, patch, delete, get */
- (
- path: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}",
- subscriptionId: string,
- resourceGroupName: string,
- accountName: string,
- certificateName: string
- ): CertificateCreate;
- /** Resource for '/subscriptions/\{subscriptionId\}/resourceGroups/\{resourceGroupName\}/providers/Microsoft.Batch/batchAccounts/\{accountName\}/certificates/\{certificateName\}/cancelDelete' has methods for the following verbs: post */
- (
- path: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}/cancelDelete",
- subscriptionId: string,
- resourceGroupName: string,
- accountName: string,
- certificateName: string
- ): CertificateCancelDeletion;
/** Resource for '/subscriptions/\{subscriptionId\}/resourceGroups/\{resourceGroupName\}/providers/Microsoft.Batch/batchAccounts/\{accountName\}/privateLinkResources' has methods for the following verbs: get */
(
path: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateLinkResources",
@@ -748,6 +690,29 @@ export interface Routes {
accountName: string,
poolName: string
): PoolStopResize;
+ /** Resource for '/subscriptions/\{subscriptionId\}/resourceGroups/\{resourceGroupName\}/providers/Microsoft.Batch/batchAccounts/\{accountName\}/networkSecurityPerimeterConfigurations' has methods for the following verbs: get */
+ (
+ path: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/networkSecurityPerimeterConfigurations",
+ subscriptionId: string,
+ resourceGroupName: string,
+ accountName: string
+ ): NetworkSecurityPerimeterListConfigurations;
+ /** Resource for '/subscriptions/\{subscriptionId\}/resourceGroups/\{resourceGroupName\}/providers/Microsoft.Batch/batchAccounts/\{accountName\}/networkSecurityPerimeterConfigurations/\{networkSecurityPerimeterConfigurationName\}' has methods for the following verbs: get */
+ (
+ path: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/networkSecurityPerimeterConfigurations/{networkSecurityPerimeterConfigurationName}",
+ subscriptionId: string,
+ resourceGroupName: string,
+ accountName: string,
+ networkSecurityPerimeterConfigurationName: string
+ ): NetworkSecurityPerimeterGetConfiguration;
+ /** Resource for '/subscriptions/\{subscriptionId\}/resourceGroups/\{resourceGroupName\}/providers/Microsoft.Batch/batchAccounts/\{accountName\}/networkSecurityPerimeterConfigurations/\{networkSecurityPerimeterConfigurationName\}/reconcile' has methods for the following verbs: post */
+ (
+ path: "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/networkSecurityPerimeterConfigurations/{networkSecurityPerimeterConfigurationName}/reconcile",
+ subscriptionId: string,
+ resourceGroupName: string,
+ accountName: string,
+ networkSecurityPerimeterConfigurationName: string
+ ): NetworkSecurityPerimeterReconcileConfiguration;
}
export type BatchManagementClient = Client & {
diff --git a/packages/service/src/internal/arm-batch-rest/generated/isUnexpected.ts b/packages/service/src/internal/arm-batch-rest/generated/isUnexpected.ts
index 9c8b0add9d..c86392a880 100644
--- a/packages/service/src/internal/arm-batch-rest/generated/isUnexpected.ts
+++ b/packages/service/src/internal/arm-batch-rest/generated/isUnexpected.ts
@@ -59,20 +59,6 @@ import {
LocationCheckNameAvailabilityDefaultResponse,
OperationsList200Response,
OperationsListDefaultResponse,
- CertificateListByBatchAccount200Response,
- CertificateListByBatchAccountDefaultResponse,
- CertificateCreate200Response,
- CertificateCreateDefaultResponse,
- CertificateUpdate200Response,
- CertificateUpdateDefaultResponse,
- CertificateDelete200Response,
- CertificateDelete202Response,
- CertificateDelete204Response,
- CertificateDeleteDefaultResponse,
- CertificateGet200Response,
- CertificateGetDefaultResponse,
- CertificateCancelDeletion200Response,
- CertificateCancelDeletionDefaultResponse,
PrivateLinkResourceListByBatchAccount200Response,
PrivateLinkResourceListByBatchAccountDefaultResponse,
PrivateLinkResourceGet200Response,
@@ -102,7 +88,13 @@ import {
PoolDisableAutoScale200Response,
PoolDisableAutoScaleDefaultResponse,
PoolStopResize200Response,
- PoolStopResizeDefaultResponse
+ PoolStopResizeDefaultResponse,
+ NetworkSecurityPerimeterListConfigurations200Response,
+ NetworkSecurityPerimeterListConfigurationsDefaultResponse,
+ NetworkSecurityPerimeterGetConfiguration200Response,
+ NetworkSecurityPerimeterGetConfigurationDefaultResponse,
+ NetworkSecurityPerimeterReconcileConfiguration202Response,
+ NetworkSecurityPerimeterReconcileConfigurationDefaultResponse
} from "./responses";
const responseMap: Record = {
@@ -187,26 +179,6 @@ const responseMap: Record = {
"200"
],
"GET /providers/Microsoft.Batch/operations": ["200"],
- "GET /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates": [
- "200"
- ],
- "PUT /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}": [
- "200"
- ],
- "PATCH /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}": [
- "200"
- ],
- "DELETE /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}": [
- "200",
- "202",
- "204"
- ],
- "GET /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}": [
- "200"
- ],
- "POST /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}/cancelDelete": [
- "200"
- ],
"GET /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/privateLinkResources": [
"200"
],
@@ -249,6 +221,18 @@ const responseMap: Record = {
],
"POST /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/stopResize": [
"200"
+ ],
+ "GET /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/networkSecurityPerimeterConfigurations": [
+ "200"
+ ],
+ "GET /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/networkSecurityPerimeterConfigurations/{networkSecurityPerimeterConfigurationName}": [
+ "200"
+ ],
+ "POST /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/networkSecurityPerimeterConfigurations/{networkSecurityPerimeterConfigurationName}/reconcile": [
+ "202"
+ ],
+ "GET /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/networkSecurityPerimeterConfigurations/{networkSecurityPerimeterConfigurationName}/reconcile": [
+ "202"
]
};
@@ -367,32 +351,6 @@ export function isUnexpected(
export function isUnexpected(
response: OperationsList200Response | OperationsListDefaultResponse
): response is OperationsListDefaultResponse;
-export function isUnexpected(
- response:
- | CertificateListByBatchAccount200Response
- | CertificateListByBatchAccountDefaultResponse
-): response is CertificateListByBatchAccountDefaultResponse;
-export function isUnexpected(
- response: CertificateCreate200Response | CertificateCreateDefaultResponse
-): response is CertificateCreateDefaultResponse;
-export function isUnexpected(
- response: CertificateUpdate200Response | CertificateUpdateDefaultResponse
-): response is CertificateUpdateDefaultResponse;
-export function isUnexpected(
- response:
- | CertificateDelete200Response
- | CertificateDelete202Response
- | CertificateDelete204Response
- | CertificateDeleteDefaultResponse
-): response is CertificateDeleteDefaultResponse;
-export function isUnexpected(
- response: CertificateGet200Response | CertificateGetDefaultResponse
-): response is CertificateGetDefaultResponse;
-export function isUnexpected(
- response:
- | CertificateCancelDeletion200Response
- | CertificateCancelDeletionDefaultResponse
-): response is CertificateCancelDeletionDefaultResponse;
export function isUnexpected(
response:
| PrivateLinkResourceListByBatchAccount200Response
@@ -454,6 +412,21 @@ export function isUnexpected(
export function isUnexpected(
response: PoolStopResize200Response | PoolStopResizeDefaultResponse
): response is PoolStopResizeDefaultResponse;
+export function isUnexpected(
+ response:
+ | NetworkSecurityPerimeterListConfigurations200Response
+ | NetworkSecurityPerimeterListConfigurationsDefaultResponse
+): response is NetworkSecurityPerimeterListConfigurationsDefaultResponse;
+export function isUnexpected(
+ response:
+ | NetworkSecurityPerimeterGetConfiguration200Response
+ | NetworkSecurityPerimeterGetConfigurationDefaultResponse
+): response is NetworkSecurityPerimeterGetConfigurationDefaultResponse;
+export function isUnexpected(
+ response:
+ | NetworkSecurityPerimeterReconcileConfiguration202Response
+ | NetworkSecurityPerimeterReconcileConfigurationDefaultResponse
+): response is NetworkSecurityPerimeterReconcileConfigurationDefaultResponse;
export function isUnexpected(
response:
| BatchAccountCreate200Response
@@ -513,20 +486,6 @@ export function isUnexpected(
| LocationCheckNameAvailabilityDefaultResponse
| OperationsList200Response
| OperationsListDefaultResponse
- | CertificateListByBatchAccount200Response
- | CertificateListByBatchAccountDefaultResponse
- | CertificateCreate200Response
- | CertificateCreateDefaultResponse
- | CertificateUpdate200Response
- | CertificateUpdateDefaultResponse
- | CertificateDelete200Response
- | CertificateDelete202Response
- | CertificateDelete204Response
- | CertificateDeleteDefaultResponse
- | CertificateGet200Response
- | CertificateGetDefaultResponse
- | CertificateCancelDeletion200Response
- | CertificateCancelDeletionDefaultResponse
| PrivateLinkResourceListByBatchAccount200Response
| PrivateLinkResourceListByBatchAccountDefaultResponse
| PrivateLinkResourceGet200Response
@@ -557,6 +516,12 @@ export function isUnexpected(
| PoolDisableAutoScaleDefaultResponse
| PoolStopResize200Response
| PoolStopResizeDefaultResponse
+ | NetworkSecurityPerimeterListConfigurations200Response
+ | NetworkSecurityPerimeterListConfigurationsDefaultResponse
+ | NetworkSecurityPerimeterGetConfiguration200Response
+ | NetworkSecurityPerimeterGetConfigurationDefaultResponse
+ | NetworkSecurityPerimeterReconcileConfiguration202Response
+ | NetworkSecurityPerimeterReconcileConfigurationDefaultResponse
): response is
| BatchAccountCreateDefaultResponse
| BatchAccountUpdateDefaultResponse
@@ -584,12 +549,6 @@ export function isUnexpected(
| LocationListSupportedVirtualMachineSkusDefaultResponse
| LocationCheckNameAvailabilityDefaultResponse
| OperationsListDefaultResponse
- | CertificateListByBatchAccountDefaultResponse
- | CertificateCreateDefaultResponse
- | CertificateUpdateDefaultResponse
- | CertificateDeleteDefaultResponse
- | CertificateGetDefaultResponse
- | CertificateCancelDeletionDefaultResponse
| PrivateLinkResourceListByBatchAccountDefaultResponse
| PrivateLinkResourceGetDefaultResponse
| PrivateEndpointConnectionListByBatchAccountDefaultResponse
@@ -602,7 +561,10 @@ export function isUnexpected(
| PoolDeleteDefaultResponse
| PoolGetDefaultResponse
| PoolDisableAutoScaleDefaultResponse
- | PoolStopResizeDefaultResponse {
+ | PoolStopResizeDefaultResponse
+ | NetworkSecurityPerimeterListConfigurationsDefaultResponse
+ | NetworkSecurityPerimeterGetConfigurationDefaultResponse
+ | NetworkSecurityPerimeterReconcileConfigurationDefaultResponse {
const lroOriginal = response.headers["x-ms-original-url"];
const url = new URL(lroOriginal ?? response.request.url);
const method = response.request.method;
diff --git a/packages/service/src/internal/arm-batch-rest/generated/models.ts b/packages/service/src/internal/arm-batch-rest/generated/models.ts
index 25182f9398..65e6a71f5c 100644
--- a/packages/service/src/internal/arm-batch-rest/generated/models.ts
+++ b/packages/service/src/internal/arm-batch-rest/generated/models.ts
@@ -263,65 +263,6 @@ export interface CheckNameAvailabilityParameters {
type: "Microsoft.Batch/batchAccounts";
}
-/** Contains information about a certificate. */
-export interface Certificate extends AzureProxyResource {
- /** The properties associated with the certificate. */
- properties?: CertificateProperties;
-}
-
-/** Certificate properties. */
-export interface CertificateProperties extends CertificateBaseProperties {
- provisioningState?: "Succeeded" | "Deleting" | "Failed";
- /** The time at which the certificate entered its current state. */
- provisioningStateTransitionTime?: Date | string;
- /** The previous provisioned state of the resource */
- previousProvisioningState?: "Succeeded" | "Deleting" | "Failed";
- /** The time at which the certificate entered its previous state. */
- previousProvisioningStateTransitionTime?: Date | string;
- /** The public key of the certificate. */
- publicData?: string;
- /** This is only returned when the certificate provisioningState is 'Failed'. */
- deleteCertificateError?: DeleteCertificateError;
-}
-
-/** An error response from the Batch service. */
-export interface DeleteCertificateError {
- /** An identifier for the error. Codes are invariant and are intended to be consumed programmatically. */
- code: string;
- /** A message describing the error, intended to be suitable for display in a user interface. */
- message: string;
- /** The target of the particular error. For example, the name of the property in error. */
- target?: string;
- /** A list of additional details about the error. */
- details?: Array<DeleteCertificateError>;
-}
-
-/** Base certificate properties. */
-export interface CertificateBaseProperties {
- /** This must match the first portion of the certificate name. Currently required to be 'SHA1'. */
- thumbprintAlgorithm?: string;
- /** This must match the thumbprint from the name. */
- thumbprint?: string;
- /** The format of the certificate - either Pfx or Cer. If omitted, the default is Pfx. */
- format?: "Pfx" | "Cer";
-}
-
-/** Contains information about a certificate. */
-export interface CertificateCreateOrUpdateParameters
- extends AzureProxyResource {
- /** The properties associated with the certificate. */
- properties?: CertificateCreateOrUpdateProperties;
-}
-
-/** Certificate properties for create operations */
-export interface CertificateCreateOrUpdateProperties
- extends CertificateBaseProperties {
- /** The maximum size is 10KB. */
- data: string;
- /** This must not be specified if the certificate format is Cer. */
- password?: string;
-}
-
/** Contains the information for a detector. */
export interface DetectorResponse extends AzureProxyResource {
/** The properties associated with the detector. */
@@ -374,7 +315,7 @@ export interface PoolProperties {
allocationState?: "Steady" | "Resizing" | "Stopping";
/** The time at which the pool entered its current allocation state. */
allocationStateTransitionTime?: Date | string;
- /** For information about available VM sizes, see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */
+ /** For information about available VM sizes, see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */
vmSize?: string;
/** Deployment configuration properties. */
deploymentConfiguration?: DeploymentConfiguration;
@@ -400,16 +341,8 @@ export interface PoolProperties {
metadata?: Array<MetadataItem>;
/** In an PATCH (update) operation, this property can be set to an empty object to remove the start task from the pool. */
startTask?: StartTask;
- /**
- * For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory.
- *
- * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead.
- */
- certificates?: Array<CertificateReference>;
/** Changes to application package references affect all new compute nodes joining the pool, but do not affect compute nodes that are already in the pool until they are rebooted or reimaged. There is a maximum of 10 application package references on any given pool. */
applicationPackages?: Array<ApplicationPackageReference>;
- /** The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, pool creation will fail. */
- applicationLicenses?: Array<string>;
/** Describes either the current operation (if the pool AllocationState is Resizing) or the previously completed operation (if the AllocationState is Steady). */
resizeOperationStatus?: ResizeOperationStatus;
/** This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */
@@ -420,8 +353,6 @@ export interface PoolProperties {
currentNodeCommunicationMode?: "Default" | "Classic" | "Simplified";
/** Describes an upgrade policy - automatic, manual, or rolling. */
upgradePolicy?: UpgradePolicy;
- /** The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. */
- resourceTags?: Record<string, string>;
}
/** Deployment configuration properties. */
@@ -474,7 +405,7 @@ export interface ImageReference {
sku?: string;
/** A value of 'latest' can be specified to select the latest version of an image. If omitted, the default is 'latest'. */
version?: string;
- /** This property is mutually exclusive with other properties. The Azure Compute Gallery Image must have replicas in the same region as the Azure Batch account. For information about the firewall settings for the Batch node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
+ /** This property is mutually exclusive with other properties. The Azure Compute Gallery Image must have replicas in the same region as the Azure Batch account. For information about the firewall settings for the Batch node agent to communicate with the Batch service see https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
id?: string;
/** This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. */
sharedGalleryImageId?: string;
@@ -511,6 +442,33 @@ export interface DataDisk {
* Premium_LRS - The data disk should use premium locally redundant storage.
*/
storageAccountType?: "Standard_LRS" | "Premium_LRS" | "StandardSSD_LRS";
+ managedDisk?: ManagedDisk;
+}
+
+export interface ManagedDisk {
+ /** The storage account type for use in creating data disks or OS disk. */
+ storageAccountType?: "Standard_LRS" | "Premium_LRS" | "StandardSSD_LRS";
+ /** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and is required when using Confidential VMs. */
+ securityProfile?: VMDiskSecurityProfile;
+ /** Specifies the customer managed disk encryption set resource id for the managed disk. It can be set only in UserSubscription mode. */
+ diskEncryptionSet?: DiskEncryptionSetParameters;
+}
+
+/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and is required when using Confidential VMs. */
+export interface VMDiskSecurityProfile {
+ /** Specifies the EncryptionType of the managed disk. It is set to DiskWithVMGuestState for encryption of the managed disk along with VMGuestState blob, VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and required when using Confidential VMs. */
+ securityEncryptionType?:
+ | "DiskWithVMGuestState"
+ | "VMGuestStateOnly"
+ | "NonPersistedTPM";
+ /** Specifies the customer managed disk encryption set resource id for the managed disk that is used for Customer Managed Key encrypted ConfidentialVM OS Disk and VMGuest blob. It can be set only in UserSubscription mode. */
+ diskEncryptionSet?: DiskEncryptionSetParameters;
+}
+
+/** The ARM resource id of the disk encryption set. */
+export interface DiskEncryptionSetParameters {
+ /** The ARM resource id of the disk encryption set. The resource should be in the same subscription as the Batch account. */
+ id?: string;
}
/** The configuration for container-enabled pools. */
@@ -539,6 +497,24 @@ export interface ContainerRegistry {
export interface DiskEncryptionConfiguration {
/** On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. */
targets?: Array<"OsDisk" | "TemporaryDisk">;
+ /** Customer Managed Key will encrypt OS Disk by EncryptionAtRest, and by default we will encrypt the data disk as well. It can be used only when the pool is configured with an identity and OsDisk is set as one of the targets of DiskEncryption. */
+ customerManagedKey?: DiskCustomerManagedKey;
+}
+
+/** The Customer Managed Key reference to encrypt the Disk. */
+export interface DiskCustomerManagedKey {
+ /** Fully versioned Key Url pointing to a key in KeyVault. Version segment of the Url is required regardless of rotationToLatestKeyVersionEnabled value. */
+ keyUrl?: string;
+ /** Set this flag to true to enable auto-updating of the Disk Encryption to the latest key version. Default is false. */
+ rotationToLatestKeyVersionEnabled?: boolean;
+ /** The reference of one of the pool identities to encrypt Disk. This identity will be used to access the KeyVault. */
+ identityReference?: PoolIdentityReference;
+}
+
+/** The reference of one of the pool identities to encrypt Disk. This identity will be used to access the key vault. */
+export interface PoolIdentityReference {
+ /** The ARM resource id of the user assigned identity. This reference must be included in the pool identities. */
+ resourceId?: string;
}
/** Allocation configuration used by Batch Service to provision the nodes. */
@@ -584,23 +560,10 @@ export interface OSDisk {
/** Specifies the ephemeral Disk Settings for the operating system disk used by the virtual machine. */
export interface DiffDiskSettings {
- /** This property can be used by user in the request to choose which location the operating system should be in. e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */
+ /** This property can be used by user in the request to choose which location the operating system should be in. e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */
placement?: "CacheDisk";
}
-export interface ManagedDisk {
- /** The storage account type for use in creating data disks or OS disk. */
- storageAccountType?: "Standard_LRS" | "Premium_LRS" | "StandardSSD_LRS";
- /** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and is required when using Confidential VMs. */
- securityProfile?: VMDiskSecurityProfile;
-}
-
-/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and is required when using Confidential VMs. */
-export interface VMDiskSecurityProfile {
- /** Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and required when using Confidential VMs. */
- securityEncryptionType?: "NonPersistedTPM" | "VMGuestStateOnly";
-}
-
/** Specifies the security profile settings for the virtual machine or virtual machine scale set. */
export interface SecurityProfile {
/** Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. */
@@ -609,6 +572,8 @@ export interface SecurityProfile {
encryptionAtHost?: boolean;
/** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */
uefiSettings?: UefiSettings;
+ /** Specifies ProxyAgent settings while creating the virtual machine. */
+ proxyAgentSettings?: ProxyAgentSettings;
}
/** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */
@@ -619,6 +584,24 @@ export interface UefiSettings {
vTpmEnabled?: boolean;
}
+/** Specifies ProxyAgent settings while creating the virtual machine. */
+export interface ProxyAgentSettings {
+ /** Specifies whether Metadata Security Protocol feature should be enabled on the virtual machine or virtual machine scale set. Default is False. */
+ enabled?: boolean;
+ /** Specifies particular host endpoint settings. */
+ imds?: HostEndpointSettings;
+ /** Specifies particular host endpoint settings. */
+ wireServer?: HostEndpointSettings;
+}
+
+/** Specifies particular host endpoint settings. */
+export interface HostEndpointSettings {
+ /** Specifies the access control policy execution mode. */
+ mode?: "Audit" | "Enforce";
+ /** Specifies the reference to the InVMAccessControlProfileVersion resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/inVMAccessControlProfiles/{profile}/versions/{version}. */
+ inVMAccessControlProfileReferenceId?: string;
+}
+
/** Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using 'latest' image version. */
export interface ServiceArtifactReference {
/** The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName} */
@@ -679,7 +662,7 @@ export interface AutoScaleRunError {
/** The network configuration for a pool. */
export interface NetworkConfiguration {
- /** The virtual network must be in the same region and subscription as the Azure Batch account. The specified subnet should have enough free IP addresses to accommodate the number of nodes in the pool. If the subnet doesn't have enough free IP addresses, the pool will partially allocate compute nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule tasks on the compute nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the compute nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the compute nodes to unusable. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication,including ports 29876 and 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration */
+ /** The virtual network must be in the same region and subscription as the Azure Batch account. The specified subnet should have enough free IP addresses to accommodate the number of nodes in the pool. If the subnet doesn't have enough free IP addresses, the pool will partially allocate compute nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule tasks on the compute nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the compute nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the compute nodes to unusable. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication,including ports 29876 and 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration */
subnetId?: string;
/** The scope of dynamic vnet assignment. */
dynamicVnetAssignmentScope?: "none" | "job";
@@ -731,10 +714,23 @@ export interface PublicIPAddressConfiguration {
provision?: "BatchManaged" | "UserManaged" | "NoPublicIPAddresses";
/** The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */
ipAddressIds?: Array<string>;
+ /** IP families are used to determine single-stack or dual-stack pools. For single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6. */
+ ipFamilies?: Array<"IPv4" | "IPv6">;
+ /** IP Tags that will applied to new Public IPs that Batch creates. */
+ ipTags?: Array<IpTag>;
+}
+
+export interface IpTag {
+ /** Example: FirstPartyUsage. */
+ ipTagType?: string;
+ /** Example: SQL. */
+ tag?: string;
}
/** Specifies how tasks should be distributed across compute nodes. */
export interface TaskSchedulingPolicy {
+ /** If not specified, the default is none. */
+ jobDefaultOrder?: "None" | "CreationTime";
/** How tasks should be distributed across compute nodes. */
nodeFillType: "Spread" | "Pack";
}
@@ -831,7 +827,7 @@ export interface UserIdentity {
/** Specifies the parameters for the auto user that runs a task on the Batch service. */
export interface AutoUserSpecification {
- /** The default value is Pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by start tasks. */
+ /** The default value is Pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks. */
scope?: "Task" | "Pool";
/** The default value is nonAdmin. */
elevationLevel?: "NonAdmin" | "Admin";
@@ -865,21 +861,9 @@ export interface ContainerHostBatchBindMountEntry {
isReadOnly?: boolean;
}
-/** Warning: This object is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateReference {
- /** The fully qualified ID of the certificate to install on the pool. This must be inside the same batch account as the pool. */
- id: string;
- /** The default value is currentUser. This property is applicable only for pools configured with Windows compute nodes. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory. */
- storeLocation?: "CurrentUser" | "LocalMachine";
- /** This property is applicable only for pools configured with Windows compute nodes. Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My. */
- storeName?: string;
- /** Which user accounts on the compute node should have access to the private data of the certificate. */
- visibility?: Array<"StartTask" | "Task" | "RemoteUser">;
-}
-
-/** Link to an application package inside the batch account */
+/** Link to an application package inside the Batch account */
export interface ApplicationPackageReference {
- /** The ID of the application package to install. This must be inside the same batch account as the pool. This can either be a reference to a specific version or the default version if one exists. */
+ /** The ID of the application package to install. This must be inside the same Batch account as the pool. This can either be a reference to a specific version or the default version if one exists. */
id: string;
/** If this is omitted, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences. If you are calling the REST API directly, the HTTP status code is 409. */
version?: string;
@@ -997,7 +981,7 @@ export interface UpgradePolicy {
export interface AutomaticOSUpgradePolicy {
/** Whether OS image rollback feature should be disabled. */
disableAutomaticRollback?: boolean;
- /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.
If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchmanagement/pool/create?tabs=HTTP#windowsconfiguration) cannot be set to true. */
+ /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.
If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/rest/api/batchmanagement/pool/create?tabs=HTTP#windowsconfiguration) cannot be set to true. */
enableAutomaticOSUpgrade?: boolean;
/** Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS. */
useRollingUpgradePolicy?: boolean;
diff --git a/packages/service/src/internal/arm-batch-rest/generated/outputModels.ts b/packages/service/src/internal/arm-batch-rest/generated/outputModels.ts
index 5d26a7d8d6..9a3f0bf3b4 100644
--- a/packages/service/src/internal/arm-batch-rest/generated/outputModels.ts
+++ b/packages/service/src/internal/arm-batch-rest/generated/outputModels.ts
@@ -393,74 +393,6 @@ export interface CheckNameAvailabilityResultOutput {
message?: string;
}
-/** Values returned by the List operation. */
-export interface ListCertificatesResultOutput {
- /** The collection of returned certificates. */
- value?: Array<CertificateOutput>;
- /** The continuation token. */
- nextLink?: string;
-}
-
-/** Contains information about a certificate. */
-export interface CertificateOutput extends AzureProxyResourceOutput {
- /** The properties associated with the certificate. */
- properties?: CertificatePropertiesOutput;
-}
-
-/** Certificate properties. */
-export interface CertificatePropertiesOutput
- extends CertificateBasePropertiesOutput {
- provisioningState?: "Succeeded" | "Deleting" | "Failed";
- /** The time at which the certificate entered its current state. */
- provisioningStateTransitionTime?: string;
- /** The previous provisioned state of the resource */
- previousProvisioningState?: "Succeeded" | "Deleting" | "Failed";
- /** The time at which the certificate entered its previous state. */
- previousProvisioningStateTransitionTime?: string;
- /** The public key of the certificate. */
- publicData?: string;
- /** This is only returned when the certificate provisioningState is 'Failed'. */
- deleteCertificateError?: DeleteCertificateErrorOutput;
-}
-
-/** An error response from the Batch service. */
-export interface DeleteCertificateErrorOutput {
- /** An identifier for the error. Codes are invariant and are intended to be consumed programmatically. */
- code: string;
- /** A message describing the error, intended to be suitable for display in a user interface. */
- message: string;
- /** The target of the particular error. For example, the name of the property in error. */
- target?: string;
- /** A list of additional details about the error. */
- details?: Array<DeleteCertificateErrorOutput>;
-}
-
-/** Base certificate properties. */
-export interface CertificateBasePropertiesOutput {
- /** This must match the first portion of the certificate name. Currently required to be 'SHA1'. */
- thumbprintAlgorithm?: string;
- /** This must match the thumbprint from the name. */
- thumbprint?: string;
- /** The format of the certificate - either Pfx or Cer. If omitted, the default is Pfx. */
- format?: "Pfx" | "Cer";
-}
-
-/** Contains information about a certificate. */
-export interface CertificateCreateOrUpdateParametersOutput
- extends AzureProxyResourceOutput {
- /** The properties associated with the certificate. */
- properties?: CertificateCreateOrUpdatePropertiesOutput;
-}
-
-/** Certificate properties for create operations */
-export interface CertificateCreateOrUpdatePropertiesOutput
- extends CertificateBasePropertiesOutput {
- /** The maximum size is 10KB. */
- data: string;
- /** This must not be specified if the certificate format is Cer. */
- password?: string;
-}
-
/** Values returned by the List operation. */
export interface DetectorListResultOutput {
/** The collection of Batch account detectors returned by the listing operation. */
@@ -545,7 +477,7 @@ export interface PoolPropertiesOutput {
allocationState?: "Steady" | "Resizing" | "Stopping";
/** The time at which the pool entered its current allocation state. */
allocationStateTransitionTime?: string;
- /** For information about available VM sizes, see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */
+ /** For information about available VM sizes, see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */
vmSize?: string;
/** Deployment configuration properties. */
deploymentConfiguration?: DeploymentConfigurationOutput;
@@ -571,16 +503,8 @@ export interface PoolPropertiesOutput {
metadata?: Array<MetadataItemOutput>;
/** In an PATCH (update) operation, this property can be set to an empty object to remove the start task from the pool. */
startTask?: StartTaskOutput;
- /**
- * For Windows compute nodes, the Batch service installs the certificates to the specified certificate store and location. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory.
- *
- * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead.
- */
- certificates?: Array<CertificateReferenceOutput>;
/** Changes to application package references affect all new compute nodes joining the pool, but do not affect compute nodes that are already in the pool until they are rebooted or reimaged. There is a maximum of 10 application package references on any given pool. */
applicationPackages?: Array<ApplicationPackageReferenceOutput>;
- /** The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, pool creation will fail. */
- applicationLicenses?: Array<string>;
/** Describes either the current operation (if the pool AllocationState is Resizing) or the previously completed operation (if the AllocationState is Steady). */
resizeOperationStatus?: ResizeOperationStatusOutput;
/** This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */
@@ -591,8 +515,6 @@ export interface PoolPropertiesOutput {
currentNodeCommunicationMode?: "Default" | "Classic" | "Simplified";
/** Describes an upgrade policy - automatic, manual, or rolling. */
upgradePolicy?: UpgradePolicyOutput;
- /** The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. */
- resourceTags?: Record<string, string>;
}
/** Deployment configuration properties. */
@@ -645,7 +567,7 @@ export interface ImageReferenceOutput {
sku?: string;
/** A value of 'latest' can be specified to select the latest version of an image. If omitted, the default is 'latest'. */
version?: string;
- /** This property is mutually exclusive with other properties. The Azure Compute Gallery Image must have replicas in the same region as the Azure Batch account. For information about the firewall settings for the Batch node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
+ /** This property is mutually exclusive with other properties. The Azure Compute Gallery Image must have replicas in the same region as the Azure Batch account. For information about the firewall settings for the Batch node agent to communicate with the Batch service see https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
id?: string;
/** This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. */
sharedGalleryImageId?: string;
@@ -682,6 +604,33 @@ export interface DataDiskOutput {
* Premium_LRS - The data disk should use premium locally redundant storage.
*/
storageAccountType?: "Standard_LRS" | "Premium_LRS" | "StandardSSD_LRS";
+ managedDisk?: ManagedDiskOutput;
+}
+
+export interface ManagedDiskOutput {
+ /** The storage account type for use in creating data disks or OS disk. */
+ storageAccountType?: "Standard_LRS" | "Premium_LRS" | "StandardSSD_LRS";
+ /** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and is required when using Confidential VMs. */
+ securityProfile?: VMDiskSecurityProfileOutput;
+ /** Specifies the customer managed disk encryption set resource id for the managed disk. It can be set only in UserSubscription mode. */
+ diskEncryptionSet?: DiskEncryptionSetParametersOutput;
+}
+
+/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and is required when using Confidential VMs. */
+export interface VMDiskSecurityProfileOutput {
+ /** Specifies the EncryptionType of the managed disk. It is set to DiskWithVMGuestState for encryption of the managed disk along with VMGuestState blob, VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and required when using Confidential VMs. */
+ securityEncryptionType?:
+ | "DiskWithVMGuestState"
+ | "VMGuestStateOnly"
+ | "NonPersistedTPM";
+ /** Specifies the customer managed disk encryption set resource id for the managed disk that is used for Customer Managed Key encrypted ConfidentialVM OS Disk and VMGuest blob. It can be set only in UserSubscription mode. */
+ diskEncryptionSet?: DiskEncryptionSetParametersOutput;
+}
+
+/** The ARM resource id of the disk encryption set. */
+export interface DiskEncryptionSetParametersOutput {
+ /** The ARM resource id of the disk encryption set. The resource should be in the same subscription as the Batch account. */
+ id?: string;
}
/** The configuration for container-enabled pools. */
@@ -710,6 +659,24 @@ export interface ContainerRegistryOutput {
export interface DiskEncryptionConfigurationOutput {
/** On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. */
targets?: Array<"OsDisk" | "TemporaryDisk">;
+ /** Customer Managed Key will encrypt OS Disk by EncryptionAtRest, and by default we will encrypt the data disk as well. It can be used only when the pool is configured with an identity and OsDisk is set as one of the targets of DiskEncryption. */
+ customerManagedKey?: DiskCustomerManagedKeyOutput;
+}
+
+/** The Customer Managed Key reference to encrypt the Disk. */
+export interface DiskCustomerManagedKeyOutput {
+ /** Fully versioned Key Url pointing to a key in KeyVault. Version segment of the Url is required regardless of rotationToLatestKeyVersionEnabled value. */
+ keyUrl?: string;
+ /** Set this flag to true to enable auto-updating of the Disk Encryption to the latest key version. Default is false. */
+ rotationToLatestKeyVersionEnabled?: boolean;
+ /** The reference of one of the pool identities to encrypt Disk. This identity will be used to access the KeyVault. */
+ identityReference?: PoolIdentityReferenceOutput;
+}
+
+/** The reference of one of the pool identities to encrypt Disk. This identity will be used to access the key vault. */
+export interface PoolIdentityReferenceOutput {
+ /** The ARM resource id of the user assigned identity. This reference must be included in the pool identities. */
+ resourceId?: string;
}
/** Allocation configuration used by Batch Service to provision the nodes. */
@@ -755,23 +722,10 @@ export interface OSDiskOutput {
/** Specifies the ephemeral Disk Settings for the operating system disk used by the virtual machine. */
export interface DiffDiskSettingsOutput {
- /** This property can be used by user in the request to choose which location the operating system should be in. e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */
+ /** This property can be used by user in the request to choose which location the operating system should be in. e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */
placement?: "CacheDisk";
}
-export interface ManagedDiskOutput {
- /** The storage account type for use in creating data disks or OS disk. */
- storageAccountType?: "Standard_LRS" | "Premium_LRS" | "StandardSSD_LRS";
- /** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and is required when using Confidential VMs. */
- securityProfile?: VMDiskSecurityProfileOutput;
-}
-
-/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and is required when using Confidential VMs. */
-export interface VMDiskSecurityProfileOutput {
- /** Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and required when using Confidential VMs. */
- securityEncryptionType?: "NonPersistedTPM" | "VMGuestStateOnly";
-}
-
/** Specifies the security profile settings for the virtual machine or virtual machine scale set. */
export interface SecurityProfileOutput {
/** Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. */
@@ -780,6 +734,8 @@ export interface SecurityProfileOutput {
encryptionAtHost?: boolean;
/** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */
uefiSettings?: UefiSettingsOutput;
+ /** Specifies ProxyAgent settings while creating the virtual machine. */
+ proxyAgentSettings?: ProxyAgentSettingsOutput;
}
/** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */
@@ -790,6 +746,24 @@ export interface UefiSettingsOutput {
vTpmEnabled?: boolean;
}
+/** Specifies ProxyAgent settings while creating the virtual machine. */
+export interface ProxyAgentSettingsOutput {
+ /** Specifies whether Metadata Security Protocol feature should be enabled on the virtual machine or virtual machine scale set. Default is False. */
+ enabled?: boolean;
+ /** Specifies particular host endpoint settings. */
+ imds?: HostEndpointSettingsOutput;
+ /** Specifies particular host endpoint settings. */
+ wireServer?: HostEndpointSettingsOutput;
+}
+
+/** Specifies particular host endpoint settings. */
+export interface HostEndpointSettingsOutput {
+ /** Specifies the access control policy execution mode. */
+ mode?: "Audit" | "Enforce";
+ /** Specifies the reference to the InVMAccessControlProfileVersion resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/inVMAccessControlProfiles/{profile}/versions/{version}. */
+ inVMAccessControlProfileReferenceId?: string;
+}
+
/** Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using 'latest' image version. */
export interface ServiceArtifactReferenceOutput {
/** The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName} */
@@ -850,7 +824,7 @@ export interface AutoScaleRunErrorOutput {
/** The network configuration for a pool. */
export interface NetworkConfigurationOutput {
- /** The virtual network must be in the same region and subscription as the Azure Batch account. The specified subnet should have enough free IP addresses to accommodate the number of nodes in the pool. If the subnet doesn't have enough free IP addresses, the pool will partially allocate compute nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule tasks on the compute nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the compute nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the compute nodes to unusable. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication,including ports 29876 and 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration */
+ /** The virtual network must be in the same region and subscription as the Azure Batch account. The specified subnet should have enough free IP addresses to accommodate the number of nodes in the pool. If the subnet doesn't have enough free IP addresses, the pool will partially allocate compute nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule tasks on the compute nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the compute nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the compute nodes to unusable. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication,including ports 29876 and 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration */
subnetId?: string;
/** The scope of dynamic vnet assignment. */
dynamicVnetAssignmentScope?: "none" | "job";
@@ -902,10 +876,23 @@ export interface PublicIPAddressConfigurationOutput {
provision?: "BatchManaged" | "UserManaged" | "NoPublicIPAddresses";
/** The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */
ipAddressIds?: Array<string>;
+ /** IP families are used to determine single-stack or dual-stack pools. For single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6. */
+ ipFamilies?: Array<"IPv4" | "IPv6">;
+ /** IP Tags that will be applied to new Public IPs that Batch creates. */
+ ipTags?: Array<IpTagOutput>;
+}
+
+export interface IpTagOutput {
+ /** Example: FirstPartyUsage. */
+ ipTagType?: string;
+ /** Example: SQL. */
+ tag?: string;
}
/** Specifies how tasks should be distributed across compute nodes. */
export interface TaskSchedulingPolicyOutput {
+ /** If not specified, the default is none. */
+ jobDefaultOrder?: "None" | "CreationTime";
/** How tasks should be distributed across compute nodes. */
nodeFillType: "Spread" | "Pack";
}
@@ -1002,7 +989,7 @@ export interface UserIdentityOutput {
/** Specifies the parameters for the auto user that runs a task on the Batch service. */
export interface AutoUserSpecificationOutput {
- /** The default value is Pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by start tasks. */
+ /** The default value is Pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks. */
scope?: "Task" | "Pool";
/** The default value is nonAdmin. */
elevationLevel?: "NonAdmin" | "Admin";
@@ -1036,21 +1023,9 @@ export interface ContainerHostBatchBindMountEntryOutput {
isReadOnly?: boolean;
}
-/** Warning: This object is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateReferenceOutput {
- /** The fully qualified ID of the certificate to install on the pool. This must be inside the same batch account as the pool. */
- id: string;
- /** The default value is currentUser. This property is applicable only for pools configured with Windows compute nodes. For Linux compute nodes, the certificates are stored in a directory inside the task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this location. For certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and certificates are placed in that directory. */
- storeLocation?: "CurrentUser" | "LocalMachine";
- /** This property is applicable only for pools configured with Windows compute nodes. Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My. */
- storeName?: string;
- /** Which user accounts on the compute node should have access to the private data of the certificate. */
- visibility?: Array<"StartTask" | "Task" | "RemoteUser">;
-}
-
-/** Link to an application package inside the batch account */
+/** Link to an application package inside the Batch account */
export interface ApplicationPackageReferenceOutput {
- /** The ID of the application package to install. This must be inside the same batch account as the pool. This can either be a reference to a specific version or the default version if one exists. */
+ /** The ID of the application package to install. This must be inside the same Batch account as the pool. This can either be a reference to a specific version or the default version if one exists. */
id: string;
/** If this is omitted, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences. If you are calling the REST API directly, the HTTP status code is 409. */
version?: string;
@@ -1168,7 +1143,7 @@ export interface UpgradePolicyOutput {
export interface AutomaticOSUpgradePolicyOutput {
/** Whether OS image rollback feature should be disabled. */
disableAutomaticRollback?: boolean;
- /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.
If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchmanagement/pool/create?tabs=HTTP#windowsconfiguration) cannot be set to true. */
+ /** Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.
If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/rest/api/batchmanagement/pool/create?tabs=HTTP#windowsconfiguration) cannot be set to true. */
enableAutomaticOSUpgrade?: boolean;
/** Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS. */
useRollingUpgradePolicy?: boolean;
@@ -1233,3 +1208,192 @@ export interface EndpointDetailOutput {
/** The port an endpoint is connected to. */
port?: number;
}
+
+/** Result of a list NSP (network security perimeter) configurations request. */
+export interface NetworkSecurityPerimeterConfigurationListResultOutput {
+ /** Array of network security perimeter results. */
+ value?: Array<NetworkSecurityPerimeterConfigurationOutput>;
+ /** The link used to get the next page of results. */
+ nextLink?: string;
+}
+
+/** Network security perimeter (NSP) configuration resource */
+export interface NetworkSecurityPerimeterConfigurationOutput
+ extends ProxyResourceOutput {
+ /** Network security configuration properties. */
+ properties?: NetworkSecurityPerimeterConfigurationPropertiesOutput;
+}
+
+/** Network security configuration properties. */
+export interface NetworkSecurityPerimeterConfigurationPropertiesOutput {
+ /** Provisioning state of a network security perimeter configuration that is being created or updated. */
+ provisioningState?:
+ | "Succeeded"
+ | "Creating"
+ | "Updating"
+ | "Deleting"
+ | "Accepted"
+ | "Failed"
+ | "Canceled";
+ /** List of provisioning issues, if any */
+ provisioningIssues?: Array<ProvisioningIssueOutput>;
+ /** Information about a network security perimeter (NSP) */
+ networkSecurityPerimeter?: NetworkSecurityPerimeterOutput;
+ /** Information about resource association */
+ resourceAssociation?: ResourceAssociationOutput;
+ /** Network security perimeter configuration profile */
+ profile?: NetworkSecurityProfileOutput;
+}
+
+/** Describes a provisioning issue for a network security perimeter configuration */
+export interface ProvisioningIssueOutput {
+ /** Name of the issue */
+ name?: string;
+ /** Details of a provisioning issue for a network security perimeter (NSP) configuration. Resource providers should generate separate provisioning issue elements for each separate issue detected, and include a meaningful and distinctive description, as well as any appropriate suggestedResourceIds and suggestedAccessRules */
+ properties?: ProvisioningIssuePropertiesOutput;
+}
+
+/** Details of a provisioning issue for a network security perimeter (NSP) configuration. Resource providers should generate separate provisioning issue elements for each separate issue detected, and include a meaningful and distinctive description, as well as any appropriate suggestedResourceIds and suggestedAccessRules */
+export interface ProvisioningIssuePropertiesOutput {
+ /** Type of issue */
+ issueType?:
+ | "Unknown"
+ | "ConfigurationPropagationFailure"
+ | "MissingPerimeterConfiguration"
+ | "MissingIdentityConfiguration";
+ /** Severity of the issue. */
+ severity?: "Warning" | "Error";
+ /** Description of the issue */
+ description?: string;
+ /** Fully qualified resource IDs of suggested resources that can be associated to the network security perimeter (NSP) to remediate the issue. */
+ suggestedResourceIds?: Array<string>;
+ /** Access rules that can be added to the network security profile (NSP) to remediate the issue. */
+ suggestedAccessRules?: Array<AccessRuleOutput>;
+}
+
+/** Access rule in a network security perimeter configuration profile */
+export interface AccessRuleOutput {
+ /** Name of the access rule */
+ name?: string;
+ /** Properties of Access Rule */
+ properties?: AccessRulePropertiesOutput;
+}
+
+/** Properties of Access Rule */
+export interface AccessRulePropertiesOutput {
+ /** Direction of Access Rule */
+ direction?: "Inbound" | "Outbound";
+ /** Address prefixes in the CIDR format for inbound rules */
+ addressPrefixes?: Array<string>;
+ /** Subscriptions for inbound rules */
+ subscriptions?: Array<AccessRulePropertiesSubscriptionsItemOutput>;
+ /** Network security perimeters for inbound rules */
+ networkSecurityPerimeters?: Array<NetworkSecurityPerimeterOutput>;
+ /** Fully qualified domain names (FQDN) for outbound rules */
+ fullyQualifiedDomainNames?: Array<string>;
+ /** Email addresses for outbound rules */
+ emailAddresses?: Array<string>;
+ /** Phone numbers for outbound rules */
+ phoneNumbers?: Array<string>;
+}
+
+/** Subscription identifiers */
+export interface AccessRulePropertiesSubscriptionsItemOutput {
+ /** The fully qualified Azure resource ID of the subscription e.g. ('/subscriptions/00000000-0000-0000-0000-000000000000') */
+ id?: string;
+}
+
+/** Information about a network security perimeter (NSP) */
+export interface NetworkSecurityPerimeterOutput {
+ /** Fully qualified Azure resource ID of the NSP resource */
+ id?: string;
+ /**
+ * Universal unique ID (UUID) of the network security perimeter
+ *
+ * Value may contain a UUID
+ */
+ perimeterGuid?: string;
+ /** Location of the network security perimeter */
+ location?: string;
+}
+
+/** Information about resource association */
+export interface ResourceAssociationOutput {
+ /** Name of the resource association */
+ name?: string;
+ /** Access mode of the resource association */
+ accessMode?: "Enforced" | "Learning" | "Audit";
+}
+
+/** Network security perimeter configuration profile */
+export interface NetworkSecurityProfileOutput {
+ /** Name of the profile */
+ name?: string;
+ /** Current access rules version */
+ accessRulesVersion?: number;
+ /** List of Access Rules */
+ accessRules?: Array<AccessRuleOutput>;
+ /** Current diagnostic settings version */
+ diagnosticSettingsVersion?: number;
+ /** List of log categories that are enabled */
+ enabledLogCategories?: Array<string>;
+}
+
+/** The resource model definition for an Azure Resource Manager proxy resource. It will not have tags and a location */
+export interface ProxyResourceOutput extends ResourceOutput {}
+
+/** Common fields that are returned in the response for all Azure Resource Manager resources */
+export interface ResourceOutput {
+ /** Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}" */
+ id?: string;
+ /** The name of the resource */
+ name?: string;
+ /** The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" */
+ type?: string;
+ /** Azure Resource Manager metadata containing createdBy and modifiedBy information. */
+ systemData?: SystemDataOutput;
+}
+
+/** Metadata pertaining to creation and last modification of the resource. */
+export interface SystemDataOutput {
+ /** The identity that created the resource. */
+ createdBy?: string;
+ /** The type of identity that created the resource. */
+ createdByType?: "User" | "Application" | "ManagedIdentity" | "Key";
+ /** The timestamp of resource creation (UTC). */
+ createdAt?: string;
+ /** The identity that last modified the resource. */
+ lastModifiedBy?: string;
+ /** The type of identity that last modified the resource. */
+ lastModifiedByType?: "User" | "Application" | "ManagedIdentity" | "Key";
+ /** The timestamp of resource last modification (UTC) */
+ lastModifiedAt?: string;
+}
+
+/** Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). */
+export interface ErrorResponseOutput {
+ /** The error object. */
+ error?: ErrorDetailOutput;
+}
+
+/** The error detail. */
+export interface ErrorDetailOutput {
+ /** The error code. */
+ code?: string;
+ /** The error message. */
+ message?: string;
+ /** The error target. */
+ target?: string;
+ /** The error details. */
+ details?: Array<ErrorDetailOutput>;
+ /** The error additional info. */
+ additionalInfo?: Array<ErrorAdditionalInfoOutput>;
+}
+
+/** The resource management error additional info. */
+export interface ErrorAdditionalInfoOutput {
+ /** The additional info type. */
+ type?: string;
+ /** The additional info. */
+ info?: Record<string, any>;
+}
diff --git a/packages/service/src/internal/arm-batch-rest/generated/parameters.ts b/packages/service/src/internal/arm-batch-rest/generated/parameters.ts
index 0140f365f9..fc450d9186 100644
--- a/packages/service/src/internal/arm-batch-rest/generated/parameters.ts
+++ b/packages/service/src/internal/arm-batch-rest/generated/parameters.ts
@@ -11,7 +11,6 @@ import {
ApplicationPackage,
Application,
CheckNameAvailabilityParameters,
- CertificateCreateOrUpdateParameters,
PrivateEndpointConnection,
Pool
} from "./models";
@@ -181,75 +180,6 @@ export type LocationCheckNameAvailabilityParameters = LocationCheckNameAvailabil
RequestParameters;
export type OperationsListParameters = RequestParameters;
-export interface CertificateListByBatchAccountQueryParamProperties {
- /** The maximum number of items to return in the response. */
- maxresults?: number;
- /** Comma separated list of properties that should be returned. e.g. "properties/provisioningState". Only top level properties under properties/ are valid for selection. */
- $select?: string;
- /** OData filter expression. Valid properties for filtering are "properties/provisioningState", "properties/provisioningStateTransitionTime", "name". */
- $filter?: string;
-}
-
-export interface CertificateListByBatchAccountQueryParam {
- queryParameters?: CertificateListByBatchAccountQueryParamProperties;
-}
-
-export type CertificateListByBatchAccountParameters = CertificateListByBatchAccountQueryParam &
- RequestParameters;
-
-export interface CertificateCreateHeaders {
- /** The entity state (ETag) version of the certificate to update. A value of "*" can be used to apply the operation only if the certificate already exists. If omitted, this operation will always be applied. */
- "If-Match"?: string;
- /** Set to '*' to allow a new certificate to be created, but to prevent updating an existing certificate. Other values will be ignored. */
- "If-None-Match"?: string;
-}
-
-export interface CertificateCreateBodyParam {
- /** Additional parameters for certificate creation. */
- body: CertificateCreateOrUpdateParameters;
-}
-
-export interface CertificateCreateHeaderParam {
- headers: RawHttpHeadersInput & CertificateCreateHeaders;
-}
-
-export interface CertificateCreateMediaTypesParam {
- /** Request content type */
- contentType?: "application/json";
-}
-
-export type CertificateCreateParameters = CertificateCreateHeaderParam &
- CertificateCreateMediaTypesParam &
- CertificateCreateBodyParam &
- RequestParameters;
-
-export interface CertificateUpdateHeaders {
- /** The entity state (ETag) version of the certificate to update. This value can be omitted or set to "*" to apply the operation unconditionally. */
- "If-Match"?: string;
-}
-
-export interface CertificateUpdateBodyParam {
- /** Certificate entity to update. */
- body: CertificateCreateOrUpdateParameters;
-}
-
-export interface CertificateUpdateHeaderParam {
- headers: RawHttpHeadersInput & CertificateUpdateHeaders;
-}
-
-export interface CertificateUpdateMediaTypesParam {
- /** Request content type */
- contentType?: "application/json";
-}
-
-export type CertificateUpdateParameters = CertificateUpdateHeaderParam &
- CertificateUpdateMediaTypesParam &
- CertificateUpdateBodyParam &
- RequestParameters;
-export type CertificateDeleteParameters = RequestParameters;
-export type CertificateGetParameters = RequestParameters;
-export type CertificateCancelDeletionParameters = RequestParameters;
-
export interface PrivateLinkResourceListByBatchAccountQueryParamProperties {
/** The maximum number of items to return in the response. */
maxresults?: number;
@@ -384,3 +314,6 @@ export type PoolDeleteParameters = RequestParameters;
export type PoolGetParameters = RequestParameters;
export type PoolDisableAutoScaleParameters = RequestParameters;
export type PoolStopResizeParameters = RequestParameters;
+export type NetworkSecurityPerimeterListConfigurationsParameters = RequestParameters;
+export type NetworkSecurityPerimeterGetConfigurationParameters = RequestParameters;
+export type NetworkSecurityPerimeterReconcileConfigurationParameters = RequestParameters;
diff --git a/packages/service/src/internal/arm-batch-rest/generated/responses.ts b/packages/service/src/internal/arm-batch-rest/generated/responses.ts
index b80e4b97a4..d3eaa02802 100644
--- a/packages/service/src/internal/arm-batch-rest/generated/responses.ts
+++ b/packages/service/src/internal/arm-batch-rest/generated/responses.ts
@@ -19,14 +19,15 @@ import {
SupportedSkusResultOutput,
CheckNameAvailabilityResultOutput,
OperationListResultOutput,
- ListCertificatesResultOutput,
- CertificateOutput,
ListPrivateLinkResourcesResultOutput,
PrivateLinkResourceOutput,
ListPrivateEndpointConnectionsResultOutput,
PrivateEndpointConnectionOutput,
ListPoolsResultOutput,
- PoolOutput
+ PoolOutput,
+ NetworkSecurityPerimeterConfigurationListResultOutput,
+ NetworkSecurityPerimeterConfigurationOutput,
+ ErrorResponseOutput
} from "./outputModels";
/** Creates a new Batch account with the specified parameters. Existing accounts cannot be updated with this API and should instead be updated with the Update Batch Account API. */
@@ -199,14 +200,14 @@ export interface BatchAccountGetDetectorDefaultResponse extends HttpResponse {
body: CloudErrorOutput;
}
-/** Lists the endpoints that a Batch Compute Node under this Batch Account may call as part of Batch service administration. If you are deploying a Pool inside of a virtual network that you specify, you must make sure your network allows outbound access to these endpoints. Failure to allow access to these endpoints may cause Batch to mark the affected nodes as unusable. For more information about creating a pool inside of a virtual network, see https://docs.microsoft.com/en-us/azure/batch/batch-virtual-network. */
+/** Lists the endpoints that a Batch Compute Node under this Batch Account may call as part of Batch service administration. If you are deploying a Pool inside of a virtual network that you specify, you must make sure your network allows outbound access to these endpoints. Failure to allow access to these endpoints may cause Batch to mark the affected nodes as unusable. For more information about creating a pool inside of a virtual network, see https://learn.microsoft.com/azure/batch/batch-virtual-network. */
export interface BatchAccountListOutboundNetworkDependenciesEndpoints200Response
extends HttpResponse {
status: "200";
body: OutboundEnvironmentEndpointCollectionOutput;
}
-/** Lists the endpoints that a Batch Compute Node under this Batch Account may call as part of Batch service administration. If you are deploying a Pool inside of a virtual network that you specify, you must make sure your network allows outbound access to these endpoints. Failure to allow access to these endpoints may cause Batch to mark the affected nodes as unusable. For more information about creating a pool inside of a virtual network, see https://docs.microsoft.com/en-us/azure/batch/batch-virtual-network. */
+/** Lists the endpoints that a Batch Compute Node under this Batch Account may call as part of Batch service administration. If you are deploying a Pool inside of a virtual network that you specify, you must make sure your network allows outbound access to these endpoints. Failure to allow access to these endpoints may cause Batch to mark the affected nodes as unusable. For more information about creating a pool inside of a virtual network, see https://learn.microsoft.com/azure/batch/batch-virtual-network. */
export interface BatchAccountListOutboundNetworkDependenciesEndpointsDefaultResponse
extends HttpResponse {
status: string;
@@ -397,131 +398,6 @@ export interface OperationsListDefaultResponse extends HttpResponse {
body: CloudErrorOutput;
}
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateListByBatchAccount200Response extends HttpResponse {
- status: "200";
- body: ListCertificatesResultOutput;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateListByBatchAccountDefaultResponse
- extends HttpResponse {
- status: string;
- body: CloudErrorOutput;
-}
-
-export interface CertificateCreate200Headers {
- /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Match or If-None-Match headers. */
- etag?: string;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateCreate200Response extends HttpResponse {
- status: "200";
- body: CertificateOutput;
- headers: RawHttpHeaders & CertificateCreate200Headers;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateCreateDefaultResponse extends HttpResponse {
- status: string;
- body: CloudErrorOutput;
-}
-
-export interface CertificateUpdate200Headers {
- /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Match or If-None-Match headers. */
- etag?: string;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateUpdate200Response extends HttpResponse {
- status: "200";
- body: CertificateOutput;
- headers: RawHttpHeaders & CertificateUpdate200Headers;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateUpdateDefaultResponse extends HttpResponse {
- status: string;
- body: CloudErrorOutput;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateDelete200Response extends HttpResponse {
- status: "200";
- body: Record;
-}
-
-export interface CertificateDelete202Headers {
- /** The URL of the resource used to check the status of the asynchronous operation. */
- location?: string;
- /** Suggested delay to check the status of the asynchronous operation. The value is an integer that represents the seconds. */
- "retry-after"?: number;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateDelete202Response extends HttpResponse {
- status: "202";
- body: Record;
- headers: RawHttpHeaders & CertificateDelete202Headers;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateDelete204Response extends HttpResponse {
- status: "204";
- body: Record;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateDeleteDefaultResponse extends HttpResponse {
- status: string;
- body: CloudErrorOutput;
-}
-
-export interface CertificateGet200Headers {
- /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Match or If-None-Match headers. */
- etag?: string;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateGet200Response extends HttpResponse {
- status: "200";
- body: CertificateOutput;
- headers: RawHttpHeaders & CertificateGet200Headers;
-}
-
-/** Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */
-export interface CertificateGetDefaultResponse extends HttpResponse {
- status: string;
- body: CloudErrorOutput;
-}
-
-export interface CertificateCancelDeletion200Headers {
- /** The ETag HTTP response header. This is an opaque string. You can use it to detect whether the resource has changed between requests. In particular, you can pass the ETag to one of the If-Match or If-None-Match headers. */
- etag?: string;
-}
-
-/**
- * If you try to delete a certificate that is being used by a pool or compute node, the status of the certificate changes to deleteFailed. If you decide that you want to continue using the certificate, you can use this operation to set the status of the certificate back to active. If you intend to delete the certificate, you do not need to run this operation after the deletion failed. You must make sure that the certificate is not being used by any resources, and then you can try again to delete the certificate.
- *
- * Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead.
- */
-export interface CertificateCancelDeletion200Response extends HttpResponse {
- status: "200";
- body: CertificateOutput;
- headers: RawHttpHeaders & CertificateCancelDeletion200Headers;
-}
-
-/**
- * If you try to delete a certificate that is being used by a pool or compute node, the status of the certificate changes to deleteFailed. If you decide that you want to continue using the certificate, you can use this operation to set the status of the certificate back to active. If you intend to delete the certificate, you do not need to run this operation after the deletion failed. You must make sure that the certificate is not being used by any resources, and then you can try again to delete the certificate.
- *
- * Warning: This operation is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead.
- */
-export interface CertificateCancelDeletionDefaultResponse extends HttpResponse {
- status: string;
- body: CloudErrorOutput;
-}
-
/** Lists all of the private link resources in the specified account. */
export interface PrivateLinkResourceListByBatchAccount200Response
extends HttpResponse {
@@ -766,3 +642,54 @@ export interface PoolStopResizeDefaultResponse extends HttpResponse {
status: string;
body: CloudErrorOutput;
}
+
+/** Lists all of the NSP configurations in the specified account. */
+export interface NetworkSecurityPerimeterListConfigurations200Response
+ extends HttpResponse {
+ status: "200";
+ body: NetworkSecurityPerimeterConfigurationListResultOutput;
+}
+
+/** Lists all of the NSP configurations in the specified account. */
+export interface NetworkSecurityPerimeterListConfigurationsDefaultResponse
+ extends HttpResponse {
+ status: string;
+ body: CloudErrorOutput;
+}
+
+/** Gets information about the specified NSP configuration. */
+export interface NetworkSecurityPerimeterGetConfiguration200Response
+ extends HttpResponse {
+ status: "200";
+ body: NetworkSecurityPerimeterConfigurationOutput;
+}
+
+/** Gets information about the specified NSP configuration. */
+export interface NetworkSecurityPerimeterGetConfigurationDefaultResponse
+ extends HttpResponse {
+ status: string;
+ body: CloudErrorOutput;
+}
+
+export interface NetworkSecurityPerimeterReconcileConfiguration202Headers {
+ /** The URL of the resource used to check the status of the asynchronous operation. */
+ location?: string;
+ /** Suggested delay to check the status of the asynchronous operation. The value is an integer that specifies the delay in seconds. */
+ "retry-after"?: number;
+}
+
+/** Reconciles the specified NSP configuration. */
+export interface NetworkSecurityPerimeterReconcileConfiguration202Response
+ extends HttpResponse {
+ status: "202";
+ body: Record;
+ headers: RawHttpHeaders &
+ NetworkSecurityPerimeterReconcileConfiguration202Headers;
+}
+
+/** Reconciles the specified NSP configuration. */
+export interface NetworkSecurityPerimeterReconcileConfigurationDefaultResponse
+ extends HttpResponse {
+ status: string;
+ body: ErrorResponseOutput;
+}
diff --git a/packages/service/src/internal/batch-rest/generated/src/batchClient.ts b/packages/service/src/internal/batch-rest/generated/src/batchClient.ts
index a9c83a85ea..0239aa0ad0 100644
--- a/packages/service/src/internal/batch-rest/generated/src/batchClient.ts
+++ b/packages/service/src/internal/batch-rest/generated/src/batchClient.ts
@@ -1,10 +1,11 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-import { getClient, ClientOptions } from "@azure-rest/core-client";
+import type { ClientOptions } from "@azure-rest/core-client";
+import { getClient } from "@azure-rest/core-client";
import { logger } from "./logger.js";
-import { TokenCredential } from "@azure/core-auth";
-import { BatchClient } from "./clientDefinitions.js";
+import type { TokenCredential } from "@azure/core-auth";
+import type { BatchClient } from "./clientDefinitions.js";
/** The optional parameters for the client */
export interface BatchClientOptions extends ClientOptions {
@@ -21,9 +22,9 @@ export interface BatchClientOptions extends ClientOptions {
export default function createClient(
endpointParam: string,
credentials: TokenCredential,
- { apiVersion = "2024-07-01.20.0", ...options }: BatchClientOptions = {},
+ { apiVersion = "2025-06-01", ...options }: BatchClientOptions = {},
): BatchClient {
- const endpointUrl = options.endpoint ?? options.baseUrl ?? `${endpointParam}`;
+ const endpointUrl = options.endpoint ?? `${endpointParam}`;
const userAgentInfo = `azsdk-js-batch-rest/1.0.0-beta.1`;
const userAgentPrefix =
options.userAgentOptions && options.userAgentOptions.userAgentPrefix
diff --git a/packages/service/src/internal/batch-rest/generated/src/clientDefinitions.ts b/packages/service/src/internal/batch-rest/generated/src/clientDefinitions.ts
index 4a122daf2a..2d2cea9db3 100644
--- a/packages/service/src/internal/batch-rest/generated/src/clientDefinitions.ts
+++ b/packages/service/src/internal/batch-rest/generated/src/clientDefinitions.ts
@@ -1,7 +1,7 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-import {
+import type {
ListApplicationsParameters,
GetApplicationParameters,
ListPoolUsageMetricsParameters,
@@ -75,7 +75,7 @@ import {
GetNodeFilePropertiesParameters,
ListNodeFilesParameters,
} from "./parameters.js";
-import {
+import type {
ListApplications200Response,
ListApplicationsDefaultResponse,
GetApplication200Response,
@@ -223,7 +223,7 @@ import {
ListNodeFiles200Response,
ListNodeFilesDefaultResponse,
} from "./responses.js";
-import { Client, StreamableMethod } from "@azure-rest/core-client";
+import type { Client, StreamableMethod } from "@azure-rest/core-client";
export interface ListApplications {
/**
@@ -280,7 +280,7 @@ export interface CreatePool {
post(
options: CreatePoolParameters,
): StreamableMethod;
- /** Lists all of the Pools which be mounted. */
+ /** Lists all of the Pools in the specified Account. */
get(
options?: ListPoolsParameters,
): StreamableMethod;
diff --git a/packages/service/src/internal/batch-rest/generated/src/isUnexpected.ts b/packages/service/src/internal/batch-rest/generated/src/isUnexpected.ts
index d58bf659df..fdd6610241 100644
--- a/packages/service/src/internal/batch-rest/generated/src/isUnexpected.ts
+++ b/packages/service/src/internal/batch-rest/generated/src/isUnexpected.ts
@@ -1,7 +1,7 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-import {
+import type {
ListApplications200Response,
ListApplicationsDefaultResponse,
GetApplication200Response,
diff --git a/packages/service/src/internal/batch-rest/generated/src/models.ts b/packages/service/src/internal/batch-rest/generated/src/models.ts
index 8f910053f1..96d04e5041 100644
--- a/packages/service/src/internal/batch-rest/generated/src/models.ts
+++ b/packages/service/src/internal/batch-rest/generated/src/models.ts
@@ -2,26 +2,24 @@
// Licensed under the MIT License.
/** Parameters for creating an Azure Batch Pool. */
-export interface BatchPoolCreateContent {
+export interface BatchPoolCreateOptions {
/** A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). */
id: string;
/** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
displayName?: string;
- /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */
+ /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */
vmSize: string;
/** The virtual machine configuration for the Pool. This property must be specified. */
virtualMachineConfiguration?: VirtualMachineConfiguration;
/** The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
resizeTimeout?: string;
- /** The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. */
- resourceTags?: Record;
/** The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */
targetDedicatedNodes?: number;
/** The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */
targetLowPriorityNodes?: number;
/** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. */
enableAutoScale?: boolean;
- /** A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). */
+ /** A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). */
autoScaleFormula?: string;
/** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
autoScaleEvaluationInterval?: string;
@@ -40,15 +38,9 @@ export interface BatchPoolCreateContent {
/** The list of user Accounts to be created on each Compute Node in the Pool. */
userAccounts?: Array;
/** A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array;
+ metadata?: Array;
/** Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. */
mountConfiguration?: Array;
- /**
- * The desired node communication mode for the pool. If omitted, the default value is Default.
- *
- * Possible values: "default", "classic", "simplified"
- */
- targetNodeCommunicationMode?: BatchNodeCommunicationMode;
/** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */
upgradePolicy?: UpgradePolicy;
}
@@ -59,12 +51,12 @@ export interface BatchPoolCreateContent {
*/
export interface VirtualMachineConfiguration {
/** A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. */
- imageReference: ImageReference;
+ imageReference: BatchVmImageReference;
/** The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. */
nodeAgentSKUId: string;
/** Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. */
windowsConfiguration?: WindowsConfiguration;
- /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */
+ /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://learn.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://learn.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */
dataDisks?: Array;
/**
* This only applies to Images that contain the Windows operating system, and
@@ -79,7 +71,7 @@ export interface VirtualMachineConfiguration {
*/
licenseType?: string;
/** The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it. */
- containerConfiguration?: ContainerConfiguration;
+ containerConfiguration?: BatchContainerConfiguration;
/** The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. */
diskEncryptionConfiguration?: DiskEncryptionConfiguration;
/** The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated. */
@@ -87,7 +79,7 @@ export interface VirtualMachineConfiguration {
/** The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node. */
extensions?: Array;
/** Settings for the operating system disk of the Virtual Machine. */
- osDisk?: OSDisk;
+ osDisk?: BatchOsDisk;
/** Specifies the security profile settings for the virtual machine or virtual machine scale set. */
securityProfile?: SecurityProfile;
/** Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using 'latest' image version. The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName} */
@@ -99,7 +91,7 @@ export interface VirtualMachineConfiguration {
* To get the list of all Azure Marketplace Image references verified by Azure Batch, see the
* ' List Supported Images ' operation.
*/
-export interface ImageReference {
+export interface BatchVmImageReference {
/** The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. */
publisher?: string;
/** The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. */
@@ -108,7 +100,7 @@ export interface ImageReference {
sku?: string;
/** The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. */
version?: string;
- /** The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
+ /** The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. */
virtualMachineImageId?: string;
/** The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. */
sharedGalleryImageId?: string;
@@ -138,6 +130,8 @@ export interface DataDisk {
caching?: CachingType;
/** The initial disk size in gigabytes. */
diskSizeGB: number;
+ /** The managed disk parameters. */
+ managedDisk?: ManagedDisk;
/**
* The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs".
*
@@ -146,8 +140,38 @@ export interface DataDisk {
storageAccountType?: StorageAccountType;
}
+/** The managed disk parameters. */
+export interface ManagedDisk {
+ /** Specifies the customer managed disk encryption set resource id for the managed disk. It can be set only in UserSubscription mode. */
+ diskEncryptionSet?: DiskEncryptionSetParameters;
+ /**
+ * The storage account type for managed disk.
+ *
+ * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs"
+ */
+ storageAccountType?: StorageAccountType;
+ /** Specifies the security profile settings for the managed disk. */
+ securityProfile?: BatchVmDiskSecurityProfile;
+}
+
+/** The ARM resource id of the disk encryption set. */
+export interface DiskEncryptionSetParameters {
+ /** The ARM resource id of the disk encryption set. The resource must be in the same subscription as the Batch account. */
+ id?: string;
+}
+
+/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. */
+export interface BatchVmDiskSecurityProfile {
+ /**
+ * Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs.
+ *
+ * Possible values: "DiskWithVMGuestState", "NonPersistedTPM", "VMGuestStateOnly"
+ */
+ securityEncryptionType?: SecurityEncryptionTypes;
+}
+
/** The configuration for container-enabled Pools. */
-export interface ContainerConfiguration {
+export interface BatchContainerConfiguration {
/**
* The container technology to be used.
*
@@ -187,10 +211,28 @@ export interface BatchNodeIdentityReference {
* Azure Compute Gallery Image.
*/
export interface DiskEncryptionConfiguration {
+ /** The Customer Managed Key reference to encrypt the OS Disk. Customer Managed Key will encrypt OS Disk by EncryptionAtRest, and by default we will encrypt the data disk as well. It can be used only when the pool is configured with an identity and OsDisk is set as one of the targets of DiskEncryption. */
+ customerManagedKey?: DiskCustomerManagedKey;
/** The list of disk targets Batch Service will encrypt on the compute node. The list of disk targets Batch Service will encrypt on the compute node. */
targets?: DiskEncryptionTarget[];
}
+/** The Customer Managed Key reference to encrypt the Disk. */
+export interface DiskCustomerManagedKey {
+ /** The reference of one of the pool identities to encrypt Disk. This identity will be used to access the KeyVault. */
+ identityReference?: BatchPoolIdentityReference;
+ /** Fully versioned Key Url pointing to a key in KeyVault. Version segment of the Url is required regardless of rotationToLatestKeyVersionEnabled value. */
+ keyUrl?: string;
+ /** Set this flag to true to enable auto-updating of the Disk Encryption to the latest key version. Default is false. */
+ rotationToLatestKeyVersionEnabled?: boolean;
+}
+
+/** The reference of one of the pool identities to encrypt Disk. This identity will be used to access the key vault. */
+export interface BatchPoolIdentityReference {
+ /** The ARM resource id of the user assigned identity. This reference must be included in the pool identities. */
+ resourceId?: string;
+}
+
/**
* For regional placement, nodes in the pool will be allocated in the same region.
* For zonal placement, nodes in the pool will be spread across different zones
@@ -200,7 +242,7 @@ export interface BatchNodePlacementConfiguration {
/**
* Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy.
*
- * Possible values: "Shared", "Startup", "VfsMounts", "Task", "JobPrep", "Applications"
+ * Possible values: "regional", "zonal"
*/
policy?: BatchNodePlacementPolicyType;
}
@@ -228,9 +270,9 @@ export interface VMExtension {
}
/** Settings for the operating system disk of the compute node (VM). */
-export interface OSDisk {
+export interface BatchOsDisk {
/** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */
- ephemeralOSDiskSettings?: DiffDiskSettings;
+ ephemeralOSDiskSettings?: BatchDiffDiskSettings;
/**
* Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage.
*
@@ -249,53 +291,55 @@ export interface OSDisk {
* Specifies the ephemeral Disk Settings for the operating system disk used by the
* compute node (VM).
*/
-export interface DiffDiskSettings {
+export interface BatchDiffDiskSettings {
/**
- * Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements.
+ * Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements.
*
* Possible values: "cachedisk"
*/
placement?: DiffDiskPlacement;
}
-/** The managed disk parameters. */
-export interface ManagedDisk {
+/** Specifies the security profile settings for the virtual machine or virtual machine scale set. */
+export interface SecurityProfile {
+ /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. */
+ encryptionAtHost?: boolean;
+ /** Specifies ProxyAgent settings while creating the virtual machine. */
+ proxyAgentSettings?: ProxyAgentSettings;
/**
- * The storage account type for managed disk.
+ * Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings.
*
- * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs"
+ * Possible values: "trustedLaunch", "confidentialVM"
*/
- storageAccountType?: StorageAccountType;
- /** Specifies the security profile settings for the managed disk. */
- securityProfile?: VMDiskSecurityProfile;
+ securityType?: SecurityTypes;
+ /** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */
+ uefiSettings?: BatchUefiSettings;
}
-/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. */
-export interface VMDiskSecurityProfile {
- /**
- * Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs.
- *
- * Possible values: "NonPersistedTPM", "VMGuestStateOnly"
- */
- securityEncryptionType?: SecurityEncryptionTypes;
+/** Specifies ProxyAgent settings while creating the virtual machine. */
+export interface ProxyAgentSettings {
+ /** Specifies whether Metadata Security Protocol feature should be enabled on the virtual machine or virtual machine scale set. Default is False. */
+ enabled?: boolean;
+ /** Settings for the IMDS endpoint. */
+ imds?: HostEndpointSettings;
+ /** Settings for the WireServer endpoint. */
+ wireServer?: HostEndpointSettings;
}
-/** Specifies the security profile settings for the virtual machine or virtual machine scale set. */
-export interface SecurityProfile {
- /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. */
- encryptionAtHost: boolean;
+/** Specifies particular host endpoint settings. */
+export interface HostEndpointSettings {
+ /** Specifies the reference to the InVMAccessControlProfileVersion resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/inVMAccessControlProfiles/{profile}/versions/{version}. */
+ inVMAccessControlProfileReferenceId?: string;
/**
- * Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings.
+ * Specifies the access control policy execution mode.
*
- * Possible values: "trustedLaunch", "confidentialVM"
+ * Possible values: "Audit", "Enforce"
*/
- securityType: SecurityTypes;
- /** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */
- uefiSettings: UefiSettings;
+ mode?: HostEndpointSettingsModeTypes;
}
/** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */
-export interface UefiSettings {
+export interface BatchUefiSettings {
/** Specifies whether secure boot should be enabled on the virtual machine. */
secureBootEnabled?: boolean;
/** Specifies whether vTPM should be enabled on the virtual machine. */
@@ -313,7 +357,7 @@ export interface ServiceArtifactReference {
/** The network configuration for a Pool. */
export interface NetworkConfiguration {
- /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. Enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
+ /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication, including ports 29876 and 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration */
subnetId?: string;
/**
* The scope of dynamic vnet assignment.
@@ -324,7 +368,7 @@ export interface NetworkConfiguration {
/** The configuration for endpoints on Compute Nodes in the Batch Pool. */
endpointConfiguration?: BatchPoolEndpointConfiguration;
/** The Public IPAddress configuration for Compute Nodes in the Batch Pool. */
- publicIPAddressConfiguration?: PublicIpAddressConfiguration;
+ publicIPAddressConfiguration?: BatchPublicIpAddressConfiguration;
/** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */
enableAcceleratedNetworking?: boolean;
}
@@ -332,14 +376,14 @@ export interface NetworkConfiguration {
/** The endpoint configuration for a Pool. */
export interface BatchPoolEndpointConfiguration {
/** A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. */
- inboundNATPools: Array<InboundNatPool>;
+ inboundNATPools: Array<BatchInboundNatPool>;
}
/**
* A inbound NAT Pool that can be used to address specific ports on Compute Nodes
* in a Batch Pool externally.
*/
-export interface InboundNatPool {
+export interface BatchInboundNatPool {
/** The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */
name: string;
/**
@@ -348,7 +392,7 @@ export interface InboundNatPool {
* Possible values: "tcp", "udp"
*/
protocol: InboundEndpointProtocol;
- /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */
+ /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */
backendPort: number;
/** The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */
frontendPortRangeStart: number;
@@ -375,15 +419,27 @@ export interface NetworkSecurityGroupRule {
}
/** The public IP Address configuration of the networking configuration of a Pool. */
-export interface PublicIpAddressConfiguration {
+export interface BatchPublicIpAddressConfiguration {
/**
* The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged.
*
* Possible values: "batchmanaged", "usermanaged", "nopublicipaddresses"
*/
provision?: IpAddressProvisioningType;
+ /** The IP families used to specify IP versions available to the pool. IP families are used to determine single-stack or dual-stack pools. For single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6. */
+ ipFamilies?: IPFamily[];
/** The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */
ipAddressIds?: string[];
+ /** A list of IP tags associated with the public IP addresses of the Pool. IP tags are used to categorize and filter public IP addresses for billing and management purposes. */
+ ipTags?: Array<IPTag>;
+}
+
+/** Contains the IP tag associated with the public IP address. */
+export interface IPTag {
+ /** The IP Tag type. Example: FirstPartyUsage. */
+ ipTagType?: string;
+ /** The value of the IP tag associated with the public IP. Example: SQL. */
+ tag?: string;
}
/**
@@ -402,7 +458,7 @@ export interface PublicIpAddressConfiguration {
* block Batch from being able to re-run the StartTask.
*/
export interface BatchStartTask {
- /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
+ /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
commandLine: string;
/** The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
containerSettings?: BatchTaskContainerSettings;
@@ -441,7 +497,7 @@ export interface ContainerHostBatchBindMountEntry {
/**
* The path which be mounted to container customer can select.
*
- * Possible values: "regional", "zonal"
+ * Possible values: "Shared", "Startup", "VfsMounts", "Task", "JobPrep", "Applications"
*/
source?: ContainerHostDataPath;
/** Mount this source path as read-only mode or not. Default value is false (read/write mode). For Linux, if you mount this path as a read/write mode, this does not mean that all users in container have the read/write access for the path, it depends on the access in host VM. If this path is mounted read-only, all users within the container will not be able to modify the path. */
@@ -485,7 +541,7 @@ export interface UserIdentity {
/** Specifies the options for the auto user that runs an Azure Batch Task. */
export interface AutoUserSpecification {
/**
- * The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks.
+ * The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks.
*
* Possible values: "task", "pool"
*/
@@ -508,6 +564,12 @@ export interface BatchApplicationPackageReference {
/** Specifies how Tasks should be distributed across Compute Nodes. */
export interface BatchTaskSchedulingPolicy {
+ /**
+ * The order for scheduling tasks from different jobs with the same priority. If not specified, the default is none.
+ *
+ * Possible values: "none", "creationtime"
+ */
+ jobDefaultOrder?: BatchJobDefaultOrder;
/**
* How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.
*
@@ -561,7 +623,7 @@ export interface WindowsUserConfiguration {
* The Batch service does not assign any meaning to this metadata; it is solely
* for the use of user code.
*/
-export interface MetadataItem {
+export interface BatchMetadataItem {
/** The name of the metadata item. */
name: string;
/** The value of the metadata item. */
@@ -626,10 +688,10 @@ export interface CifsMountConfiguration {
export interface AzureFileShareConfiguration {
/** The Azure Storage account name. */
accountName: string;
- /** The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. */
- azureFileUrl: string;
/** The Azure Storage account key. */
accountKey: string;
+ /** The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. */
+ azureFileUrl: string;
/** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */
relativeMountPath: string;
/** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */
@@ -689,10 +751,10 @@ export interface NameValuePair {
}
/** Parameters for updating an Azure Batch Pool. */
-export interface BatchPoolUpdateContent {
+export interface BatchPoolUpdateOptions {
/** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. This field can be updated only when the pool is empty. */
displayName?: string;
- /** The size of virtual machines in the Pool. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
This field can be updated only when the pool is empty. */
+ /** The size of virtual machines in the Pool. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes).
This field can be updated only when the pool is empty. */
vmSize?: string;
/** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.
This field can be updated only when the pool is empty. */
enableInterNodeCommunication?: boolean;
@@ -701,23 +763,15 @@ export interface BatchPoolUpdateContent {
/** A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. */
applicationPackageReferences?: Array<BatchApplicationPackageReference>;
/** A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. */
- metadata?: Array<MetadataItem>;
+ metadata?: Array<BatchMetadataItem>;
/** The virtual machine configuration for the Pool. This property must be specified.
This field can be updated only when the pool is empty. */
virtualMachineConfiguration?: VirtualMachineConfiguration;
- /**
- * The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged.
- *
- * Possible values: "default", "classic", "simplified"
- */
- targetNodeCommunicationMode?: BatchNodeCommunicationMode;
/** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.
This field can be updated only when the pool is empty. */
taskSlotsPerNode?: number;
/** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.
This field can be updated only when the pool is empty. */
taskSchedulingPolicy?: BatchTaskSchedulingPolicy;
/** The network configuration for the Pool. This field can be updated only when the pool is empty. */
networkConfiguration?: NetworkConfiguration;
- /** The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.
This field can be updated only when the pool is empty. */
- resourceTags?: Record<string, string>;
/** The list of user Accounts to be created on each Compute Node in the Pool. This field can be updated only when the pool is empty. */
userAccounts?: Array<UserAccount>;
/** Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system.
This field can be updated only when the pool is empty. */
@@ -727,7 +781,7 @@ export interface BatchPoolUpdateContent {
}
/** Parameters for enabling automatic scaling on an Azure Batch Pool. */
-export interface BatchPoolEnableAutoScaleContent {
+export interface BatchPoolEnableAutoScaleOptions {
/** The formula for the desired number of Compute Nodes in the Pool. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. */
autoScaleFormula?: string;
/** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. */
@@ -735,13 +789,13 @@ export interface BatchPoolEnableAutoScaleContent {
}
/** Parameters for evaluating an automatic scaling formula on an Azure Batch Pool. */
-export interface BatchPoolEvaluateAutoScaleContent {
- /** The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling). */
+export interface BatchPoolEvaluateAutoScaleOptions {
+ /** The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). */
autoScaleFormula: string;
}
/** Parameters for changing the size of an Azure Batch Pool. */
-export interface BatchPoolResizeContent {
+export interface BatchPoolResizeOptions {
/** The desired number of dedicated Compute Nodes in the Pool. */
targetDedicatedNodes?: number;
/** The desired number of Spot/Low-priority Compute Nodes in the Pool. */
@@ -757,23 +811,17 @@ export interface BatchPoolResizeContent {
}
/** Parameters for replacing properties on an Azure Batch Pool. */
-export interface BatchPoolReplaceContent {
+export interface BatchPoolReplaceOptions {
/** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. */
startTask?: BatchStartTask;
/** The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. */
applicationPackageReferences: Array<BatchApplicationPackageReference>;
/** A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. */
- metadata: Array<MetadataItem>;
- /**
- * The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default.
- *
- * Possible values: "default", "classic", "simplified"
- */
- targetNodeCommunicationMode?: BatchNodeCommunicationMode;
+ metadata: Array<BatchMetadataItem>;
}
/** Parameters for removing nodes from an Azure Batch Pool. */
-export interface BatchNodeRemoveContent {
+export interface BatchNodeRemoveOptions {
/** A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. */
nodeList: string[];
/** The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
@@ -790,9 +838,9 @@ export interface BatchNodeRemoveContent {
export interface BatchJob {
/** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */
priority?: number;
- /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+ /** Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
allowTaskPreemption?: boolean;
- /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+ /** The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
maxParallelTasks?: number;
/** The execution constraints for the Job. */
constraints?: BatchJobConstraints;
@@ -803,9 +851,9 @@ export interface BatchJob {
*
* Possible values: "noaction", "terminatejob"
*/
- onAllTasksComplete?: OnAllBatchTasksComplete;
+ onAllTasksComplete?: BatchAllTasksCompleteMode;
/** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array<MetadataItem>;
+ metadata?: Array<BatchMetadataItem>;
}
/** The execution constraints for a Job. */
@@ -846,7 +894,7 @@ export interface BatchJobManagerTask {
id: string;
/** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
displayName?: string;
- /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
+ /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
commandLine: string;
/** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
containerSettings?: BatchTaskContainerSettings;
@@ -907,12 +955,12 @@ export interface OutputFileBlobContainerDestination {
containerUrl: string;
/** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */
identityReference?: BatchNodeIdentityReference;
- /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. */
- uploadHeaders?: Array<HttpHeader>;
+ /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://learn.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. */
+ uploadHeaders?: Array<OutputFileUploadHeader>;
}
/** An HTTP header name-value pair */
-export interface HttpHeader {
+export interface OutputFileUploadHeader {
/** The case-insensitive name of the header to be used while uploading output files. */
name: string;
/** The value of the header to be used while uploading output files. */
@@ -948,7 +996,7 @@ export interface BatchTaskConstraints {
*/
export interface AuthenticationTokenSettings {
/** The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. */
- access?: AccessScope[];
+ access?: BatchAccessScope[];
}
/**
@@ -982,7 +1030,7 @@ export interface AuthenticationTokenSettings {
export interface BatchJobPreparationTask {
/** A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */
id?: string;
- /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
+ /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
commandLine: string;
/** The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
containerSettings?: BatchTaskContainerSettings;
@@ -1021,7 +1069,7 @@ export interface BatchJobPreparationTask {
export interface BatchJobReleaseTask {
/** A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */
id?: string;
- /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
+ /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
commandLine: string;
/** The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
containerSettings?: BatchTaskContainerSettings;
@@ -1068,7 +1116,7 @@ export interface BatchAutoPoolSpecification {
export interface BatchPoolSpecification {
/** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
displayName?: string;
- /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */
+ /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). */
vmSize: string;
/** The virtual machine configuration for the Pool. This property must be specified. */
virtualMachineConfiguration?: VirtualMachineConfiguration;
@@ -1078,8 +1126,6 @@ export interface BatchPoolSpecification {
taskSchedulingPolicy?: BatchTaskSchedulingPolicy;
/** The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
resizeTimeout?: string;
- /** The user-specified tags associated with the pool.The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. */
- resourceTags?: string;
/** The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */
targetDedicatedNodes?: number;
/** The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */
@@ -1101,25 +1147,19 @@ export interface BatchPoolSpecification {
/** The list of user Accounts to be created on each Compute Node in the Pool. */
userAccounts?: Array;
/** A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array<MetadataItem>;
+ metadata?: Array<BatchMetadataItem>;
/** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */
mountConfiguration?: Array;
- /**
- * The desired node communication mode for the pool. If omitted, the default value is Default.
- *
- * Possible values: "default", "classic", "simplified"
- */
- targetNodeCommunicationMode?: BatchNodeCommunicationMode;
/** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */
upgradePolicy?: UpgradePolicy;
}
-/** The network configuration for the Job. */
+/** (This property is not available by default. Please contact support for more information) The network configuration for the Job. */
export interface BatchJobNetworkConfiguration {
- /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
+ /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
subnetId: string;
/** Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. */
- skipWithdrawFromVNet: boolean;
+ skipWithdrawFromVNet?: boolean;
}
/** Contains information about the execution of a Job in the Azure Batch service. */
@@ -1143,7 +1183,7 @@ export interface BatchJobSchedulingError {
*
* Possible values: "usererror", "servererror"
*/
- category: ErrorCategory;
+ category: BatchErrorSourceCategory;
/** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */
code?: string;
/** A message describing the Job scheduling error, intended to be suitable for display in a user interface. */
@@ -1167,30 +1207,30 @@ export interface BatchJobStatistics {
/** The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */
wallClockTime: string;
/** The total number of disk read operations made by all Tasks in the Job. */
- readIOps: number;
+ readIOps: string;
/** The total number of disk write operations made by all Tasks in the Job. */
- writeIOps: number;
+ writeIOps: string;
/** The total amount of data in GiB read from disk by all Tasks in the Job. */
readIOGiB: number;
/** The total amount of data in GiB written to disk by all Tasks in the Job. */
writeIOGiB: number;
/** The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. */
- numSucceededTasks: number;
+ numSucceededTasks: string;
/** The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. */
- numFailedTasks: number;
+ numFailedTasks: string;
/** The total number of retries on all the Tasks in the Job during the given time range. */
- numTaskRetries: number;
+ numTaskRetries: string;
/** The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */
waitTime: string;
}
/** Parameters for updating an Azure Batch Job. */
-export interface BatchJobUpdateContent {
+export interface BatchJobUpdateOptions {
/** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. */
priority?: number;
- /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+ /** Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
allowTaskPreemption?: boolean;
- /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+ /** The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
maxParallelTasks?: number;
/** The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. */
constraints?: BatchJobConstraints;
@@ -1201,15 +1241,15 @@ export interface BatchJobUpdateContent {
*
* Possible values: "noaction", "terminatejob"
*/
- onAllTasksComplete?: OnAllBatchTasksComplete;
+ onAllTasksComplete?: BatchAllTasksCompleteMode;
/** A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. */
- metadata?: Array<MetadataItem>;
- /** The network configuration for the Job. */
+ metadata?: Array<BatchMetadataItem>;
+ /** (This property is not available by default. Please contact support for more information) The network configuration for the Job. */
networkConfiguration?: BatchJobNetworkConfiguration;
}
/** Parameters for disabling an Azure Batch Job. */
-export interface BatchJobDisableContent {
+export interface BatchJobDisableOptions {
/**
* What to do with active Tasks associated with the Job.
*
@@ -1219,13 +1259,13 @@ export interface BatchJobDisableContent {
}
/** Parameters for terminating an Azure Batch Job. */
-export interface BatchJobTerminateContent {
+export interface BatchJobTerminateOptions {
/** The text you want to appear as the Job's TerminationReason. The default is 'UserTerminate'. */
terminateReason?: string;
}
/** Parameters for creating an Azure Batch Job. */
-export interface BatchJobCreateContent {
+export interface BatchJobCreateOptions {
/** A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */
id: string;
/** The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
@@ -1234,9 +1274,9 @@ export interface BatchJobCreateContent {
usesTaskDependencies?: boolean;
/** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */
priority?: number;
- /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+ /** Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
allowTaskPreemption?: boolean;
- /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+ /** The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
maxParallelTasks?: number;
/** The execution constraints for the Job. */
constraints?: BatchJobConstraints;
@@ -1255,17 +1295,17 @@ export interface BatchJobCreateContent {
*
* Possible values: "noaction", "terminatejob"
*/
- onAllTasksComplete?: OnAllBatchTasksComplete;
+ onAllTasksComplete?: BatchAllTasksCompleteMode;
/**
* The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.
*
* Possible values: "noaction", "performexitoptionsjobaction"
*/
- onTaskFailure?: OnBatchTaskFailure;
- /** The network configuration for the Job. */
+ onTaskFailure?: BatchTaskFailureMode;
+ /** (This property is not available by default. Please contact support for more information) The network configuration for the Job. */
networkConfiguration?: BatchJobNetworkConfiguration;
/** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array<MetadataItem>;
+ metadata?: Array<BatchMetadataItem>;
}
/** Contains information about the container which a Task is executing. */
@@ -1285,7 +1325,7 @@ export interface BatchTaskFailureInfo {
*
* Possible values: "usererror", "servererror"
*/
- category: ErrorCategory;
+ category: BatchErrorSourceCategory;
/** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */
code?: string;
/** A message describing the Task error, intended to be suitable for display in a user interface. */
@@ -1304,7 +1344,7 @@ export interface BatchJobSchedule {
/** The details of the Jobs to be created on this schedule. */
jobSpecification: BatchJobSpecification;
/** A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array<MetadataItem>;
+ metadata?: Array<BatchMetadataItem>;
}
/**
@@ -1326,9 +1366,9 @@ export interface BatchJobScheduleConfiguration {
export interface BatchJobSpecification {
/** The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created using by using the update Job API. */
priority?: number;
- /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+ /** Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
allowTaskPreemption?: boolean;
- /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+ /** The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
maxParallelTasks?: number;
/** The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
displayName?: string;
@@ -1339,14 +1379,14 @@ export interface BatchJobSpecification {
*
* Possible values: "noaction", "terminatejob"
*/
- onAllTasksComplete?: OnAllBatchTasksComplete;
+ onAllTasksComplete?: BatchAllTasksCompleteMode;
/**
* The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.
*
* Possible values: "noaction", "performexitoptionsjobaction"
*/
- onTaskFailure?: OnBatchTaskFailure;
- /** The network configuration for the Job. */
+ onTaskFailure?: BatchTaskFailureMode;
+ /** (This property is not available by default. Please contact support for more information) The network configuration for the Job. */
networkConfiguration?: BatchJobNetworkConfiguration;
/** The execution constraints for Jobs created under this schedule. */
constraints?: BatchJobConstraints;
@@ -1361,7 +1401,7 @@ export interface BatchJobSpecification {
/** The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. */
poolInfo: BatchPoolInfo;
/** A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array<MetadataItem>;
+ metadata?: Array<BatchMetadataItem>;
}
/**
@@ -1400,35 +1440,35 @@ export interface BatchJobScheduleStatistics {
/** The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */
wallClockTime: string;
/** The total number of disk read operations made by all Tasks in all Jobs created under the schedule. */
- readIOps: number;
+ readIOps: string;
/** The total number of disk write operations made by all Tasks in all Jobs created under the schedule. */
- writeIOps: number;
+ writeIOps: string;
/** The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. */
readIOGiB: number;
/** The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. */
writeIOGiB: number;
/** The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. */
- numSucceededTasks: number;
+ numSucceededTasks: string;
/** The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. */
- numFailedTasks: number;
+ numFailedTasks: string;
/** The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. */
- numTaskRetries: number;
+ numTaskRetries: string;
/** The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */
waitTime: string;
}
/** Parameters for updating an Azure Batch Job Schedule. */
-export interface BatchJobScheduleUpdateContent {
+export interface BatchJobScheduleUpdateOptions {
/** The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. If you do not specify this element, the existing schedule is left unchanged. */
schedule?: BatchJobScheduleConfiguration;
/** The details of the Jobs to be created on this schedule. Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification. */
jobSpecification?: BatchJobSpecification;
/** A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify this element, existing metadata is left unchanged. */
- metadata?: Array<MetadataItem>;
+ metadata?: Array<BatchMetadataItem>;
}
/** Parameters for creating an Azure Batch Job Schedule */
-export interface BatchJobScheduleCreateContent {
+export interface BatchJobScheduleCreateOptions {
/** A string that uniquely identifies the schedule within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */
id: string;
/** The display name for the schedule. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
@@ -1438,18 +1478,18 @@ export interface BatchJobScheduleCreateContent {
/** The details of the Jobs to be created on this schedule. */
jobSpecification: BatchJobSpecification;
/** A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array<MetadataItem>;
+ metadata?: Array<BatchMetadataItem>;
}
/** Parameters for creating an Azure Batch Task. */
-export interface BatchTaskCreateContent {
+export interface BatchTaskCreateOptions {
/** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). */
id: string;
/** A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
displayName?: string;
/** How the Batch service should respond when the Task completes. */
exitConditions?: ExitConditions;
- /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */
+ /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
commandLine: string;
/** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
containerSettings?: BatchTaskContainerSettings;
@@ -1460,7 +1500,7 @@ export interface BatchTaskCreateContent {
/** A list of environment variable settings for the Task. */
environmentSettings?: Array;
/** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. */
- affinityInfo?: AffinityInfo;
+ affinityInfo?: BatchAffinityInfo;
/** The execution constraints that apply to this Task. If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days. */
constraints?: BatchTaskConstraints;
/** The number of scheduling slots that the Task required to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */
@@ -1509,7 +1549,7 @@ export interface ExitOptions {
*
* Possible values: "none", "disable", "terminate"
*/
- jobAction?: BatchJobAction;
+ jobAction?: BatchJobActionKind;
/**
* An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks.
*
@@ -1535,7 +1575,7 @@ export interface ExitCodeRangeMapping {
* A locality hint that can be used by the Batch service to select a Compute Node
* on which to start a Task.
*/
-export interface AffinityInfo {
+export interface BatchAffinityInfo {
/** An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */
affinityId: string;
}
@@ -1653,9 +1693,9 @@ export interface BatchTaskStatistics {
/** The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. */
wallClockTime: string;
/** The total number of disk read operations made by the Task. */
- readIOps: number;
+ readIOps: string;
/** The total number of disk write operations made by the Task. */
- writeIOps: number;
+ writeIOps: string;
/** The total gibibytes read from disk by the Task. */
readIOGiB: number;
/** The total gibibytes written to disk by the Task. */
@@ -1667,11 +1707,11 @@ export interface BatchTaskStatistics {
/** A collection of Azure Batch Tasks to add. */
export interface BatchTaskGroup {
/** The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. */
- value: Array<BatchTaskCreateContent>;
+ value: Array<BatchTaskCreateOptions>;
}
/** Parameters for creating a user account for RDP or SSH access on an Azure Batch Compute Node. */
-export interface BatchNodeUserCreateContent {
+export interface BatchNodeUserCreateOptions {
/** The user name of the Account. */
name: string;
/** Whether the Account should be an administrator on the Compute Node. The default value is false. */
@@ -1685,7 +1725,7 @@ export interface BatchNodeUserCreateContent {
}
/** Parameters for updating a user account for RDP or SSH access on an Azure Batch Compute Node. */
-export interface BatchNodeUserUpdateContent {
+export interface BatchNodeUserUpdateOptions {
/** The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. */
password?: string;
/** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */
@@ -1695,17 +1735,17 @@ export interface BatchNodeUserUpdateContent {
}
/** Parameters for rebooting an Azure Batch Compute Node. */
-export interface BatchNodeRebootContent {
+export interface BatchNodeRebootOptions {
/**
* When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue.
*
* Possible values: "requeue", "terminate", "taskcompletion", "retaineddata"
*/
- nodeRebootOption?: BatchNodeRebootOption;
+ nodeRebootOption?: BatchNodeRebootKind;
}
/** Options for deallocating a Compute Node. */
-export interface BatchNodeDeallocateContent {
+export interface BatchNodeDeallocateOptions {
/**
* When to deallocate the Compute Node and what to do with currently running Tasks. The default value is requeue.
*
@@ -1715,7 +1755,7 @@ export interface BatchNodeDeallocateContent {
}
/** Parameters for reimaging an Azure Batch Compute Node. */
-export interface BatchNodeReimageContent {
+export interface BatchNodeReimageOptions {
/**
* When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue.
*
@@ -1725,7 +1765,7 @@ export interface BatchNodeReimageContent {
}
/** Parameters for disabling scheduling on an Azure Batch Compute Node. */
-export interface BatchNodeDisableSchedulingContent {
+export interface BatchNodeDisableSchedulingOptions {
/**
* What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue.
*
@@ -1735,7 +1775,7 @@ export interface BatchNodeDisableSchedulingContent {
}
/** The Azure Batch service log files upload parameters for a Compute Node. */
-export interface UploadBatchServiceLogsContent {
+export interface UploadBatchServiceLogsOptions {
/** The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. */
containerUrl: string;
/** The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. */
@@ -1750,6 +1790,8 @@ export interface UploadBatchServiceLogsContent {
export type CachingType = string;
/** Alias for StorageAccountType */
export type StorageAccountType = string;
+/** Alias for SecurityEncryptionTypes */
+export type SecurityEncryptionTypes = string;
/** Alias for ContainerType */
export type ContainerType = string;
/** Alias for DiskEncryptionTarget */
@@ -1758,8 +1800,8 @@ export type DiskEncryptionTarget = string;
export type BatchNodePlacementPolicyType = string;
/** Alias for DiffDiskPlacement */
export type DiffDiskPlacement = string;
-/** Alias for SecurityEncryptionTypes */
-export type SecurityEncryptionTypes = string;
+/** Alias for HostEndpointSettingsModeTypes */
+export type HostEndpointSettingsModeTypes = string;
/** Alias for SecurityTypes */
export type SecurityTypes = string;
/** Alias for DynamicVNetAssignmentScope */
@@ -1770,6 +1812,8 @@ export type InboundEndpointProtocol = string;
export type NetworkSecurityGroupRuleAccess = string;
/** Alias for IpAddressProvisioningType */
export type IpAddressProvisioningType = string;
+/** Alias for IPFamily */
+export type IPFamily = string;
/** Alias for ContainerWorkingDirectory */
export type ContainerWorkingDirectory = string;
/** Alias for ContainerHostDataPath */
@@ -1778,12 +1822,12 @@ export type ContainerHostDataPath = string;
export type AutoUserScope = string;
/** Alias for ElevationLevel */
export type ElevationLevel = string;
+/** Alias for BatchJobDefaultOrder */
+export type BatchJobDefaultOrder = string;
/** Alias for BatchNodeFillType */
export type BatchNodeFillType = string;
/** Alias for LoginMode */
export type LoginMode = string;
-/** Alias for BatchNodeCommunicationMode */
-export type BatchNodeCommunicationMode = string;
/** Alias for UpgradeMode */
export type UpgradeMode = string;
/** Alias for BatchNodeDeallocationOption */
@@ -1792,30 +1836,30 @@ export type BatchNodeDeallocationOption = string;
export type BatchJobState = string;
/** Alias for OutputFileUploadCondition */
export type OutputFileUploadCondition = string;
-/** Alias for AccessScope */
-export type AccessScope = string;
+/** Alias for BatchAccessScope */
+export type BatchAccessScope = string;
/** Alias for BatchPoolLifetimeOption */
export type BatchPoolLifetimeOption = string;
-/** Alias for OnAllBatchTasksComplete */
-export type OnAllBatchTasksComplete = string;
-/** Alias for OnBatchTaskFailure */
-export type OnBatchTaskFailure = string;
-/** Alias for ErrorCategory */
-export type ErrorCategory = string;
+/** Alias for BatchAllTasksCompleteMode */
+export type BatchAllTasksCompleteMode = string;
+/** Alias for BatchTaskFailureMode */
+export type BatchTaskFailureMode = string;
+/** Alias for BatchErrorSourceCategory */
+export type BatchErrorSourceCategory = string;
/** Alias for DisableBatchJobOption */
export type DisableBatchJobOption = string;
/** Alias for BatchTaskExecutionResult */
export type BatchTaskExecutionResult = string;
/** Alias for BatchJobScheduleState */
export type BatchJobScheduleState = string;
-/** Alias for BatchJobAction */
-export type BatchJobAction = string;
+/** Alias for BatchJobActionKind */
+export type BatchJobActionKind = string;
/** Alias for DependencyAction */
export type DependencyAction = string;
/** Alias for BatchTaskState */
export type BatchTaskState = string;
-/** Alias for BatchNodeRebootOption */
-export type BatchNodeRebootOption = string;
+/** Alias for BatchNodeRebootKind */
+export type BatchNodeRebootKind = string;
/** Alias for BatchNodeDeallocateOption */
export type BatchNodeDeallocateOption = string;
/** Alias for BatchNodeReimageOption */
diff --git a/packages/service/src/internal/batch-rest/generated/src/outputModels.ts b/packages/service/src/internal/batch-rest/generated/src/outputModels.ts
index 44f19538a9..28f96475ba 100644
--- a/packages/service/src/internal/batch-rest/generated/src/outputModels.ts
+++ b/packages/service/src/internal/batch-rest/generated/src/outputModels.ts
@@ -22,7 +22,7 @@ export interface BatchApplicationOutput {
/** An error response received from the Azure Batch service. */
export interface BatchErrorOutput {
/** An identifier for the error. Codes are invariant and are intended to be consumed programmatically. */
- code: string;
+ code?: string;
/** A message describing the error, intended to be suitable for display in a user interface. */
message?: BatchErrorMessageOutput;
/** A collection of key-value pairs containing additional details about the error. */
@@ -61,7 +61,7 @@ export interface BatchPoolUsageMetricsOutput {
startTime: string;
/** The end time of the aggregation interval covered by this entry. */
endTime: string;
- /** The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */
+ /** The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). */
vmSize: string;
/** The total core hours used in the Pool during this aggregation interval. */
totalCoreHours: number;
@@ -73,12 +73,12 @@ export interface BatchPoolUsageMetricsOutput {
*/
export interface VirtualMachineConfigurationOutput {
/** A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. */
- imageReference: ImageReferenceOutput;
+ imageReference: BatchVmImageReferenceOutput;
/** The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. */
nodeAgentSKUId: string;
/** Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. */
windowsConfiguration?: WindowsConfigurationOutput;
- /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */
+ /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://learn.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://learn.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */
dataDisks?: Array<DataDiskOutput>;
/**
* This only applies to Images that contain the Windows operating system, and
@@ -93,7 +93,7 @@ export interface VirtualMachineConfigurationOutput {
*/
licenseType?: string;
/** The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it. */
- containerConfiguration?: ContainerConfigurationOutput;
+ containerConfiguration?: BatchContainerConfigurationOutput;
/** The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. */
diskEncryptionConfiguration?: DiskEncryptionConfigurationOutput;
/** The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated. */
@@ -101,7 +101,7 @@ export interface VirtualMachineConfigurationOutput {
/** The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node. */
extensions?: Array<VMExtensionOutput>;
/** Settings for the operating system disk of the Virtual Machine. */
- osDisk?: OSDiskOutput;
+ osDisk?: BatchOsDiskOutput;
/** Specifies the security profile settings for the virtual machine or virtual machine scale set. */
securityProfile?: SecurityProfileOutput;
/** Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using 'latest' image version. The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName} */
@@ -113,7 +113,7 @@ export interface VirtualMachineConfigurationOutput {
* To get the list of all Azure Marketplace Image references verified by Azure Batch, see the
* ' List Supported Images ' operation.
*/
-export interface ImageReferenceOutput {
+export interface BatchVmImageReferenceOutput {
/** The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. */
publisher?: string;
/** The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. */
@@ -122,7 +122,7 @@ export interface ImageReferenceOutput {
sku?: string;
/** The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. */
version?: string;
- /** The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
+ /** The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. */
virtualMachineImageId?: string;
/** The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. */
readonly exactVersion?: string;
@@ -154,6 +154,8 @@ export interface DataDiskOutput {
caching?: CachingTypeOutput;
/** The initial disk size in gigabytes. */
diskSizeGB: number;
+ /** The managed disk parameters. */
+ managedDisk?: ManagedDiskOutput;
/**
* The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs".
*
@@ -162,8 +164,38 @@ export interface DataDiskOutput {
storageAccountType?: StorageAccountTypeOutput;
}
+/** The managed disk parameters. */
+export interface ManagedDiskOutput {
+ /** Specifies the customer managed disk encryption set resource id for the managed disk. It can be set only in UserSubscription mode. */
+ diskEncryptionSet?: DiskEncryptionSetParametersOutput;
+ /**
+ * The storage account type for managed disk.
+ *
+ * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs"
+ */
+ storageAccountType?: StorageAccountTypeOutput;
+ /** Specifies the security profile settings for the managed disk. */
+ securityProfile?: BatchVmDiskSecurityProfileOutput;
+}
+
+/** The ARM resource id of the disk encryption set. */
+export interface DiskEncryptionSetParametersOutput {
+ /** The ARM resource id of the disk encryption set. The resource must be in the same subscription as the Batch account. */
+ id?: string;
+}
+
+/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. */
+export interface BatchVmDiskSecurityProfileOutput {
+ /**
+ * Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs.
+ *
+ * Possible values: "DiskWithVMGuestState", "NonPersistedTPM", "VMGuestStateOnly"
+ */
+ securityEncryptionType?: SecurityEncryptionTypesOutput;
+}
+
/** The configuration for container-enabled Pools. */
-export interface ContainerConfigurationOutput {
+export interface BatchContainerConfigurationOutput {
/**
* The container technology to be used.
*
@@ -203,10 +235,28 @@ export interface BatchNodeIdentityReferenceOutput {
* Azure Compute Gallery Image.
*/
export interface DiskEncryptionConfigurationOutput {
+ /** The Customer Managed Key reference to encrypt the OS Disk. Customer Managed Key will encrypt OS Disk by EncryptionAtRest, and by default we will encrypt the data disk as well. It can be used only when the pool is configured with an identity and OsDisk is set as one of the targets of DiskEncryption. */
+ customerManagedKey?: DiskCustomerManagedKeyOutput;
/** The list of disk targets Batch Service will encrypt on the compute node. The list of disk targets Batch Service will encrypt on the compute node. */
targets?: DiskEncryptionTargetOutput[];
}
+/** The Customer Managed Key reference to encrypt the Disk. */
+export interface DiskCustomerManagedKeyOutput {
+ /** The reference of one of the pool identities to encrypt Disk. This identity will be used to access the KeyVault. */
+ identityReference?: BatchPoolIdentityReferenceOutput;
+ /** Fully versioned Key Url pointing to a key in KeyVault. Version segment of the Url is required regardless of rotationToLatestKeyVersionEnabled value. */
+ keyUrl?: string;
+ /** Set this flag to true to enable auto-updating of the Disk Encryption to the latest key version. Default is false. */
+ rotationToLatestKeyVersionEnabled?: boolean;
+}
+
+/** The reference of one of the pool identities to encrypt Disk. This identity will be used to access the key vault. */
+export interface BatchPoolIdentityReferenceOutput {
+ /** The ARM resource id of the user assigned identity. This reference must be included in the pool identities. */
+ resourceId?: string;
+}
+
/**
* For regional placement, nodes in the pool will be allocated in the same region.
* For zonal placement, nodes in the pool will be spread across different zones
@@ -216,7 +266,7 @@ export interface BatchNodePlacementConfigurationOutput {
/**
* Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy.
*
- * Possible values: "Shared", "Startup", "VfsMounts", "Task", "JobPrep", "Applications"
+ * Possible values: "regional", "zonal"
*/
policy?: BatchNodePlacementPolicyTypeOutput;
}
@@ -244,9 +294,9 @@ export interface VMExtensionOutput {
}
/** Settings for the operating system disk of the compute node (VM). */
-export interface OSDiskOutput {
+export interface BatchOsDiskOutput {
/** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */
- ephemeralOSDiskSettings?: DiffDiskSettingsOutput;
+ ephemeralOSDiskSettings?: BatchDiffDiskSettingsOutput;
/**
* Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage.
*
@@ -265,53 +315,55 @@ export interface OSDiskOutput {
* Specifies the ephemeral Disk Settings for the operating system disk used by the
* compute node (VM).
*/
-export interface DiffDiskSettingsOutput {
+export interface BatchDiffDiskSettingsOutput {
/**
- * Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements.
+ * Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements.
*
* Possible values: "cachedisk"
*/
placement?: DiffDiskPlacementOutput;
}
-/** The managed disk parameters. */
-export interface ManagedDiskOutput {
+/** Specifies the security profile settings for the virtual machine or virtual machine scale set. */
+export interface SecurityProfileOutput {
+ /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. */
+ encryptionAtHost?: boolean;
+ /** Specifies ProxyAgent settings while creating the virtual machine. */
+ proxyAgentSettings?: ProxyAgentSettingsOutput;
/**
- * The storage account type for managed disk.
+ * Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings.
*
- * Possible values: "standard_lrs", "premium_lrs", "standardssd_lrs"
+ * Possible values: "trustedLaunch", "confidentialVM"
*/
- storageAccountType?: StorageAccountTypeOutput;
- /** Specifies the security profile settings for the managed disk. */
- securityProfile?: VMDiskSecurityProfileOutput;
+ securityType?: SecurityTypesOutput;
+ /** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */
+ uefiSettings?: BatchUefiSettingsOutput;
}
-/** Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. */
-export interface VMDiskSecurityProfileOutput {
- /**
- * Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs.
- *
- * Possible values: "NonPersistedTPM", "VMGuestStateOnly"
- */
- securityEncryptionType?: SecurityEncryptionTypesOutput;
+/** Specifies ProxyAgent settings while creating the virtual machine. */
+export interface ProxyAgentSettingsOutput {
+ /** Specifies whether Metadata Security Protocol feature should be enabled on the virtual machine or virtual machine scale set. Default is False. */
+ enabled?: boolean;
+ /** Settings for the IMDS endpoint. */
+ imds?: HostEndpointSettingsOutput;
+ /** Settings for the WireServer endpoint. */
+ wireServer?: HostEndpointSettingsOutput;
}
-/** Specifies the security profile settings for the virtual machine or virtual machine scale set. */
-export interface SecurityProfileOutput {
- /** This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. */
- encryptionAtHost: boolean;
+/** Specifies particular host endpoint settings. */
+export interface HostEndpointSettingsOutput {
+ /** Specifies the reference to the InVMAccessControlProfileVersion resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/inVMAccessControlProfiles/{profile}/versions/{version}. */
+ inVMAccessControlProfileReferenceId?: string;
/**
- * Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings.
+ * Specifies the access control policy execution mode.
*
- * Possible values: "trustedLaunch", "confidentialVM"
+ * Possible values: "Audit", "Enforce"
*/
- securityType: SecurityTypesOutput;
- /** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */
- uefiSettings: UefiSettingsOutput;
+ mode?: HostEndpointSettingsModeTypesOutput;
}
/** Specifies the security settings like secure boot and vTPM used while creating the virtual machine. */
-export interface UefiSettingsOutput {
+export interface BatchUefiSettingsOutput {
/** Specifies whether secure boot should be enabled on the virtual machine. */
secureBootEnabled?: boolean;
/** Specifies whether vTPM should be enabled on the virtual machine. */
@@ -329,7 +381,7 @@ export interface ServiceArtifactReferenceOutput {
/** The network configuration for a Pool. */
export interface NetworkConfigurationOutput {
- /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. Enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
+ /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication, including ports 29876 and 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration */
subnetId?: string;
/**
* The scope of dynamic vnet assignment.
@@ -340,7 +392,7 @@ export interface NetworkConfigurationOutput {
/** The configuration for endpoints on Compute Nodes in the Batch Pool. */
endpointConfiguration?: BatchPoolEndpointConfigurationOutput;
/** The Public IPAddress configuration for Compute Nodes in the Batch Pool. */
- publicIPAddressConfiguration?: PublicIpAddressConfigurationOutput;
+ publicIPAddressConfiguration?: BatchPublicIpAddressConfigurationOutput;
/** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */
enableAcceleratedNetworking?: boolean;
}
@@ -348,14 +400,14 @@ export interface NetworkConfigurationOutput {
/** The endpoint configuration for a Pool. */
export interface BatchPoolEndpointConfigurationOutput {
/** A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. */
- inboundNATPools: Array<InboundNatPoolOutput>;
+ inboundNATPools: Array<BatchInboundNatPoolOutput>;
}
/**
* A inbound NAT Pool that can be used to address specific ports on Compute Nodes
* in a Batch Pool externally.
*/
-export interface InboundNatPoolOutput {
+export interface BatchInboundNatPoolOutput {
/** The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */
name: string;
/**
@@ -364,7 +416,7 @@ export interface InboundNatPoolOutput {
* Possible values: "tcp", "udp"
*/
protocol: InboundEndpointProtocolOutput;
- /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */
+ /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */
backendPort: number;
/** The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */
frontendPortRangeStart: number;
@@ -391,15 +443,27 @@ export interface NetworkSecurityGroupRuleOutput {
}
/** The public IP Address configuration of the networking configuration of a Pool. */
-export interface PublicIpAddressConfigurationOutput {
+export interface BatchPublicIpAddressConfigurationOutput {
/**
* The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged.
*
* Possible values: "batchmanaged", "usermanaged", "nopublicipaddresses"
*/
provision?: IpAddressProvisioningTypeOutput;
+ /** The IP families used to specify IP versions available to the pool. IP families are used to determine single-stack or dual-stack pools. For single-stack, the expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6. */
+ ipFamilies?: IPFamilyOutput[];
/** The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */
ipAddressIds?: string[];
+ /** A list of IP tags associated with the public IP addresses of the Pool. IP tags are used to categorize and filter public IP addresses for billing and management purposes. */
+ ipTags?: Array<IPTagOutput>;
+}
+
+/** Contains the IP tag associated with the public IP address. */
+export interface IPTagOutput {
+ /** The IP Tag type. Example: FirstPartyUsage. */
+ ipTagType?: string;
+ /** The value of the IP tag associated with the public IP. Example: SQL. */
+ tag?: string;
}
/**
@@ -418,7 +482,7 @@ export interface PublicIpAddressConfigurationOutput {
* block Batch from being able to re-run the StartTask.
*/
export interface BatchStartTaskOutput {
- /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
+ /** The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
commandLine: string;
/** The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
containerSettings?: BatchTaskContainerSettingsOutput;
@@ -457,7 +521,7 @@ export interface ContainerHostBatchBindMountEntryOutput {
/**
* The path which be mounted to container customer can select.
*
- * Possible values: "regional", "zonal"
+ * Possible values: "Shared", "Startup", "VfsMounts", "Task", "JobPrep", "Applications"
*/
source?: ContainerHostDataPathOutput;
/** Mount this source path as read-only mode or not. Default value is false (read/write mode). For Linux, if you mount this path as a read/write mode, this does not mean that all users in container have the read/write access for the path, it depends on the access in host VM. If this path is mounted read-only, all users within the container will not be able to modify the path. */
@@ -501,7 +565,7 @@ export interface UserIdentityOutput {
/** Specifies the options for the auto user that runs an Azure Batch Task. */
export interface AutoUserSpecificationOutput {
/**
- * The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks.
+ * The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks.
*
* Possible values: "task", "pool"
*/
@@ -524,6 +588,12 @@ export interface BatchApplicationPackageReferenceOutput {
/** Specifies how Tasks should be distributed across Compute Nodes. */
export interface BatchTaskSchedulingPolicyOutput {
+ /**
+ * The order for scheduling tasks from different jobs with the same priority. If not specified, the default is none.
+ *
+ * Possible values: "none", "creationtime"
+ */
+ jobDefaultOrder?: BatchJobDefaultOrderOutput;
/**
* How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.
*
@@ -577,7 +647,7 @@ export interface WindowsUserConfigurationOutput {
* The Batch service does not assign any meaning to this metadata; it is solely
* for the use of user code.
*/
-export interface MetadataItemOutput {
+export interface BatchMetadataItemOutput {
/** The name of the metadata item. */
name: string;
/** The value of the metadata item. */
@@ -642,10 +712,10 @@ export interface CifsMountConfigurationOutput {
export interface AzureFileShareConfigurationOutput {
/** The Azure Storage account name. */
accountName: string;
- /** The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. */
- azureFileUrl: string;
/** The Azure Storage account key. */
accountKey: string;
+ /** The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. */
+ azureFileUrl: string;
/** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */
relativeMountPath: string;
/** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */
@@ -707,25 +777,25 @@ export interface BatchPoolListResultOutput {
/** A Pool in the Azure Batch service. */
export interface BatchPoolOutput {
/** A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */
- readonly id?: string;
- /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
+ readonly id: string;
+ /** The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
readonly displayName?: string;
/** The URL of the Pool. */
- readonly url?: string;
+ readonly url: string;
/** The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. */
- readonly eTag?: string;
+ readonly eTag: string;
/** The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. */
- readonly lastModified?: string;
+ readonly lastModified: string;
/** The creation time of the Pool. */
- readonly creationTime?: string;
+ readonly creationTime: string;
/**
* The current state of the Pool.
*
* Possible values: "active", "deleting"
*/
- readonly state?: BatchPoolStateOutput;
+ readonly state: BatchPoolStateOutput;
/** The time at which the Pool entered its current state. */
- readonly stateTransitionTime?: string;
+ readonly stateTransitionTime: string;
/**
* Whether the Pool is resizing.
*
@@ -734,20 +804,18 @@ export interface BatchPoolOutput {
readonly allocationState?: AllocationStateOutput;
/** The time at which the Pool entered its current allocation state. */
readonly allocationStateTransitionTime?: string;
- /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes, see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */
- readonly vmSize?: string;
+ /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). */
+ readonly vmSize: string;
/** The virtual machine configuration for the Pool. This property must be specified. */
readonly virtualMachineConfiguration?: VirtualMachineConfigurationOutput;
/** The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. */
readonly resizeTimeout?: string;
/** A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. */
readonly resizeErrors?: Array<ResizeErrorOutput>;
- /** The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. */
- readonly resourceTags?: Record<string, string>;
/** The number of dedicated Compute Nodes currently in the Pool. */
- readonly currentDedicatedNodes?: number;
+ readonly currentDedicatedNodes: number;
/** The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. */
- readonly currentLowPriorityNodes?: number;
+ readonly currentLowPriorityNodes: number;
/** The desired number of dedicated Compute Nodes in the Pool. */
readonly targetDedicatedNodes?: number;
/** The desired number of Spot/Low-priority Compute Nodes in the Pool. */
@@ -760,7 +828,7 @@ export interface BatchPoolOutput {
readonly autoScaleEvaluationInterval?: string;
/** The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */
readonly autoScaleRun?: AutoScaleRunOutput;
- /** Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes to be allocated in the Pool. */
+ /** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. */
readonly enableInterNodeCommunication?: boolean;
/** The network configuration for the Pool. */
readonly networkConfiguration?: NetworkConfigurationOutput;
@@ -775,25 +843,13 @@ export interface BatchPoolOutput {
/** The list of user Accounts to be created on each Compute Node in the Pool. */
readonly userAccounts?: Array<UserAccountOutput>;
/** A list of name-value pairs associated with the Pool as metadata. */
- readonly metadata?: Array<MetadataItemOutput>;
+ readonly metadata?: Array<BatchMetadataItemOutput>;
/** Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */
readonly stats?: BatchPoolStatisticsOutput;
- /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */
+ /** Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. */
readonly mountConfiguration?: Array<MountConfigurationOutput>;
/** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */
readonly identity?: BatchPoolIdentityOutput;
- /**
- * The desired node communication mode for the pool. If omitted, the default value is Default.
- *
- * Possible values: "default", "classic", "simplified"
- */
- targetNodeCommunicationMode?: BatchNodeCommunicationModeOutput;
- /**
- * The current state of the pool communication mode.
- *
- * Possible values: "default", "classic", "simplified"
- */
- readonly currentNodeCommunicationMode?: BatchNodeCommunicationModeOutput;
/** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */
upgradePolicy?: UpgradePolicyOutput;
}
@@ -877,9 +933,9 @@ export interface BatchPoolResourceStatisticsOutput {
/** The peak used disk space in GiB across all Compute Nodes in the Pool. */
peakDiskGiB: number;
/** The total number of disk read operations across all Compute Nodes in the Pool. */
- diskReadIOps: number;
+ diskReadIOps: string;
/** The total number of disk write operations across all Compute Nodes in the Pool. */
- diskWriteIOps: number;
+ diskWriteIOps: string;
/** The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. */
diskReadGiB: number;
/** The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. */
@@ -899,11 +955,11 @@ export interface BatchPoolIdentityOutput {
*/
type: BatchPoolIdentityTypeOutput;
/** The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */
- userAssignedIdentities?: Array<UserAssignedIdentityOutput>;
+ userAssignedIdentities?: Array<BatchUserAssignedIdentityOutput>;
}
/** The user assigned Identity */
-export interface UserAssignedIdentityOutput {
+export interface BatchUserAssignedIdentityOutput {
/** The ARM resource id of the user assigned identity. */
resourceId: string;
/** The client id of the user assigned identity. */
@@ -928,7 +984,7 @@ export interface BatchSupportedImageOutput {
/** The ID of the Compute Node agent SKU which the Image supports. */
nodeAgentSKUId: string;
/** The reference to the Azure Virtual Machine's Marketplace Image. */
- imageReference: ImageReferenceOutput;
+ imageReference: BatchVmImageReferenceOutput;
/**
* The type of operating system (e.g. Windows or Linux) of the Image.
*
@@ -1006,27 +1062,27 @@ export interface BatchNodeCountsOutput {
/** An Azure Batch Job. */
export interface BatchJobOutput {
/** A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */
- readonly id?: string;
+ readonly id: string;
/** The display name for the Job. */
readonly displayName?: string;
/** Whether Tasks in the Job can define dependencies on each other. The default is false. */
readonly usesTaskDependencies?: boolean;
/** The URL of the Job. */
- readonly url?: string;
+ readonly url: string;
/** The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can be pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime. */
- readonly eTag?: string;
+ readonly eTag: string;
/** The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state. */
- readonly lastModified?: string;
+ readonly lastModified: string;
/** The creation time of the Job. */
- readonly creationTime?: string;
+ readonly creationTime: string;
/**
* The current state of the Job.
*
* Possible values: "active", "disabling", "disabled", "enabling", "terminating", "completed", "deleting"
*/
- readonly state?: BatchJobStateOutput;
+ readonly state: BatchJobStateOutput;
/** The time at which the Job entered its current state. */
- readonly stateTransitionTime?: string;
+ readonly stateTransitionTime: string;
/**
* The previous state of the Job. This property is not set if the Job is in its initial Active state.
*
@@ -1037,9 +1093,9 @@ export interface BatchJobOutput {
readonly previousStateTransitionTime?: string;
/** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */
priority?: number;
- /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+ /** Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
allowTaskPreemption?: boolean;
- /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+ /** The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
maxParallelTasks?: number;
/** The execution constraints for the Job. */
constraints?: BatchJobConstraintsOutput;
@@ -1058,17 +1114,17 @@ export interface BatchJobOutput {
*
* Possible values: "noaction", "terminatejob"
*/
- onAllTasksComplete?: OnAllBatchTasksCompleteOutput;
+ onAllTasksComplete?: BatchAllTasksCompleteModeOutput;
/**
* The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.
*
* Possible values: "noaction", "performexitoptionsjobaction"
*/
- readonly onTaskFailure?: OnBatchTaskFailureOutput;
- /** The network configuration for the Job. */
+ readonly onTaskFailure?: BatchTaskFailureModeOutput;
+ /** (This property is not available by default. Please contact support for more information) The network configuration for the Job. */
readonly networkConfiguration?: BatchJobNetworkConfigurationOutput;
/** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array<MetadataItemOutput>;
+ metadata?: Array<BatchMetadataItemOutput>;
/** The execution information for the Job. */
readonly executionInfo?: BatchJobExecutionInfoOutput;
/** Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */
@@ -1113,7 +1169,7 @@ export interface BatchJobManagerTaskOutput {
id: string;
/** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
displayName?: string;
- /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
+ /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
commandLine: string;
/** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
containerSettings?: BatchTaskContainerSettingsOutput;
@@ -1174,12 +1230,12 @@ export interface OutputFileBlobContainerDestinationOutput {
containerUrl: string;
/** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */
identityReference?: BatchNodeIdentityReferenceOutput;
- /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. */
- uploadHeaders?: Array<HttpHeaderOutput>;
+ /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://learn.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. */
+ uploadHeaders?: Array<OutputFileUploadHeaderOutput>;
}
/** An HTTP header name-value pair */
-export interface HttpHeaderOutput {
+export interface OutputFileUploadHeaderOutput {
/** The case-insensitive name of the header to be used while uploading output files. */
name: string;
/** The value of the header to be used while uploading output files. */
@@ -1215,7 +1271,7 @@ export interface BatchTaskConstraintsOutput {
*/
export interface AuthenticationTokenSettingsOutput {
/** The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. */
- access?: AccessScopeOutput[];
+ access?: BatchAccessScopeOutput[];
}
/**
@@ -1249,7 +1305,7 @@ export interface AuthenticationTokenSettingsOutput {
export interface BatchJobPreparationTaskOutput {
/** A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */
id?: string;
- /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
+ /** The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
commandLine: string;
/** The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
containerSettings?: BatchTaskContainerSettingsOutput;
@@ -1288,7 +1344,7 @@ export interface BatchJobPreparationTaskOutput {
export interface BatchJobReleaseTaskOutput {
/** A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). */
id?: string;
- /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
+ /** The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
commandLine: string;
/** The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
containerSettings?: BatchTaskContainerSettingsOutput;
@@ -1335,7 +1391,7 @@ export interface BatchAutoPoolSpecificationOutput {
export interface BatchPoolSpecificationOutput {
/** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
displayName?: string;
- /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */
+ /** The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). */
vmSize: string;
/** The virtual machine configuration for the Pool. This property must be specified. */
virtualMachineConfiguration?: VirtualMachineConfigurationOutput;
@@ -1345,8 +1401,6 @@ export interface BatchPoolSpecificationOutput {
taskSchedulingPolicy?: BatchTaskSchedulingPolicyOutput;
/** The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
resizeTimeout?: string;
- /** The user-specified tags associated with the pool.The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. */
- resourceTags?: string;
/** The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */
targetDedicatedNodes?: number;
/** The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */
@@ -1368,25 +1422,19 @@ export interface BatchPoolSpecificationOutput {
/** The list of user Accounts to be created on each Compute Node in the Pool. */
userAccounts?: Array<UserAccountOutput>;
/** A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array<MetadataItemOutput>;
+ metadata?: Array<BatchMetadataItemOutput>;
/** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */
mountConfiguration?: Array<MountConfigurationOutput>;
- /**
- * The desired node communication mode for the pool. If omitted, the default value is Default.
- *
- * Possible values: "default", "classic", "simplified"
- */
- targetNodeCommunicationMode?: BatchNodeCommunicationModeOutput;
/** The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. */
upgradePolicy?: UpgradePolicyOutput;
}
-/** The network configuration for the Job. */
+/** (This property is not available by default. Please contact support for more information) The network configuration for the Job. */
export interface BatchJobNetworkConfigurationOutput {
- /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
+ /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
subnetId: string;
/** Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. */
- skipWithdrawFromVNet: boolean;
+ skipWithdrawFromVNet?: boolean;
}
/** Contains information about the execution of a Job in the Azure Batch service. */
@@ -1410,7 +1458,7 @@ export interface BatchJobSchedulingErrorOutput {
*
* Possible values: "usererror", "servererror"
*/
- category: ErrorCategoryOutput;
+ category: BatchErrorSourceCategoryOutput;
/** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */
code?: string;
/** A message describing the Job scheduling error, intended to be suitable for display in a user interface. */
@@ -1434,19 +1482,19 @@ export interface BatchJobStatisticsOutput {
/** The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */
wallClockTime: string;
/** The total number of disk read operations made by all Tasks in the Job. */
- readIOps: number;
+ readIOps: string;
/** The total number of disk write operations made by all Tasks in the Job. */
- writeIOps: number;
+ writeIOps: string;
/** The total amount of data in GiB read from disk by all Tasks in the Job. */
readIOGiB: number;
/** The total amount of data in GiB written to disk by all Tasks in the Job. */
writeIOGiB: number;
/** The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. */
- numSucceededTasks: number;
+ numSucceededTasks: string;
/** The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. */
- numFailedTasks: number;
+ numFailedTasks: string;
/** The total number of retries on all the Tasks in the Job during the given time range. */
- numTaskRetries: number;
+ numTaskRetries: string;
/** The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */
waitTime: string;
}
@@ -1538,7 +1586,7 @@ export interface BatchTaskFailureInfoOutput {
*
* Possible values: "usererror", "servererror"
*/
- category: ErrorCategoryOutput;
+ category: BatchErrorSourceCategoryOutput;
/** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */
code?: string;
/** A message describing the Task error, intended to be suitable for display in a user interface. */
@@ -1622,25 +1670,25 @@ export interface BatchTaskSlotCountsOutput {
*/
export interface BatchJobScheduleOutput {
/** A string that uniquely identifies the schedule within the Account. */
- readonly id?: string;
+ readonly id: string;
/** The display name for the schedule. */
readonly displayName?: string;
/** The URL of the Job Schedule. */
- readonly url?: string;
+ readonly url: string;
/** The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can be pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime. */
- readonly eTag?: string;
+ readonly eTag: string;
/** The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state. */
- readonly lastModified?: string;
+ readonly lastModified: string;
/** The creation time of the Job Schedule. */
- readonly creationTime?: string;
+ readonly creationTime: string;
/**
* The current state of the Job Schedule.
*
* Possible values: "active", "completed", "disabled", "terminating", "deleting"
*/
- readonly state?: BatchJobScheduleStateOutput;
+ readonly state: BatchJobScheduleStateOutput;
/** The time at which the Job Schedule entered the current state. */
- readonly stateTransitionTime?: string;
+ readonly stateTransitionTime: string;
/**
* The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state.
*
@@ -1654,9 +1702,9 @@ export interface BatchJobScheduleOutput {
/** The details of the Jobs to be created on this schedule. */
jobSpecification: BatchJobSpecificationOutput;
/** Information about Jobs that have been and will be run under this schedule. */
- readonly executionInfo?: BatchJobScheduleExecutionInfoOutput;
+ readonly executionInfo: BatchJobScheduleExecutionInfoOutput;
/** A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array<MetadataItemOutput>;
+ metadata?: Array<BatchMetadataItemOutput>;
/** The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */
readonly stats?: BatchJobScheduleStatisticsOutput;
}
@@ -1680,9 +1728,9 @@ export interface BatchJobScheduleConfigurationOutput {
export interface BatchJobSpecificationOutput {
/** The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created using by using the update Job API. */
priority?: number;
- /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+ /** Whether Tasks in this job can be preempted by other high priority jobs. (This property is not available by default. Please contact support for more information) If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
allowTaskPreemption?: boolean;
- /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+ /** The maximum number of tasks that can be executed in parallel for the job. (This property is not available by default. Please contact support for more information) The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
maxParallelTasks?: number;
/** The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
displayName?: string;
@@ -1693,14 +1741,14 @@ export interface BatchJobSpecificationOutput {
*
* Possible values: "noaction", "terminatejob"
*/
- onAllTasksComplete?: OnAllBatchTasksCompleteOutput;
+ onAllTasksComplete?: BatchAllTasksCompleteModeOutput;
/**
* The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.
*
* Possible values: "noaction", "performexitoptionsjobaction"
*/
- onTaskFailure?: OnBatchTaskFailureOutput;
- /** The network configuration for the Job. */
+ onTaskFailure?: BatchTaskFailureModeOutput;
+ /** (This property is not available by default. Please contact support for more information) The network configuration for the Job. */
networkConfiguration?: BatchJobNetworkConfigurationOutput;
/** The execution constraints for Jobs created under this schedule. */
constraints?: BatchJobConstraintsOutput;
@@ -1715,7 +1763,7 @@ export interface BatchJobSpecificationOutput {
/** The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. */
poolInfo: BatchPoolInfoOutput;
/** A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
- metadata?: Array<MetadataItemOutput>;
+ metadata?: Array<BatchMetadataItemOutput>;
}
/**
@@ -1754,19 +1802,19 @@ export interface BatchJobScheduleStatisticsOutput {
/** The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */
wallClockTime: string;
/** The total number of disk read operations made by all Tasks in all Jobs created under the schedule. */
- readIOps: number;
+ readIOps: string;
/** The total number of disk write operations made by all Tasks in all Jobs created under the schedule. */
- writeIOps: number;
+ writeIOps: string;
/** The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. */
readIOGiB: number;
/** The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. */
writeIOGiB: number;
/** The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. */
- numSucceededTasks: number;
+ numSucceededTasks: string;
/** The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. */
- numFailedTasks: number;
+ numFailedTasks: string;
/** The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. */
- numTaskRetries: number;
+ numTaskRetries: string;
/** The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */
waitTime: string;
}
@@ -1811,7 +1859,7 @@ export interface ExitOptionsOutput {
*
* Possible values: "none", "disable", "terminate"
*/
- jobAction?: BatchJobActionOutput;
+ jobAction?: BatchJobActionKindOutput;
/**
* An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks.
*
@@ -1837,7 +1885,7 @@ export interface ExitCodeRangeMappingOutput {
* A locality hint that can be used by the Batch service to select a Compute Node
* on which to start a Task.
*/
-export interface AffinityInfoOutput {
+export interface BatchAffinityInfoOutput {
/** An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */
affinityId: string;
}
@@ -1901,17 +1949,17 @@ export interface BatchTaskListResultOutput {
*/
export interface BatchTaskOutput {
/** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. */
- readonly id?: string;
+ readonly id: string;
/** A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
readonly displayName?: string;
/** The URL of the Task. */
- readonly url?: string;
+ readonly url: string;
/** The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can be pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. */
- readonly eTag?: string;
+ readonly eTag: string;
/** The last modified time of the Task. */
- readonly lastModified?: string;
+ readonly lastModified: string;
/** The creation time of the Task. */
- readonly creationTime?: string;
+ readonly creationTime: string;
/** How the Batch service should respond when the Task completes. */
readonly exitConditions?: ExitConditionsOutput;
/**
@@ -1919,9 +1967,9 @@ export interface BatchTaskOutput {
*
* Possible values: "active", "preparing", "running", "completed"
*/
- readonly state?: BatchTaskStateOutput;
+ readonly state: BatchTaskStateOutput;
/** The time at which the Task entered its current state. */
- readonly stateTransitionTime?: string;
+ readonly stateTransitionTime: string;
/**
* The previous state of the Task. This property is not set if the Task is in its initial Active state.
*
@@ -1930,8 +1978,8 @@ export interface BatchTaskOutput {
readonly previousState?: BatchTaskStateOutput;
/** The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. */
readonly previousStateTransitionTime?: string;
- /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
- readonly commandLine?: string;
+ /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). */
+ readonly commandLine: string;
/** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
readonly containerSettings?: BatchTaskContainerSettingsOutput;
/** A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */
@@ -1941,7 +1989,7 @@ export interface BatchTaskOutput {
/** A list of environment variable settings for the Task. */
readonly environmentSettings?: Array;
/** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. */
- readonly affinityInfo?: AffinityInfoOutput;
+ readonly affinityInfo?: BatchAffinityInfoOutput;
/** The execution constraints that apply to this Task. */
constraints?: BatchTaskConstraintsOutput;
/** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */
@@ -2023,9 +2071,9 @@ export interface BatchTaskStatisticsOutput {
/** The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. */
wallClockTime: string;
/** The total number of disk read operations made by the Task. */
- readIOps: number;
+ readIOps: string;
/** The total number of disk write operations made by the Task. */
- writeIOps: number;
+ writeIOps: string;
/** The total gibibytes read from disk by the Task. */
readIOGiB: number;
/** The total gibibytes written to disk by the Task. */
@@ -2034,14 +2082,14 @@ export interface BatchTaskStatisticsOutput {
waitTime: string;
}
-/** The result of adding a collection of Tasks to a Job. */
-export interface BatchTaskAddCollectionResultOutput {
- /** The results of the add Task collection operation. */
- value?: Array;
+/** The result of creating a collection of Tasks for a Job. */
+export interface BatchCreateTaskCollectionResultOutput {
+ /** The results of the create Task collection operation. */
+ value?: Array;
}
-/** Result for a single Task added as part of an add Task collection operation. */
-export interface BatchTaskAddResultOutput {
+/** Result for a single Task created as part of a create Task collection operation. */
+export interface BatchTaskCreateResultOutput {
/**
* The status of the add Task request.
*
@@ -2148,57 +2196,59 @@ export interface FilePropertiesOutput {
/** A Compute Node in the Batch service. */
export interface BatchNodeOutput {
/** The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. */
- id?: string;
+ readonly id: string;
/** The URL of the Compute Node. */
- url?: string;
+ readonly url: string;
/**
* The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available.
*
* Possible values: "idle", "rebooting", "reimaging", "running", "unusable", "creating", "starting", "waitingforstarttask", "starttaskfailed", "unknown", "leavingpool", "offline", "preempted", "upgradingos", "deallocated", "deallocating"
*/
- state?: BatchNodeStateOutput;
+ readonly state: BatchNodeStateOutput;
/**
* Whether the Compute Node is available for Task scheduling.
*
* Possible values: "enabled", "disabled"
*/
- schedulingState?: SchedulingStateOutput;
+ readonly schedulingState?: SchedulingStateOutput;
/** The time at which the Compute Node entered its current state. */
- stateTransitionTime?: string;
+ readonly stateTransitionTime: string;
/** The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable. */
- lastBootTime?: string;
+ readonly lastBootTime: string;
/** The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. */
- allocationTime?: string;
+ readonly allocationTime: string;
/** The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. */
- ipAddress?: string;
+ readonly ipAddress: string;
+ /** The IPv6 address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. This property will not be present if the Pool is not configured for IPv6. */
+ readonly ipv6Address: string;
/** An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */
- affinityId?: string;
- /** The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */
- vmSize?: string;
+ readonly affinityId: string;
+ /** The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). */
+ readonly vmSize: string;
/** The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */
- totalTasksRun?: number;
+ readonly totalTasksRun: number;
/** The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */
- runningTasksCount?: number;
+ readonly runningTasksCount?: number;
/** The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */
- runningTaskSlotsCount?: number;
+ readonly runningTaskSlotsCount?: number;
/** The total number of Job Tasks which completed successfully (with exitCode 0) on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */
- totalTasksSucceeded?: number;
+ readonly totalTasksSucceeded?: number;
/** A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. */
- recentTasks?: Array;
+ readonly recentTasks?: Array;
/** The Task specified to run on the Compute Node as it joins the Pool. */
- startTask?: BatchStartTaskOutput;
+ readonly startTask?: BatchStartTaskOutput;
/** Runtime information about the execution of the StartTask on the Compute Node. */
- startTaskInfo?: BatchStartTaskInfoOutput;
+ readonly startTaskInfo?: BatchStartTaskInfoOutput;
/** The list of errors that are currently being encountered by the Compute Node. */
- errors?: Array;
+ readonly errors?: Array;
/** Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. */
- isDedicated?: boolean;
+ readonly isDedicated?: boolean;
/** The endpoint configuration for the Compute Node. */
- endpointConfiguration?: BatchNodeEndpointConfigurationOutput;
+ readonly endpointConfiguration?: BatchNodeEndpointConfigurationOutput;
/** Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. */
- nodeAgentInfo?: BatchNodeAgentInfoOutput;
+ readonly nodeAgentInfo: BatchNodeAgentInfoOutput;
/** Info about the current state of the virtual machine. */
- virtualMachineInfo?: VirtualMachineInfoOutput;
+ readonly virtualMachineInfo: VirtualMachineInfoOutput;
}
/** Information about a Task running on a Compute Node. */
@@ -2301,13 +2351,17 @@ export interface BatchNodeAgentInfoOutput {
/** Info about the current state of the virtual machine. */
export interface VirtualMachineInfoOutput {
/** The reference to the Azure Virtual Machine's Marketplace Image. */
- imageReference?: ImageReferenceOutput;
+ imageReference?: BatchVmImageReferenceOutput;
/** The resource ID of the Compute Node's current Virtual Machine Scale Set VM. Only defined if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */
scaleSetVmResourceId?: string;
}
/** The remote login settings for a Compute Node. */
export interface BatchNodeRemoteLoginSettingsOutput {
+ /** The IPv6 address used for remote login to the Compute Node. */
+ ipv6RemoteLoginIPAddress?: string;
+ /** The port used for remote login to the Compute Node over IPv6. */
+ ipv6RemoteLoginPort?: number;
/** The IP address used for remote login to the Compute Node. */
remoteLoginIPAddress: string;
/** The port used for remote login to the Compute Node. */
@@ -2380,6 +2434,8 @@ export interface BatchNodeVMExtensionListResultOutput {
export type CachingTypeOutput = string;
/** Alias for StorageAccountTypeOutput */
export type StorageAccountTypeOutput = string;
+/** Alias for SecurityEncryptionTypesOutput */
+export type SecurityEncryptionTypesOutput = string;
/** Alias for ContainerTypeOutput */
export type ContainerTypeOutput = string;
/** Alias for DiskEncryptionTargetOutput */
@@ -2388,8 +2444,8 @@ export type DiskEncryptionTargetOutput = string;
export type BatchNodePlacementPolicyTypeOutput = string;
/** Alias for DiffDiskPlacementOutput */
export type DiffDiskPlacementOutput = string;
-/** Alias for SecurityEncryptionTypesOutput */
-export type SecurityEncryptionTypesOutput = string;
+/** Alias for HostEndpointSettingsModeTypesOutput */
+export type HostEndpointSettingsModeTypesOutput = string;
/** Alias for SecurityTypesOutput */
export type SecurityTypesOutput = string;
/** Alias for DynamicVNetAssignmentScopeOutput */
@@ -2400,6 +2456,8 @@ export type InboundEndpointProtocolOutput = string;
export type NetworkSecurityGroupRuleAccessOutput = string;
/** Alias for IpAddressProvisioningTypeOutput */
export type IpAddressProvisioningTypeOutput = string;
+/** Alias for IPFamilyOutput */
+export type IPFamilyOutput = string;
/** Alias for ContainerWorkingDirectoryOutput */
export type ContainerWorkingDirectoryOutput = string;
/** Alias for ContainerHostDataPathOutput */
@@ -2408,12 +2466,12 @@ export type ContainerHostDataPathOutput = string;
export type AutoUserScopeOutput = string;
/** Alias for ElevationLevelOutput */
export type ElevationLevelOutput = string;
+/** Alias for BatchJobDefaultOrderOutput */
+export type BatchJobDefaultOrderOutput = string;
/** Alias for BatchNodeFillTypeOutput */
export type BatchNodeFillTypeOutput = string;
/** Alias for LoginModeOutput */
export type LoginModeOutput = string;
-/** Alias for BatchNodeCommunicationModeOutput */
-export type BatchNodeCommunicationModeOutput = string;
/** Alias for UpgradeModeOutput */
export type UpgradeModeOutput = string;
/** Alias for BatchPoolStateOutput */
@@ -2430,16 +2488,16 @@ export type ImageVerificationTypeOutput = string;
export type BatchJobStateOutput = string;
/** Alias for OutputFileUploadConditionOutput */
export type OutputFileUploadConditionOutput = string;
-/** Alias for AccessScopeOutput */
-export type AccessScopeOutput = string;
+/** Alias for BatchAccessScopeOutput */
+export type BatchAccessScopeOutput = string;
/** Alias for BatchPoolLifetimeOptionOutput */
export type BatchPoolLifetimeOptionOutput = string;
-/** Alias for OnAllBatchTasksCompleteOutput */
-export type OnAllBatchTasksCompleteOutput = string;
-/** Alias for OnBatchTaskFailureOutput */
-export type OnBatchTaskFailureOutput = string;
-/** Alias for ErrorCategoryOutput */
-export type ErrorCategoryOutput = string;
+/** Alias for BatchAllTasksCompleteModeOutput */
+export type BatchAllTasksCompleteModeOutput = string;
+/** Alias for BatchTaskFailureModeOutput */
+export type BatchTaskFailureModeOutput = string;
+/** Alias for BatchErrorSourceCategoryOutput */
+export type BatchErrorSourceCategoryOutput = string;
/** Alias for BatchJobPreparationTaskStateOutput */
export type BatchJobPreparationTaskStateOutput = string;
/** Alias for BatchTaskExecutionResultOutput */
@@ -2448,8 +2506,8 @@ export type BatchTaskExecutionResultOutput = string;
export type BatchJobReleaseTaskStateOutput = string;
/** Alias for BatchJobScheduleStateOutput */
export type BatchJobScheduleStateOutput = string;
-/** Alias for BatchJobActionOutput */
-export type BatchJobActionOutput = string;
+/** Alias for BatchJobActionKindOutput */
+export type BatchJobActionKindOutput = string;
/** Alias for DependencyActionOutput */
export type DependencyActionOutput = string;
/** Alias for BatchTaskStateOutput */
diff --git a/packages/service/src/internal/batch-rest/generated/src/paginateHelper.ts b/packages/service/src/internal/batch-rest/generated/src/paginateHelper.ts
index b2a24801c3..027227a8ad 100644
--- a/packages/service/src/internal/batch-rest/generated/src/paginateHelper.ts
+++ b/packages/service/src/internal/batch-rest/generated/src/paginateHelper.ts
@@ -1,16 +1,159 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-import {
- getPagedAsyncIterator,
- PagedAsyncIterableIterator,
- PagedResult,
-} from "@azure/core-paging";
-import {
- Client,
- createRestError,
- PathUncheckedResponse,
-} from "@azure-rest/core-client";
+import type { Client, PathUncheckedResponse } from "@azure-rest/core-client";
+import { createRestError } from "@azure-rest/core-client";
+
+/**
+ * returns an async iterator that iterates over results. It also has a `byPage`
+ * method that returns pages of items at once.
+ *
+ * @param pagedResult - an object that specifies how to get pages.
+ * @returns a paged async iterator that iterates over results.
+ */
+function getPagedAsyncIterator<
+ TElement,
+ TPage = TElement[],
+ TPageSettings = PageSettings,
+ TLink = string,
+>(
+ pagedResult: PagedResult,
+): PagedAsyncIterableIterator {
+ const iter = getItemAsyncIterator(
+ pagedResult,
+ );
+ return {
+ next() {
+ return iter.next();
+ },
+ [Symbol.asyncIterator]() {
+ return this;
+ },
+ byPage:
+ pagedResult?.byPage ??
+ (((settings?: PageSettings) => {
+ const { continuationToken } = settings ?? {};
+ return getPageAsyncIterator(pagedResult, {
+ pageLink: continuationToken as unknown as TLink | undefined,
+ });
+ }) as unknown as (
+ settings?: TPageSettings,
+ ) => AsyncIterableIterator),
+ };
+}
+
+async function* getItemAsyncIterator(
+ pagedResult: PagedResult,
+): AsyncIterableIterator {
+ const pages = getPageAsyncIterator(pagedResult);
+ const firstVal = await pages.next();
+ // if the result does not have an array shape, i.e. TPage = TElement, then we return it as is
+ if (!Array.isArray(firstVal.value)) {
+ // can extract elements from this page
+ const { toElements } = pagedResult;
+ if (toElements) {
+ yield* toElements(firstVal.value) as TElement[];
+ for await (const page of pages) {
+ yield* toElements(page) as TElement[];
+ }
+ } else {
+ yield firstVal.value;
+ // `pages` is of type `AsyncIterableIterator` but TPage = TElement in this case
+ yield* pages as unknown as AsyncIterableIterator;
+ }
+ } else {
+ yield* firstVal.value;
+ for await (const page of pages) {
+ // pages is of type `AsyncIterableIterator` so `page` is of type `TPage`. In this branch,
+ // it must be the case that `TPage = TElement[]`
+ yield* page as unknown as TElement[];
+ }
+ }
+}
+
+async function* getPageAsyncIterator(
+ pagedResult: PagedResult,
+ options: {
+ pageLink?: TLink;
+ } = {},
+): AsyncIterableIterator {
+ const { pageLink } = options;
+ let response = await pagedResult.getPage(
+ pageLink ?? pagedResult.firstPageLink,
+ );
+ if (!response) {
+ return;
+ }
+ yield response.page;
+ while (response.nextPageLink) {
+ response = await pagedResult.getPage(response.nextPageLink);
+ if (!response) {
+ return;
+ }
+ yield response.page;
+ }
+}
+
+/**
+ * An interface that tracks the settings for paged iteration
+ */
+export interface PageSettings {
+ /**
+ * The token that keeps track of where to continue the iterator
+ */
+ continuationToken?: string;
+}
+
+/**
+ * An interface that allows async iterable iteration both to completion and by page.
+ */
+export interface PagedAsyncIterableIterator<
+ TElement,
+ TPage = TElement[],
+ TPageSettings = PageSettings,
+> {
+ /**
+ * The next method, part of the iteration protocol
+ */
+ next(): Promise>;
+ /**
+ * The connection to the async iterator, part of the iteration protocol
+ */
+ [Symbol.asyncIterator](): PagedAsyncIterableIterator<
+ TElement,
+ TPage,
+ TPageSettings
+ >;
+ /**
+ * Return an AsyncIterableIterator that works a page at a time
+ */
+ byPage: (settings?: TPageSettings) => AsyncIterableIterator;
+}
+
+/**
+ * An interface that describes how to communicate with the service.
+ */
+interface PagedResult {
+ /**
+ * Link to the first page of results.
+ */
+ firstPageLink: TLink;
+ /**
+ * A method that returns a page of results.
+ */
+ getPage: (
+ pageLink: TLink,
+ ) => Promise<{ page: TPage; nextPageLink?: TLink } | undefined>;
+ /**
+ * a function to implement the `byPage` method on the paged async iterator.
+ */
+ byPage?: (settings?: TPageSettings) => AsyncIterableIterator;
+
+ /**
+ * A function to extract elements from a page.
+ */
+ toElements?: (page: TPage) => unknown[];
+}
/**
* Helper type to extract the type of an array
@@ -20,10 +163,7 @@ export type GetArrayType = T extends Array ? TData : never;
/**
* The type of a custom function that defines how to get a page and a link to the next one if any.
*/
-export type GetPage = (
- pageLink: string,
- maxPageSize?: number,
-) => Promise<{
+export type GetPage = (pageLink: string) => Promise<{
page: TPage;
nextPageLink?: string;
}>;
diff --git a/packages/service/src/internal/batch-rest/generated/src/parameters.ts b/packages/service/src/internal/batch-rest/generated/src/parameters.ts
index 3567b1bf5c..e5a0331fda 100644
--- a/packages/service/src/internal/batch-rest/generated/src/parameters.ts
+++ b/packages/service/src/internal/batch-rest/generated/src/parameters.ts
@@ -1,34 +1,34 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-import { RawHttpHeadersInput } from "@azure/core-rest-pipeline";
-import { RequestParameters } from "@azure-rest/core-client";
-import {
- BatchPoolCreateContent,
- BatchPoolUpdateContent,
- BatchPoolEnableAutoScaleContent,
- BatchPoolEvaluateAutoScaleContent,
- BatchPoolResizeContent,
- BatchPoolReplaceContent,
- BatchNodeRemoveContent,
- BatchJobUpdateContent,
+import type { RawHttpHeadersInput } from "@azure/core-rest-pipeline";
+import type { RequestParameters } from "@azure-rest/core-client";
+import type {
+ BatchPoolCreateOptions,
+ BatchPoolUpdateOptions,
+ BatchPoolEnableAutoScaleOptions,
+ BatchPoolEvaluateAutoScaleOptions,
+ BatchPoolResizeOptions,
+ BatchPoolReplaceOptions,
+ BatchNodeRemoveOptions,
+ BatchJobUpdateOptions,
BatchJob,
- BatchJobDisableContent,
- BatchJobTerminateContent,
- BatchJobCreateContent,
- BatchJobScheduleUpdateContent,
+ BatchJobDisableOptions,
+ BatchJobTerminateOptions,
+ BatchJobCreateOptions,
+ BatchJobScheduleUpdateOptions,
BatchJobSchedule,
- BatchJobScheduleCreateContent,
- BatchTaskCreateContent,
+ BatchJobScheduleCreateOptions,
+ BatchTaskCreateOptions,
BatchTaskGroup,
BatchTask,
- BatchNodeUserCreateContent,
- BatchNodeUserUpdateContent,
- BatchNodeRebootContent,
- BatchNodeDeallocateContent,
- BatchNodeReimageContent,
- BatchNodeDisableSchedulingContent,
- UploadBatchServiceLogsContent,
+ BatchNodeUserCreateOptions,
+ BatchNodeUserUpdateOptions,
+ BatchNodeRebootOptions,
+ BatchNodeDeallocateOptions,
+ BatchNodeReimageOptions,
+ BatchNodeDisableSchedulingOptions,
+ UploadBatchServiceLogsOptions,
} from "./models.js";
export interface ListApplicationsHeaders {
@@ -140,7 +140,7 @@ export interface ListPoolUsageMetricsQueryParamProperties {
endtime?: Date | string;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics.
*/
$filter?: string;
}
@@ -175,7 +175,7 @@ export interface CreatePoolHeaders {
export interface CreatePoolBodyParam {
/** The Pool to be created. */
- body: BatchPoolCreateContent;
+ body: BatchPoolCreateOptions;
}
export interface CreatePoolQueryParamProperties {
@@ -218,6 +218,26 @@ export interface ListPoolsHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface ListPoolsSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
+/** This is the wrapper object for the parameter `$expand` with explode set to false and style set to form. */
+export interface ListPoolsExpandQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface ListPoolsQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
@@ -228,13 +248,13 @@ export interface ListPoolsQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-pools.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-pools.
*/
$filter?: string;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | ListPoolsSelectQueryParam;
/** An OData $expand clause. */
- $expand?: string[];
+ $expand?: string[] | ListPoolsExpandQueryParam;
}
export interface ListPoolsQueryParam {
@@ -403,13 +423,33 @@ export interface GetPoolHeaders {
"If-None-Match"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface GetPoolSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
+/** This is the wrapper object for the parameter `$expand` with explode set to false and style set to form. */
+export interface GetPoolExpandQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface GetPoolQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | GetPoolSelectQueryParam;
/** An OData $expand clause. */
- $expand?: string[];
+ $expand?: string[] | GetPoolExpandQueryParam;
}
export interface GetPoolQueryParam {
@@ -466,7 +506,7 @@ export interface UpdatePoolHeaders {
export interface UpdatePoolBodyParam {
/** The pool properties to update. */
- body: BatchPoolUpdateContent;
+ body: BatchPoolUpdateOptions;
}
export interface UpdatePoolQueryParamProperties {
@@ -568,7 +608,7 @@ export interface EnablePoolAutoScaleHeaders {
export interface EnablePoolAutoScaleBodyParam {
/** The options to use for enabling automatic scaling. */
- body: BatchPoolEnableAutoScaleContent;
+ body: BatchPoolEnableAutoScaleOptions;
}
export interface EnablePoolAutoScaleQueryParamProperties {
@@ -613,7 +653,7 @@ export interface EvaluatePoolAutoScaleHeaders {
export interface EvaluatePoolAutoScaleBodyParam {
/** The options to use for evaluating the automatic scaling formula. */
- body: BatchPoolEvaluateAutoScaleContent;
+ body: BatchPoolEvaluateAutoScaleOptions;
}
export interface EvaluatePoolAutoScaleQueryParamProperties {
@@ -682,7 +722,7 @@ export interface ResizePoolHeaders {
export interface ResizePoolBodyParam {
/** The options to use for resizing the pool. */
- body: BatchPoolResizeContent;
+ body: BatchPoolResizeOptions;
}
export interface ResizePoolQueryParamProperties {
@@ -784,7 +824,7 @@ export interface ReplacePoolPropertiesHeaders {
export interface ReplacePoolPropertiesBodyParam {
/** The options to use for replacing properties on the pool. */
- body: BatchPoolReplaceContent;
+ body: BatchPoolReplaceOptions;
}
export interface ReplacePoolPropertiesQueryParamProperties {
@@ -853,7 +893,7 @@ export interface RemoveNodesHeaders {
export interface RemoveNodesBodyParam {
/** The options to use for removing the node. */
- body: BatchNodeRemoveContent;
+ body: BatchNodeRemoveOptions;
}
export interface RemoveNodesQueryParamProperties {
@@ -906,7 +946,7 @@ export interface ListSupportedImagesQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images.
*/
$filter?: string;
}
@@ -949,7 +989,7 @@ export interface ListPoolNodeCountsQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images.
*/
$filter?: string;
}
@@ -1065,13 +1105,33 @@ export interface GetJobHeaders {
"If-None-Match"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface GetJobSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
+/** This is the wrapper object for the parameter `$expand` with explode set to false and style set to form. */
+export interface GetJobExpandQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface GetJobQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | GetJobSelectQueryParam;
/** An OData $expand clause. */
- $expand?: string[];
+ $expand?: string[] | GetJobExpandQueryParam;
}
export interface GetJobQueryParam {
@@ -1128,7 +1188,7 @@ export interface UpdateJobHeaders {
export interface UpdateJobBodyParam {
/** The options to use for updating the Job. */
- body: BatchJobUpdateContent;
+ body: BatchJobUpdateOptions;
}
export interface UpdateJobQueryParamProperties {
@@ -1266,7 +1326,7 @@ export interface DisableJobHeaders {
export interface DisableJobBodyParam {
/** The options to use for disabling the Job. */
- body: BatchJobDisableContent;
+ body: BatchJobDisableOptions;
}
export interface DisableJobQueryParamProperties {
@@ -1392,7 +1452,7 @@ export interface TerminateJobHeaders {
export interface TerminateJobBodyParam {
/** The options to use for terminating the Job. */
- body?: BatchJobTerminateContent;
+ body?: BatchJobTerminateOptions;
}
export interface TerminateJobQueryParamProperties {
@@ -1439,7 +1499,7 @@ export interface CreateJobHeaders {
export interface CreateJobBodyParam {
/** The Job to be created. */
- body: BatchJobCreateContent;
+ body: BatchJobCreateOptions;
}
export interface CreateJobQueryParamProperties {
@@ -1482,6 +1542,26 @@ export interface ListJobsHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface ListJobsSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
+/** This is the wrapper object for the parameter `$expand` with explode set to false and style set to form. */
+export interface ListJobsExpandQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface ListJobsQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
@@ -1492,13 +1572,13 @@ export interface ListJobsQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs.
*/
$filter?: string;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | ListJobsSelectQueryParam;
/** An OData $expand clause. */
- $expand?: string[];
+ $expand?: string[] | ListJobsExpandQueryParam;
}
export interface ListJobsQueryParam {
@@ -1529,6 +1609,26 @@ export interface ListJobsFromScheduleHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface ListJobsFromScheduleSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
+/** This is the wrapper object for the parameter `$expand` with explode set to false and style set to form. */
+export interface ListJobsFromScheduleExpandQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface ListJobsFromScheduleQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
@@ -1539,13 +1639,13 @@ export interface ListJobsFromScheduleQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule.
*/
$filter?: string;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | ListJobsFromScheduleSelectQueryParam;
/** An OData $expand clause. */
- $expand?: string[];
+ $expand?: string[] | ListJobsFromScheduleExpandQueryParam;
}
export interface ListJobsFromScheduleQueryParam {
@@ -1576,6 +1676,16 @@ export interface ListJobPreparationAndReleaseTaskStatusHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface ListJobPreparationAndReleaseTaskStatusSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface ListJobPreparationAndReleaseTaskStatusQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
@@ -1586,11 +1696,11 @@ export interface ListJobPreparationAndReleaseTaskStatusQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status.
*/
$filter?: string;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | ListJobPreparationAndReleaseTaskStatusSelectQueryParam;
}
export interface ListJobPreparationAndReleaseTaskStatusQueryParam {
@@ -1795,13 +1905,33 @@ export interface GetJobScheduleHeaders {
"If-None-Match"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface GetJobScheduleSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
+/** This is the wrapper object for the parameter `$expand` with explode set to false and style set to form. */
+export interface GetJobScheduleExpandQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface GetJobScheduleQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | GetJobScheduleSelectQueryParam;
/** An OData $expand clause. */
- $expand?: string[];
+ $expand?: string[] | GetJobScheduleExpandQueryParam;
}
export interface GetJobScheduleQueryParam {
@@ -1858,7 +1988,7 @@ export interface UpdateJobScheduleHeaders {
export interface UpdateJobScheduleBodyParam {
/** The options to use for updating the Job Schedule. */
- body: BatchJobScheduleUpdateContent;
+ body: BatchJobScheduleUpdateOptions;
}
export interface UpdateJobScheduleQueryParamProperties {
@@ -2145,7 +2275,7 @@ export interface CreateJobScheduleHeaders {
export interface CreateJobScheduleBodyParam {
/** The Job Schedule to be created. */
- body: BatchJobScheduleCreateContent;
+ body: BatchJobScheduleCreateOptions;
}
export interface CreateJobScheduleQueryParamProperties {
@@ -2188,6 +2318,26 @@ export interface ListJobSchedulesHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface ListJobSchedulesSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
+/** This is the wrapper object for the parameter `$expand` with explode set to false and style set to form. */
+export interface ListJobSchedulesExpandQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface ListJobSchedulesQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
@@ -2198,13 +2348,13 @@ export interface ListJobSchedulesQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-schedules.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-schedules.
*/
$filter?: string;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | ListJobSchedulesSelectQueryParam;
/** An OData $expand clause. */
- $expand?: string[];
+ $expand?: string[] | ListJobSchedulesExpandQueryParam;
}
export interface ListJobSchedulesQueryParam {
@@ -2237,7 +2387,7 @@ export interface CreateTaskHeaders {
export interface CreateTaskBodyParam {
/** The Task to be created. */
- body: BatchTaskCreateContent;
+ body: BatchTaskCreateOptions;
}
export interface CreateTaskQueryParamProperties {
@@ -2280,6 +2430,26 @@ export interface ListTasksHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface ListTasksSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
+/** This is the wrapper object for the parameter `$expand` with explode set to false and style set to form. */
+export interface ListTasksExpandQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface ListTasksQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
@@ -2290,13 +2460,13 @@ export interface ListTasksQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-tasks.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-tasks.
*/
$filter?: string;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | ListTasksSelectQueryParam;
/** An OData $expand clause. */
- $expand?: string[];
+ $expand?: string[] | ListTasksExpandQueryParam;
}
export interface ListTasksQueryParam {
@@ -2453,13 +2623,33 @@ export interface GetTaskHeaders {
"If-None-Match"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface GetTaskSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
+/** This is the wrapper object for the parameter `$expand` with explode set to false and style set to form. */
+export interface GetTaskExpandQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface GetTaskQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | GetTaskSelectQueryParam;
/** An OData $expand clause. */
- $expand?: string[];
+ $expand?: string[] | GetTaskExpandQueryParam;
}
export interface GetTaskQueryParam {
@@ -2559,11 +2749,21 @@ export interface ListSubTasksHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface ListSubTasksSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface ListSubTasksQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | ListSubTasksSelectQueryParam;
}
export interface ListSubTasksQueryParam {
@@ -2853,7 +3053,7 @@ export interface ListTaskFilesQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-task-files.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-task-files.
*/
$filter?: string;
/**
@@ -2893,7 +3093,7 @@ export interface CreateNodeUserHeaders {
export interface CreateNodeUserBodyParam {
/** The options to use for creating the user. */
- body: BatchNodeUserCreateContent;
+ body: BatchNodeUserCreateOptions;
}
export interface CreateNodeUserQueryParamProperties {
@@ -2971,7 +3171,7 @@ export interface ReplaceNodeUserHeaders {
export interface ReplaceNodeUserBodyParam {
/** The options to use for updating the user. */
- body: BatchNodeUserUpdateContent;
+ body: BatchNodeUserUpdateOptions;
}
export interface ReplaceNodeUserQueryParamProperties {
@@ -3014,11 +3214,21 @@ export interface GetNodeHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface GetNodeSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface GetNodeQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | GetNodeSelectQueryParam;
}
export interface GetNodeQueryParam {
@@ -3051,7 +3261,7 @@ export interface RebootNodeHeaders {
export interface RebootNodeBodyParam {
/** The options to use for rebooting the Compute Node. */
- body?: BatchNodeRebootContent;
+ body?: BatchNodeRebootOptions;
}
export interface RebootNodeQueryParamProperties {
@@ -3129,7 +3339,7 @@ export interface DeallocateNodeHeaders {
export interface DeallocateNodeBodyParam {
/** The options to use for deallocating the Compute Node. */
- body?: BatchNodeDeallocateContent;
+ body?: BatchNodeDeallocateOptions;
}
export interface DeallocateNodeQueryParamProperties {
@@ -3174,7 +3384,7 @@ export interface ReimageNodeHeaders {
export interface ReimageNodeBodyParam {
/** The options to use for reimaging the Compute Node. */
- body?: BatchNodeReimageContent;
+ body?: BatchNodeReimageOptions;
}
export interface ReimageNodeQueryParamProperties {
@@ -3219,7 +3429,7 @@ export interface DisableNodeSchedulingHeaders {
export interface DisableNodeSchedulingBodyParam {
/** The options to use for disabling scheduling on the Compute Node. */
- body?: BatchNodeDisableSchedulingContent;
+ body?: BatchNodeDisableSchedulingOptions;
}
export interface DisableNodeSchedulingQueryParamProperties {
@@ -3331,7 +3541,7 @@ export interface UploadNodeLogsHeaders {
export interface UploadNodeLogsBodyParam {
/** The Azure Batch service log files upload options. */
- body: UploadBatchServiceLogsContent;
+ body: UploadBatchServiceLogsOptions;
}
export interface UploadNodeLogsQueryParamProperties {
@@ -3374,6 +3584,16 @@ export interface ListNodesHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface ListNodesSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface ListNodesQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
@@ -3384,11 +3604,11 @@ export interface ListNodesQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool.
*/
$filter?: string;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | ListNodesSelectQueryParam;
}
export interface ListNodesQueryParam {
@@ -3419,11 +3639,21 @@ export interface GetNodeExtensionHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface GetNodeExtensionSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface GetNodeExtensionQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | GetNodeExtensionSelectQueryParam;
}
export interface GetNodeExtensionQueryParam {
@@ -3454,6 +3684,16 @@ export interface ListNodeExtensionsHeaders {
"ocp-date"?: string;
}
+/** This is the wrapper object for the parameter `$select` with explode set to false and style set to form. */
+export interface ListNodeExtensionsSelectQueryParam {
+ /** Value of the parameter */
+ value: string[];
+ /** Should we explode the value? */
+ explode: false;
+ /** Style of the value */
+ style: "form";
+}
+
export interface ListNodeExtensionsQueryParamProperties {
/** The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". */
timeOut?: number;
@@ -3463,7 +3703,7 @@ export interface ListNodeExtensionsQueryParamProperties {
*/
maxresults?: number;
/** An OData $select clause. */
- $select?: string[];
+ $select?: string[] | ListNodeExtensionsSelectQueryParam;
}
export interface ListNodeExtensionsQueryParam {
@@ -3639,7 +3879,7 @@ export interface ListNodeFilesQueryParamProperties {
maxresults?: number;
/**
* An OData $filter clause. For more information on constructing this filter, see
- * https://docs.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files.
+ * https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files.
*/
$filter?: string;
/** Whether to list children of a directory. */
diff --git a/packages/service/src/internal/batch-rest/generated/src/responses.ts b/packages/service/src/internal/batch-rest/generated/src/responses.ts
index 7647899ac9..8a77a7b7ec 100644
--- a/packages/service/src/internal/batch-rest/generated/src/responses.ts
+++ b/packages/service/src/internal/batch-rest/generated/src/responses.ts
@@ -1,9 +1,9 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
-import { RawHttpHeaders } from "@azure/core-rest-pipeline";
-import { HttpResponse } from "@azure-rest/core-client";
-import {
+import type { RawHttpHeaders } from "@azure/core-rest-pipeline";
+import type { HttpResponse } from "@azure-rest/core-client";
+import type {
BatchApplicationListResultOutput,
BatchErrorOutput,
BatchApplicationOutput,
@@ -20,7 +20,7 @@ import {
BatchJobScheduleOutput,
BatchJobScheduleListResultOutput,
BatchTaskListResultOutput,
- BatchTaskAddCollectionResultOutput,
+ BatchCreateTaskCollectionResultOutput,
BatchTaskOutput,
BatchTaskListSubtasksResultOutput,
BatchNodeFileListResultOutput,
@@ -1030,7 +1030,7 @@ export interface CreateTaskCollection200Headers {
/** The request has succeeded. */
export interface CreateTaskCollection200Response extends HttpResponse {
status: "200";
- body: BatchTaskAddCollectionResultOutput;
+ body: BatchCreateTaskCollectionResultOutput;
headers: RawHttpHeaders & CreateTaskCollection200Headers;
}
diff --git a/packages/service/src/internal/batch-rest/generated/tsp-location.yaml b/packages/service/src/internal/batch-rest/generated/tsp-location.yaml
index c4d5e02968..93aab2ea24 100644
--- a/packages/service/src/internal/batch-rest/generated/tsp-location.yaml
+++ b/packages/service/src/internal/batch-rest/generated/tsp-location.yaml
@@ -1,4 +1,4 @@
directory: specification/batch/Azure.Batch
-commit: 191c76349cdbc840567a9f1b2cbae50fd57bc1b9
+commit: f17f8a4c10b849aa2101140ecf4d78cad78ab299
repo: Azure/azure-rest-api-specs
additionalDirectories:
diff --git a/packages/service/src/pool/__tests__/fake-pool-service.spec.ts b/packages/service/src/pool/__tests__/fake-pool-service.spec.ts
index c304799019..839715f03a 100644
--- a/packages/service/src/pool/__tests__/fake-pool-service.spec.ts
+++ b/packages/service/src/pool/__tests__/fake-pool-service.spec.ts
@@ -1,5 +1,5 @@
import { FakePoolService } from "../fake-pool-service";
-import { Pool } from "../pool-models";
+import { LegacyPool, Pool } from "../pool-models";
import { BasicBatchFakeSet, BatchFakeSet } from "../../test-util/fakes";
import { initMockBatchEnvironment } from "../../environment";
@@ -36,6 +36,26 @@ describe("FakePoolService", () => {
expect(byosPool?.name).toEqual("byospool1");
});
+ test("Get by resource ID using 2024-07-01 API version", async () => {
+ const hoboPool = await service.getLegacy(hoboPoolResourceId);
+ expect(hoboPool?.name).toEqual("hobopool1");
+ expect(hoboPool?.properties?.targetNodeCommunicationMode).toBe(
+ "Default"
+ );
+ expect(
+ hoboPool?.properties?.currentNodeCommunicationMode
+ ).toBeUndefined();
+
+ const byosPool = await service.getLegacy(byosPoolResourceId);
+ expect(byosPool?.name).toEqual("byospool1");
+ expect(byosPool?.properties?.targetNodeCommunicationMode).toBe(
+ "Default"
+ );
+ expect(byosPool?.properties?.currentNodeCommunicationMode).toBe(
+ "Simplified"
+ );
+ });
+
test("Create", async () => {
const newPool: Pool = {
id: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/supercomputing/providers/Microsoft.Batch/batchAccounts/hobo/pools/newtestpool",
@@ -139,4 +159,21 @@ describe("FakePoolService", () => {
const pools = await service.listByAccountId(hoboAcctId);
expect(pools.map((pool) => pool.name)).toEqual(["hobopool1"]);
});
+
+ test("Patch using 2024-07-01 API version", async () => {
+ const update: LegacyPool = {
+ id: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/supercomputing/providers/Microsoft.Batch/batchAccounts/hobo/pools/hobopool1",
+ name: "hobopool1",
+ type: "Microsoft.Batch/batchAccounts/pools",
+ properties: {
+ targetNodeCommunicationMode: "Simplified",
+ },
+ };
+
+ const pool = await service.patchLegacy(hoboPoolResourceId, update);
+ expect(pool?.name).toEqual("hobopool1");
+ expect(pool?.properties?.targetNodeCommunicationMode).toBe(
+ "Simplified"
+ );
+ });
});
diff --git a/packages/service/src/pool/__tests__/live-pool-service.spec.ts b/packages/service/src/pool/__tests__/live-pool-service.spec.ts
index 4a4cd8c888..b6c522ea69 100644
--- a/packages/service/src/pool/__tests__/live-pool-service.spec.ts
+++ b/packages/service/src/pool/__tests__/live-pool-service.spec.ts
@@ -2,7 +2,7 @@ import { getMockEnvironment } from "@azure/bonito-core/lib/environment";
import { MockHttpClient, MockHttpResponse } from "@azure/bonito-core/lib/http";
import { LivePoolService } from "../live-pool-service";
import { PoolService } from "../pool-service";
-import type { Pool } from "../pool-models";
+import type { LegacyPool, Pool } from "../pool-models";
import { cloneDeep, getArmUrl, mergeDeep } from "@azure/bonito-core";
import { BasicBatchFakeSet, BatchFakeSet } from "../../test-util/fakes";
import { BatchApiVersion } from "../../constants";
@@ -103,6 +103,50 @@ describe("LivePoolService", () => {
expect(byosPool?.name).toEqual("byospool1");
});
+ test("Get by resource ID using 2024-07-01 API version", async () => {
+ httpClient.addExpected(
+ new MockHttpResponse(
+ `${getArmUrl()}${hoboPoolResourceId}?api-version=2024-07-01`,
+ {
+ status: 200,
+ body: JSON.stringify(
+ fakeSet.getLegacyPool(hoboPoolResourceId)
+ ),
+ }
+ )
+ );
+
+ const hoboPool = await service.getLegacy(hoboPoolResourceId);
+ expect(hoboPool?.name).toEqual("hobopool1");
+ expect(hoboPool?.properties?.targetNodeCommunicationMode).toBe(
+ "Default"
+ );
+ expect(
+ hoboPool?.properties?.currentNodeCommunicationMode
+ ).toBeUndefined();
+
+ httpClient.addExpected(
+ new MockHttpResponse(
+ `${getArmUrl()}${byosPoolResourceId}?api-version=2024-07-01`,
+ {
+ status: 200,
+ body: JSON.stringify(
+ fakeSet.getLegacyPool(byosPoolResourceId)
+ ),
+ }
+ )
+ );
+
+ const byosPool = await service.getLegacy(byosPoolResourceId);
+ expect(byosPool?.name).toEqual("byospool1");
+ expect(byosPool?.properties?.targetNodeCommunicationMode).toBe(
+ "Default"
+ );
+ expect(byosPool?.properties?.currentNodeCommunicationMode).toBe(
+ "Simplified"
+ );
+ });
+
test("Get by resource ID error", async () => {
httpClient.addExpected(
new MockHttpResponse(
@@ -244,4 +288,42 @@ describe("LivePoolService", () => {
pool?.properties?.scaleSettings?.fixedScale?.targetLowPriorityNodes
).toEqual(5);
});
+
+ test("Patch using 2024-07-01 API version", async () => {
+ const pool: LegacyPool = {
+ id: hoboPoolResourceId,
+ properties: {
+ targetNodeCommunicationMode: "Simplified",
+ },
+ };
+
+ const fakeOutput = fakeSet.getLegacyPool(hoboPoolResourceId);
+ if (fakeOutput?.properties) {
+ fakeOutput.properties.targetNodeCommunicationMode = "Simplified";
+ }
+
+ httpClient.addExpected(
+ new MockHttpResponse(
+ `${getArmUrl()}${hoboPoolResourceId}?api-version=2024-07-01`,
+ {
+ status: 200,
+ body: JSON.stringify(fakeOutput),
+ }
+ ),
+ {
+ method: "PATCH",
+ body: JSON.stringify(pool),
+ }
+ );
+
+ // Patch
+ const poolOutput = await service.patchLegacy(hoboPoolResourceId, pool);
+ expect(poolOutput?.name).toEqual("hobopool1");
+ expect(poolOutput?.properties?.targetNodeCommunicationMode).toEqual(
+ "Simplified"
+ );
+ expect(
+ poolOutput?.properties?.currentNodeCommunicationMode
+ ).toBeUndefined();
+ });
});
diff --git a/packages/service/src/pool/fake-pool-service.ts b/packages/service/src/pool/fake-pool-service.ts
index fae771aa95..3d67321bd5 100644
--- a/packages/service/src/pool/fake-pool-service.ts
+++ b/packages/service/src/pool/fake-pool-service.ts
@@ -1,5 +1,10 @@
import { BasicBatchFakeSet, BatchFakeSet } from "../test-util/fakes";
-import type { Pool, PoolOutput } from "./pool-models";
+import type {
+ LegacyPool,
+ LegacyPoolOutput,
+ Pool,
+ PoolOutput,
+} from "./pool-models";
import type { PoolService } from "./pool-service";
export class FakePoolService implements PoolService {
@@ -20,6 +25,12 @@ export class FakePoolService implements PoolService {
return this.fakeSet.getPool(poolResourceId);
}
+ async getLegacy(
+ poolResourceId: string
+ ): Promise<LegacyPoolOutput | undefined> {
+ return this.fakeSet.getLegacyPool(poolResourceId);
+ }
+
async listByAccountId(accountId: string): Promise {
return this.fakeSet.listPoolsByAccount(accountId);
}
@@ -27,4 +38,11 @@ export class FakePoolService implements PoolService {
async patch(poolResourceId: string, pool: Pool): Promise {
return this.fakeSet.patchPool(pool);
}
+
+ async patchLegacy(
+ poolResourceId: string,
+ pool: LegacyPool
+ ): Promise<LegacyPoolOutput | undefined> {
+ return this.fakeSet.patchLegacyPool(pool);
+ }
}
diff --git a/packages/service/src/pool/live-pool-service.ts b/packages/service/src/pool/live-pool-service.ts
index f518aff206..a1a163cae1 100644
--- a/packages/service/src/pool/live-pool-service.ts
+++ b/packages/service/src/pool/live-pool-service.ts
@@ -1,10 +1,18 @@
// import { BatchApiVersion } from "../constants";
-import type { Pool, PoolOutput } from "./pool-models";
+import type {
+ LegacyPool,
+ LegacyPoolOutput,
+ Pool,
+ PoolOutput,
+} from "./pool-models";
import type { PoolService } from "./pool-service";
import {
CustomHttpHeaders,
+ HttpRequestMetadata,
OperationOptions,
+ UnexpectedStatusCodeError,
getArmUrl,
+ getHttpClient,
} from "@azure/bonito-core";
import { createARMBatchClient, isUnexpected } from "../internal/arm-batch-rest";
import {
@@ -96,6 +104,33 @@ export class LivePoolService implements PoolService {
return res.body;
}
+ async getLegacy(
+ poolResourceId: string,
+ opts?: OperationOptions
+ ): Promise<LegacyPoolOutput | undefined> {
+ let metadata: HttpRequestMetadata | undefined;
+ if (opts?.commandName) {
+ metadata = { commandName: opts.commandName };
+ }
+
+ const response = await getHttpClient().get(
+ `${getArmUrl()}${poolResourceId}?api-version=2024-07-01`,
+ {
+ metadata,
+ }
+ );
+
+ if (response.status !== 200) {
+ throw new UnexpectedStatusCodeError(
+ `Failed to get pool ${poolResourceId}`,
+ response.status,
+ await response.text()
+ );
+ }
+
+ return (await response.json()) as LegacyPoolOutput;
+ }
+
async listByAccountId(
batchAccountId: string,
opts?: OperationOptions
@@ -166,4 +201,38 @@ export class LivePoolService implements PoolService {
return res.body;
}
+
+ async patchLegacy(
+ poolResourceId: string,
+ pool: LegacyPool,
+ opts?: OperationOptions
+ ): Promise<LegacyPoolOutput | undefined> {
+ let metadata: HttpRequestMetadata | undefined;
+ if (opts?.commandName) {
+ metadata = { commandName: opts.commandName };
+ }
+
+ // Use 2024-07-01 API version since node communication mode properties
+ // were removed in subsequent versions.
+ const response = await getHttpClient().patch(
+ `${getArmUrl()}${poolResourceId}?api-version=2024-07-01`,
+ {
+ body: JSON.stringify(pool),
+ headers: {
+ "Content-Type": "application/json",
+ },
+ metadata,
+ }
+ );
+
+ if (response.status !== 200) {
+ throw new UnexpectedStatusCodeError(
+ `Failed to update pool ${poolResourceId}`,
+ response.status,
+ await response.text()
+ );
+ }
+
+ return (await response.json()) as LegacyPoolOutput;
+ }
}
diff --git a/packages/service/src/pool/pool-models.ts b/packages/service/src/pool/pool-models.ts
index 05d891d36c..5a1531295f 100644
--- a/packages/service/src/pool/pool-models.ts
+++ b/packages/service/src/pool/pool-models.ts
@@ -12,3 +12,54 @@ export {
} from "../internal/arm-batch-rest";
export type NodeCommunicationMode = "Default" | "Simplified" | "Classic";
+
+/**
+ * A pool output model in the 2024-07-01 API version format. Note that only
+ * properties needed for backwards compatibility are included.
+ */
+export interface LegacyPoolOutput {
+ /** The ID of the resource. */
+ id?: string;
+
+ /** The name of the resource. */
+ name?: string;
+
+ /** The type of the resource. */
+ type?: string;
+
+ /** The properties associated with the pool. */
+ properties?: LegacyPoolPropertiesOutput;
+}
+
+export interface LegacyPoolPropertiesOutput {
+ targetNodeCommunicationMode?: NodeCommunicationMode;
+
+ /** Determines how a pool communicates with the Batch service. */
+ currentNodeCommunicationMode?: NodeCommunicationMode;
+}
+
+/**
+ * A pool model in the 2024-07-01 API version format. Note that only
+ * properties needed for backwards compatibility are included.
+ */
+export interface LegacyPool {
+ /** The ID of the resource. */
+ id?: string;
+
+ /** The name of the resource. */
+ name?: string;
+
+ /** The type of the resource. */
+ type?: string;
+
+ /** The properties associated with the pool. */
+ properties?: LegacyPoolProperties;
+}
+
+export interface LegacyPoolProperties {
+ /** If omitted, the default value is Default. */
+ targetNodeCommunicationMode?: NodeCommunicationMode;
+
+ /** Determines how a pool communicates with the Batch service. */
+ currentNodeCommunicationMode?: NodeCommunicationMode;
+}
diff --git a/packages/service/src/pool/pool-service.ts b/packages/service/src/pool/pool-service.ts
index 6f7c17c08a..6f89e8d189 100644
--- a/packages/service/src/pool/pool-service.ts
+++ b/packages/service/src/pool/pool-service.ts
@@ -1,19 +1,76 @@
import { OperationOptions } from "@azure/bonito-core";
-import { Pool, PoolOutput } from "./pool-models";
+import { LegacyPool, LegacyPoolOutput, Pool, PoolOutput } from "./pool-models";
export interface PoolService {
+ /**
+ * Creates or updates a pool.
+ *
+ * @param poolResourceId The ARM resource ID of the pool.
+ * @param pool The pool to create or update.
+ * @returns The created/updated pool.
+ */
createOrUpdate(poolResourceId: string, pool: Pool): Promise;
+
+ /**
+ * Get a pool by its resource ID.
+ *
+ * @param poolResourceId The ARM resource ID of the pool.
+ * @param opts Optional operation parameters.
+ * @returns The pool, or undefined if not found.
+ */
get(
poolResourceId: string,
opts?: OperationOptions
): Promise;
+
+ /**
+ * Get a pool by its resource ID using the 2024-07-01 API version.
+ *
+ * @param poolResourceId The ARM resource ID of the pool.
+ * @param opts Optional operation parameters.
+ * @returns The pool, or undefined if not found.
+ */
+ getLegacy(
+ poolResourceId: string,
+ opts?: OperationOptions
+ ): Promise;
+
+ /**
+ * List pools by Batch account resource ID.
+ * @param accountId The ARM resource ID of the Batch account.
+ * @param opts Optional operation parameters.
+ * @returns The list of pools.
+ */
listByAccountId(
accountId: string,
opts?: OperationOptions
): Promise;
+
+ /**
+ * Updates specified properties of a pool.
+ *
+ * @param poolResourceId The ARM resource ID of the pool.
+ * @param pool A partial pool with the properties to update.
+ * @param opts Optional operation parameters.
+ * @returns The updated pool.
+ */
patch(
poolResourceId: string,
pool: Pool,
opts?: OperationOptions
): Promise;
+
+ /**
+ * Updates specified properties of a pool using the 2024-07-01 API version.
+ *
+ * @param poolResourceId The ARM resource ID of the pool.
+ * @param pool A partial pool with the properties to update.
+ * @param opts Optional operation parameters.
+ * @returns The updated pool.
+ */
+ patchLegacy(
+ poolResourceId: string,
+ pool: LegacyPool,
+ opts?: OperationOptions
+ ): Promise;
}
diff --git a/packages/service/src/test-util/fakes.ts b/packages/service/src/test-util/fakes.ts
index 30e71ee731..02d4abbdea 100644
--- a/packages/service/src/test-util/fakes.ts
+++ b/packages/service/src/test-util/fakes.ts
@@ -17,8 +17,17 @@ import {
BatchNodeOutput,
BatchNodeVMExtensionOutput,
} from "../node/node-models";
-import { Pool, PoolOutput } from "../pool/pool-models";
+import {
+ LegacyPool,
+ LegacyPoolOutput,
+ Pool,
+ PoolOutput,
+} from "../pool/pool-models";
import { BatchJobOutput, BatchTaskOutput } from "../batch-models";
+import {
+ AccountBatchUpdateParameters,
+ NetworkSecurityPerimeterConfigurationListResultOutput,
+} from "../arm-batch-models";
/**
* A fake dataset which includes Batch accounts, pools, etc.
@@ -48,11 +57,23 @@ export interface BatchFakeSet extends FakeSet {
*/
getPool(poolResourceId: string): PoolOutput | undefined;
+ /**
+ * Get a Batch pool in the 2024-07-01 API version format by case-insensitive ID
+ *
+ * @param poolResourceId The ARM resource ID of the pool
+ */
+ getLegacyPool(poolResourceId: string): LegacyPoolOutput | undefined;
+
/**
* Patches a pool and returns it
*/
patchPool(pool: Pool): PoolOutput;
+ /**
+ * Patches a pool using the 2024-07-01 API version format and returns it
+ */
+ patchLegacyPool(pool: LegacyPool): LegacyPoolOutput;
+
/**
* Creates or updates a pool and returns it
*/
@@ -113,6 +134,23 @@ export interface BatchFakeSet extends FakeSet {
* @param jobId
*/
listTasks(accountEndpoint: string, jobId: string): BatchTaskOutput[];
+
+ /**
+ * update a batch account and return it
+ *
+ * @param accountResouceId The resource id of the account
+ * @param parameters The parameters to update the account with
+ * @param opts
+ *
+ */
+ patchBatchAccount(
+ accountResouceId: string,
+ parameters: AccountBatchUpdateParameters
+ ): BatchAccountOutput | undefined;
+
+ listNetworkSecurityPerimeterConfigurations(
+ accountResouceId: string
+ ): NetworkSecurityPerimeterConfigurationListResultOutput;
}
export abstract class AbstractBatchFakeSet
@@ -124,8 +162,11 @@ export abstract class AbstractBatchFakeSet
protected abstract batchAccounts: {
[accountId: string]: BatchAccountOutput;
};
+
protected abstract batchPools: { [poolId: string]: PoolOutput };
+ protected abstract legacyPools: { [poolId: string]: LegacyPoolOutput };
+
/**
* Node key is the account endpoint, pool name and node ID concatenated,
* colon-separated and lower-cased
@@ -138,10 +179,30 @@ export abstract class AbstractBatchFakeSet
protected abstract batchTasks: { [taskKey: string]: BatchTaskOutput };
+ protected networkSecurityPerimeterConfigurations: {
+ [
+ accountId: string
+ ]: NetworkSecurityPerimeterConfigurationListResultOutput;
+ } = {};
+
getBatchAccount(batchAccountId: string): BatchAccountOutput | undefined {
return this.batchAccounts[batchAccountId.toLowerCase()];
}
+ patchBatchAccount(
+ accountResouceId: string,
+ parameters: AccountBatchUpdateParameters
+ ): BatchAccountOutput | undefined {
+ const batchAccount = this.getBatchAccount(accountResouceId);
+ if (!batchAccount) {
+ throw new Error("No batch account with ID " + accountResouceId);
+ }
+
+ const oldAccount = cloneDeep(batchAccount);
+
+ return mergeDeep(oldAccount, parameters as BatchAccountOutput);
+ }
+
listBatchAccountsBySubscription(subId: string): BatchAccountOutput[] {
if (!subId) {
return [];
@@ -156,6 +217,10 @@ export abstract class AbstractBatchFakeSet
return this.batchPools[poolResourceId.toLowerCase()];
}
+ getLegacyPool(poolResourceId: string): LegacyPoolOutput | undefined {
+ return this.legacyPools[poolResourceId.toLowerCase()];
+ }
+
listPoolsByAccount(accountId: string): PoolOutput[] {
if (!accountId) {
return [];
@@ -178,6 +243,19 @@ export abstract class AbstractBatchFakeSet
return mergeDeep(oldPool, poolToOutput(pool));
}
+ patchLegacyPool(pool: LegacyPool): LegacyPoolOutput {
+ if (!pool.id) {
+ throw new Error("Cannot patch a pool without a valid ID");
+ }
+
+ const oldPool = this.getLegacyPool(pool.id);
+ if (!oldPool) {
+ throw new Error("No pool with ID " + pool.id);
+ }
+
+ return mergeDeep(oldPool, legacyPoolToOutput(pool));
+ }
+
putPool(pool: Pool): PoolOutput {
if (!pool.id) {
throw new Error("Cannot create/update a pool without a valid ID");
@@ -252,6 +330,21 @@ export abstract class AbstractBatchFakeSet
)
.map((entry) => entry[1]);
}
+
+ listNetworkSecurityPerimeterConfigurations(
+ accountResouceId: string
+ ): NetworkSecurityPerimeterConfigurationListResultOutput {
+ const res =
+ this.networkSecurityPerimeterConfigurations[
+ accountResouceId.toLowerCase()
+ ];
+ if (!res) {
+ return {
+ value: [],
+ };
+ }
+ return res;
+ }
}
export class BasicBatchFakeSet extends AbstractBatchFakeSet {
@@ -770,11 +863,28 @@ export class BasicBatchFakeSet extends AbstractBatchFakeSet {
},
currentDedicatedNodes: 0,
currentLowPriorityNodes: 0,
+ },
+ },
+ };
+
+ protected legacyPools: { [poolId: string]: LegacyPoolOutput } = {
+ "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/supercomputing/providers/microsoft.batch/batchaccounts/hobo/pools/hobopool1":
+ {
+ id: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/supercomputing/providers/Microsoft.Batch/batchAccounts/hobo/pools/hobopool1",
+ name: "hobopool1",
+ type: "Microsoft.Batch/batchAccounts/pools",
+ properties: {
targetNodeCommunicationMode: "Default",
- resourceTags: {
- tag1: "one",
- tag2: "two",
- },
+ },
+ },
+ "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/visualization/providers/microsoft.batch/batchaccounts/byos/pools/byospool1":
+ {
+ id: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/visualization/providers/Microsoft.Batch/batchAccounts/byos/pools/byospool1",
+ name: "byospool1",
+ type: "Microsoft.Batch/batchAccounts/pools",
+ properties: {
+ targetNodeCommunicationMode: "Default",
+ currentNodeCommunicationMode: "Simplified",
},
},
};
@@ -787,7 +897,9 @@ export class BasicBatchFakeSet extends AbstractBatchFakeSet {
schedulingState: "enabled",
stateTransitionTime: "2023-11-09T07:20:55.000Z",
allocationTime: "2023-11-09T07:20:45.000Z",
+ lastBootTime: "2023-11-09T07:20:45.000Z",
ipAddress: "10.0.0.4",
+ ipv6Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
affinityId:
"TVM:tvmps_7b5797648c5d43f7a15b952e5ada3c082ccac8de5eb95f5518ab1242bc79aa3b_d",
vmSize: "standard_d2_v2",
@@ -808,6 +920,10 @@ export class BasicBatchFakeSet extends AbstractBatchFakeSet {
},
],
},
+ nodeAgentInfo: {
+ version: "1.0",
+ lastUpdateTime: "2024-11-09T07:20:45.000Z",
+ },
virtualMachineInfo: {
imageReference: {
publisher: "microsoftwindowsserver",
@@ -824,6 +940,7 @@ export class BasicBatchFakeSet extends AbstractBatchFakeSet {
hobopool1: [
{
id: "faketestjob1",
+ eTag: "eTag",
usesTaskDependencies: false,
url: "https://batchsyntheticsprod.eastus2euap.batch.azure.com/jobs/faketestjob1",
lastModified: "2024-05-29T08:32:21.000Z",
@@ -852,7 +969,12 @@ export class BasicBatchFakeSet extends AbstractBatchFakeSet {
"mercury.eastus.batch.azure.com:faketestjob1:taska": {
url: "https://batchsyntheticsprod.eastus2euap.batch.azure.com/jobs/faketestjob1/tasks/taskA",
id: "taska",
+ eTag: "eTag",
state: "active",
+ lastModified: "2024-05-29T08:32:21.000Z",
+ creationTime: "2024-05-29T08:32:21.000Z",
+ stateTransitionTime: "2024-05-29T08:32:21.000Z",
+ commandLine: "hostname",
executionInfo: {
retryCount: 0,
requeueCount: 0,
@@ -861,7 +983,12 @@ export class BasicBatchFakeSet extends AbstractBatchFakeSet {
"mercury.eastus.batch.azure.com:faketestjob1:task1": {
url: "https://batchsyntheticsprod.eastus2euap.batch.azure.com/jobs/faketestjob1/tasks/task1",
id: "task1",
+ eTag: "eTag",
state: "completed",
+ lastModified: "2024-05-29T08:32:21.000Z",
+ creationTime: "2024-05-29T08:32:21.000Z",
+ stateTransitionTime: "2024-05-29T08:32:21.000Z",
+ commandLine: "hostname",
executionInfo: {
retryCount: 0,
requeueCount: 0,
@@ -901,6 +1028,98 @@ export class BasicBatchFakeSet extends AbstractBatchFakeSet {
},
],
};
+
+ networkSecurityPerimeterConfigurations: {
+ [
+ accountId: string
+ ]: NetworkSecurityPerimeterConfigurationListResultOutput;
+ } = {
+ "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/supercomputing/providers/microsoft.batch/batchaccounts/hobo":
+ {
+ value: [
+ {
+ id: "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/supercomputing/providers/microsoft.batch/batchaccounts/hobo/networkSecurityPerimeterConfigurations/00000000-0000-0000-0000-000000000000.abcd",
+ name: "00000000-0000-0000-0000-000000000000.resourceAssociationName",
+ type: "Microsoft.Batch/batchAccounts/networkSecurityPerimeterConfigurations",
+ properties: {
+ provisioningState: "Succeeded",
+ provisioningIssues: [
+ {
+ name: "issue1",
+ },
+ {
+ name: "issue2",
+ },
+ ],
+ networkSecurityPerimeter: {
+ id: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/npipv2/providers/Microsoft.Network/networkSecurityPerimeters/nspname",
+ perimeterGuid:
+ "00000000-0000-0000-0000-000000000000",
+ location: "eastus2euap",
+ },
+ resourceAssociation: {
+ name: "resourceAssociationName",
+ accessMode: "Enforced",
+ },
+ profile: {
+ name: "default",
+ accessRulesVersion: 5,
+ accessRules: [
+ {
+ name: "test",
+ properties: {
+ direction: "Inbound",
+ addressPrefixes: [
+ "111.111.111.111/32",
+ ],
+ fullyQualifiedDomainNames: [],
+ subscriptions: [],
+ networkSecurityPerimeters: [],
+ emailAddresses: [],
+ phoneNumbers: [],
+ },
+ },
+ {
+ name: "rule2",
+ properties: {
+ direction: "Outbound",
+ addressPrefixes: [],
+ fullyQualifiedDomainNames: ["*"],
+ subscriptions: [],
+ networkSecurityPerimeters: [],
+ emailAddresses: [],
+ phoneNumbers: [],
+ },
+ },
+ ],
+ diagnosticSettingsVersion: 0,
+ enabledLogCategories: [
+ "NspPublicInboundPerimeterRulesAllowed",
+ "NspPublicInboundPerimeterRulesDenied",
+ "NspPublicOutboundPerimeterRulesAllowed",
+ "NspPublicOutboundPerimeterRulesDenied",
+ "NspIntraPerimeterOutboundAllowed",
+ "NspPublicInboundResourceRulesAllowed",
+ "NspPublicInboundResourceRulesDenied",
+ "NspPublicOutboundResourceRulesAllowed",
+ "NspPublicOutboundResourceRulesDenied",
+ "NspPrivateInboundAllowed",
+ "NspIntraPerimeterInboundAllowed",
+ "NspOutboundAttempt",
+ "NspCrossPerimeterInboundAllowed",
+ "NspCrossPerimeterOutboundAllowed",
+ ],
+ },
+ },
+ },
+ ],
+ },
+ "/subscriptions/11111111-1111-1111-1111-111111111111/resourcegroups/test/providers/microsoft.batch/batchaccounts/hobo":
+ {
+ value: [],
+ nextLink: undefined,
+ },
+ };
}
/**
@@ -918,3 +1137,19 @@ function poolToOutput(pool: Pool): PoolOutput {
return clone as PoolOutput;
}
+
+/**
+ * Convert a Pool model to a PoolOutput model using the 2024-07-01 API version
+ * model format
+ *
+ * @param pool The input pool
+ * @returns The output model
+ */
+function legacyPoolToOutput(pool: LegacyPool): LegacyPoolOutput {
+ const clone = cloneDeep(pool);
+
+ // KLUDGE: Properties shouldn't be nullable, but since it is right now, handle it.
+ clone.properties = clone.properties ?? {};
+
+ return clone as LegacyPoolOutput;
+}
diff --git a/packages/service/swagger/README.md b/packages/service/swagger/README.md
index bcb0e131bf..3d9ac82015 100644
--- a/packages/service/swagger/README.md
+++ b/packages/service/swagger/README.md
@@ -14,7 +14,9 @@ generate-sample: false
license-header: MICROSOFT_MIT_NO_VERSION
output-folder: ../src/internal/arm-batch-rest
source-code-folder-path: ./generated
-input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/batch/resource-manager/Microsoft.Batch/stable/2024-07-01/BatchManagement.json
+input-file:
+ - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/batch/resource-manager/Microsoft.Batch/stable/2025-06-01/BatchManagement.json
+ - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/batch/resource-manager/Microsoft.Batch/stable/2025-06-01/NetworkSecurityPerimeter.json
package-version: 1.0.0-beta.1
rest-level-client: true
add-credentials: true
diff --git a/util/bux/package-lock.json b/util/bux/package-lock.json
index 40ca864711..15aab65b23 100644
--- a/util/bux/package-lock.json
+++ b/util/bux/package-lock.json
@@ -54,80 +54,19 @@
}
},
"node_modules/@babel/code-frame": {
- "version": "7.23.5",
- "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz",
- "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==",
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
+ "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
"dev": true,
"dependencies": {
- "@babel/highlight": "^7.23.4",
- "chalk": "^2.4.2"
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.1.1"
},
"engines": {
"node": ">=6.9.0"
}
},
- "node_modules/@babel/code-frame/node_modules/ansi-styles": {
- "version": "3.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
- "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
- "dev": true,
- "dependencies": {
- "color-convert": "^1.9.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/code-frame/node_modules/chalk": {
- "version": "2.4.2",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
- "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
- "dev": true,
- "dependencies": {
- "ansi-styles": "^3.2.1",
- "escape-string-regexp": "^1.0.5",
- "supports-color": "^5.3.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/code-frame/node_modules/color-convert": {
- "version": "1.9.3",
- "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
- "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
- "dev": true,
- "dependencies": {
- "color-name": "1.1.3"
- }
- },
- "node_modules/@babel/code-frame/node_modules/color-name": {
- "version": "1.1.3",
- "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
- "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
- "dev": true
- },
- "node_modules/@babel/code-frame/node_modules/has-flag": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
- "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
- "dev": true,
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/code-frame/node_modules/supports-color": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
- "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
- "dev": true,
- "dependencies": {
- "has-flag": "^3.0.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
"node_modules/@babel/compat-data": {
"version": "7.22.9",
"resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.9.tgz",
@@ -330,18 +269,18 @@
}
},
"node_modules/@babel/helper-string-parser": {
- "version": "7.23.4",
- "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz",
- "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==",
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
+ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-validator-identifier": {
- "version": "7.22.20",
- "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz",
- "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
"dev": true,
"engines": {
"node": ">=6.9.0"
@@ -357,100 +296,26 @@
}
},
"node_modules/@babel/helpers": {
- "version": "7.22.11",
- "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.11.tgz",
- "integrity": "sha512-vyOXC8PBWaGc5h7GMsNx68OH33cypkEDJCHvYVVgVbbxJDROYVtexSk0gK5iCF1xNjRIN2s8ai7hwkWDq5szWg==",
+ "version": "7.28.4",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
+ "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
"dev": true,
"dependencies": {
- "@babel/template": "^7.22.5",
- "@babel/traverse": "^7.22.11",
- "@babel/types": "^7.22.11"
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.4"
},
"engines": {
"node": ">=6.9.0"
}
},
- "node_modules/@babel/highlight": {
- "version": "7.23.4",
- "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz",
- "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==",
- "dev": true,
- "dependencies": {
- "@babel/helper-validator-identifier": "^7.22.20",
- "chalk": "^2.4.2",
- "js-tokens": "^4.0.0"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/highlight/node_modules/ansi-styles": {
- "version": "3.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
- "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
- "dev": true,
- "dependencies": {
- "color-convert": "^1.9.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/highlight/node_modules/chalk": {
- "version": "2.4.2",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
- "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
- "dev": true,
- "dependencies": {
- "ansi-styles": "^3.2.1",
- "escape-string-regexp": "^1.0.5",
- "supports-color": "^5.3.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/highlight/node_modules/color-convert": {
- "version": "1.9.3",
- "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
- "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
- "dev": true,
- "dependencies": {
- "color-name": "1.1.3"
- }
- },
- "node_modules/@babel/highlight/node_modules/color-name": {
- "version": "1.1.3",
- "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
- "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
- "dev": true
- },
- "node_modules/@babel/highlight/node_modules/has-flag": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
- "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
- "dev": true,
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/highlight/node_modules/supports-color": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
- "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "node_modules/@babel/parser": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
+ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
"dev": true,
"dependencies": {
- "has-flag": "^3.0.0"
+ "@babel/types": "^7.28.5"
},
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/parser": {
- "version": "7.23.5",
- "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.5.tgz",
- "integrity": "sha512-hOOqoiNXrmGdFbhgCzu6GiURxUgM27Xwd/aPuu8RfHEZPBzL1Z54okAHAQjXfcQNwvrlkAmAp4SlRTZ45vlthQ==",
- "dev": true,
"bin": {
"parser": "bin/babel-parser.js"
},
@@ -621,14 +486,14 @@
}
},
"node_modules/@babel/template": {
- "version": "7.22.15",
- "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz",
- "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==",
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
+ "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
"dev": true,
"dependencies": {
- "@babel/code-frame": "^7.22.13",
- "@babel/parser": "^7.22.15",
- "@babel/types": "^7.22.15"
+ "@babel/code-frame": "^7.27.1",
+ "@babel/parser": "^7.27.2",
+ "@babel/types": "^7.27.1"
},
"engines": {
"node": ">=6.9.0"
@@ -656,14 +521,13 @@
}
},
"node_modules/@babel/types": {
- "version": "7.23.5",
- "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.5.tgz",
- "integrity": "sha512-ON5kSOJwVO6xXVRTvOI0eOnWe7VdUcIpsovGo9U/Br4Ie4UVFQTboO2cYnDhAGU6Fp+UxSiT+pMft0SMHfuq6w==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
+ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
"dev": true,
"dependencies": {
- "@babel/helper-string-parser": "^7.23.4",
- "@babel/helper-validator-identifier": "^7.22.20",
- "to-fast-properties": "^2.0.0"
+ "@babel/helper-string-parser": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.28.5"
},
"engines": {
"node": ">=6.9.0"
@@ -675,6 +539,26 @@
"integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==",
"dev": true
},
+ "node_modules/@inquirer/external-editor": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/@inquirer/external-editor/-/external-editor-1.0.3.tgz",
+ "integrity": "sha512-RWbSrDiYmO4LbejWY7ttpxczuwQyZLBUyygsA9Nsv95hpzUWwnNTVQmAq3xuh7vNwCp07UTmE5i11XAEExx4RA==",
+ "dependencies": {
+ "chardet": "^2.1.1",
+ "iconv-lite": "^0.7.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@types/node": ">=18"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ }
+ }
+ },
"node_modules/@istanbuljs/load-nyc-config": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz",
@@ -701,9 +585,9 @@
}
},
"node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": {
- "version": "3.14.1",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
- "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
+ "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
"dev": true,
"dependencies": {
"argparse": "^1.0.7",
@@ -1232,7 +1116,7 @@
"version": "20.5.4",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.4.tgz",
"integrity": "sha512-Y9vbIAoM31djQZrPYjpTLo0XlaSwOIsrlfE3LpulZeRblttsLQRFRlBAppW0LOxyT3ALj2M5vU1ucQQayQH3jA==",
- "dev": true
+ "devOptional": true
},
"node_modules/@types/prettier": {
"version": "2.7.3",
@@ -1505,9 +1389,9 @@
}
},
"node_modules/brace-expansion": {
- "version": "1.1.11",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
- "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
@@ -1683,9 +1567,9 @@
}
},
"node_modules/chardet": {
- "version": "0.7.0",
- "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz",
- "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA=="
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/chardet/-/chardet-2.1.1.tgz",
+ "integrity": "sha512-PsezH1rqdV9VvyNhxxOW32/d75r01NY7TQCmOqomRo15ZSOKbpTFVsfjghxo6JloQUCGnH4k1LGu0R4yCLlWQQ=="
},
"node_modules/ci-info": {
"version": "3.8.0",
@@ -1839,9 +1723,9 @@
"dev": true
},
"node_modules/cross-spawn": {
- "version": "7.0.3",
- "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
- "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
"dev": true,
"dependencies": {
"path-key": "^3.1.0",
@@ -2303,19 +2187,6 @@
"resolved": "https://registry.npmjs.org/type/-/type-2.7.2.tgz",
"integrity": "sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw=="
},
- "node_modules/external-editor": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz",
- "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==",
- "dependencies": {
- "chardet": "^0.7.0",
- "iconv-lite": "^0.4.24",
- "tmp": "^0.0.33"
- },
- "engines": {
- "node": ">=4"
- }
- },
"node_modules/fast-json-stable-stringify": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
@@ -2661,14 +2532,18 @@
}
},
"node_modules/iconv-lite": {
- "version": "0.4.24",
- "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
- "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.1.tgz",
+ "integrity": "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw==",
"dependencies": {
- "safer-buffer": ">= 2.1.2 < 3"
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
},
"engines": {
"node": ">=0.10.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
}
},
"node_modules/ieee754": {
@@ -2733,15 +2608,15 @@
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"node_modules/inquirer": {
- "version": "8.2.6",
- "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.6.tgz",
- "integrity": "sha512-M1WuAmb7pn9zdFRtQYk26ZBoY043Sse0wVDdk4Bppr+JOXyQYybdtvK+l9wUibhtjdjvtoiNy8tk+EgsYIUqKg==",
+ "version": "8.2.7",
+ "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.7.tgz",
+ "integrity": "sha512-UjOaSel/iddGZJ5xP/Eixh6dY1XghiBw4XK13rCCIJcJfyhhoul/7KhLLUGtebEj6GDYM6Vnx/mVsjx2L/mFIA==",
"dependencies": {
+ "@inquirer/external-editor": "^1.0.0",
"ansi-escapes": "^4.2.1",
"chalk": "^4.1.1",
"cli-cursor": "^3.1.0",
"cli-width": "^3.0.0",
- "external-editor": "^3.0.3",
"figures": "^3.0.0",
"lodash": "^4.17.21",
"mute-stream": "0.0.8",
@@ -4063,9 +3938,9 @@
"dev": true
},
"node_modules/js-yaml": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
- "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
+ "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"dependencies": {
"argparse": "^2.0.1"
},
@@ -4490,9 +4365,9 @@
"dev": true
},
"node_modules/npm-run-all/node_modules/cross-spawn": {
- "version": "6.0.5",
- "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz",
- "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==",
+ "version": "6.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.6.tgz",
+ "integrity": "sha512-VqCUuhcd1iB+dsv8gxPttb5iZh/D0iubSP21g36KXdEuf6I5JiioesUVjpCdHV9MZRUfVFlvwtIUyPfxo5trtw==",
"dev": true,
"dependencies": {
"nice-try": "^1.0.4",
@@ -4660,14 +4535,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/os-tmpdir": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
- "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==",
- "engines": {
- "node": ">=0.10.0"
- }
- },
"node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
@@ -4781,9 +4648,9 @@
}
},
"node_modules/picocolors": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
- "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==",
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
"dev": true
},
"node_modules/picomatch": {
@@ -5485,32 +5352,12 @@
"next-tick": "1"
}
},
- "node_modules/tmp": {
- "version": "0.0.33",
- "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz",
- "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==",
- "dependencies": {
- "os-tmpdir": "~1.0.2"
- },
- "engines": {
- "node": ">=0.6.0"
- }
- },
"node_modules/tmpl": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz",
"integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
"dev": true
},
- "node_modules/to-fast-properties": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
- "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
- "dev": true,
- "engines": {
- "node": ">=4"
- }
- },
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
@@ -5694,6 +5541,20 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/typescript": {
+ "version": "5.9.3",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
+ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
+ "dev": true,
+ "peer": true,
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=14.17"
+ }
+ },
"node_modules/unbox-primitive": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz",
diff --git a/util/bux/util.ts b/util/bux/util.ts
index 5678b9822b..afaf5300f4 100644
--- a/util/bux/util.ts
+++ b/util/bux/util.ts
@@ -336,7 +336,12 @@ export async function printStatus() {
}
}
-// integrations have one place to look for things like coverage reports
+/**
+ * Gather all build and test results into a single top-level 'build' directory.
+ * Used for both test/coverage reporting and release artifact upload.
+ *
+ * @param basePath The path to the top level of the repo
+ */
export async function gatherBuildResults(basePath: string) {
if (!basePath) {
basePath =
@@ -345,14 +350,17 @@ export async function gatherBuildResults(basePath: string) {
}
const baseBuildDir = path.join(basePath, "build");
- const doCopy = (src: string, dst: string) => {
- info(`Copying build/test results from ${src}/* to ${dst}`);
+ const doCopy = (src: string, dst: string, patterns: string[] = ["*"]) => {
+ info(`Copying [${patterns.join(",")}] from ${src} to ${dst}`);
if (!fs.existsSync(src)) {
warn(`${src} does not exist - skipping`);
return;
}
mkdirp(dst);
- shell.cp("-r", src + "/*", dst);
+
+ for (const pattern of patterns) {
+ shell.cp("-r", src + "/" + pattern, dst);
+ }
};
// packages
@@ -387,7 +395,17 @@ export async function gatherBuildResults(basePath: string) {
);
doCopy(
path.join(basePath, "desktop", "release"),
- path.join(baseBuildDir, "desktop", "release")
+ path.join(baseBuildDir, "desktop", "release"),
+ [
+ "manifest.json",
+ "*.yml",
+ "*.exe",
+ "*.zip",
+ "*.dmg",
+ "*.deb",
+ "*.rpm",
+ "*.AppImage",
+ ]
);
}
diff --git a/util/common-config/package-lock.json b/util/common-config/package-lock.json
index 3a5b7300c1..474f8844dd 100644
--- a/util/common-config/package-lock.json
+++ b/util/common-config/package-lock.json
@@ -31,89 +31,19 @@
}
},
"node_modules/@babel/code-frame": {
- "version": "7.23.5",
- "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz",
- "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==",
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
+ "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
"dev": true,
"dependencies": {
- "@babel/highlight": "^7.23.4",
- "chalk": "^2.4.2"
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.1.1"
},
"engines": {
"node": ">=6.9.0"
}
},
- "node_modules/@babel/code-frame/node_modules/ansi-styles": {
- "version": "3.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
- "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
- "dev": true,
- "dependencies": {
- "color-convert": "^1.9.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/code-frame/node_modules/chalk": {
- "version": "2.4.2",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
- "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
- "dev": true,
- "dependencies": {
- "ansi-styles": "^3.2.1",
- "escape-string-regexp": "^1.0.5",
- "supports-color": "^5.3.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/code-frame/node_modules/color-convert": {
- "version": "1.9.3",
- "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
- "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
- "dev": true,
- "dependencies": {
- "color-name": "1.1.3"
- }
- },
- "node_modules/@babel/code-frame/node_modules/color-name": {
- "version": "1.1.3",
- "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
- "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
- "dev": true
- },
- "node_modules/@babel/code-frame/node_modules/escape-string-regexp": {
- "version": "1.0.5",
- "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
- "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
- "dev": true,
- "engines": {
- "node": ">=0.8.0"
- }
- },
- "node_modules/@babel/code-frame/node_modules/has-flag": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
- "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
- "dev": true,
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/code-frame/node_modules/supports-color": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
- "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
- "dev": true,
- "dependencies": {
- "has-flag": "^3.0.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
"node_modules/@babel/compat-data": {
"version": "7.22.9",
"resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.9.tgz",
@@ -283,18 +213,18 @@
}
},
"node_modules/@babel/helper-string-parser": {
- "version": "7.23.4",
- "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz",
- "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==",
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
+ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
"dev": true,
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-validator-identifier": {
- "version": "7.22.20",
- "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz",
- "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
"dev": true,
"engines": {
"node": ">=6.9.0"
@@ -310,109 +240,26 @@
}
},
"node_modules/@babel/helpers": {
- "version": "7.22.11",
- "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.11.tgz",
- "integrity": "sha512-vyOXC8PBWaGc5h7GMsNx68OH33cypkEDJCHvYVVgVbbxJDROYVtexSk0gK5iCF1xNjRIN2s8ai7hwkWDq5szWg==",
+ "version": "7.28.4",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
+ "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
"dev": true,
"dependencies": {
- "@babel/template": "^7.22.5",
- "@babel/traverse": "^7.22.11",
- "@babel/types": "^7.22.11"
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.4"
},
"engines": {
"node": ">=6.9.0"
}
},
- "node_modules/@babel/highlight": {
- "version": "7.23.4",
- "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz",
- "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==",
- "dev": true,
- "dependencies": {
- "@babel/helper-validator-identifier": "^7.22.20",
- "chalk": "^2.4.2",
- "js-tokens": "^4.0.0"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/highlight/node_modules/ansi-styles": {
- "version": "3.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
- "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
- "dev": true,
- "dependencies": {
- "color-convert": "^1.9.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/highlight/node_modules/chalk": {
- "version": "2.4.2",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
- "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
- "dev": true,
- "dependencies": {
- "ansi-styles": "^3.2.1",
- "escape-string-regexp": "^1.0.5",
- "supports-color": "^5.3.0"
- },
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/highlight/node_modules/color-convert": {
- "version": "1.9.3",
- "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
- "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
- "dev": true,
- "dependencies": {
- "color-name": "1.1.3"
- }
- },
- "node_modules/@babel/highlight/node_modules/color-name": {
- "version": "1.1.3",
- "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
- "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
- "dev": true
- },
- "node_modules/@babel/highlight/node_modules/escape-string-regexp": {
- "version": "1.0.5",
- "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
- "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
- "dev": true,
- "engines": {
- "node": ">=0.8.0"
- }
- },
- "node_modules/@babel/highlight/node_modules/has-flag": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
- "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
- "dev": true,
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/highlight/node_modules/supports-color": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
- "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "node_modules/@babel/parser": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
+ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
"dev": true,
"dependencies": {
- "has-flag": "^3.0.0"
+ "@babel/types": "^7.28.5"
},
- "engines": {
- "node": ">=4"
- }
- },
- "node_modules/@babel/parser": {
- "version": "7.23.5",
- "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.5.tgz",
- "integrity": "sha512-hOOqoiNXrmGdFbhgCzu6GiURxUgM27Xwd/aPuu8RfHEZPBzL1Z54okAHAQjXfcQNwvrlkAmAp4SlRTZ45vlthQ==",
- "dev": true,
"bin": {
"parser": "bin/babel-parser.js"
},
@@ -583,14 +430,14 @@
}
},
"node_modules/@babel/template": {
- "version": "7.22.15",
- "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz",
- "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==",
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
+ "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
"dev": true,
"dependencies": {
- "@babel/code-frame": "^7.22.13",
- "@babel/parser": "^7.22.15",
- "@babel/types": "^7.22.15"
+ "@babel/code-frame": "^7.27.1",
+ "@babel/parser": "^7.27.2",
+ "@babel/types": "^7.27.1"
},
"engines": {
"node": ">=6.9.0"
@@ -618,14 +465,13 @@
}
},
"node_modules/@babel/types": {
- "version": "7.23.5",
- "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.5.tgz",
- "integrity": "sha512-ON5kSOJwVO6xXVRTvOI0eOnWe7VdUcIpsovGo9U/Br4Ie4UVFQTboO2cYnDhAGU6Fp+UxSiT+pMft0SMHfuq6w==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
+ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
"dev": true,
"dependencies": {
- "@babel/helper-string-parser": "^7.23.4",
- "@babel/helper-validator-identifier": "^7.22.20",
- "to-fast-properties": "^2.0.0"
+ "@babel/helper-string-parser": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.28.5"
},
"engines": {
"node": ">=6.9.0"
@@ -1311,9 +1157,9 @@
"dev": true
},
"node_modules/brace-expansion": {
- "version": "1.1.11",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
- "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
"dev": true,
"dependencies": {
"balanced-match": "^1.0.0",
@@ -1397,6 +1243,19 @@
"integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
"dev": true
},
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "dev": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
@@ -1551,9 +1410,9 @@
"dev": true
},
"node_modules/cross-spawn": {
- "version": "7.0.3",
- "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
- "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
"dev": true,
"dependencies": {
"path-key": "^3.1.0",
@@ -1688,6 +1547,20 @@
"node": ">=8"
}
},
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "dev": true,
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/electron-to-chromium": {
"version": "1.4.500",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.500.tgz",
@@ -1721,6 +1594,51 @@
"is-arrayish": "^0.2.1"
}
},
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "dev": true,
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "dev": true,
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/escalade": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
@@ -1879,14 +1797,16 @@
}
},
"node_modules/form-data": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz",
- "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==",
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.4.tgz",
+ "integrity": "sha512-f0cRzm6dkyVYV3nPoooP8XlccPQukegwhAnpoLcXy+X+A8KfpGOoXwDr9FLZd3wzgLaBGQBE3lY93Zm/i1JvIQ==",
"dev": true,
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
- "mime-types": "^2.1.12"
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
+ "mime-types": "^2.1.35"
},
"engines": {
"node": ">= 6"
@@ -1913,10 +1833,13 @@
}
},
"node_modules/function-bind": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
- "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==",
- "dev": true
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
},
"node_modules/gensync": {
"version": "1.0.0-beta.2",
@@ -1936,6 +1859,30 @@
"node": "6.* || 8.* || >= 10.*"
}
},
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
"node_modules/get-package-type": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz",
@@ -1945,6 +1892,19 @@
"node": ">=8.0.0"
}
},
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "dev": true,
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/get-stream": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
@@ -1986,6 +1946,18 @@
"node": ">=4"
}
},
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
"node_modules/graceful-fs": {
"version": "4.2.11",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
@@ -2013,6 +1985,45 @@
"node": ">=8"
}
},
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "dev": true,
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "dev": true,
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/html-encoding-sniffer": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-2.0.1.tgz",
@@ -2917,9 +2928,9 @@
"dev": true
},
"node_modules/js-yaml": {
- "version": "3.14.1",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
- "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
+ "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
"dev": true,
"dependencies": {
"argparse": "^1.0.7",
@@ -3125,6 +3136,15 @@
"tmpl": "1.0.5"
}
},
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/merge-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
@@ -3355,9 +3375,9 @@
"dev": true
},
"node_modules/picocolors": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
- "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==",
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
"dev": true
},
"node_modules/picomatch": {
@@ -3796,15 +3816,6 @@
"integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
"dev": true
},
- "node_modules/to-fast-properties": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
- "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
- "dev": true,
- "engines": {
- "node": ">=4"
- }
- },
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
@@ -3950,6 +3961,20 @@
"is-typedarray": "^1.0.0"
}
},
+ "node_modules/typescript": {
+ "version": "4.9.5",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz",
+ "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==",
+ "dev": true,
+ "peer": true,
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=4.2.0"
+ }
+ },
"node_modules/universalify": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz",