From 717c7b6a7f59b513b15f36a1e9df21652081aea2 Mon Sep 17 00:00:00 2001 From: Mary Gao Date: Tue, 30 Jan 2024 14:06:38 +0800 Subject: [PATCH 1/6] Update the dependencies --- common/config/rush/pnpm-lock.yaml | 272 ++++++++++++++++++++++------ packages/typespec-test/package.json | 18 +- packages/typespec-ts/package.json | 22 +-- 3 files changed, 233 insertions(+), 79 deletions(-) diff --git a/common/config/rush/pnpm-lock.yaml b/common/config/rush/pnpm-lock.yaml index d4714aa664..2fde24ee07 100644 --- a/common/config/rush/pnpm-lock.yaml +++ b/common/config/rush/pnpm-lock.yaml @@ -191,32 +191,32 @@ importers: ../../packages/typespec-test: specifiers: - '@azure-tools/typespec-autorest': '>=0.37.2 <1.0.0' - '@azure-tools/typespec-azure-core': '>=0.37.2 <1.0.0' - '@azure-tools/typespec-client-generator-core': '>=0.37.0 <1.0.0' + '@azure-tools/typespec-autorest': '>=0.38.1 <1.0.0' + '@azure-tools/typespec-azure-core': '>=0.38.0 <1.0.0' + '@azure-tools/typespec-client-generator-core': '>=0.38.0 <1.0.0' '@azure-tools/typespec-ts': workspace:^0.21.0 '@types/mocha': ^5.2.7 '@types/node': ^18.0.0 - '@typespec/compiler': '>=0.51.0 <1.0.0' - '@typespec/http': '>=0.51.0 <1.0.0' - '@typespec/openapi': '>=0.51.0 <1.0.0' - '@typespec/openapi3': '>=0.51.1 <1.0.0' - '@typespec/rest': '>=0.51.0 <1.0.0' - '@typespec/versioning': '>=0.51.0 <1.0.0' + '@typespec/compiler': '>=0.52.0 <1.0.0' + '@typespec/http': '>=0.52.0 <1.0.0' + '@typespec/openapi': '>=0.52.0 <1.0.0' + '@typespec/openapi3': '>=0.52.0 <1.0.0' + '@typespec/rest': '>=0.52.0 <1.0.0' + '@typespec/versioning': '>=0.52.0 <1.0.0' prettier: ^3.1.0 ts-node: ^8.5.2 typescript: ~5.2.0 dependencies: - '@azure-tools/typespec-autorest': 0.37.2_5kmgmb7zg7fbvjypl7nux4vhay - '@azure-tools/typespec-azure-core': 0.37.2_go2aawq7a7w4termvhoebskuti - '@azure-tools/typespec-client-generator-core': 0.37.0_ejqqrhxs4enfjwgbuhtcygsti4 + '@azure-tools/typespec-autorest': 0.38.1_f6nhdcmyim5btmbccbuco5x6xq + '@azure-tools/typespec-azure-core': 0.38.0_4mursonewu5hn2yhcp44ukhhky + '@azure-tools/typespec-client-generator-core': 0.38.0_hcpcasyazuhu5r37x6lfh5gruy '@azure-tools/typespec-ts': link:../typespec-ts - '@typespec/compiler': 0.51.0 - '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 - '@typespec/openapi': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 - '@typespec/openapi3': 0.51.1_jt3ymhmg2m4fvil3jq422lyvli - '@typespec/rest': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 - '@typespec/versioning': 0.51.0_@typespec+compiler@0.51.0 + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + '@typespec/openapi': 0.52.0_ul257hu73e5sf6e67hlhqupana + '@typespec/openapi3': 0.52.0_cib4advrmwzlvavwzsf74mfcsu + '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana + '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 prettier: 3.1.1 devDependencies: '@types/mocha': 5.2.7 @@ -486,23 +486,23 @@ packages: - supports-color dev: true - /@azure-tools/typespec-autorest/0.37.2_5kmgmb7zg7fbvjypl7nux4vhay: - resolution: {integrity: sha512-iLNF2tdIET0qVmZ+7LMXGLj9tDI0I8pfDq12kSLQryAp6NzOn6hMz73gUEgIE1Gj6KE4YrQXfHf/F4kNfHpWeA==} + /@azure-tools/typespec-autorest/0.38.1_f6nhdcmyim5btmbccbuco5x6xq: + resolution: {integrity: sha512-3aG5+eTCc/VqDxVBrrE2+wAnobGczt4foaxP6hIzwtBICj2vj83GFDi59l7OItkvYPooUbl3dIcLmS54F9P2ZQ==} engines: {node: '>=18.0.0'} peerDependencies: - '@azure-tools/typespec-azure-core': ~0.37.1 - '@typespec/compiler': ~0.51.0 - '@typespec/http': ~0.51.0 - '@typespec/openapi': ~0.51.0 - '@typespec/rest': ~0.51.0 - '@typespec/versioning': ~0.51.0 - dependencies: - 
'@azure-tools/typespec-azure-core': 0.37.2_go2aawq7a7w4termvhoebskuti - '@typespec/compiler': 0.51.0 - '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 - '@typespec/openapi': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 - '@typespec/rest': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 - '@typespec/versioning': 0.51.0_@typespec+compiler@0.51.0 + '@azure-tools/typespec-azure-core': ~0.38.0 + '@typespec/compiler': ~0.52.0 + '@typespec/http': ~0.52.0 + '@typespec/openapi': ~0.52.0 + '@typespec/rest': ~0.52.0 + '@typespec/versioning': ~0.52.0 + dependencies: + '@azure-tools/typespec-azure-core': 0.38.0_4mursonewu5hn2yhcp44ukhhky + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + '@typespec/openapi': 0.52.0_ul257hu73e5sf6e67hlhqupana + '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana + '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 dev: false /@azure-tools/typespec-azure-core/0.37.2_go2aawq7a7w4termvhoebskuti: @@ -516,6 +516,20 @@ packages: '@typespec/compiler': 0.51.0 '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 '@typespec/rest': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 + dev: true + + /@azure-tools/typespec-azure-core/0.38.0_4mursonewu5hn2yhcp44ukhhky: + resolution: {integrity: sha512-ASM+njC2lpzPykzw2OicWIaAOH+OBe3bVMrufEnINBjlr7owAtudvjrTLLWmAVMBciL/YOF579KdyjxTbaxJ5A==} + engines: {node: '>=18.0.0'} + peerDependencies: + '@typespec/compiler': ~0.52.0 + '@typespec/http': ~0.52.0 + '@typespec/rest': ~0.52.0 + dependencies: + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana + dev: false /@azure-tools/typespec-client-generator-core/0.37.0_ejqqrhxs4enfjwgbuhtcygsti4: resolution: {integrity: sha512-wOQMN4gL5LrDsGJw1QkIHHT8SxurdX/E1T8I7enNo9UnPnpnNqTmJ9fusYjJhWnA6/qi51mRD7VX2Ymxh9WN6g==} @@ -532,6 +546,24 @@ packages: '@typespec/versioning': 0.51.0_@typespec+compiler@0.51.0 change-case: 4.1.2 pluralize: 8.0.0 + dev: true + + /@azure-tools/typespec-client-generator-core/0.38.0_hcpcasyazuhu5r37x6lfh5gruy: + resolution: {integrity: sha512-DUDIHJikz3Ai8uPk3vKFoMkkGPUxoD5DbGdwkN/pQxaL6Aze8HV4LGEOGtvaIu0SsGjCX9G3XPAXoBoupYgXbw==} + engines: {node: '>=18.0.0'} + peerDependencies: + '@typespec/compiler': ~0.52.0 + '@typespec/http': ~0.52.0 + '@typespec/rest': ~0.52.0 + '@typespec/versioning': ~0.52.0 + dependencies: + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana + '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 + change-case: 5.3.0 + pluralize: 8.0.0 + dev: false /@azure/abort-controller/1.1.0: resolution: {integrity: sha512-TrRLIoSQVzfAJX9H1JeFjzAoDGcoK1IYX1UImfceTZpsyYfWr09Ss1aHW1y5TrrR3iq6RZLBwJ3E24uwPhwahw==} @@ -726,15 +758,23 @@ packages: resolution: {integrity: sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==} engines: {node: '>=6.9.0'} dependencies: - '@babel/highlight': 7.22.20 + '@babel/highlight': 7.23.4 + chalk: 2.4.2 + dev: true + + /@babel/code-frame/7.23.5: + resolution: {integrity: sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/highlight': 7.23.4 chalk: 2.4.2 /@babel/helper-validator-identifier/7.22.20: resolution: {integrity: sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==} engines: {node: '>=6.9.0'} - /@babel/highlight/7.22.20: - resolution: {integrity: 
sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==} + /@babel/highlight/7.23.4: + resolution: {integrity: sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==} engines: {node: '>=6.9.0'} dependencies: '@babel/helper-validator-identifier': 7.22.20 @@ -954,6 +994,11 @@ packages: - supports-color dev: true + /@sindresorhus/merge-streams/1.0.0: + resolution: {integrity: sha512-rUV5WyJrJLoloD4NDN1V1+LDMDWOa4OTsT4yYJwQNpTU6FWxkxHpL7eu4w+DmiH8x/EAM1otkPE1+LaspIbplw==} + engines: {node: '>=18'} + dev: false + /@sinonjs/commons/1.8.6: resolution: {integrity: sha512-Ky+XkAkqPZSm3NLBeUng77EBQl3cmeJhITaGHdYH8kjVB+aun3S4XBRti2zt17mtt0mIUDiNxYeoJm6drVvBJQ==} dependencies: @@ -1011,7 +1056,7 @@ packages: /@ts-morph/common/0.16.0: resolution: {integrity: sha512-SgJpzkTgZKLKqQniCjLaE3c2L2sdL7UShvmTmPBejAKd2OKV/yfMpQ2IWpAuA+VY5wy7PkSUaEObIqEK6afFuw==} dependencies: - fast-glob: 3.3.1 + fast-glob: 3.3.2 minimatch: 5.1.6 mkdirp: 1.0.4 path-browserify: 1.0.1 @@ -1313,6 +1358,27 @@ packages: vscode-languageserver-textdocument: 1.0.8 yaml: 2.3.2 yargs: 17.7.2 + dev: true + + /@typespec/compiler/0.52.0: + resolution: {integrity: sha512-36cZ5RWxRjL4SUe41KjPh3j3RQibpUoOzHcSllQJ3ByTSZdXv1zckMHLiRfaAbTXUADSAn2GMs4ZO3s8GdOGIQ==} + engines: {node: '>=18.0.0'} + hasBin: true + dependencies: + '@babel/code-frame': 7.23.5 + ajv: 8.12.0 + change-case: 5.3.0 + globby: 14.0.0 + mustache: 4.2.0 + picocolors: 1.0.0 + prettier: 3.1.1 + prompts: 2.4.2 + semver: 7.5.4 + vscode-languageserver: 9.0.1 + vscode-languageserver-textdocument: 1.0.8 + yaml: 2.3.4 + yargs: 17.7.2 + dev: false /@typespec/http/0.51.0_@typespec+compiler@0.51.0: resolution: {integrity: sha512-9YtcIPzUP0ELf/ZFBfhgLCPIyERn+DrYJTtEtaWkcO+qEkdFxO5eahwgh1FPuS6iJrW6pUPBuAfGDOAH1+N/PQ==} @@ -1321,32 +1387,42 @@ packages: '@typespec/compiler': ~0.51.0 dependencies: '@typespec/compiler': 0.51.0 + dev: true - /@typespec/openapi/0.51.0_xnewuom45z4pw3jt3dzaukcfn4: - resolution: {integrity: sha512-0Np++QSPculQZJE2Of6zFyrwgJj+n6WHQ30HVT9AdoJba3WjI/FvW6B/HUf08CnG4KxaUbC3hvS6FguwViP0wA==} + /@typespec/http/0.52.1_@typespec+compiler@0.52.0: + resolution: {integrity: sha512-2i7t6eSKi96F/zt1w0yJvhRhubYej0F9o8jDRhPA+TZI6SAxcv/Vyi+lkKnkOcu90HPH7b8T+YNizudb00BO6A==} engines: {node: '>=18.0.0'} peerDependencies: - '@typespec/compiler': ~0.51.0 - '@typespec/http': ~0.51.0 + '@typespec/compiler': ~0.52.0 dependencies: - '@typespec/compiler': 0.51.0 - '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 + '@typespec/compiler': 0.52.0 dev: false - /@typespec/openapi3/0.51.1_jt3ymhmg2m4fvil3jq422lyvli: - resolution: {integrity: sha512-tONhr11J8iKwSEirlHwC6PeRE33RyHBDqdrOUNmAji1wVQrdbQkoSJ6iIRXNG6gr81z+h9h/NGaQxFlA4ZWdQQ==} + /@typespec/openapi/0.52.0_ul257hu73e5sf6e67hlhqupana: + resolution: {integrity: sha512-2Otnu9glehxvp6TU7NOHEniBDDKufV03XTmeVGgGEmu/j+cveAMg8lA1/O0RBpS2oHGsCFnMEuPcR8M1c0LI+Q==} engines: {node: '>=18.0.0'} peerDependencies: - '@typespec/compiler': ~0.51.0 - '@typespec/http': ~0.51.0 - '@typespec/openapi': ~0.51.0 - '@typespec/versioning': ~0.51.0 + '@typespec/compiler': ~0.52.0 + '@typespec/http': ~0.52.0 dependencies: - '@typespec/compiler': 0.51.0 - '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 - '@typespec/openapi': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 - '@typespec/versioning': 0.51.0_@typespec+compiler@0.51.0 - yaml: 2.3.2 + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + dev: false + + /@typespec/openapi3/0.52.0_cib4advrmwzlvavwzsf74mfcsu: 
+ resolution: {integrity: sha512-PPhNdpKQD2iHJemOaRUhnaeFWa4ApW4HtcZI+jrg4hyNSIwDYxL0OwwRohKjRUKM98iacpXvEh+5rKtkPiY2Qw==} + engines: {node: '>=18.0.0'} + peerDependencies: + '@typespec/compiler': ~0.52.0 + '@typespec/http': ~0.52.0 + '@typespec/openapi': ~0.52.0 + '@typespec/versioning': ~0.52.0 + dependencies: + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + '@typespec/openapi': 0.52.0_ul257hu73e5sf6e67hlhqupana + '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 + yaml: 2.3.4 dev: false /@typespec/rest/0.51.0_xnewuom45z4pw3jt3dzaukcfn4: @@ -1358,6 +1434,18 @@ packages: dependencies: '@typespec/compiler': 0.51.0 '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 + dev: true + + /@typespec/rest/0.52.0_ul257hu73e5sf6e67hlhqupana: + resolution: {integrity: sha512-dLsY0fS60IVaAt4eCRcvEqorX/miPVV33du3dETTYYmbHtfEbvBKgTj/m6OH4noey7oaihlvLz5kYyLv8Am7zA==} + engines: {node: '>=18.0.0'} + peerDependencies: + '@typespec/compiler': ~0.52.0 + '@typespec/http': ~0.52.0 + dependencies: + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + dev: false /@typespec/ts-http-runtime/1.0.0-alpha.20231129.4: resolution: {integrity: sha512-H2bI/Pxc31/y0p9bMzKoplA/BuoxpHrT71NJkdPCRugvyTlYFPbrdxWOBqrFr175HKjy4pzaiBn4Dg5+tXpfhg==} @@ -1377,6 +1465,16 @@ packages: '@typespec/compiler': ~0.51.0 dependencies: '@typespec/compiler': 0.51.0 + dev: true + + /@typespec/versioning/0.52.0_@typespec+compiler@0.52.0: + resolution: {integrity: sha512-Vr4WHaZiDOxJqRp8/u6X0R45E+rFKEprYmSZX0o5bzetj0cVjOIEbQZvDJCif1Uz0S3K0KKfqf/kYmdYWMJ7Dw==} + engines: {node: '>=18.0.0'} + peerDependencies: + '@typespec/compiler': ~0.52.0 + dependencies: + '@typespec/compiler': 0.52.0 + dev: false /@ungap/promise-all-settled/1.1.2: resolution: {integrity: sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==} @@ -1910,6 +2008,7 @@ packages: dependencies: pascal-case: 3.1.2 tslib: 2.6.2 + dev: true /camelcase/6.3.0: resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} @@ -1926,6 +2025,7 @@ packages: no-case: 3.0.4 tslib: 2.6.2 upper-case-first: 2.0.2 + dev: true /caseless/0.12.0: resolution: {integrity: sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} @@ -1984,6 +2084,11 @@ packages: sentence-case: 3.0.4 snake-case: 3.0.4 tslib: 2.6.2 + dev: true + + /change-case/5.3.0: + resolution: {integrity: sha512-Eykca0fGS/xYlx2fG5NqnGSnsWauhSGiSXYhB1kO6E909GUfo8S54u4UZNS7lMJmgZumZ2SUpWaoLgAcfQRICg==} + dev: false /check-error/1.0.2: resolution: {integrity: sha512-BrgHpW9NURQgzoNyjfq0Wu6VFO6D7IZEmJNdtgNqpzGG8RuNFHt2jQxWlAs4HMe119chBnv+34syEZtc6IhLtA==} @@ -2149,6 +2254,7 @@ packages: no-case: 3.0.4 tslib: 2.6.2 upper-case: 2.0.2 + dev: true /content-disposition/0.5.4: resolution: {integrity: sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==} @@ -2421,6 +2527,7 @@ packages: engines: {node: '>=8'} dependencies: path-type: 4.0.0 + dev: true /directory-tree/2.4.0: resolution: {integrity: sha512-AM03Th+ypDAHefyB6SP3uezaWkTbol1P43CS5yFU7wePTuHnR4YoHgY6KbGHLr/a065ocN26l9lXOoFBzzM31w==} @@ -2449,6 +2556,7 @@ packages: dependencies: no-case: 3.0.4 tslib: 2.6.2 + dev: true /dotenv/16.3.1: resolution: {integrity: sha512-IPzF4w4/Rd94bA9imS68tZBaYyBWSCE47V1RGuMrB94iyTOIEwRmVL2x/4An+6mETpLrKJ5hQkB8W4kFAadeIQ==} @@ -2887,8 +2995,8 @@ packages: resolution: {integrity: 
sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==} dev: true - /fast-glob/3.3.1: - resolution: {integrity: sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==} + /fast-glob/3.3.2: + resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==} engines: {node: '>=8.6.0'} dependencies: '@nodelib/fs.stat': 2.0.5 @@ -3268,7 +3376,7 @@ packages: dependencies: array-union: 2.1.0 dir-glob: 3.0.1 - fast-glob: 3.3.1 + fast-glob: 3.3.2 ignore: 5.2.4 merge2: 1.4.1 slash: 3.0.0 @@ -3279,10 +3387,23 @@ packages: engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} dependencies: dir-glob: 3.0.1 - fast-glob: 3.3.1 + fast-glob: 3.3.2 ignore: 5.2.4 merge2: 1.4.1 slash: 4.0.0 + dev: true + + /globby/14.0.0: + resolution: {integrity: sha512-/1WM/LNHRAOH9lZta77uGbq0dAEQM+XjNesWwhlERDVenqothRbnzTrL3/LrIoEPPjeUHC3vrS6TwoyxeHs7MQ==} + engines: {node: '>=18'} + dependencies: + '@sindresorhus/merge-streams': 1.0.0 + fast-glob: 3.3.2 + ignore: 5.2.4 + path-type: 5.0.0 + slash: 5.1.0 + unicorn-magic: 0.1.0 + dev: false /gopd/1.0.1: resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} @@ -3382,6 +3503,7 @@ packages: dependencies: capital-case: 1.0.4 tslib: 2.6.2 + dev: true /hosted-git-info/2.8.9: resolution: {integrity: sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==} @@ -4136,6 +4258,7 @@ packages: resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==} dependencies: tslib: 2.6.2 + dev: true /lru-cache/10.0.1: resolution: {integrity: sha512-IJ4uwUTi2qCccrioU6g9g/5rvvVl13bsdczUUcqbciD9iLr095yj8DQKdObriEvuNSx325N1rV1O0sJFszx75g==} @@ -4435,6 +4558,7 @@ packages: dependencies: lower-case: 2.0.2 tslib: 2.6.2 + dev: true /node-cmd/3.0.0: resolution: {integrity: sha512-SBvtm39iEkhEEDbUowR0O2YVaqpbD2nRvQ3fxXP/Tn1FgRpZAaUb8yKeEtFulBIv+xTHDodOKkj4EXIBANj+AQ==} @@ -4654,6 +4778,7 @@ packages: dependencies: dot-case: 3.0.4 tslib: 2.6.2 + dev: true /parent-module/1.0.1: resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} @@ -4674,7 +4799,7 @@ packages: resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} engines: {node: '>=8'} dependencies: - '@babel/code-frame': 7.22.13 + '@babel/code-frame': 7.23.5 error-ex: 1.3.2 json-parse-even-better-errors: 2.3.1 lines-and-columns: 1.2.4 @@ -4690,6 +4815,7 @@ packages: dependencies: no-case: 3.0.4 tslib: 2.6.2 + dev: true /path-browserify/1.0.1: resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} @@ -4699,6 +4825,7 @@ packages: dependencies: dot-case: 3.0.4 tslib: 2.6.2 + dev: true /path-exists/4.0.0: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} @@ -4752,6 +4879,12 @@ packages: /path-type/4.0.0: resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} engines: {node: '>=8'} + dev: true + + /path-type/5.0.0: + resolution: {integrity: sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==} + engines: {node: '>=12'} + dev: false /pathval/1.1.1: resolution: {integrity: 
sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} @@ -5221,6 +5354,7 @@ packages: no-case: 3.0.4 tslib: 2.6.2 upper-case-first: 2.0.2 + dev: true /serialize-javascript/6.0.0: resolution: {integrity: sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==} @@ -5334,6 +5468,12 @@ packages: /slash/4.0.0: resolution: {integrity: sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==} engines: {node: '>=12'} + dev: true + + /slash/5.1.0: + resolution: {integrity: sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==} + engines: {node: '>=14.16'} + dev: false /smart-buffer/4.2.0: resolution: {integrity: sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==} @@ -5345,6 +5485,7 @@ packages: dependencies: dot-case: 3.0.4 tslib: 2.6.2 + dev: true /socket.io-adapter/2.5.2: resolution: {integrity: sha512-87C3LO/NOMc+eMcpcxUBebGjkpMDkNBS9tf7KJqcDsmL936EChtVva71Dw2q4tQcuVC+hAUy4an2NO/sYXmwRA==} @@ -5923,6 +6064,11 @@ packages: resolution: {integrity: sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A==} dev: true + /unicorn-magic/0.1.0: + resolution: {integrity: sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==} + engines: {node: '>=18'} + dev: false + /universalify/0.1.2: resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==} engines: {node: '>= 4.0.0'} @@ -5952,11 +6098,13 @@ packages: resolution: {integrity: sha512-514ppYHBaKwfJRK/pNC6c/OxfGa0obSnAl106u97Ed0I625Nin96KAjttZF6ZL3e1XLtphxnqrOi9iWgm+u+bg==} dependencies: tslib: 2.6.2 + dev: true /upper-case/2.0.2: resolution: {integrity: sha512-KgdgDGJt2TpuwBUIjgG6lzw2GWFRCW9Qkfkiv0DxqHHLYJHmtmdUIKcZd8rHgFSjopVTlw6ggzCm1b8MFQwikg==} dependencies: tslib: 2.6.2 + dev: true /uri-js/4.4.1: resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} @@ -6330,6 +6478,12 @@ packages: /yaml/2.3.2: resolution: {integrity: sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==} engines: {node: '>= 14'} + dev: true + + /yaml/2.3.4: + resolution: {integrity: sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA==} + engines: {node: '>= 14'} + dev: false /yargs-parser/20.2.4: resolution: {integrity: sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==} diff --git a/packages/typespec-test/package.json b/packages/typespec-test/package.json index 0290449665..f8a1199b91 100644 --- a/packages/typespec-test/package.json +++ b/packages/typespec-test/package.json @@ -4,15 +4,15 @@ "type": "module", "dependencies": { "@azure-tools/typespec-ts": "workspace:^0.21.0", - "@typespec/openapi": ">=0.51.0 <1.0.0", - "@azure-tools/typespec-autorest": ">=0.37.2 <1.0.0", - "@typespec/openapi3": ">=0.51.1 <1.0.0", - "@azure-tools/typespec-azure-core": ">=0.37.2 <1.0.0", - "@azure-tools/typespec-client-generator-core": ">=0.37.0 <1.0.0", - "@typespec/compiler": ">=0.51.0 <1.0.0", - "@typespec/http": ">=0.51.0 <1.0.0", - "@typespec/rest": ">=0.51.0 <1.0.0", - "@typespec/versioning": ">=0.51.0 <1.0.0", + "@typespec/openapi": ">=0.52.0 <1.0.0", + "@azure-tools/typespec-autorest": ">=0.38.1 <1.0.0", + "@typespec/openapi3": ">=0.52.0 <1.0.0", + 
"@azure-tools/typespec-azure-core": ">=0.38.0 <1.0.0", + "@azure-tools/typespec-client-generator-core": ">=0.38.0 <1.0.0", + "@typespec/compiler": ">=0.52.0 <1.0.0", + "@typespec/http": ">=0.52.1 <1.0.0", + "@typespec/rest": ">=0.52.0 <1.0.0", + "@typespec/versioning": ">=0.52.0 <1.0.0", "prettier": "^3.1.0" }, "devDependencies": { diff --git a/packages/typespec-ts/package.json b/packages/typespec-ts/package.json index 120047d1f5..c02c38d27b 100644 --- a/packages/typespec-ts/package.json +++ b/packages/typespec-ts/package.json @@ -27,7 +27,7 @@ "copy:typespec": "rm -rf temp && mkdirp -p temp && cp -r ./node_modules/@azure-tools/cadl-ranch-specs/* ./temp && cp -r ./test/integration/typespec/* ./temp/http", "generate-and-run:rlc": "npm run generate-tsp-only:rlc && npm run integration-test:alone:rlc && npm run stop-test-server", "generate-and-run:modular": "npm run generate-tsp-only:modular && npm run integration-test:alone:modular && npm run stop-test-server", - "generate-tsp-only": "npm run generate-tsp-only:rlc && npm run generate-tsp-only:modular && npm run generate-tsp-only:non-branded-rlc && npm run generate-tsp-only:non-branded-modular", + "generate-tsp-only": "npm run generate-tsp-only:rlc && npm run generate-tsp-only:modular && npm run generate-tsp-only:non-branded-rlc && npm run generate-tsp-only:non-branded-modular", "generate-tsp-only:rlc": "node --loader ts-node/esm ./test/commands/gen-cadl-ranch.ts --tag=rlc", "generate-tsp-only:modular": "node --loader ts-node/esm ./test/commands/gen-cadl-ranch.ts --tag=modular", "generate-tsp-only:non-branded": "npm run generate-tsp-only:non-branded-rlc && npm run generate-tsp-only:non-branded-modular", @@ -77,18 +77,18 @@ "@typespec/ts-http-runtime": "1.0.0-alpha.20231129.4", "@azure-tools/typespec-azure-core": "0.37.2", "@azure-tools/typespec-client-generator-core": "0.37.0", - "@typespec/compiler": "0.51.0", - "@typespec/http": "0.51.0", - "@typespec/rest": "0.51.0", - "@typespec/versioning": "0.51.0" + "@typespec/compiler": "0.52.0", + "@typespec/http": "0.52.1", + "@typespec/rest": "0.52.0", + "@typespec/versioning": "0.52.0" }, "peerDependencies": { - "@azure-tools/typespec-azure-core": ">=0.37.2 <1.0.0", - "@azure-tools/typespec-client-generator-core": ">=0.37.0 <1.0.0", - "@typespec/compiler": ">=0.51.0 <1.0.0", - "@typespec/http": ">=0.51.0 <1.0.0", - "@typespec/rest": ">=0.51.0 <1.0.0", - "@typespec/versioning": ">=0.51.0 <1.0.0" + "@azure-tools/typespec-azure-core": ">=0.38.0 <1.0.0", + "@azure-tools/typespec-client-generator-core": ">=0.38.0 <1.0.0", + "@typespec/compiler": ">=0.52.0 <1.0.0", + "@typespec/http": ">=0.52.1 <1.0.0", + "@typespec/rest": ">=0.52.0 <1.0.0", + "@typespec/versioning": ">=0.52.0 <1.0.0" }, "dependencies": { "prettier": "^3.1.0", From 90d6b4f7c3f2c44fef802682e71c1f514f50917b Mon Sep 17 00:00:00 2001 From: Mary Gao Date: Tue, 30 Jan 2024 14:08:13 +0800 Subject: [PATCH 2/6] Update lock file --- common/config/rush/pnpm-lock.yaml | 112 ++++++++++++++---------------- 1 file changed, 51 insertions(+), 61 deletions(-) diff --git a/common/config/rush/pnpm-lock.yaml b/common/config/rush/pnpm-lock.yaml index 2fde24ee07..2da6efe461 100644 --- a/common/config/rush/pnpm-lock.yaml +++ b/common/config/rush/pnpm-lock.yaml @@ -198,7 +198,7 @@ importers: '@types/mocha': ^5.2.7 '@types/node': ^18.0.0 '@typespec/compiler': '>=0.52.0 <1.0.0' - '@typespec/http': '>=0.52.0 <1.0.0' + '@typespec/http': '>=0.52.1 <1.0.0' '@typespec/openapi': '>=0.52.0 <1.0.0' '@typespec/openapi3': '>=0.52.0 <1.0.0' '@typespec/rest': '>=0.52.0 
<1.0.0' @@ -245,11 +245,11 @@ importers: '@types/node': ^18.0.0 '@typescript-eslint/eslint-plugin': ^6.8.0 '@typescript-eslint/parser': ^6.8.0 - '@typespec/compiler': 0.51.0 - '@typespec/http': 0.51.0 - '@typespec/rest': 0.51.0 + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1 + '@typespec/rest': 0.52.0 '@typespec/ts-http-runtime': 1.0.0-alpha.20231129.4 - '@typespec/versioning': 0.51.0 + '@typespec/versioning': 0.52.0 chai: ^4.3.6 chalk: ^4.0.0 cross-env: ^7.0.3 @@ -272,11 +272,11 @@ importers: tslib: 2.6.2 devDependencies: '@azure-rest/core-client': 1.1.6 - '@azure-tools/cadl-ranch': 0.11.2_jomoca4gmv4qvfhpxu6msmnyuy - '@azure-tools/cadl-ranch-expect': 0.11.0_ejqqrhxs4enfjwgbuhtcygsti4 - '@azure-tools/cadl-ranch-specs': 0.28.7_ktlnye6tq344n3leprf7kev6zm - '@azure-tools/typespec-azure-core': 0.37.2_go2aawq7a7w4termvhoebskuti - '@azure-tools/typespec-client-generator-core': 0.37.0_ejqqrhxs4enfjwgbuhtcygsti4 + '@azure-tools/cadl-ranch': 0.11.2_z4ytlbo62miz7wplut4y2tqdsa + '@azure-tools/cadl-ranch-expect': 0.11.0_hcpcasyazuhu5r37x6lfh5gruy + '@azure-tools/cadl-ranch-specs': 0.28.7_twmjmv6p2zvexrbfeqaqdpjhtm + '@azure-tools/typespec-azure-core': 0.37.2_4mursonewu5hn2yhcp44ukhhky + '@azure-tools/typespec-client-generator-core': 0.37.0_hcpcasyazuhu5r37x6lfh5gruy '@azure/core-auth': 1.5.0 '@azure/core-lro': 2.5.4 '@azure/core-paging': 1.5.0 @@ -289,11 +289,11 @@ importers: '@types/node': 18.18.0 '@typescript-eslint/eslint-plugin': 6.8.0_qc27boxdfajyxyoyktucppwpla '@typescript-eslint/parser': 6.8.0_jk7qbkaijtltyu4ajmze3dfiwa - '@typespec/compiler': 0.51.0 - '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 - '@typespec/rest': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana '@typespec/ts-http-runtime': 1.0.0-alpha.20231129.4 - '@typespec/versioning': 0.51.0_@typespec+compiler@0.51.0 + '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 chai: 4.3.8 chalk: 4.1.2 cross-env: 7.0.3 @@ -384,7 +384,22 @@ packages: - supports-color dev: true - /@azure-tools/cadl-ranch-expect/0.11.0_ejqqrhxs4enfjwgbuhtcygsti4: + /@azure-tools/cadl-ranch-expect/0.11.0_hcpcasyazuhu5r37x6lfh5gruy: + resolution: {integrity: sha512-0iTdWr8X+Su+3HIfg1glIzlMzXdsIHKIAAZ8ks480wCVwkcCJQtFn4AtSYXLY2FWf0b6jzb+51BvDpAi0SSwqQ==} + engines: {node: '>=16.0.0'} + peerDependencies: + '@typespec/compiler': ~0.51.0 + '@typespec/http': ~0.51.0 + '@typespec/rest': ~0.51.0 + '@typespec/versioning': ~0.51.0 + dependencies: + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana + '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 + dev: true + + /@azure-tools/cadl-ranch-expect/0.11.0_yiatrwz2wazvocak7hjejrlhfa: resolution: {integrity: sha512-0iTdWr8X+Su+3HIfg1glIzlMzXdsIHKIAAZ8ks480wCVwkcCJQtFn4AtSYXLY2FWf0b6jzb+51BvDpAi0SSwqQ==} engines: {node: '>=16.0.0'} peerDependencies: @@ -396,10 +411,10 @@ packages: '@typespec/compiler': 0.51.0 '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 '@typespec/rest': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 - '@typespec/versioning': 0.51.0_@typespec+compiler@0.51.0 + '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 dev: true - /@azure-tools/cadl-ranch-specs/0.28.7_ktlnye6tq344n3leprf7kev6zm: + /@azure-tools/cadl-ranch-specs/0.28.7_twmjmv6p2zvexrbfeqaqdpjhtm: resolution: {integrity: sha512-UDYR64oL0QZQ4FxKI7SEKPoLhewlYZRUrOD0fTnfUCTPuaQ/P5h+F5NZfEBHer+2WRhuvY5Xc7Glba33NPyTWg==} engines: 
{node: '>=16.0.0'} peerDependencies: @@ -410,28 +425,28 @@ packages: '@typespec/rest': ~0.51.0 '@typespec/versioning': ~0.51.0 dependencies: - '@azure-tools/cadl-ranch': 0.11.2_jomoca4gmv4qvfhpxu6msmnyuy + '@azure-tools/cadl-ranch': 0.11.2_z4ytlbo62miz7wplut4y2tqdsa '@azure-tools/cadl-ranch-api': 0.4.3 - '@azure-tools/cadl-ranch-expect': 0.11.0_ejqqrhxs4enfjwgbuhtcygsti4 - '@azure-tools/typespec-azure-core': 0.37.2_go2aawq7a7w4termvhoebskuti - '@typespec/compiler': 0.51.0 - '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 - '@typespec/rest': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 - '@typespec/versioning': 0.51.0_@typespec+compiler@0.51.0 + '@azure-tools/cadl-ranch-expect': 0.11.0_hcpcasyazuhu5r37x6lfh5gruy + '@azure-tools/typespec-azure-core': 0.37.2_4mursonewu5hn2yhcp44ukhhky + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana + '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 transitivePeerDependencies: - '@types/express' - encoding - supports-color dev: true - /@azure-tools/cadl-ranch/0.11.2_jomoca4gmv4qvfhpxu6msmnyuy: + /@azure-tools/cadl-ranch/0.11.2_z4ytlbo62miz7wplut4y2tqdsa: resolution: {integrity: sha512-7y8k2fvlo3+UolO93u7itmfkRzLW4ToknJaNA3nK2X7Nvkm80efFvdchgfl/wAdr+rt7Ms5o5DvDzlKj2vOu8g==} engines: {node: '>=16.0.0'} hasBin: true dependencies: '@azure-tools/cadl-ranch-api': 0.4.3 '@azure-tools/cadl-ranch-coverage-sdk': 0.6.1 - '@azure-tools/cadl-ranch-expect': 0.11.0_ejqqrhxs4enfjwgbuhtcygsti4 + '@azure-tools/cadl-ranch-expect': 0.11.0_yiatrwz2wazvocak7hjejrlhfa '@azure/identity': 3.3.0 '@types/js-yaml': 4.0.6 '@typespec/compiler': 0.51.0 @@ -505,7 +520,7 @@ packages: '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 dev: false - /@azure-tools/typespec-azure-core/0.37.2_go2aawq7a7w4termvhoebskuti: + /@azure-tools/typespec-azure-core/0.37.2_4mursonewu5hn2yhcp44ukhhky: resolution: {integrity: sha512-/503w3jnRnStowsI7etaDynwQcz7ecNqhFKZErBYGbKVZKoEwJIr5d59m52sjJs8cmr2336es6jw2n2TdfotrA==} engines: {node: '>=18.0.0'} peerDependencies: @@ -513,9 +528,9 @@ packages: '@typespec/http': ~0.51.0 '@typespec/rest': ~0.51.0 dependencies: - '@typespec/compiler': 0.51.0 - '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 - '@typespec/rest': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana dev: true /@azure-tools/typespec-azure-core/0.38.0_4mursonewu5hn2yhcp44ukhhky: @@ -531,7 +546,7 @@ packages: '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana dev: false - /@azure-tools/typespec-client-generator-core/0.37.0_ejqqrhxs4enfjwgbuhtcygsti4: + /@azure-tools/typespec-client-generator-core/0.37.0_hcpcasyazuhu5r37x6lfh5gruy: resolution: {integrity: sha512-wOQMN4gL5LrDsGJw1QkIHHT8SxurdX/E1T8I7enNo9UnPnpnNqTmJ9fusYjJhWnA6/qi51mRD7VX2Ymxh9WN6g==} engines: {node: '>=18.0.0'} peerDependencies: @@ -540,10 +555,10 @@ packages: '@typespec/rest': ~0.51.0 '@typespec/versioning': ~0.51.0 dependencies: - '@typespec/compiler': 0.51.0 - '@typespec/http': 0.51.0_@typespec+compiler@0.51.0 - '@typespec/rest': 0.51.0_xnewuom45z4pw3jt3dzaukcfn4 - '@typespec/versioning': 0.51.0_@typespec+compiler@0.51.0 + '@typespec/compiler': 0.52.0 + '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 + '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana + '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 change-case: 4.1.2 pluralize: 8.0.0 dev: true @@ -997,7 +1012,6 @@ packages: 
/@sindresorhus/merge-streams/1.0.0: resolution: {integrity: sha512-rUV5WyJrJLoloD4NDN1V1+LDMDWOa4OTsT4yYJwQNpTU6FWxkxHpL7eu4w+DmiH8x/EAM1otkPE1+LaspIbplw==} engines: {node: '>=18'} - dev: false /@sinonjs/commons/1.8.6: resolution: {integrity: sha512-Ky+XkAkqPZSm3NLBeUng77EBQl3cmeJhITaGHdYH8kjVB+aun3S4XBRti2zt17mtt0mIUDiNxYeoJm6drVvBJQ==} @@ -1356,7 +1370,7 @@ packages: semver: 7.5.4 vscode-languageserver: 9.0.1 vscode-languageserver-textdocument: 1.0.8 - yaml: 2.3.2 + yaml: 2.3.4 yargs: 17.7.2 dev: true @@ -1378,7 +1392,6 @@ packages: vscode-languageserver-textdocument: 1.0.8 yaml: 2.3.4 yargs: 17.7.2 - dev: false /@typespec/http/0.51.0_@typespec+compiler@0.51.0: resolution: {integrity: sha512-9YtcIPzUP0ELf/ZFBfhgLCPIyERn+DrYJTtEtaWkcO+qEkdFxO5eahwgh1FPuS6iJrW6pUPBuAfGDOAH1+N/PQ==} @@ -1396,7 +1409,6 @@ packages: '@typespec/compiler': ~0.52.0 dependencies: '@typespec/compiler': 0.52.0 - dev: false /@typespec/openapi/0.52.0_ul257hu73e5sf6e67hlhqupana: resolution: {integrity: sha512-2Otnu9glehxvp6TU7NOHEniBDDKufV03XTmeVGgGEmu/j+cveAMg8lA1/O0RBpS2oHGsCFnMEuPcR8M1c0LI+Q==} @@ -1445,7 +1457,6 @@ packages: dependencies: '@typespec/compiler': 0.52.0 '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 - dev: false /@typespec/ts-http-runtime/1.0.0-alpha.20231129.4: resolution: {integrity: sha512-H2bI/Pxc31/y0p9bMzKoplA/BuoxpHrT71NJkdPCRugvyTlYFPbrdxWOBqrFr175HKjy4pzaiBn4Dg5+tXpfhg==} @@ -1458,15 +1469,6 @@ packages: - supports-color dev: true - /@typespec/versioning/0.51.0_@typespec+compiler@0.51.0: - resolution: {integrity: sha512-eja0epBhtmJRO+Jq0Zdb2eRcSTsU+uq/X0xgD5SM+KB97nxFtaRkOJYd59QBN+XysvkcfVRrLOGJjzcpNMa0cw==} - engines: {node: '>=18.0.0'} - peerDependencies: - '@typespec/compiler': ~0.51.0 - dependencies: - '@typespec/compiler': 0.51.0 - dev: true - /@typespec/versioning/0.52.0_@typespec+compiler@0.52.0: resolution: {integrity: sha512-Vr4WHaZiDOxJqRp8/u6X0R45E+rFKEprYmSZX0o5bzetj0cVjOIEbQZvDJCif1Uz0S3K0KKfqf/kYmdYWMJ7Dw==} engines: {node: '>=18.0.0'} @@ -1474,7 +1476,6 @@ packages: '@typespec/compiler': ~0.52.0 dependencies: '@typespec/compiler': 0.52.0 - dev: false /@ungap/promise-all-settled/1.1.2: resolution: {integrity: sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==} @@ -2088,7 +2089,6 @@ packages: /change-case/5.3.0: resolution: {integrity: sha512-Eykca0fGS/xYlx2fG5NqnGSnsWauhSGiSXYhB1kO6E909GUfo8S54u4UZNS7lMJmgZumZ2SUpWaoLgAcfQRICg==} - dev: false /check-error/1.0.2: resolution: {integrity: sha512-BrgHpW9NURQgzoNyjfq0Wu6VFO6D7IZEmJNdtgNqpzGG8RuNFHt2jQxWlAs4HMe119chBnv+34syEZtc6IhLtA==} @@ -3403,7 +3403,6 @@ packages: path-type: 5.0.0 slash: 5.1.0 unicorn-magic: 0.1.0 - dev: false /gopd/1.0.1: resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} @@ -4884,7 +4883,6 @@ packages: /path-type/5.0.0: resolution: {integrity: sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==} engines: {node: '>=12'} - dev: false /pathval/1.1.1: resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} @@ -5473,7 +5471,6 @@ packages: /slash/5.1.0: resolution: {integrity: sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==} engines: {node: '>=14.16'} - dev: false /smart-buffer/4.2.0: resolution: {integrity: sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==} @@ -6067,7 +6064,6 @@ packages: 
/unicorn-magic/0.1.0: resolution: {integrity: sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==} engines: {node: '>=18'} - dev: false /universalify/0.1.2: resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==} @@ -6475,15 +6471,9 @@ packages: /yallist/4.0.0: resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} - /yaml/2.3.2: - resolution: {integrity: sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==} - engines: {node: '>= 14'} - dev: true - /yaml/2.3.4: resolution: {integrity: sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA==} engines: {node: '>= 14'} - dev: false /yargs-parser/20.2.4: resolution: {integrity: sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==} From 3ac5e1df910838ca1819b89f76b979f998c30856 Mon Sep 17 00:00:00 2001 From: Mary Gao Date: Tue, 30 Jan 2024 16:01:32 +0800 Subject: [PATCH 3/6] Upgrade the dev dependencies --- common/config/rush/pnpm-lock.yaml | 46 +++++-------------------------- packages/typespec-ts/package.json | 4 +-- 2 files changed, 9 insertions(+), 41 deletions(-) diff --git a/common/config/rush/pnpm-lock.yaml b/common/config/rush/pnpm-lock.yaml index 2da6efe461..95829290b2 100644 --- a/common/config/rush/pnpm-lock.yaml +++ b/common/config/rush/pnpm-lock.yaml @@ -231,8 +231,8 @@ importers: '@azure-tools/cadl-ranch-expect': ^0.11.0 '@azure-tools/cadl-ranch-specs': ^0.28.7 '@azure-tools/rlc-common': workspace:^0.21.0 - '@azure-tools/typespec-azure-core': 0.37.2 - '@azure-tools/typespec-client-generator-core': 0.37.0 + '@azure-tools/typespec-azure-core': 0.38.0 + '@azure-tools/typespec-client-generator-core': 0.38.0 '@azure/core-auth': ^1.3.2 '@azure/core-lro': ^2.5.4 '@azure/core-paging': ^1.5.0 @@ -274,9 +274,9 @@ importers: '@azure-rest/core-client': 1.1.6 '@azure-tools/cadl-ranch': 0.11.2_z4ytlbo62miz7wplut4y2tqdsa '@azure-tools/cadl-ranch-expect': 0.11.0_hcpcasyazuhu5r37x6lfh5gruy - '@azure-tools/cadl-ranch-specs': 0.28.7_twmjmv6p2zvexrbfeqaqdpjhtm - '@azure-tools/typespec-azure-core': 0.37.2_4mursonewu5hn2yhcp44ukhhky - '@azure-tools/typespec-client-generator-core': 0.37.0_hcpcasyazuhu5r37x6lfh5gruy + '@azure-tools/cadl-ranch-specs': 0.28.7_46ngotnwf4x33nef25igcsrjpa + '@azure-tools/typespec-azure-core': 0.38.0_4mursonewu5hn2yhcp44ukhhky + '@azure-tools/typespec-client-generator-core': 0.38.0_hcpcasyazuhu5r37x6lfh5gruy '@azure/core-auth': 1.5.0 '@azure/core-lro': 2.5.4 '@azure/core-paging': 1.5.0 @@ -414,7 +414,7 @@ packages: '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 dev: true - /@azure-tools/cadl-ranch-specs/0.28.7_twmjmv6p2zvexrbfeqaqdpjhtm: + /@azure-tools/cadl-ranch-specs/0.28.7_46ngotnwf4x33nef25igcsrjpa: resolution: {integrity: sha512-UDYR64oL0QZQ4FxKI7SEKPoLhewlYZRUrOD0fTnfUCTPuaQ/P5h+F5NZfEBHer+2WRhuvY5Xc7Glba33NPyTWg==} engines: {node: '>=16.0.0'} peerDependencies: @@ -428,7 +428,7 @@ packages: '@azure-tools/cadl-ranch': 0.11.2_z4ytlbo62miz7wplut4y2tqdsa '@azure-tools/cadl-ranch-api': 0.4.3 '@azure-tools/cadl-ranch-expect': 0.11.0_hcpcasyazuhu5r37x6lfh5gruy - '@azure-tools/typespec-azure-core': 0.37.2_4mursonewu5hn2yhcp44ukhhky + '@azure-tools/typespec-azure-core': 0.38.0_4mursonewu5hn2yhcp44ukhhky '@typespec/compiler': 0.52.0 '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 '@typespec/rest': 
0.52.0_ul257hu73e5sf6e67hlhqupana @@ -520,19 +520,6 @@ packages: '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 dev: false - /@azure-tools/typespec-azure-core/0.37.2_4mursonewu5hn2yhcp44ukhhky: - resolution: {integrity: sha512-/503w3jnRnStowsI7etaDynwQcz7ecNqhFKZErBYGbKVZKoEwJIr5d59m52sjJs8cmr2336es6jw2n2TdfotrA==} - engines: {node: '>=18.0.0'} - peerDependencies: - '@typespec/compiler': ~0.51.0 - '@typespec/http': ~0.51.0 - '@typespec/rest': ~0.51.0 - dependencies: - '@typespec/compiler': 0.52.0 - '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 - '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana - dev: true - /@azure-tools/typespec-azure-core/0.38.0_4mursonewu5hn2yhcp44ukhhky: resolution: {integrity: sha512-ASM+njC2lpzPykzw2OicWIaAOH+OBe3bVMrufEnINBjlr7owAtudvjrTLLWmAVMBciL/YOF579KdyjxTbaxJ5A==} engines: {node: '>=18.0.0'} @@ -544,24 +531,6 @@ packages: '@typespec/compiler': 0.52.0 '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana - dev: false - - /@azure-tools/typespec-client-generator-core/0.37.0_hcpcasyazuhu5r37x6lfh5gruy: - resolution: {integrity: sha512-wOQMN4gL5LrDsGJw1QkIHHT8SxurdX/E1T8I7enNo9UnPnpnNqTmJ9fusYjJhWnA6/qi51mRD7VX2Ymxh9WN6g==} - engines: {node: '>=18.0.0'} - peerDependencies: - '@typespec/compiler': ~0.51.0 - '@typespec/http': ~0.51.0 - '@typespec/rest': ~0.51.0 - '@typespec/versioning': ~0.51.0 - dependencies: - '@typespec/compiler': 0.52.0 - '@typespec/http': 0.52.1_@typespec+compiler@0.52.0 - '@typespec/rest': 0.52.0_ul257hu73e5sf6e67hlhqupana - '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 - change-case: 4.1.2 - pluralize: 8.0.0 - dev: true /@azure-tools/typespec-client-generator-core/0.38.0_hcpcasyazuhu5r37x6lfh5gruy: resolution: {integrity: sha512-DUDIHJikz3Ai8uPk3vKFoMkkGPUxoD5DbGdwkN/pQxaL6Aze8HV4LGEOGtvaIu0SsGjCX9G3XPAXoBoupYgXbw==} @@ -578,7 +547,6 @@ packages: '@typespec/versioning': 0.52.0_@typespec+compiler@0.52.0 change-case: 5.3.0 pluralize: 8.0.0 - dev: false /@azure/abort-controller/1.1.0: resolution: {integrity: sha512-TrRLIoSQVzfAJX9H1JeFjzAoDGcoK1IYX1UImfceTZpsyYfWr09Ss1aHW1y5TrrR3iq6RZLBwJ3E24uwPhwahw==} diff --git a/packages/typespec-ts/package.json b/packages/typespec-ts/package.json index c02c38d27b..c907295441 100644 --- a/packages/typespec-ts/package.json +++ b/packages/typespec-ts/package.json @@ -75,8 +75,8 @@ "@azure/core-util": "^1.4.0", "eslint-plugin-require-extensions": "0.1.3", "@typespec/ts-http-runtime": "1.0.0-alpha.20231129.4", - "@azure-tools/typespec-azure-core": "0.37.2", - "@azure-tools/typespec-client-generator-core": "0.37.0", + "@azure-tools/typespec-azure-core": "0.38.0", + "@azure-tools/typespec-client-generator-core": "0.38.0", "@typespec/compiler": "0.52.0", "@typespec/http": "0.52.1", "@typespec/rest": "0.52.0", From fd97be7f5eca5b0b6b2f849ba9e54821e311d3a2 Mon Sep 17 00:00:00 2001 From: Mary Gao Date: Tue, 30 Jan 2024 16:19:19 +0800 Subject: [PATCH 4/6] Update the batch changes --- .../generated/typespec-ts/src/BatchClient.ts | 60 +- .../typespec-ts/src/api/operations.ts | 82 +- .../generated/typespec-ts/src/index.ts | 294 +- .../generated/typespec-ts/src/models/index.ts | 294 +- .../typespec-ts/src/models/models.ts | 3890 ++++++++--------- 5 files changed, 2310 insertions(+), 2310 deletions(-) diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/BatchClient.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/BatchClient.ts index d7ab3aff0f..f5a11af40c 100644 --- 
a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/BatchClient.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/BatchClient.ts @@ -4,36 +4,6 @@ import { TokenCredential } from "@azure/core-auth"; import { Pipeline } from "@azure/core-rest-pipeline"; import { - BatchApplication, - PoolUsageMetrics, - BatchPoolCreateOptions, - BatchPool, - AutoScaleRun, - BatchPoolUpdateOptions, - BatchPoolEnableAutoScaleOptions, - BatchPoolEvaluateAutoScaleOptions, - BatchPoolResizeOptions, - BatchPoolReplaceOptions, - NodeRemoveOptions, - ImageInformation, - PoolNodeCounts, - BatchJob, - BatchJobUpdateOptions, - BatchJobDisableOptions, - BatchJobTerminateOptions, - BatchJobCreateOptions, - JobPreparationAndReleaseTaskExecutionInformation, - TaskCountsResult, - BatchCertificate, - BatchJobSchedule, - BatchJobScheduleUpdateOptions, - BatchJobScheduleCreateOptions, - BatchTaskCreateOptions, - BatchTask, - BatchTaskCollection, - TaskAddCollectionResult, - BatchTaskListSubtasksResult, - NodeFile, BatchNodeUserCreateOptions, BatchNodeUserUpdateOptions, BatchNode, @@ -44,6 +14,36 @@ import { UploadBatchServiceLogsOptions, UploadBatchServiceLogsResult, NodeVMExtension, + NodeFile, + BatchTaskCreateOptions, + BatchTask, + BatchTaskCollection, + TaskAddCollectionResult, + BatchTaskListSubtasksResult, + BatchJobSchedule, + BatchJobScheduleUpdateOptions, + BatchJobScheduleCreateOptions, + BatchCertificate, + BatchJob, + BatchJobUpdateOptions, + BatchJobDisableOptions, + BatchJobTerminateOptions, + BatchJobCreateOptions, + JobPreparationAndReleaseTaskExecutionInformation, + TaskCountsResult, + ImageInformation, + PoolNodeCounts, + PoolUsageMetrics, + BatchPoolCreateOptions, + BatchPool, + AutoScaleRun, + BatchPoolUpdateOptions, + BatchPoolEnableAutoScaleOptions, + BatchPoolEvaluateAutoScaleOptions, + BatchPoolResizeOptions, + BatchPoolReplaceOptions, + NodeRemoveOptions, + BatchApplication, } from "./models/models.js"; import { ListApplicationsOptions, diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/operations.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/operations.ts index 1f0da51a43..825e30b731 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/operations.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/api/operations.ts @@ -2,47 +2,6 @@ // Licensed under the MIT license. 
import { - ApplicationListResult, - BatchApplication, - PoolListUsageMetricsResult, - PoolUsageMetrics, - BatchPoolCreateOptions, - BatchPoolListResult, - BatchPool, - AutoScaleRun, - BatchPoolUpdateOptions, - BatchPoolEnableAutoScaleOptions, - BatchPoolEvaluateAutoScaleOptions, - BatchPoolResizeOptions, - BatchPoolReplaceOptions, - NodeRemoveOptions, - AccountListSupportedImagesResult, - ImageInformation, - PoolNodeCountsListResult, - PoolNodeCounts, - BatchJob, - BatchJobUpdateOptions, - BatchJobDisableOptions, - BatchJobTerminateOptions, - BatchJobCreateOptions, - BatchJobListResult, - BatchJobListPreparationAndReleaseTaskStatusResult, - JobPreparationAndReleaseTaskExecutionInformation, - TaskCountsResult, - BatchCertificate, - CertificateListResult, - BatchJobSchedule, - BatchJobScheduleUpdateOptions, - BatchJobScheduleCreateOptions, - BatchJobScheduleListResult, - BatchTaskCreateOptions, - BatchTaskListResult, - BatchTask, - BatchTaskCollection, - TaskAddCollectionResult, - BatchTaskListSubtasksResult, - NodeFileListResult, - NodeFile, BatchNodeUserCreateOptions, BatchNodeUserUpdateOptions, BatchNode, @@ -55,6 +14,47 @@ import { BatchNodeListResult, NodeVMExtension, NodeVMExtensionList, + NodeFileListResult, + NodeFile, + BatchTaskCreateOptions, + BatchTaskListResult, + BatchTask, + BatchTaskCollection, + TaskAddCollectionResult, + BatchTaskListSubtasksResult, + BatchJobSchedule, + BatchJobScheduleUpdateOptions, + BatchJobScheduleCreateOptions, + BatchJobScheduleListResult, + BatchCertificate, + CertificateListResult, + BatchJob, + BatchJobUpdateOptions, + BatchJobDisableOptions, + BatchJobTerminateOptions, + BatchJobCreateOptions, + BatchJobListResult, + BatchJobListPreparationAndReleaseTaskStatusResult, + JobPreparationAndReleaseTaskExecutionInformation, + TaskCountsResult, + AccountListSupportedImagesResult, + ImageInformation, + PoolNodeCountsListResult, + PoolNodeCounts, + PoolListUsageMetricsResult, + PoolUsageMetrics, + BatchPoolCreateOptions, + BatchPoolListResult, + BatchPool, + AutoScaleRun, + BatchPoolUpdateOptions, + BatchPoolEnableAutoScaleOptions, + BatchPoolEvaluateAutoScaleOptions, + BatchPoolResizeOptions, + BatchPoolReplaceOptions, + NodeRemoveOptions, + ApplicationListResult, + BatchApplication, } from "../models/models.js"; import { PagedAsyncIterableIterator } from "../models/pagingTypes.js"; import { buildPagedAsyncIterator } from "./pagingHelpers.js"; diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/index.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/index.ts index b7f832d522..2116823f17 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/index.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/index.ts @@ -3,57 +3,136 @@ export { BatchClient, BatchClientOptions } from "./BatchClient.js"; export { - ApplicationListResult, - BatchApplication, + BatchNodeUserCreateOptions, BatchError, ErrorMessage, BatchErrorDetail, - PoolListUsageMetricsResult, - PoolUsageMetrics, - BatchPoolCreateOptions, + BatchNodeUserUpdateOptions, + BatchNode, + BatchNodeState, + SchedulingState, + TaskInformation, + TaskState, + TaskExecutionInformation, + TaskContainerExecutionInformation, + TaskFailureInformation, + ErrorCategory, + NameValuePair, + TaskExecutionResult, + StartTask, + TaskContainerSettings, + ContainerRegistry, + BatchNodeIdentityReference, + ContainerWorkingDirectory, + ResourceFile, + EnvironmentSetting, + UserIdentity, + AutoUserSpecification, + 
AutoUserScope, + ElevationLevel, + StartTaskInformation, + StartTaskState, + CertificateReference, + CertificateStoreLocation, + CertificateVisibility, + BatchNodeError, + BatchNodeEndpointConfiguration, + InboundEndpoint, + InboundEndpointProtocol, + NodeAgentInformation, + VirtualMachineInfo, + ImageReference, + NodeRebootOptions, + BatchNodeRebootOption, + NodeReimageOptions, + BatchNodeReimageOption, + NodeDisableSchedulingOptions, + DisableBatchNodeSchedulingOption, + BatchNodeRemoteLoginSettingsResult, + UploadBatchServiceLogsOptions, + UploadBatchServiceLogsResult, + BatchNodeListResult, + NodeVMExtension, + VMExtension, + VMExtensionInstanceView, + InstanceViewStatus, + StatusLevelTypes, + NodeVMExtensionList, + NodeFileListResult, + NodeFile, + FileProperties, + BatchTaskCreateOptions, + ExitConditions, + ExitCodeMapping, + ExitOptions, + JobAction, + DependencyAction, + ExitCodeRangeMapping, + OutputFile, + OutputFileDestination, + OutputFileBlobContainerDestination, + HttpHeader, + OutputFileUploadOptions, + OutputFileUploadCondition, + AffinityInformation, + TaskConstraints, + MultiInstanceSettings, + TaskDependencies, + TaskIdRange, + ApplicationPackageReference, + AuthenticationTokenSettings, + AccessScope, + BatchTaskListResult, + BatchTask, + BatchNodeInformation, + TaskStatistics, + BatchTaskCollection, + TaskAddCollectionResult, + TaskAddResult, + TaskAddStatus, + BatchTaskListSubtasksResult, + SubtaskInformation, + SubtaskState, + BatchJobSchedule, + JobScheduleState, + Schedule, + JobSpecification, + OnAllTasksComplete, + OnTaskFailure, + JobNetworkConfiguration, + JobConstraints, + JobManagerTask, + JobPreparationTask, + JobReleaseTask, + PoolInformation, + AutoPoolSpecification, + PoolLifetimeOption, + PoolSpecification, CloudServiceConfiguration, VirtualMachineConfiguration, - ImageReference, WindowsConfiguration, DataDisk, CachingType, StorageAccountType, ContainerConfiguration, ContainerType, - ContainerRegistry, - BatchNodeIdentityReference, DiskEncryptionConfiguration, DiskEncryptionTarget, NodePlacementConfiguration, NodePlacementPolicyType, - VMExtension, OSDisk, DiffDiskSettings, DiffDiskPlacement, + TaskSchedulingPolicy, + BatchNodeFillType, NetworkConfiguration, DynamicVNetAssignmentScope, PoolEndpointConfiguration, InboundNATPool, - InboundEndpointProtocol, NetworkSecurityGroupRule, NetworkSecurityGroupRuleAccess, PublicIpAddressConfiguration, IPAddressProvisioningType, - StartTask, - TaskContainerSettings, - ContainerWorkingDirectory, - ResourceFile, - EnvironmentSetting, - UserIdentity, - AutoUserSpecification, - AutoUserScope, - ElevationLevel, - CertificateReference, - CertificateStoreLocation, - CertificateVisibility, - ApplicationPackageReference, - TaskSchedulingPolicy, - BatchNodeFillType, UserAccount, LinuxUserConfiguration, WindowsUserConfiguration, @@ -65,59 +144,21 @@ export { CifsMountConfiguration, AzureFileShareConfiguration, NodeCommunicationMode, - BatchPoolListResult, - BatchPool, - PoolState, - AllocationState, - ResizeError, - NameValuePair, - AutoScaleRun, - AutoScaleRunError, - PoolStatistics, - UsageStatistics, - ResourceStatistics, - BatchPoolIdentity, - PoolIdentityType, - UserAssignedIdentity, - BatchPoolUpdateOptions, - BatchPoolEnableAutoScaleOptions, - BatchPoolEvaluateAutoScaleOptions, - BatchPoolResizeOptions, - BatchNodeDeallocationOption, - BatchPoolReplaceOptions, - NodeRemoveOptions, - AccountListSupportedImagesResult, - ImageInformation, - OSType, - VerificationType, - PoolNodeCountsListResult, - PoolNodeCounts, - 
NodeCounts, + JobScheduleExecutionInformation, + RecentJob, + JobScheduleStatistics, + BatchJobScheduleUpdateOptions, + BatchJobScheduleCreateOptions, + BatchJobScheduleListResult, + BatchCertificate, + CertificateState, + DeleteCertificateError, + CertificateFormat, + CertificateListResult, BatchJob, JobState, - JobConstraints, - JobManagerTask, - OutputFile, - OutputFileDestination, - OutputFileBlobContainerDestination, - HttpHeader, - OutputFileUploadOptions, - OutputFileUploadCondition, - TaskConstraints, - AuthenticationTokenSettings, - AccessScope, - JobPreparationTask, - JobReleaseTask, - PoolInformation, - AutoPoolSpecification, - PoolLifetimeOption, - PoolSpecification, - OnAllTasksComplete, - OnTaskFailure, - JobNetworkConfiguration, JobExecutionInformation, JobSchedulingError, - ErrorCategory, JobStatistics, BatchJobUpdateOptions, BatchJobDisableOptions, @@ -129,84 +170,43 @@ export { JobPreparationAndReleaseTaskExecutionInformation, JobPreparationTaskExecutionInformation, JobPreparationTaskState, - TaskContainerExecutionInformation, - TaskFailureInformation, - TaskExecutionResult, JobReleaseTaskExecutionInformation, JobReleaseTaskState, TaskCountsResult, TaskCounts, TaskSlotCounts, - BatchCertificate, - CertificateState, - DeleteCertificateError, - CertificateFormat, - CertificateListResult, - BatchJobSchedule, - JobScheduleState, - Schedule, - JobSpecification, - JobScheduleExecutionInformation, - RecentJob, - JobScheduleStatistics, - BatchJobScheduleUpdateOptions, - BatchJobScheduleCreateOptions, - BatchJobScheduleListResult, - BatchTaskCreateOptions, - ExitConditions, - ExitCodeMapping, - ExitOptions, - JobAction, - DependencyAction, - ExitCodeRangeMapping, - AffinityInformation, - MultiInstanceSettings, - TaskDependencies, - TaskIdRange, - BatchTaskListResult, - BatchTask, - TaskState, - TaskExecutionInformation, - BatchNodeInformation, - TaskStatistics, - BatchTaskCollection, - TaskAddCollectionResult, - TaskAddResult, - TaskAddStatus, - BatchTaskListSubtasksResult, - SubtaskInformation, - SubtaskState, - NodeFileListResult, - NodeFile, - FileProperties, - BatchNodeUserCreateOptions, - BatchNodeUserUpdateOptions, - BatchNode, - BatchNodeState, - SchedulingState, - TaskInformation, - StartTaskInformation, - StartTaskState, - BatchNodeError, - BatchNodeEndpointConfiguration, - InboundEndpoint, - NodeAgentInformation, - VirtualMachineInfo, - NodeRebootOptions, - BatchNodeRebootOption, - NodeReimageOptions, - BatchNodeReimageOption, - NodeDisableSchedulingOptions, - DisableBatchNodeSchedulingOption, - BatchNodeRemoteLoginSettingsResult, - UploadBatchServiceLogsOptions, - UploadBatchServiceLogsResult, - BatchNodeListResult, - NodeVMExtension, - VMExtensionInstanceView, - InstanceViewStatus, - StatusLevelTypes, - NodeVMExtensionList, + AccountListSupportedImagesResult, + ImageInformation, + OSType, + VerificationType, + PoolNodeCountsListResult, + PoolNodeCounts, + NodeCounts, + PoolListUsageMetricsResult, + PoolUsageMetrics, + BatchPoolCreateOptions, + BatchPoolListResult, + BatchPool, + PoolState, + AllocationState, + ResizeError, + AutoScaleRun, + AutoScaleRunError, + PoolStatistics, + UsageStatistics, + ResourceStatistics, + BatchPoolIdentity, + PoolIdentityType, + UserAssignedIdentity, + BatchPoolUpdateOptions, + BatchPoolEnableAutoScaleOptions, + BatchPoolEvaluateAutoScaleOptions, + BatchPoolResizeOptions, + BatchNodeDeallocationOption, + BatchPoolReplaceOptions, + NodeRemoveOptions, + ApplicationListResult, + BatchApplication, ListApplicationsOptions, 
GetApplicationOptions, ListPoolUsageMetricsOptions, diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/index.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/index.ts index b9d5df8780..f4bbaf6ffa 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/index.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/index.ts @@ -2,57 +2,136 @@ // Licensed under the MIT license. export { - ApplicationListResult, - BatchApplication, + BatchNodeUserCreateOptions, BatchError, ErrorMessage, BatchErrorDetail, - PoolListUsageMetricsResult, - PoolUsageMetrics, - BatchPoolCreateOptions, + BatchNodeUserUpdateOptions, + BatchNode, + BatchNodeState, + SchedulingState, + TaskInformation, + TaskState, + TaskExecutionInformation, + TaskContainerExecutionInformation, + TaskFailureInformation, + ErrorCategory, + NameValuePair, + TaskExecutionResult, + StartTask, + TaskContainerSettings, + ContainerRegistry, + BatchNodeIdentityReference, + ContainerWorkingDirectory, + ResourceFile, + EnvironmentSetting, + UserIdentity, + AutoUserSpecification, + AutoUserScope, + ElevationLevel, + StartTaskInformation, + StartTaskState, + CertificateReference, + CertificateStoreLocation, + CertificateVisibility, + BatchNodeError, + BatchNodeEndpointConfiguration, + InboundEndpoint, + InboundEndpointProtocol, + NodeAgentInformation, + VirtualMachineInfo, + ImageReference, + NodeRebootOptions, + BatchNodeRebootOption, + NodeReimageOptions, + BatchNodeReimageOption, + NodeDisableSchedulingOptions, + DisableBatchNodeSchedulingOption, + BatchNodeRemoteLoginSettingsResult, + UploadBatchServiceLogsOptions, + UploadBatchServiceLogsResult, + BatchNodeListResult, + NodeVMExtension, + VMExtension, + VMExtensionInstanceView, + InstanceViewStatus, + StatusLevelTypes, + NodeVMExtensionList, + NodeFileListResult, + NodeFile, + FileProperties, + BatchTaskCreateOptions, + ExitConditions, + ExitCodeMapping, + ExitOptions, + JobAction, + DependencyAction, + ExitCodeRangeMapping, + OutputFile, + OutputFileDestination, + OutputFileBlobContainerDestination, + HttpHeader, + OutputFileUploadOptions, + OutputFileUploadCondition, + AffinityInformation, + TaskConstraints, + MultiInstanceSettings, + TaskDependencies, + TaskIdRange, + ApplicationPackageReference, + AuthenticationTokenSettings, + AccessScope, + BatchTaskListResult, + BatchTask, + BatchNodeInformation, + TaskStatistics, + BatchTaskCollection, + TaskAddCollectionResult, + TaskAddResult, + TaskAddStatus, + BatchTaskListSubtasksResult, + SubtaskInformation, + SubtaskState, + BatchJobSchedule, + JobScheduleState, + Schedule, + JobSpecification, + OnAllTasksComplete, + OnTaskFailure, + JobNetworkConfiguration, + JobConstraints, + JobManagerTask, + JobPreparationTask, + JobReleaseTask, + PoolInformation, + AutoPoolSpecification, + PoolLifetimeOption, + PoolSpecification, CloudServiceConfiguration, VirtualMachineConfiguration, - ImageReference, WindowsConfiguration, DataDisk, CachingType, StorageAccountType, ContainerConfiguration, ContainerType, - ContainerRegistry, - BatchNodeIdentityReference, DiskEncryptionConfiguration, DiskEncryptionTarget, NodePlacementConfiguration, NodePlacementPolicyType, - VMExtension, OSDisk, DiffDiskSettings, DiffDiskPlacement, + TaskSchedulingPolicy, + BatchNodeFillType, NetworkConfiguration, DynamicVNetAssignmentScope, PoolEndpointConfiguration, InboundNATPool, - InboundEndpointProtocol, NetworkSecurityGroupRule, 
NetworkSecurityGroupRuleAccess, PublicIpAddressConfiguration, IPAddressProvisioningType, - StartTask, - TaskContainerSettings, - ContainerWorkingDirectory, - ResourceFile, - EnvironmentSetting, - UserIdentity, - AutoUserSpecification, - AutoUserScope, - ElevationLevel, - CertificateReference, - CertificateStoreLocation, - CertificateVisibility, - ApplicationPackageReference, - TaskSchedulingPolicy, - BatchNodeFillType, UserAccount, LinuxUserConfiguration, WindowsUserConfiguration, @@ -64,59 +143,21 @@ export { CifsMountConfiguration, AzureFileShareConfiguration, NodeCommunicationMode, - BatchPoolListResult, - BatchPool, - PoolState, - AllocationState, - ResizeError, - NameValuePair, - AutoScaleRun, - AutoScaleRunError, - PoolStatistics, - UsageStatistics, - ResourceStatistics, - BatchPoolIdentity, - PoolIdentityType, - UserAssignedIdentity, - BatchPoolUpdateOptions, - BatchPoolEnableAutoScaleOptions, - BatchPoolEvaluateAutoScaleOptions, - BatchPoolResizeOptions, - BatchNodeDeallocationOption, - BatchPoolReplaceOptions, - NodeRemoveOptions, - AccountListSupportedImagesResult, - ImageInformation, - OSType, - VerificationType, - PoolNodeCountsListResult, - PoolNodeCounts, - NodeCounts, + JobScheduleExecutionInformation, + RecentJob, + JobScheduleStatistics, + BatchJobScheduleUpdateOptions, + BatchJobScheduleCreateOptions, + BatchJobScheduleListResult, + BatchCertificate, + CertificateState, + DeleteCertificateError, + CertificateFormat, + CertificateListResult, BatchJob, JobState, - JobConstraints, - JobManagerTask, - OutputFile, - OutputFileDestination, - OutputFileBlobContainerDestination, - HttpHeader, - OutputFileUploadOptions, - OutputFileUploadCondition, - TaskConstraints, - AuthenticationTokenSettings, - AccessScope, - JobPreparationTask, - JobReleaseTask, - PoolInformation, - AutoPoolSpecification, - PoolLifetimeOption, - PoolSpecification, - OnAllTasksComplete, - OnTaskFailure, - JobNetworkConfiguration, JobExecutionInformation, JobSchedulingError, - ErrorCategory, JobStatistics, BatchJobUpdateOptions, BatchJobDisableOptions, @@ -128,84 +169,43 @@ export { JobPreparationAndReleaseTaskExecutionInformation, JobPreparationTaskExecutionInformation, JobPreparationTaskState, - TaskContainerExecutionInformation, - TaskFailureInformation, - TaskExecutionResult, JobReleaseTaskExecutionInformation, JobReleaseTaskState, TaskCountsResult, TaskCounts, TaskSlotCounts, - BatchCertificate, - CertificateState, - DeleteCertificateError, - CertificateFormat, - CertificateListResult, - BatchJobSchedule, - JobScheduleState, - Schedule, - JobSpecification, - JobScheduleExecutionInformation, - RecentJob, - JobScheduleStatistics, - BatchJobScheduleUpdateOptions, - BatchJobScheduleCreateOptions, - BatchJobScheduleListResult, - BatchTaskCreateOptions, - ExitConditions, - ExitCodeMapping, - ExitOptions, - JobAction, - DependencyAction, - ExitCodeRangeMapping, - AffinityInformation, - MultiInstanceSettings, - TaskDependencies, - TaskIdRange, - BatchTaskListResult, - BatchTask, - TaskState, - TaskExecutionInformation, - BatchNodeInformation, - TaskStatistics, - BatchTaskCollection, - TaskAddCollectionResult, - TaskAddResult, - TaskAddStatus, - BatchTaskListSubtasksResult, - SubtaskInformation, - SubtaskState, - NodeFileListResult, - NodeFile, - FileProperties, - BatchNodeUserCreateOptions, - BatchNodeUserUpdateOptions, - BatchNode, - BatchNodeState, - SchedulingState, - TaskInformation, - StartTaskInformation, - StartTaskState, - BatchNodeError, - BatchNodeEndpointConfiguration, - InboundEndpoint, - 
NodeAgentInformation, - VirtualMachineInfo, - NodeRebootOptions, - BatchNodeRebootOption, - NodeReimageOptions, - BatchNodeReimageOption, - NodeDisableSchedulingOptions, - DisableBatchNodeSchedulingOption, - BatchNodeRemoteLoginSettingsResult, - UploadBatchServiceLogsOptions, - UploadBatchServiceLogsResult, - BatchNodeListResult, - NodeVMExtension, - VMExtensionInstanceView, - InstanceViewStatus, - StatusLevelTypes, - NodeVMExtensionList, + AccountListSupportedImagesResult, + ImageInformation, + OSType, + VerificationType, + PoolNodeCountsListResult, + PoolNodeCounts, + NodeCounts, + PoolListUsageMetricsResult, + PoolUsageMetrics, + BatchPoolCreateOptions, + BatchPoolListResult, + BatchPool, + PoolState, + AllocationState, + ResizeError, + AutoScaleRun, + AutoScaleRunError, + PoolStatistics, + UsageStatistics, + ResourceStatistics, + BatchPoolIdentity, + PoolIdentityType, + UserAssignedIdentity, + BatchPoolUpdateOptions, + BatchPoolEnableAutoScaleOptions, + BatchPoolEvaluateAutoScaleOptions, + BatchPoolResizeOptions, + BatchNodeDeallocationOption, + BatchPoolReplaceOptions, + NodeRemoveOptions, + ApplicationListResult, + BatchApplication, } from "./models.js"; export { ListApplicationsOptions, diff --git a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/models.ts b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/models.ts index 69a4c6c4bf..c74e6df322 100644 --- a/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/models.ts +++ b/packages/typespec-test/test/batch_modular/generated/typespec-ts/src/models/models.ts @@ -1,22 +1,18 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -/** The result of listing the applications available in an Account. */ -export interface ApplicationListResult { - /** The list of applications available in the Account. */ - value?: BatchApplication[]; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** Contains information about an application in an Azure Batch Account. */ -export interface BatchApplication { - /** A string that uniquely identifies the application within the Account. */ - id: string; - /** The display name for the application. */ - displayName: string; - /** The list of available versions of the application. */ - versions: string[]; +/** Options for creating a user account for RDP or SSH access on an Azure Batch Compute Node. */ +export interface BatchNodeUserCreateOptions { + /** The user name of the Account. */ + name: string; + /** Whether the Account should be an administrator on the Compute Node. The default value is false. */ + isAdmin?: boolean; + /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */ + expiryTime?: Date; + /** The password of the Account. The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. */ + password?: string; + /** The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. 
If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + sshPublicKey?: string; } /** An error response received from the Azure Batch service. */ @@ -45,58 +41,52 @@ export interface BatchErrorDetail { value?: string; } -/** The result of a listing the usage metrics for an Account. */ -export interface PoolListUsageMetricsResult { - /** The Pool usage metrics data. */ - value?: PoolUsageMetrics[]; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; -} - -/** Usage metrics for a Pool across an aggregation interval. */ -export interface PoolUsageMetrics { - /** The ID of the Pool whose metrics are aggregated in this entry. */ - poolId: string; - /** The start time of the aggregation interval covered by this entry. */ - startTime: Date; - /** The end time of the aggregation interval covered by this entry. */ - endTime: Date; - /** The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ - vmSize: string; - /** The total core hours used in the Pool during this aggregation interval. */ - totalCoreHours: number; +/** Options for updating a user account for RDP or SSH access on an Azure Batch Compute Node. */ +export interface BatchNodeUserUpdateOptions { + /** The password of the Account. The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. */ + password?: string; + /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */ + expiryTime?: Date; + /** The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is removed. */ + sshPublicKey?: string; } -/** Options for creating an Azure Batch Pool. */ -export interface BatchPoolCreateOptions { - /** A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). */ - id: string; - /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ - displayName?: string; - /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. 
For information about available sizes of virtual machines for Cloud Services Pools (pools created with cloudServiceConfiguration), see Sizes for Cloud Services (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and A2V2. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */ - vmSize: string; - /** The cloud service configuration for the Pool. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */ - cloudServiceConfiguration?: CloudServiceConfiguration; - /** The virtual machine configuration for the Pool. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. */ - virtualMachineConfiguration?: VirtualMachineConfiguration; - /** The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ - resizeTimeout?: string; - /** The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ - targetDedicatedNodes?: number; - /** The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ - targetLowPriorityNodes?: number; - /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. */ - enableAutoScale?: boolean; - /** A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). */ - autoScaleFormula?: string; - /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. 
The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ - autoScaleEvaluationInterval?: string; - /** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. */ - enableInterNodeCommunication?: boolean; - /** The network configuration for the Pool. */ - networkConfiguration?: NetworkConfiguration; - /** A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. */ +/** A Compute Node in the Batch service. */ +export interface BatchNode { + /** The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. */ + id?: string; + /** The URL of the Compute Node. */ + url?: string; + /** The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. */ + state?: BatchNodeState; + /** Whether the Compute Node is available for Task scheduling. */ + schedulingState?: SchedulingState; + /** The time at which the Compute Node entered its current state. */ + stateTransitionTime?: Date; + /** The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable. */ + lastBootTime?: Date; + /** The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. */ + allocationTime?: Date; + /** The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. */ + ipAddress?: string; + /** An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */ + affinityId?: string; + /** The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ + vmSize?: string; + /** The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ + totalTasksRun?: number; + /** The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. 
*/ + runningTasksCount?: number; + /** The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ + runningTaskSlotsCount?: number; + /** The total number of Job Tasks which completed successfully (with exitCode 0) on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ + totalTasksSucceeded?: number; + /** A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. */ + recentTasks?: TaskInformation[]; + /** The Task specified to run on the Compute Node as it joins the Pool. */ startTask?: StartTask; + /** Runtime information about the execution of the StartTask on the Compute Node. */ + startTaskInfo?: StartTaskInformation; /** * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. @@ -104,311 +94,106 @@ export interface BatchPoolCreateOptions { * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ certificateReferences?: CertificateReference[]; - /** The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */ - applicationPackageReferences?: ApplicationPackageReference[]; - /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. */ - applicationLicenses?: string[]; - /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ - taskSlotsPerNode?: number; - /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ - taskSchedulingPolicy?: TaskSchedulingPolicy; - /** The list of user Accounts to be created on each Compute Node in the Pool. */ - userAccounts?: UserAccount[]; - /** A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ - metadata?: MetadataItem[]; - /** Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. */ - mountConfiguration?: MountConfiguration[]; - /** The desired node communication mode for the pool. 
If omitted, the default value is Default. */ - targetNodeCommunicationMode?: NodeCommunicationMode; -} - -/** - * The configuration for Compute Nodes in a Pool based on the Azure Cloud Services - * platform. - */ -export interface CloudServiceConfiguration { - /** - * Possible values are: - * 2 - OS Family 2, equivalent to Windows Server 2008 R2 - * SP1. - * 3 - OS Family 3, equivalent to Windows Server 2012. - * 4 - OS Family 4, - * equivalent to Windows Server 2012 R2. - * 5 - OS Family 5, equivalent to Windows - * Server 2016. - * 6 - OS Family 6, equivalent to Windows Server 2019. For more - * information, see Azure Guest OS Releases - * (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). - */ - osFamily: string; - /** The Azure Guest OS version to be installed on the virtual machines in the Pool. The default value is * which specifies the latest operating system version for the specified OS family. */ - osVersion?: string; + /** The list of errors that are currently being encountered by the Compute Node. */ + errors?: BatchNodeError[]; + /** Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. */ + isDedicated?: boolean; + /** The endpoint configuration for the Compute Node. */ + endpointConfiguration?: BatchNodeEndpointConfiguration; + /** Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. */ + nodeAgentInfo?: NodeAgentInformation; + /** Info about the current state of the virtual machine. */ + virtualMachineInfo?: VirtualMachineInfo; } -/** - * The configuration for Compute Nodes in a Pool based on the Azure Virtual - * Machines infrastructure. - */ -export interface VirtualMachineConfiguration { - /** A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. */ - imageReference: ImageReference; - /** The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. */ - nodeAgentSKUId: string; - /** Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. */ - windowsConfiguration?: WindowsConfiguration; - /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. 
The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. */ - dataDisks?: DataDisk[]; - /** - * This only applies to Images that contain the Windows operating system, and - * should only be used when you hold valid on-premises licenses for the Compute - * Nodes which will be deployed. If omitted, no on-premises licensing discount is - * applied. Values are: - * - * Windows_Server - The on-premises license is for Windows - * Server. - * Windows_Client - The on-premises license is for Windows Client. - * - */ - licenseType?: string; - /** The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it. */ - containerConfiguration?: ContainerConfiguration; - /** The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. */ - diskEncryptionConfiguration?: DiskEncryptionConfiguration; - /** The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated. */ - nodePlacementConfiguration?: NodePlacementConfiguration; - /** The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node. */ - extensions?: VMExtension[]; - /** Settings for the operating system disk of the Virtual Machine. */ - osDisk?: OSDisk; -} +/** BatchNodeState enums */ +/** "idle", "rebooting", "reimaging", "running", "unusable", "creating", "starting", "waitingforstarttask", "starttaskfailed", "unknown", "leavingpool", "offline", "preempted" */ +export type BatchNodeState = string; +/** SchedulingState enums */ +/** "enabled", "disabled" */ +export type SchedulingState = string; -/** - * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image - * Gallery Image. To get the list of all Azure Marketplace Image references - * verified by Azure Batch, see the 'List Supported Images' operation. - */ -export interface ImageReference { - /** The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. */ - publisher?: string; - /** The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. */ - offer?: string; - /** The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. */ - sku?: string; - /** The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. */ - version?: string; - /** The ARM resource identifier of the Shared Image Gallery Image. Compute Nodes in the Pool will be created using this Image Id. 
This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Shared Image Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ - virtualMachineImageId?: string; - /** The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. */ - readonly exactVersion?: string; +/** Information about a Task running on a Compute Node. */ +export interface TaskInformation { + /** The URL of the Task. */ + taskUrl?: string; + /** The ID of the Job to which the Task belongs. */ + jobId?: string; + /** The ID of the Task. */ + taskId?: string; + /** The ID of the subtask if the Task is a multi-instance Task. */ + subtaskId?: number; + /** The current state of the Task. */ + taskState: TaskState; + /** Information about the execution of the Task. */ + executionInfo?: TaskExecutionInformation; } -/** Windows operating system settings to apply to the virtual machine. */ -export interface WindowsConfiguration { - /** Whether automatic updates are enabled on the virtual machine. If omitted, the default value is true. */ - enableAutomaticUpdates?: boolean; -} +/** TaskState enums */ +/** "active", "preparing", "running", "completed" */ +export type TaskState = string; -/** - * Settings which will be used by the data disks associated to Compute Nodes in - * the Pool. When using attached data disks, you need to mount and format the - * disks from within a VM to use them. - */ -export interface DataDisk { - /** The logical unit number. The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun. The value must be between 0 and 63, inclusive. */ - lun: number; - /** The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. */ - caching?: CachingType; - /** The initial disk size in gigabytes. */ - diskSizeGB: number; - /** The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". */ - storageAccountType?: StorageAccountType; -} - -/** CachingType enums */ -/** "none", "readonly", "readwrite" */ -export type CachingType = string; -/** StorageAccountType enums */ -/** "standard_lrs", "premium_lrs" */ -export type StorageAccountType = string; - -/** The configuration for container-enabled Pools. */ -export interface ContainerConfiguration { - /** The container technology to be used. */ - type: ContainerType; - /** The collection of container Image names. 
This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. */ - containerImageNames?: string[]; - /** Additional private registries from which containers can be pulled. If any Images must be downloaded from a private registry which requires credentials, then those credentials must be provided here. */ - containerRegistries?: ContainerRegistry[]; -} - -/** ContainerType enums */ -/** "dockerCompatible", "criCompatible" */ -export type ContainerType = string; - -/** A private container registry. */ -export interface ContainerRegistry { - /** The user name to log into the registry server. */ - username?: string; - /** The password to log into the registry server. */ - password?: string; - /** The registry URL. If omitted, the default is "docker.io". */ - registryServer?: string; - /** The reference to the user assigned identity to use to access an Azure Container Registry instead of username and password. */ - identityReference?: BatchNodeIdentityReference; -} - -/** - * The reference to a user assigned identity associated with the Batch pool which - * a compute node will use. - */ -export interface BatchNodeIdentityReference { - /** The ARM resource id of the user assigned identity. */ - resourceId?: string; -} - -/** - * The disk encryption configuration applied on compute nodes in the pool. Disk - * encryption configuration is not supported on Linux pool created with Shared - * Image Gallery Image. - */ -export interface DiskEncryptionConfiguration { - /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. */ - targets?: DiskEncryptionTarget[]; -} - -/** DiskEncryptionTarget enums */ -/** "osdisk", "temporarydisk" */ -export type DiskEncryptionTarget = string; - -/** - * For regional placement, nodes in the pool will be allocated in the same region. - * For zonal placement, nodes in the pool will be spread across different zones - * with best effort balancing. - */ -export interface NodePlacementConfiguration { - /** Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. */ - policy?: NodePlacementPolicyType; -} - -/** NodePlacementPolicyType enums */ -/** "regional", "zonal" */ -export type NodePlacementPolicyType = string; - -/** The configuration for virtual machine extensions. */ -export interface VMExtension { - /** The name of the virtual machine extension. */ - name: string; - /** The name of the extension handler publisher. */ - publisher: string; - /** The type of the extension. */ - type: string; - /** The version of script handler. */ - typeHandlerVersion?: string; - /** Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. */ - autoUpgradeMinorVersion?: boolean; - /** Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available. */ - enableAutomaticUpgrade?: boolean; - /** JSON formatted public settings for the extension. 
*/ - settings?: Record; - /** The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. */ - protectedSettings?: Record; - /** The collection of extension names. Collection of extension names after which this extension needs to be provisioned. */ - provisionAfterExtensions?: string[]; -} - -/** Settings for the operating system disk of the compute node (VM). */ -export interface OSDisk { - /** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */ - ephemeralOSDiskSettings?: DiffDiskSettings; -} - -/** - * Specifies the ephemeral Disk Settings for the operating system disk used by the - * compute node (VM). - */ -export interface DiffDiskSettings { - /** Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */ - placement?: DiffDiskPlacement; -} - -/** AccessDiffDiskPlacementScope enums */ -/** "cachedisk" */ -export type DiffDiskPlacement = string; - -/** The network configuration for a Pool. */ -export interface NetworkConfiguration { - /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools created with cloudServiceConfiguration both ARM and classic virtual networks are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. For Pools created with a cloud service configuration, enable ports 10100, 20100, and 30100. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. 
*/ - subnetId?: string; - /** The scope of dynamic vnet assignment. */ - dynamicVNetAssignmentScope?: DynamicVNetAssignmentScope; - /** The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. */ - endpointConfiguration?: PoolEndpointConfiguration; - /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. */ - publicIPAddressConfiguration?: PublicIpAddressConfiguration; - /** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */ - enableAcceleratedNetworking?: boolean; -} - -/** DynamicVNetAssignmentScope enums */ -/** "none", "job" */ -export type DynamicVNetAssignmentScope = string; - -/** The endpoint configuration for a Pool. */ -export interface PoolEndpointConfiguration { - /** A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. */ - inboundNATPools: InboundNATPool[]; +/** Information about the execution of a Task. */ +export interface TaskExecutionInformation { + /** The time at which the Task started running. 'Running' corresponds to the running state, so if the Task specifies resource files or Packages, then the start time reflects the time at which the Task started downloading or deploying these. If the Task has been restarted or retried, this is the most recent time at which the Task started running. This property is present only for Tasks that are in the running or completed state. */ + startTime?: Date; + /** The time at which the Task completed. This property is set only if the Task is in the Completed state. */ + endTime?: Date; + /** The exit code of the program specified on the Task command line. This property is set only if the Task is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the Task (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ + exitCode?: number; + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ + containerInfo?: TaskContainerExecutionInformation; + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ + failureInfo?: TaskFailureInformation; + /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. 
*/ + retryCount: number; + /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ + lastRetryTime?: Date; + /** The number of times the Task has been requeued by the Batch service as the result of a user request. When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be requeued for execution. This count tracks how many times the Task has been requeued for these reasons. */ + requeueCount: number; + /** The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero. */ + lastRequeueTime?: Date; + /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + result?: TaskExecutionResult; } -/** - * A inbound NAT Pool that can be used to address specific ports on Compute Nodes - * in a Batch Pool externally. - */ -export interface InboundNATPool { - /** The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */ - name: string; - /** The protocol of the endpoint. */ - protocol: InboundEndpointProtocol; - /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */ - backendPort: number; - /** The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */ - frontendPortRangeStart: number; - /** The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */ - frontendPortRangeEnd: number; - /** A list of network security group rules that will be applied to the endpoint. The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. 
If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400. */ - networkSecurityGroupRules?: NetworkSecurityGroupRule[]; +/** Contains information about the container which a Task is executing. */ +export interface TaskContainerExecutionInformation { + /** The ID of the container. */ + containerId?: string; + /** The state of the container. This is the state of the container according to the Docker service. It is equivalent to the status field returned by "docker inspect". */ + state?: string; + /** Detailed error information about the container. This is the detailed error string from the Docker service, if available. It is equivalent to the error field returned by "docker inspect". */ + error?: string; } -/** InboundEndpointProtocol enums */ -/** "tcp", "udp" */ -export type InboundEndpointProtocol = string; - -/** A network security group rule to apply to an inbound endpoint. */ -export interface NetworkSecurityGroupRule { - /** The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. */ - priority: number; - /** The action that should be taken for a specified IP address, subnet range or tag. */ - access: NetworkSecurityGroupRuleAccess; - /** The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. */ - sourceAddressPrefix: string; - /** The source port ranges to match for the rule. Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). If any other values are provided the request fails with HTTP status code 400. The default value is '*'. */ - sourcePortRanges?: string[]; +/** Information about a Task failure. */ +export interface TaskFailureInformation { + /** The category of the Task error. */ + category: ErrorCategory; + /** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the Task error, intended to be suitable for display in a user interface. */ + message?: string; + /** A list of additional details related to the error. */ + details?: NameValuePair[]; } -/** NetworkSecurityGroupRuleAccess enums */ -/** "allow", "deny" */ -export type NetworkSecurityGroupRuleAccess = string; +/** ErrorCategory enums */ +/** "usererror", "servererror" */ +export type ErrorCategory = string; -/** The public IP Address configuration of the networking configuration of a Pool. */ -export interface PublicIpAddressConfiguration { - /** The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. */ - provision?: IPAddressProvisioningType; - /** The list of public IPs which the Batch service will use when provisioning Compute Nodes. 
The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */ - ipAddressIds?: string[]; +/** Represents a name-value pair. */ +export interface NameValuePair { + /** The name in the name-value pair. */ + name?: string; + /** The value in the name-value pair. */ + value?: string; } -/** IPAddressProvisioningType enums */ -/** "batchmanaged", "usermanaged", "nopublicipaddresses" */ -export type IPAddressProvisioningType = string; +/** TaskExecutionResult enums */ +/** "success", "failure" */ +export type TaskExecutionResult = string; /** * Batch will retry Tasks when a recovery operation is triggered on a Node. @@ -454,13 +239,34 @@ export interface TaskContainerSettings { workingDirectory?: ContainerWorkingDirectory; } -/** ContainerWorkingDirectory enums */ -/** "taskWorkingDirectory", "containerImageDefault" */ -export type ContainerWorkingDirectory = string; +/** A private container registry. */ +export interface ContainerRegistry { + /** The user name to log into the registry server. */ + username?: string; + /** The password to log into the registry server. */ + password?: string; + /** The registry URL. If omitted, the default is "docker.io". */ + registryServer?: string; + /** The reference to the user assigned identity to use to access an Azure Container Registry instead of username and password. */ + identityReference?: BatchNodeIdentityReference; +} -/** A single file or multiple files to be downloaded to a Compute Node. */ -export interface ResourceFile { - /** The storage container name in the auto storage Account. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. */ +/** + * The reference to a user assigned identity associated with the Batch pool which + * a compute node will use. + */ +export interface BatchNodeIdentityReference { + /** The ARM resource id of the user assigned identity. */ + resourceId?: string; +} + +/** ContainerWorkingDirectory enums */ +/** "taskWorkingDirectory", "containerImageDefault" */ +export type ContainerWorkingDirectory = string; + +/** A single file or multiple files to be downloaded to a Compute Node. */ +export interface ResourceFile { + /** The storage container name in the auto storage Account. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. */ autoStorageContainerName?: string; /** The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable from compute nodes. There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access. */ storageContainerUrl?: string; @@ -507,6 +313,32 @@ export type AutoUserScope = string; /** "nonadmin", "admin" */ export type ElevationLevel = string; +/** Information about a StartTask running on a Compute Node. 
*/ +export interface StartTaskInformation { + /** The state of the StartTask on the Compute Node. */ + state: StartTaskState; + /** The time at which the StartTask started running. This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running). */ + startTime: Date; + /** The time at which the StartTask stopped running. This is the end time of the most recent run of the StartTask, if that run has completed (even if that run failed and a retry is pending). This element is not present if the StartTask is currently running. */ + endTime?: Date; + /** The exit code of the program specified on the StartTask command line. This property is set only if the StartTask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the StartTask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ + exitCode?: number; + /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ + containerInfo?: TaskContainerExecutionInformation; + /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ + failureInfo?: TaskFailureInformation; + /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */ + retryCount: number; + /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ + lastRetryTime?: Date; + /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ + result?: TaskExecutionResult; +} + +/** StartTaskState enums */ +/** "running", "completed" */ +export type StartTaskState = string; + /** A reference to a Certificate to be installed on Compute Nodes in a Pool. Warning: This object is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ export interface CertificateReference { /** The thumbprint of the Certificate. */ @@ -528,730 +360,831 @@ export type CertificateStoreLocation = string; /** "starttask", "task", "remoteuser" */ export type CertificateVisibility = string; -/** A reference to an Package to be deployed to Compute Nodes. */ -export interface ApplicationPackageReference { - /** The ID of the application to deploy. 
When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). */ - applicationId: string; - /** The version of the application to deploy. If omitted, the default version is deployed. If this is omitted on a Pool, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this is omitted on a Task, and no default version is specified for this application, the Task fails with a pre-processing error. */ - version?: string; +/** An error encountered by a Compute Node. */ +export interface BatchNodeError { + /** An identifier for the Compute Node error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the Compute Node error, intended to be suitable for display in a user interface. */ + message?: string; + /** The list of additional error details related to the Compute Node error. */ + errorDetails?: NameValuePair[]; } -/** Specifies how Tasks should be distributed across Compute Nodes. */ -export interface TaskSchedulingPolicy { - /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ - nodeFillType: BatchNodeFillType; +/** The endpoint configuration for the Compute Node. */ +export interface BatchNodeEndpointConfiguration { + /** The list of inbound endpoints that are accessible on the Compute Node. */ + inboundEndpoints: InboundEndpoint[]; } -/** BatchNodeFillType enums */ -/** "spread", "pack" */ -export type BatchNodeFillType = string; - -/** - * Properties used to create a user used to execute Tasks on an Azure Batch - * Compute Node. - */ -export interface UserAccount { - /** The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. */ +/** An inbound endpoint on a Compute Node. */ +export interface InboundEndpoint { + /** The name of the endpoint. */ name: string; - /** The password for the user Account. */ - password: string; - /** The elevation level of the user Account. The default value is nonAdmin. */ - elevationLevel?: ElevationLevel; - /** The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options. */ - linuxUserConfiguration?: LinuxUserConfiguration; - /** The Windows-specific user configuration for the user Account. This property can only be specified if the user is on a Windows Pool. If not specified and on a Windows Pool, the user is created with the default options. */ - windowsUserConfiguration?: WindowsUserConfiguration; + /** The protocol of the endpoint. */ + protocol: InboundEndpointProtocol; + /** The public IP address of the Compute Node. */ + publicIPAddress?: string; + /** The public fully qualified domain name for the Compute Node. */ + publicFQDN?: string; + /** The public port number of the endpoint. */ + frontendPort: number; + /** The backend port number of the endpoint. */ + backendPort: number; } -/** Properties used to create a user Account on a Linux Compute Node. */ -export interface LinuxUserConfiguration { - /** The user ID of the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the uid. 
*/ - uid?: number; - /** The group ID for the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the gid. */ - gid?: number; - /** The SSH private key for the user Account. The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between Compute Nodes (no modification of the user's .ssh directory is done). */ - sshPrivateKey?: string; -} +/** InboundEndpointProtocol enums */ +/** "tcp", "udp" */ +export type InboundEndpointProtocol = string; -/** Properties used to create a user Account on a Windows Compute Node. */ -export interface WindowsUserConfiguration { - /** The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch' and for CloudServiceConfiguration Pools is 'interactive'. */ - loginMode?: LoginMode; +/** + * The Batch Compute Node agent is a program that runs on each Compute Node in the + * Pool and provides Batch capability on the Compute Node. + */ +export interface NodeAgentInformation { + /** The version of the Batch Compute Node agent running on the Compute Node. This version number can be checked against the Compute Node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. */ + version: string; + /** The time when the Compute Node agent was updated on the Compute Node. This is the most recent time that the Compute Node agent was updated to a new version. */ + lastUpdateTime: Date; } -/** LoginMode enums */ -/** "batch", "interactive" */ -export type LoginMode = string; +/** Info about the current state of the virtual machine. */ +export interface VirtualMachineInfo { + /** The reference to the Azure Virtual Machine's Marketplace Image. */ + imageReference?: ImageReference; +} /** - * The Batch service does not assign any meaning to this metadata; it is solely - * for the use of user code. + * A reference to an Azure Virtual Machines Marketplace Image or a Shared Image + * Gallery Image. To get the list of all Azure Marketplace Image references + * verified by Azure Batch, see the 'List Supported Images' operation. */ -export interface MetadataItem { - /** The name of the metadata item. */ - name: string; - /** The value of the metadata item. */ - value: string; +export interface ImageReference { + /** The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. */ + publisher?: string; + /** The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. */ + offer?: string; + /** The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. */ + sku?: string; + /** The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. */ + version?: string; + /** The ARM resource identifier of the Shared Image Gallery Image. Compute Nodes in the Pool will be created using this Image Id. 
This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Shared Image Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + virtualMachineImageId?: string; + /** The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. */ + readonly exactVersion?: string; } -/** The file system to mount on each node. */ -export interface MountConfiguration { - /** The Azure Storage Container to mount using blob FUSE on each node. This property is mutually exclusive with all other properties. */ - azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfiguration; - /** The NFS file system to mount on each node. This property is mutually exclusive with all other properties. */ - nfsMountConfiguration?: NfsMountConfiguration; - /** The CIFS/SMB file system to mount on each node. This property is mutually exclusive with all other properties. */ - cifsMountConfiguration?: CifsMountConfiguration; - /** The Azure File Share to mount on each node. This property is mutually exclusive with all other properties. */ - azureFileShareConfiguration?: AzureFileShareConfiguration; +/** Options for rebooting an Azure Batch Compute Node. */ +export interface NodeRebootOptions { + /** When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. */ + nodeRebootOption?: BatchNodeRebootOption; } -/** Information used to connect to an Azure Storage Container using Blobfuse. */ -export interface AzureBlobFileSystemConfiguration { - /** The Azure Storage Account name. */ - accountName: string; - /** The Azure Blob Storage Container name. */ - containerName: string; - /** The Azure Storage Account key. This property is mutually exclusive with both sasKey and identity; exactly one must be specified. */ - accountKey?: string; - /** The Azure Storage SAS token. This property is mutually exclusive with both accountKey and identity; exactly one must be specified. */ - sasKey?: string; - /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ - blobfuseOptions?: string; - /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ - relativeMountPath: string; - /** The reference to the user assigned identity to use to access containerName. This property is mutually exclusive with both accountKey and sasKey; exactly one must be specified. 
*/ - identityReference?: BatchNodeIdentityReference; -} +/** BatchNodeRebootOption enums */ +/** "requeue", "terminate", "taskcompletion", "retaineddata" */ +export type BatchNodeRebootOption = string; -/** Information used to connect to an NFS file system. */ -export interface NfsMountConfiguration { - /** The URI of the file system to mount. */ - source: string; - /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ - relativeMountPath: string; - /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ - mountOptions?: string; +/** Options for reimaging an Azure Batch Compute Node. */ +export interface NodeReimageOptions { + /** When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue. */ + nodeReimageOption?: BatchNodeReimageOption; } -/** Information used to connect to a CIFS file system. */ -export interface CifsMountConfiguration { - /** The user to use for authentication against the CIFS file system. */ - username: string; - /** The URI of the file system to mount. */ - source: string; - /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ - relativeMountPath: string; - /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ - mountOptions?: string; - /** The password to use for authentication against the CIFS file system. */ - password: string; -} +/** BatchNodeReimageOption enums */ +/** "requeue", "terminate", "taskcompletion", "retaineddata" */ +export type BatchNodeReimageOption = string; -/** Information used to connect to an Azure Fileshare. */ -export interface AzureFileShareConfiguration { - /** The Azure Storage account name. */ - accountName: string; - /** The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. */ - azureFileUrl: string; - /** The Azure Storage account key. */ - accountKey: string; - /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ - relativeMountPath: string; - /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ - mountOptions?: string; +/** Options for disabling scheduling on an Azure Batch Compute Node. */ +export interface NodeDisableSchedulingOptions { + /** What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. */ + nodeDisableSchedulingOption?: DisableBatchNodeSchedulingOption; } -/** NodeCommunicationMode enums */ -/** "default", "classic", "simplified" */ -export type NodeCommunicationMode = string; +/** DisableBatchNodeSchedulingOption enums */ +/** "requeue", "terminate", "taskcompletion" */ +export type DisableBatchNodeSchedulingOption = string; -/** The result of listing the Pools in an Account. */ -export interface BatchPoolListResult { - /** The list of Pools. */ - value?: BatchPool[]; - /** The URL to get the next set of results. 
*/ - "odata.nextLink"?: string; +/** The remote login settings for a Compute Node. */ +export interface BatchNodeRemoteLoginSettingsResult { + /** The IP address used for remote login to the Compute Node. */ + remoteLoginIPAddress: string; + /** The port used for remote login to the Compute Node. */ + remoteLoginPort: number; } -/** A Pool in the Azure Batch service. */ -export interface BatchPool { - /** A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */ - readonly id?: string; - /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ - readonly displayName?: string; - /** The URL of the Pool. */ - readonly url?: string; - /** The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. */ - readonly eTag?: string; - /** The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. */ - readonly lastModified?: Date; - /** The creation time of the Pool. */ - readonly creationTime?: Date; - /** The current state of the Pool. */ - readonly state?: PoolState; - /** The time at which the Pool entered its current state. */ - readonly stateTransitionTime?: Date; - /** Whether the Pool is resizing. */ - readonly allocationState?: AllocationState; - /** The time at which the Pool entered its current allocation state. */ - readonly allocationStateTransitionTime?: Date; - /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ - readonly vmSize?: string; - /** The cloud service configuration for the Pool. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */ - readonly cloudServiceConfiguration?: CloudServiceConfiguration; - /** The virtual machine configuration for the Pool. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. */ - readonly virtualMachineConfiguration?: VirtualMachineConfiguration; - /** The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. */ - readonly resizeTimeout?: string; - /** A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. 
*/ - readonly resizeErrors?: ResizeError[]; - /** The number of dedicated Compute Nodes currently in the Pool. */ - readonly currentDedicatedNodes?: number; - /** The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. */ - readonly currentLowPriorityNodes?: number; - /** The desired number of dedicated Compute Nodes in the Pool. */ - readonly targetDedicatedNodes?: number; - /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ - readonly targetLowPriorityNodes?: number; - /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. */ - readonly enableAutoScale?: boolean; - /** A formula for the desired number of Compute Nodes in the Pool. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ - readonly autoScaleFormula?: string; - /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ - readonly autoScaleEvaluationInterval?: string; - /** The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ - readonly autoScaleRun?: AutoScaleRun; - /** Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes to be allocated in the Pool. */ - readonly enableInterNodeCommunication?: boolean; - /** The network configuration for the Pool. */ - readonly networkConfiguration?: NetworkConfiguration; - /** A Task specified to run on each Compute Node as it joins the Pool. */ - startTask?: StartTask; - /** - * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - */ - readonly certificateReferences?: CertificateReference[]; - /** The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */ - readonly applicationPackageReferences?: ApplicationPackageReference[]; - /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. 
If a license is requested which is not supported, Pool creation will fail. */ - readonly applicationLicenses?: string[]; - /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ - readonly taskSlotsPerNode?: number; - /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ - readonly taskSchedulingPolicy?: TaskSchedulingPolicy; - /** The list of user Accounts to be created on each Compute Node in the Pool. */ - readonly userAccounts?: UserAccount[]; - /** A list of name-value pairs associated with the Pool as metadata. */ - readonly metadata?: MetadataItem[]; - /** Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ - readonly stats?: PoolStatistics; - /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ - readonly mountConfiguration?: MountConfiguration[]; - /** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ - readonly identity?: BatchPoolIdentity; - /** The desired node communication mode for the pool. If omitted, the default value is Default. */ - targetNodeCommunicationMode?: NodeCommunicationMode; - /** The current state of the pool communication mode. */ - readonly currentNodeCommunicationMode?: NodeCommunicationMode; +/** The Azure Batch service log files upload options for a Compute Node. */ +export interface UploadBatchServiceLogsOptions { + /** The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. */ + containerUrl: string; + /** The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. */ + startTime: Date; + /** The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. */ + endTime?: Date; + /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. 
The identity must have write access to the Azure Blob Storage container. */
+  identityReference?: BatchNodeIdentityReference;
}

-/** PoolState enums */
-/** "active", "deleting" */
-export type PoolState = string;
-/** AllocationState enums */
-/** "steady", "resizing", "stopping" */
-export type AllocationState = string;

+/** The result of uploading Batch service log files from a specific Compute Node. */
+export interface UploadBatchServiceLogsResult {
+  /** The virtual directory within Azure Blob Storage container to which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the blob name for each log file uploaded, and it is built based on the poolId, nodeId, and a unique identifier. */
+  virtualDirectoryName: string;
+  /** The number of log files which will be uploaded. */
+  numberOfFilesUploaded: number;
+}

-/** An error that occurred when resizing a Pool. */
-export interface ResizeError {
-  /** An identifier for the Pool resize error. Codes are invariant and are intended to be consumed programmatically. */
-  code?: string;
-  /** A message describing the Pool resize error, intended to be suitable for display in a user interface. */
-  message?: string;
-  /** A list of additional error details related to the Pool resize error. */
-  values?: NameValuePair[];
+/** The result of listing the Compute Nodes in a Pool. */
+export interface BatchNodeListResult {
+  /** The list of Compute Nodes. */
+  value?: BatchNode[];
+  /** The URL to get the next set of results. */
+  "odata.nextLink"?: string;
}

-/** Represents a name-value pair. */
-export interface NameValuePair {
-  /** The name in the name-value pair. */
-  name?: string;
-  /** The value in the name-value pair. */
-  value?: string;
+/** The configuration for virtual machine extension instance view. */
+export interface NodeVMExtension {
+  /** The provisioning state of the virtual machine extension. */
+  provisioningState?: string;
+  /** The virtual machine extension. */
+  vmExtension?: VMExtension;
+  /** The VM extension instance view. */
+  instanceView?: VMExtensionInstanceView;
}

-/** The results and errors from an execution of a Pool autoscale formula. */
-export interface AutoScaleRun {
-  /** The time at which the autoscale formula was last evaluated. */
-  timestamp: Date;
-  /** The final values of all variables used in the evaluation of the autoscale formula. Each variable value is returned in the form $variable=value, and variables are separated by semicolons. */
-  results?: string;
-  /** Details of the error encountered evaluating the autoscale formula on the Pool, if the evaluation was unsuccessful. */
-  error?: AutoScaleRunError;
+/** The configuration for virtual machine extensions. */
+export interface VMExtension {
+  /** The name of the virtual machine extension. */
+  name: string;
+  /** The name of the extension handler publisher. */
+  publisher: string;
+  /** The type of the extension. */
+  type: string;
+  /** The version of the script handler. */
+  typeHandlerVersion?: string;
+  /** Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. */
+  autoUpgradeMinorVersion?: boolean;
+  /** Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available. */
+  enableAutomaticUpgrade?: boolean;
+  /** JSON formatted public settings for the extension. */
+  settings?: Record<string, unknown>;
+  /** The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. */
+  protectedSettings?: Record<string, unknown>;
+  /** The collection of extension names after which this extension needs to be provisioned. */
+  provisionAfterExtensions?: string[];
}
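+
+// Illustrative sketch (not part of the generated models): how a caller might
+// populate a VMExtension. The name, publisher, type and settings values below
+// are assumptions for the example only.
+//
+//   const monitoringExtension: VMExtension = {
+//     name: "CustomExtension100",
+//     publisher: "Contoso.Compute",
+//     type: "CustomExtension",
+//     typeHandlerVersion: "2.0",
+//     autoUpgradeMinorVersion: true,
+//     // settings is an open-ended JSON object, hence Record<string, unknown>.
+//     settings: { applicationId: "example-app-id" },
+//   };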
-/** An error that occurred when executing or evaluating a Pool autoscale formula. */
-export interface AutoScaleRunError {
-  /** An identifier for the autoscale error. Codes are invariant and are intended to be consumed programmatically. */
+/** The VM extension instance view. */
+export interface VMExtensionInstanceView {
+  /** The name of the VM extension instance view. */
+  name?: string;
+  /** The resource status information. */
+  statuses?: InstanceViewStatus[];
+  /** The resource sub-status information. */
+  subStatuses?: InstanceViewStatus[];
+}
+
+/** The instance view status. */
+export interface InstanceViewStatus {
+  /** The status code. */
  code?: string;
-  /** A message describing the autoscale error, intended to be suitable for display in a user interface. */
+  /** The localized label for the status. */
+  displayStatus?: string;
+  /** Level code. */
+  level?: StatusLevelTypes;
+  /** The detailed status message. */
  message?: string;
-  /** A list of additional error details related to the autoscale error. */
-  values?: NameValuePair[];
+  /** The time of the status. */
+  time?: string;
}

-/** Contains utilization and resource usage statistics for the lifetime of a Pool. */
-export interface PoolStatistics {
-  /** The URL for the statistics. */
-  url: string;
-  /** The start time of the time range covered by the statistics. */
-  startTime: Date;
-  /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */
-  lastUpdateTime: Date;
-  /** Statistics related to Pool usage, such as the amount of core-time used. */
-  usageStats?: UsageStatistics;
-  /** Statistics related to resource consumption by Compute Nodes in the Pool. */
-  resourceStats?: ResourceStatistics;
+/** Level code. */
+/** "Error", "Info", "Warning" */
+export type StatusLevelTypes = string;
+
+/** The result of listing the Compute Node extensions in a Node. */
+export interface NodeVMExtensionList {
+  /** The list of Compute Node extensions. */
+  value?: NodeVMExtension[];
+  /** The URL to get the next set of results. */
+  "odata.nextLink"?: string;
}

-/** Statistics related to Pool usage information. */
-export interface UsageStatistics {
-  /** The start time of the time range covered by the statistics. */
-  startTime: Date;
-  /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */
-  lastUpdateTime: Date;
-  /** The aggregated wall-clock time of the dedicated Compute Node cores being part of the Pool. */
-  dedicatedCoreTime: string;
+/**
+ * The result of listing the files on a Compute Node, or the files associated with
+ * a Task on a Compute Node.
+ */
+export interface NodeFileListResult {
+  /** The list of files. */
+  value?: NodeFile[];
+  /** The URL to get the next set of results. */
+  "odata.nextLink"?: string;
}

-/** Statistics related to resource consumption by Compute Nodes in a Pool. */
-export interface ResourceStatistics {
-  /** The start time of the time range covered by the statistics. */
-  startTime: Date;
-  /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */
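+
+// Illustrative sketch (not part of the generated models): every *ListResult
+// type in this file pages the same way, via a `value` array plus an
+// "odata.nextLink" URL. `fetchPage` below stands in for whatever request
+// helper the client exposes; it is an assumption for the example.
+//
+//   async function listAllFiles(
+//     fetchPage: (nextLink?: string) => Promise<NodeFileListResult>
+//   ): Promise<NodeFile[]> {
+//     const files: NodeFile[] = [];
+//     let page = await fetchPage();
+//     files.push(...(page.value ?? []));
+//     while (page["odata.nextLink"]) {
+//       page = await fetchPage(page["odata.nextLink"]);
+//       files.push(...(page.value ?? []));
+//     }
+//     return files;
+//   }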
-  lastUpdateTime: Date;
-  /** The average CPU usage across all Compute Nodes in the Pool (percentage per node). */
-  avgCPUPercentage: number;
-  /** The average memory usage in GiB across all Compute Nodes in the Pool. */
-  avgMemoryGiB: number;
-  /** The peak memory usage in GiB across all Compute Nodes in the Pool. */
-  peakMemoryGiB: number;
-  /** The average used disk space in GiB across all Compute Nodes in the Pool. */
-  avgDiskGiB: number;
-  /** The peak used disk space in GiB across all Compute Nodes in the Pool. */
-  peakDiskGiB: number;
-  /** The total number of disk read operations across all Compute Nodes in the Pool. */
-  diskReadIOps: number;
-  /** The total number of disk write operations across all Compute Nodes in the Pool. */
-  diskWriteIOps: number;
-  /** The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. */
-  diskReadGiB: number;
-  /** The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. */
-  diskWriteGiB: number;
-  /** The total amount of data in GiB of network reads across all Compute Nodes in the Pool. */
-  networkReadGiB: number;
-  /** The total amount of data in GiB of network writes across all Compute Nodes in the Pool. */
-  networkWriteGiB: number;
+/** Information about a file or directory on a Compute Node. */
+export interface NodeFile {
+  /** The file path. */
+  name?: string;
+  /** The URL of the file. */
+  url?: string;
+  /** Whether the object represents a directory. */
+  isDirectory?: boolean;
+  /** The file properties. */
+  properties?: FileProperties;
}

-/** The identity of the Batch pool, if configured. */
-export interface BatchPoolIdentity {
-  /** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */
-  type: PoolIdentityType;
-  /** The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */
-  userAssignedIdentities?: UserAssignedIdentity[];
+/** The properties of a file on a Compute Node. */
+export interface FileProperties {
+  /** The file creation time. The creation time is not returned for files on Linux Compute Nodes. */
+  creationTime?: Date;
+  /** The time at which the file was last modified. */
+  lastModified: Date;
+  /** The length of the file. */
+  contentLength: number;
+  /** The content type of the file. */
+  contentType?: string;
+  /** The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes. */
+  fileMode?: string;
}

-/** PoolIdentityType enums */
-/** "UserAssigned", "None" */
-export type PoolIdentityType = string;

-/** The user assigned Identity */
-export interface UserAssignedIdentity {
-  /** The ARM resource id of the user assigned identity. */
-  resourceId: string;
-  /** The client id of the user assigned identity. */
-  readonly clientId?: string;
-  /** The principal id of the user assigned identity. */
-  readonly principalId?: string;
+/** Options for creating an Azure Batch Task. 
*/ +export interface BatchTaskCreateOptions { + /** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). */ + id: string; + /** A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + displayName?: string; + /** How the Batch service should respond when the Task completes. */ + exitConditions?: ExitConditions; + /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + commandLine: string; + /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ + containerSettings?: TaskContainerSettings; + /** A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ + resourceFiles?: ResourceFile[]; + /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ + outputFiles?: OutputFile[]; + /** A list of environment variable settings for the Task. */ + environmentSettings?: EnvironmentSetting[]; + /** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. */ + affinityInfo?: AffinityInformation; + /** The execution constraints that apply to this Task. 
If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days. */
+  constraints?: TaskConstraints;
+  /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */
+  requiredSlots?: number;
+  /** The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */
+  userIdentity?: UserIdentity;
+  /** An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. */
+  multiInstanceSettings?: MultiInstanceSettings;
+  /** The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob. */
+  dependsOn?: TaskDependencies;
+  /** A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. */
+  applicationPackageReferences?: ApplicationPackageReference[];
+  /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */
+  authenticationTokenSettings?: AuthenticationTokenSettings;
+}

-/** Options for updating an Azure Batch Pool. */
-export interface BatchPoolUpdateOptions {
-  /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. */
-  startTask?: StartTask;
-  /**
-   * If this element is present, it replaces any existing Certificate references configured on the Pool.
-   * If omitted, any existing Certificate references are left unchanged.
-   * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location.
-   * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. 
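+
+// Illustrative sketch (not part of the generated models): a minimal task
+// create payload using the models above. The id, command line, constraint and
+// exit-code values are assumptions for the example only.
+//
+//   const task: BatchTaskCreateOptions = {
+//     id: "task-1",
+//     // Invoke a shell explicitly; the command line itself does not run under one.
+//     commandLine: '/bin/sh -c "echo hello"',
+//     constraints: { maxTaskRetryCount: 3, retentionTime: "P7D" },
+//     exitConditions: {
+//       // Treat exit code 1 as a job-terminating failure.
+//       exitCodes: [{ code: 1, exitOptions: { jobAction: "terminate" } }],
+//     },
+//   };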
- * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - */ - certificateReferences?: CertificateReference[]; - /** A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. */ - applicationPackageReferences?: ApplicationPackageReference[]; - /** A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. */ - metadata?: MetadataItem[]; - /** The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. */ - targetNodeCommunicationMode?: NodeCommunicationMode; +/** Specifies how the Batch service should respond when the Task completes. */ +export interface ExitConditions { + /** A list of individual Task exit codes and how the Batch service should respond to them. */ + exitCodes?: ExitCodeMapping[]; + /** A list of Task exit code ranges and how the Batch service should respond to them. */ + exitCodeRanges?: ExitCodeRangeMapping[]; + /** How the Batch service should respond if the Task fails to start due to an error. */ + preProcessingError?: ExitOptions; + /** How the Batch service should respond if a file upload error occurs. If the Task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file upload error, then the action specified by the exit code takes precedence. */ + fileUploadError?: ExitOptions; + /** How the Batch service should respond if the Task fails with an exit condition not covered by any of the other properties. This value is used if the Task exits with any nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want non-default behavior on exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection. */ + default?: ExitOptions; } -/** Options for enabling automatic scaling on an Azure Batch Pool. */ -export interface BatchPoolEnableAutoScaleOptions { - /** The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). 
*/ - autoScaleFormula?: string; - /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. */ - autoScaleEvaluationInterval?: string; +/** + * How the Batch service should respond if a Task exits with a particular exit + * code. + */ +export interface ExitCodeMapping { + /** A process exit code. */ + code: number; + /** How the Batch service should respond if the Task exits with this exit code. */ + exitOptions: ExitOptions; } -/** Options for evaluating an automatic scaling formula on an Azure Batch Pool. */ -export interface BatchPoolEvaluateAutoScaleOptions { - /** The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ - autoScaleFormula: string; +/** Specifies how the Batch service responds to a particular exit condition. */ +export interface ExitOptions { + /** An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + jobAction?: JobAction; + /** An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. */ + dependencyAction?: DependencyAction; } -/** Options for changing the size of an Azure Batch Pool. */ -export interface BatchPoolResizeOptions { - /** The desired number of dedicated Compute Nodes in the Pool. */ - targetDedicatedNodes?: number; - /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ - targetLowPriorityNodes?: number; - /** The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ - resizeTimeout?: string; - /** Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue. 
*/
-  nodeDeallocationOption?: BatchNodeDeallocationOption;
}

+/** JobAction enums */
+/** "none", "disable", "terminate" */
+export type JobAction = string;
+/** DependencyAction enums */
+/** "satisfy", "block" */
+export type DependencyAction = string;
+
+/**
+ * A range of exit codes and how the Batch service should respond to exit codes
+ * within that range.
+ */
+export interface ExitCodeRangeMapping {
+  /** The first exit code in the range. */
+  start: number;
+  /** The last exit code in the range. */
+  end: number;
+  /** How the Batch service should respond if the Task exits with an exit code in the range start to end (inclusive). */
+  exitOptions: ExitOptions;
}

-/** BatchNodeDeallocationOption enums */
-/** "requeue", "terminate", "taskcompletion", "retaineddata" */
-export type BatchNodeDeallocationOption = string;
+/** On every file upload, the Batch service writes two log files to the Compute Node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files can be used to learn more about a specific failure. */
+export interface OutputFile {
+  /** A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. */
+  filePattern: string;
+  /** The destination for the output file(s). */
+  destination: OutputFileDestination;
+  /** Additional options for the upload operation, including under what conditions to perform the upload. */
+  uploadOptions: OutputFileUploadOptions;
+}

-/** Options for replacing properties on an Azure Batch Pool. */
-export interface BatchPoolReplaceOptions {
-  /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. */
-  startTask?: StartTask;
-  /**
-   * This list replaces any existing Certificate references configured on the Pool.
-   * If you specify an empty collection, any existing Certificate references are removed from the Pool.
-   * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location.
-   * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location.
-   * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory.
-   * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead.
-   */
-  certificateReferences: CertificateReference[];
-  /** The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. */
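+
+// Illustrative sketch (not part of the generated models): an OutputFile entry
+// that uploads the task's stdout to blob storage when the task succeeds. The
+// container URL is a placeholder; a real one needs a SAS token or a managed
+// identity (see OutputFileBlobContainerDestination below).
+//
+//   const stdoutUpload: OutputFile = {
+//     filePattern: "../stdout.txt",
+//     destination: {
+//       container: { containerUrl: "https://<account>.blob.core.windows.net/logs?<sas>" },
+//     },
+//     uploadOptions: { uploadCondition: "tasksuccess" },
+//   };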
-  applicationPackageReferences: ApplicationPackageReference[];
-  /** A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. */
-  metadata: MetadataItem[];
-  /** The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. */
-  targetNodeCommunicationMode?: NodeCommunicationMode;
}

+/** The destination to which a file should be uploaded. */
+export interface OutputFileDestination {
+  /** A location in Azure blob storage to which files are uploaded. */
+  container?: OutputFileBlobContainerDestination;
+}

-/** Options for removing nodes from an Azure Batch Pool. */
-export interface NodeRemoveOptions {
-  /** A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. */
-  nodeList: string[];
-  /** The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
-  resizeTimeout?: string;
-  /** Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. */
-  nodeDeallocationOption?: BatchNodeDeallocationOption;
}

+/** Specifies a file upload destination within an Azure blob storage container. */
+export interface OutputFileBlobContainerDestination {
+  /** The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. */
+  path?: string;
+  /** The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. */
+  containerUrl: string;
+  /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */
+  identityReference?: BatchNodeIdentityReference;
+  /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. */
+  uploadHeaders?: HttpHeader[];
+}

-/** The result of listing the supported Virtual Machine Images. */
-export interface AccountListSupportedImagesResult {
-  /** The list of supported Virtual Machine Images. 
*/ - value?: ImageInformation[]; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; +/** An HTTP header name-value pair */ +export interface HttpHeader { + /** The case-insensitive name of the header to be used while uploading output files. */ + name: string; + /** The value of the header to be used while uploading output files. */ + value?: string; } /** - * A reference to the Azure Virtual Machines Marketplace Image and additional - * information about the Image. + * Options for an output file upload operation, including under what conditions + * to perform the upload. */ -export interface ImageInformation { - /** The ID of the Compute Node agent SKU which the Image supports. */ - nodeAgentSKUId: string; - /** The reference to the Azure Virtual Machine's Marketplace Image. */ - imageReference: ImageReference; - /** The type of operating system (e.g. Windows or Linux) of the Image. */ - osType: OSType; - /** The capabilities or features which the Image supports. Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. */ - capabilities?: string[]; - /** The time when the Azure Batch service will stop accepting create Pool requests for the Image. */ - batchSupportEndOfLife?: Date; - /** Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. */ - verificationType: VerificationType; +export interface OutputFileUploadOptions { + /** The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. */ + uploadCondition: OutputFileUploadCondition; } -/** OSType enums */ -/** "linux", "windows" */ -export type OSType = string; -/** VerificationType enums */ -/** "verified", "unverified" */ -export type VerificationType = string; +/** OutputFileUploadCondition enums */ +/** "tasksuccess", "taskfailure", "taskcompletion" */ +export type OutputFileUploadCondition = string; -/** The result of listing the Compute Node counts in the Account. */ -export interface PoolNodeCountsListResult { - /** A list of Compute Node counts by Pool. */ - value?: PoolNodeCounts[]; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; +/** + * A locality hint that can be used by the Batch service to select a Compute Node + * on which to start a Task. + */ +export interface AffinityInformation { + /** An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */ + affinityId: string; } -/** The number of Compute Nodes in each state for a Pool. */ -export interface PoolNodeCounts { - /** The ID of the Pool. */ - poolId: string; - /** The number of dedicated Compute Nodes in each state. */ - dedicated?: NodeCounts; - /** The number of Spot/Low-priority Compute Nodes in each state. */ - lowPriority?: NodeCounts; -} - -/** The number of Compute Nodes in each Compute Node state. */ -export interface NodeCounts { - /** The number of Compute Nodes in the creating state. */ - creating: number; - /** The number of Compute Nodes in the idle state. */ - idle: number; - /** The number of Compute Nodes in the offline state. 
*/ - offline: number; - /** The number of Compute Nodes in the preempted state. */ - preempted: number; - /** The count of Compute Nodes in the rebooting state. */ - rebooting: number; - /** The number of Compute Nodes in the reimaging state. */ - reimaging: number; - /** The number of Compute Nodes in the running state. */ - running: number; - /** The number of Compute Nodes in the starting state. */ - starting: number; - /** The number of Compute Nodes in the startTaskFailed state. */ - startTaskFailed: number; - /** The number of Compute Nodes in the leavingPool state. */ - leavingPool: number; - /** The number of Compute Nodes in the unknown state. */ - unknown: number; - /** The number of Compute Nodes in the unusable state. */ - unusable: number; - /** The number of Compute Nodes in the waitingForStartTask state. */ - waitingForStartTask: number; - /** The total number of Compute Nodes. */ - total: number; -} - -/** An Azure Batch Job. */ -export interface BatchJob { - /** A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */ - readonly id?: string; - /** The display name for the Job. */ - readonly displayName?: string; - /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ - readonly usesTaskDependencies?: boolean; - /** The URL of the Job. */ - readonly url?: string; - /** The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can be pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime. */ - readonly eTag?: string; - /** The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state. */ - readonly lastModified?: Date; - /** The creation time of the Job. */ - readonly creationTime?: Date; - /** The current state of the Job. */ - readonly state?: JobState; - /** The time at which the Job entered its current state. */ - readonly stateTransitionTime?: Date; - /** The previous state of the Job. This property is not set if the Job is in its initial Active state. */ - readonly previousState?: JobState; - /** The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state. */ - readonly previousStateTransitionTime?: Date; - /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */ - priority?: number; - /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */ - allowTaskPreemption?: boolean; - /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. 
*/ - maxParallelTasks?: number; - /** The execution constraints for the Job. */ - constraints?: JobConstraints; - /** Details of a Job Manager Task to be launched when the Job is started. */ - readonly jobManagerTask?: JobManagerTask; - /** The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job. */ - readonly jobPreparationTask?: JobPreparationTask; - /** The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job. */ - readonly jobReleaseTask?: JobReleaseTask; - /** The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */ - readonly commonEnvironmentSettings?: EnvironmentSetting[]; - /** The Pool settings associated with the Job. */ - poolInfo: PoolInformation; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. */ - onAllTasksComplete?: OnAllTasksComplete; - /** The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ - readonly onTaskFailure?: OnTaskFailure; - /** The network configuration for the Job. */ - readonly networkConfiguration?: JobNetworkConfiguration; - /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ - metadata?: MetadataItem[]; - /** The execution information for the Job. */ - readonly executionInfo?: JobExecutionInformation; - /** Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ - readonly stats?: JobStatistics; -} - -/** JobState enums */ -/** "active", "disabling", "disabled", "enabling", "terminating", "completed", "deleting" */ -export type JobState = string; - -/** The execution constraints for a Job. */ -export interface JobConstraints { - /** The maximum elapsed time that the Job may run, measured from the time the Job is created. If the Job does not complete within the time limit, the Batch service terminates it and any Tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If this property is not specified, there is no time limit on how long the Job may run. */ +/** Execution constraints to apply to a Task. */ +export interface TaskConstraints { + /** The maximum elapsed time that the Task may run, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. If this is not specified, there is no time limit on how long the Task may run. */ maxWallClockTime?: string; - /** The maximum number of times each Task may be retried. 
The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try each Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry Tasks. If the maximum retry count is -1, the Batch service retries Tasks without limit. The default value is 0 (no retries). */
+  /** The minimum time to retain the Task directory on the Compute Node where it ran, from the time it completes execution. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. */
+  retentionTime?: string;
+  /** The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit; however, this is not recommended for a start Task or any Task. The default value is 0 (no retries). */
   maxTaskRetryCount?: number;
 }
 
 /**
- * Specifies details of a Job Manager Task.
- * The Job Manager Task is automatically started when the Job is created. The
- * Batch service tries to schedule the Job Manager Task before any other Tasks in
- * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where
- * Job Manager Tasks are running for as long as possible (that is, Compute Nodes
- * running 'normal' Tasks are removed before Compute Nodes running Job Manager
- * Tasks). When a Job Manager Task fails and needs to be restarted, the system
- * tries to schedule it at the highest priority. If there are no idle Compute
- * Nodes available, the system may terminate one of the running Tasks in the Pool
- * and return it to the queue in order to make room for the Job Manager Task to
- * restart. Note that a Job Manager Task in one Job does not have priority over
- * Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For
- * example, if a Job Manager in a priority 0 Job needs to be restarted, it will
- * not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery
- * operation is triggered on a Node. Examples of recovery operations include (but
- * are not limited to) when an unhealthy Node is rebooted or a Compute Node
- * disappeared due to host failure. Retries due to recovery operations are
- * independent of and are not counted against the maxTaskRetryCount. Even if the
- * maxTaskRetryCount is 0, an internal retry due to a recovery operation may
- * occur. Because of this, all Tasks should be idempotent. This means Tasks need
- * to tolerate being interrupted and restarted without causing any corruption or
- * duplicate data. The best practice for long running Tasks is to use some form of
- * checkpointing.
+ * Multi-instance Tasks are commonly used to support MPI Tasks. 
In the MPI case, + * if any of the subtasks fail (for example due to exiting with a non-zero exit + * code) the entire multi-instance Task fails. The multi-instance Task is then + * terminated and retried, up to its retry limit. */ -export interface JobManagerTask { - /** A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. */ - id: string; - /** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ - displayName?: string; - /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ - commandLine: string; - /** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ - containerSettings?: TaskContainerSettings; - /** A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ - resourceFiles?: ResourceFile[]; - /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ - outputFiles?: OutputFile[]; - /** A list of environment variable settings for the Job Manager Task. */ - environmentSettings?: EnvironmentSetting[]; - /** Constraints that apply to the Job Manager Task. */ - constraints?: TaskConstraints; - /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this property is not supported and must not be specified. */ - requiredSlots?: number; - /** Whether completion of the Job Manager Task signifies completion of the entire Job. 
If true, when the Job Manager Task completes, the Batch service marks the Job as complete. If any Tasks are still running at this time (other than Job Release), those Tasks are terminated. If false, the completion of the Job Manager Task does not affect the Job status. In this case, you should either use the onAllTasksComplete attribute to terminate the Job, or have a client or user terminate the Job explicitly. An example of this is if the Job Manager creates a set of Tasks but then takes no further role in their execution. The default value is true. If you are using the onAllTasksComplete and onTaskFailure attributes to control Job lifetime, and using the Job Manager Task only to create the Tasks for the Job (not to monitor progress), then it is important to set killJobOnCompletion to false. */ - killJobOnCompletion?: boolean; - /** The user identity under which the Job Manager Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ - userIdentity?: UserIdentity; - /** Whether the Job Manager Task requires exclusive use of the Compute Node where it runs. If true, no other Tasks will run on the same Node for as long as the Job Manager is running. If false, other Tasks can run simultaneously with the Job Manager on a Compute Node. The Job Manager Task counts normally against the Compute Node's concurrent Task limit, so this is only relevant if the Compute Node allows multiple concurrent Tasks. The default value is true. */ - runExclusive?: boolean; - /** - * A list of Application Packages that the Batch service will deploy to the - * Compute Node before running the command line.Application Packages are - * downloaded and deployed to a shared directory, not the Task working - * directory. Therefore, if a referenced Application Package is already - * on the Compute Node, and is up to date, then it is not re-downloaded; - * the existing copy on the Compute Node is used. If a referenced Application - * Package cannot be installed, for example because the package has been deleted - * or because download failed, the Task fails. - */ - applicationPackageReferences?: ApplicationPackageReference[]; - /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */ - authenticationTokenSettings?: AuthenticationTokenSettings; - /** Whether the Job Manager Task may run on a Spot/Low-priority Compute Node. The default value is true. */ - allowLowPriorityNode?: boolean; -} - -/** On every file uploads, Batch service writes two log files to the compute node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a specific failure. */ -export interface OutputFile { - /** A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? 
matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. */
-  filePattern: string;
-  /** The destination for the output file(s). */
-  destination: OutputFileDestination;
-  /** Additional options for the upload operation, including under what conditions to perform the upload. */
-  uploadOptions: OutputFileUploadOptions;
+export interface MultiInstanceSettings {
+  /** The number of Compute Nodes required by the Task. If omitted, the default is 1. */
+  numberOfInstances?: number;
+  /** The command line to run on all the Compute Nodes to enable them to coordinate when the primary runs the main Task command. A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages. */
+  coordinationCommandLine: string;
+  /** A list of files that the Batch service will download before running the coordination command line. The difference between common resource files and Task resource files is that common resource files are downloaded for all subtasks including the primary, whereas Task resource files are downloaded only for the primary. Also note that these resource files are not downloaded to the Task working directory, but instead are downloaded to the Task root directory (one directory above the working directory). There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */
+  commonResourceFiles?: ResourceFile[];
 }
 
-/** The destination to which a file should be uploaded. */
-export interface OutputFileDestination {
-  /** A location in Azure blob storage to which files are uploaded. */
-  container?: OutputFileBlobContainerDestination;
+/**
+ * Specifies any dependencies of a Task. Any Task that is explicitly specified or
+ * within a dependency range must complete before the dependent Task will be
+ * scheduled.
+ */
+export interface TaskDependencies {
+  /** The list of Task IDs that this Task depends on. All Tasks in this list must complete successfully before the dependent Task can be scheduled. The taskIds collection is limited to 64000 characters total (i.e. the combined length of all Task IDs). If the taskIds collection exceeds the maximum length, the Add Task request fails with error code TaskDependencyListTooLong. In this case consider using Task ID ranges instead. */
+  taskIds?: string[];
+  /** The list of Task ID ranges that this Task depends on. All Tasks in all ranges must complete successfully before the dependent Task can be scheduled. */
+  taskIdRanges?: TaskIdRange[];
+}
 
-/** Specifies a file upload destination within an Azure blob storage container. */
-export interface OutputFileBlobContainerDestination {
-  /** The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. 
*/
-  path?: string;
-  /** The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. */
-  containerUrl: string;
-  /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */
-  identityReference?: BatchNodeIdentityReference;
-  /** A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. */
-  uploadHeaders?: HttpHeader[];
+/**
+ * The start and end of the range are inclusive. For example, if a range has start
+ * 9 and end 12, then it represents Tasks '9', '10', '11' and '12'.
+ */
+export interface TaskIdRange {
+  /** The first Task ID in the range. */
+  start: number;
+  /** The last Task ID in the range. */
+  end: number;
+}
 
-/** An HTTP header name-value pair */
-export interface HttpHeader {
-  /** The case-insensitive name of the header to be used while uploading output files. */
-  name: string;
-  /** The value of the header to be used while uploading output files. */
-  value?: string;
+/** A reference to an Application Package to be deployed to Compute Nodes. */
+export interface ApplicationPackageReference {
+  /** The ID of the application to deploy. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). */
+  applicationId: string;
+  /** The version of the application to deploy. If omitted, the default version is deployed. If this is omitted on a Pool, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this is omitted on a Task, and no default version is specified for this application, the Task fails with a pre-processing error. */
+  version?: string;
 }
 
 /**
- * Options for an output file upload operation, including under what conditions
- * to perform the upload.
+ * The settings for an authentication token that the Task can use to perform Batch
+ * service operations.
  */
-export interface OutputFileUploadOptions {
-  /** The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. */
-  uploadCondition: OutputFileUploadCondition;
+export interface AuthenticationTokenSettings {
+  /** The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. */
+  access?: AccessScope[];
 }
 
-/** OutputFileUploadCondition enums */
-/** "tasksuccess", "taskfailure", "taskcompletion" */
-export type OutputFileUploadCondition = string;
+/** AccessScope enums */
+/** "job" */
+export type AccessScope = string;
 
-/** Execution constraints to apply to a Task. */
-export interface TaskConstraints {
-  /** The maximum elapsed time that the Task may run, measured from the time the Task starts. 
If the Task does not complete within the time limit, the Batch service terminates it. If this is not specified, there is no time limit on how long the Task may run. */ - maxWallClockTime?: string; - /** The minimum time to retain the Task directory on the Compute Node where it ran, from the time it completes execution. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. */ - retentionTime?: string; - /** The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries). */ - maxTaskRetryCount?: number; +/** The result of listing the Tasks in a Job. */ +export interface BatchTaskListResult { + /** The list of Tasks. */ + value?: BatchTask[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; } /** - * The settings for an authentication token that the Task can use to perform Batch - * service operations. + * Batch will retry Tasks when a recovery operation is triggered on a Node. + * Examples of recovery operations include (but are not limited to) when an + * unhealthy Node is rebooted or a Compute Node disappeared due to host failure. + * Retries due to recovery operations are independent of and are not counted + * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal + * retry due to a recovery operation may occur. Because of this, all Tasks should + * be idempotent. This means Tasks need to tolerate being interrupted and + * restarted without causing any corruption or duplicate data. The best practice + * for long running Tasks is to use some form of checkpointing. */ -export interface AuthenticationTokenSettings { - /** The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. */ - access?: AccessScope[]; +export interface BatchTask { + /** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. */ + readonly id?: string; + /** A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + readonly displayName?: string; + /** The URL of the Task. */ + readonly url?: string; + /** The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can be pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. 
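// A minimal sketch of the optimistic-concurrency pattern the eTag above
// enables. The `client`, `getTask`, `updateTask`, and `ifMatch` names are
// hypothetical stand-ins, not part of this package; the real call shape
// depends on the generated REST client.
async function bumpRetryLimit(client: any, jobId: string, taskId: string): Promise<void> {
  const task: BatchTask = await client.getTask(jobId, taskId);
  // Echo the ETag back as If-Match so the update only succeeds if nobody
  // else has modified the Task in the meantime.
  await client.updateTask(
    jobId,
    taskId,
    { constraints: { maxTaskRetryCount: 3 } },
    { ifMatch: task.eTag },
  );
}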
*/ + readonly eTag?: string; + /** The last modified time of the Task. */ + readonly lastModified?: Date; + /** The creation time of the Task. */ + readonly creationTime?: Date; + /** How the Batch service should respond when the Task completes. */ + readonly exitConditions?: ExitConditions; + /** The current state of the Task. */ + readonly state?: TaskState; + /** The time at which the Task entered its current state. */ + readonly stateTransitionTime?: Date; + /** The previous state of the Task. This property is not set if the Task is in its initial Active state. */ + readonly previousState?: TaskState; + /** The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. */ + readonly previousStateTransitionTime?: Date; + /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + readonly commandLine?: string; + /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ + readonly containerSettings?: TaskContainerSettings; + /** A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ + readonly resourceFiles?: ResourceFile[]; + /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ + readonly outputFiles?: OutputFile[]; + /** A list of environment variable settings for the Task. */ + readonly environmentSettings?: EnvironmentSetting[]; + /** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. 
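// A minimal sketch of the affinity flow described above: reuse the
// affinityId that a finished Task's nodeInfo reported as a soft placement
// hint for a follow-up Task. The returned create-options shape and command
// line are illustrative only.
function buildFollowUpTask(previous: BatchTask) {
  return {
    id: `${previous.id ?? "task"}-followup`,
    commandLine: "/bin/sh -c 'echo follow-up'",
    // Soft hint only: if that Node is busy or unavailable when the Task is
    // scheduled, the Batch service schedules it elsewhere.
    affinityInfo: previous.nodeInfo?.affinityId
      ? { affinityId: previous.nodeInfo.affinityId }
      : undefined,
  };
}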
*/ + readonly affinityInfo?: AffinityInformation; + /** The execution constraints that apply to this Task. */ + constraints?: TaskConstraints; + /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */ + readonly requiredSlots?: number; + /** The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ + readonly userIdentity?: UserIdentity; + /** Information about the execution of the Task. */ + readonly executionInfo?: TaskExecutionInformation; + /** Information about the Compute Node on which the Task ran. */ + readonly nodeInfo?: BatchNodeInformation; + /** An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. */ + readonly multiInstanceSettings?: MultiInstanceSettings; + /** Resource usage statistics for the Task. */ + readonly stats?: TaskStatistics; + /** The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. */ + readonly dependsOn?: TaskDependencies; + /** A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. */ + readonly applicationPackageReferences?: ApplicationPackageReference[]; + /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */ + readonly authenticationTokenSettings?: AuthenticationTokenSettings; +} + +/** Information about the Compute Node on which a Task ran. */ +export interface BatchNodeInformation { + /** An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. */ + affinityId?: string; + /** The URL of the Compute Node on which the Task ran. */ + nodeUrl?: string; + /** The ID of the Pool on which the Task ran. */ + poolId?: string; + /** The ID of the Compute Node on which the Task ran. */ + nodeId?: string; + /** The root directory of the Task on the Compute Node. */ + taskRootDirectory?: string; + /** The URL to the root directory of the Task on the Compute Node. */ + taskRootDirectoryUrl?: string; +} + +/** Resource usage statistics for a Task. */ +export interface TaskStatistics { + /** The URL of the statistics. 
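// A minimal sketch of the dependsOn wiring described above, using the
// TaskDependencies and TaskIdRange models. The owning Job must set
// usesTaskDependencies to true; the IDs here are illustrative.
const mergeDependencies: TaskDependencies = {
  // Wait for one explicit Task plus the whole inclusive range "1".."100".
  taskIds: ["setup"],
  taskIdRanges: [{ start: 1, end: 100 }],
};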
*/
+  url: string;
+  /** The start time of the time range covered by the statistics. */
+  startTime: Date;
+  /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */
+  lastUpdateTime: Date;
+  /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. */
+  userCPUTime: string;
+  /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. */
+  kernelCPUTime: string;
+  /** The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. */
+  wallClockTime: string;
+  /** The total number of disk read operations made by the Task. */
+  readIOps: number;
+  /** The total number of disk write operations made by the Task. */
+  writeIOps: number;
+  /** The total gibibytes read from disk by the Task. */
+  readIOGiB: number;
+  /** The total gibibytes written to disk by the Task. */
+  writeIOGiB: number;
+  /** The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) */
+  waitTime: string;
+}
+
+/** A collection of Azure Batch Tasks to add. */
+export interface BatchTaskCollection {
+  /** The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has hundreds of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried with fewer Tasks. */
+  value: BatchTaskCreateOptions[];
+}
+
+/** The result of adding a collection of Tasks to a Job. */
+export interface TaskAddCollectionResult {
+  /** The results of the add Task collection operation. */
+  value?: TaskAddResult[];
+}
+
+/** Result for a single Task added as part of an add Task collection operation. */
+export interface TaskAddResult {
+  /** The status of the add Task request. */
+  status: TaskAddStatus;
+  /** The ID of the Task for which this is the result. */
+  taskId: string;
+  /** The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Task in the meantime. */
+  eTag?: string;
+  /** The last modified time of the Task. */
+  lastModified?: Date;
+  /** The URL of the Task, if the Task was successfully added. */
+  location?: string;
+  /** The error encountered while attempting to add the Task. */
+  error?: BatchError;
+}
+
+/** TaskAddStatus enums */
+/** "success", "clienterror", "servererror" */
+export type TaskAddStatus = string;
+
+/** The result of listing the subtasks of a Task. */
+export interface BatchTaskListSubtasksResult {
+  /** The list of subtasks. */
+  value?: SubtaskInformation[];
+}
+
+/** Information about an Azure Batch subtask. */
+export interface SubtaskInformation {
+  /** The ID of the subtask. */
+  id?: number;
+  /** Information about the Compute Node on which the subtask ran. 
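// The BatchTaskCollection limits above (at most 100 Tasks, under 1 MB
// serialized) invite client-side chunking before submission. A minimal
// sketch; the serialized-size check is omitted for brevity.
function* toTaskCollections(
  tasks: BatchTaskCreateOptions[],
  chunkSize = 100,
): Generator<BatchTaskCollection> {
  for (let i = 0; i < tasks.length; i += chunkSize) {
    yield { value: tasks.slice(i, i + chunkSize) };
  }
}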
*/
+  nodeInfo?: BatchNodeInformation;
+  /** The time at which the subtask started running. If the subtask has been restarted or retried, this is the most recent time at which the subtask started running. */
+  startTime?: Date;
+  /** The time at which the subtask completed. This property is set only if the subtask is in the Completed state. */
+  endTime?: Date;
+  /** The exit code of the program specified on the subtask command line. This property is set only if the subtask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the subtask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */
+  exitCode?: number;
+  /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */
+  containerInfo?: TaskContainerExecutionInformation;
+  /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */
+  failureInfo?: TaskFailureInformation;
+  /** The current state of the subtask. */
+  state?: SubtaskState;
+  /** The time at which the subtask entered its current state. */
+  stateTransitionTime?: Date;
+  /** The previous state of the subtask. This property is not set if the subtask is in its initial running state. */
+  previousState?: SubtaskState;
+  /** The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial running state. */
+  previousStateTransitionTime?: Date;
+  /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */
+  result?: TaskExecutionResult;
+}
+
+/** SubtaskState enums */
+/** "preparing", "running", "completed" */
+export type SubtaskState = string;
+
+/**
+ * A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a
+ * specification used to create each Job.
+ */
+export interface BatchJobSchedule {
+  /** A string that uniquely identifies the schedule within the Account. */
+  readonly id?: string;
+  /** The display name for the schedule. */
+  readonly displayName?: string;
+  /** The URL of the Job Schedule. */
+  readonly url?: string;
+  /** The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime. */
+  readonly eTag?: string;
+  /** The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state. */
+  readonly lastModified?: Date;
+  /** The creation time of the Job Schedule. */
+  readonly creationTime?: Date;
+  /** The current state of the Job Schedule. */
+  readonly state?: JobScheduleState;
+  /** The time at which the Job Schedule entered the current state. */
+  readonly stateTransitionTime?: Date;
+  /** The previous state of the Job Schedule. 
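// A minimal sketch combining SubtaskState and TaskExecutionResult: collect
// the subtasks of a multi-instance Task that completed with a failure so
// their failureInfo can be inspected.
function failedSubtasks(result: BatchTaskListSubtasksResult): SubtaskInformation[] {
  return (result.value ?? []).filter(
    (s) => s.state === "completed" && s.result === "failed",
  );
}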
This property is not present if the Job Schedule is in its initial active state. */ + readonly previousState?: JobScheduleState; + /** The time at which the Job Schedule entered its previous state. This property is not present if the Job Schedule is in its initial active state. */ + readonly previousStateTransitionTime?: Date; + /** The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. */ + schedule: Schedule; + /** The details of the Jobs to be created on this schedule. */ + jobSpecification: JobSpecification; + /** Information about Jobs that have been and will be run under this schedule. */ + readonly executionInfo?: JobScheduleExecutionInformation; + /** A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ + metadata?: MetadataItem[]; + /** The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ + readonly stats?: JobScheduleStatistics; +} + +/** JobScheduleState enums */ +/** "active", "completed", "disabled", "terminating", "deleting" */ +export type JobScheduleState = string; + +/** + * The schedule according to which Jobs will be created. All times are fixed + * respective to UTC and are not impacted by daylight saving time. + */ +export interface Schedule { + /** The earliest time at which any Job may be created under this Job Schedule. If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs immediately. */ + doNotRunUntil?: Date; + /** A time after which no Job will be created under this Job Schedule. The schedule will move to the completed state as soon as this deadline is past and there is no active Job under this Job Schedule. If you do not specify a doNotRunAfter time, and you are creating a recurring Job Schedule, the Job Schedule will remain active until you explicitly terminate it. */ + doNotRunAfter?: Date; + /** The time interval, starting from the time at which the schedule indicates a Job should be created, within which a Job must be created. If a Job is not created within the startWindow interval, then the 'opportunity' is lost; no Job will be created until the next recurrence of the schedule. If the schedule is recurring, and the startWindow is longer than the recurrence interval, then this is equivalent to an infinite startWindow, because the Job that is 'due' in one recurrenceInterval is not carried forward into the next recurrence interval. The default is infinite. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + startWindow?: string; + /** The time interval between the start times of two successive Jobs under the Job Schedule. A Job Schedule can have at most one active Job under it at any given time. Because a Job Schedule can have at most one active Job under it at any given time, if it is time to create a new Job under a Job Schedule, but the previous Job is still running, the Batch service will not create the new Job until the previous Job finishes. If the previous Job does not finish within the startWindow period of the new recurrenceInterval, then no new Job will be scheduled for that interval. 
For recurring Jobs, you should normally specify a jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you will need an external process to monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs ready for the next recurrence. The default is that the schedule does not recur: one Job is created, within the startWindow after the doNotRunUntil time, and the schedule is complete as soon as that Job finishes. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
+  recurrenceInterval?: string;
+}
+
+/** Specifies details of the Jobs to be created on a schedule. */
+export interface JobSpecification {
+  /** The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created by using the update Job API. */
+  priority?: number;
+  /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+  allowTaskPreemption?: boolean;
+  /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+  maxParallelTasks?: number;
+  /** The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
+  displayName?: string;
+  /** Whether Tasks in the Job can define dependencies on each other. The default is false. */
+  usesTaskDependencies?: boolean;
+  /** The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */
+  onAllTasksComplete?: OnAllTasksComplete;
+  /** The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */
+  onTaskFailure?: OnTaskFailure;
+  /** The network configuration for the Job. */
+  networkConfiguration?: JobNetworkConfiguration;
+  /** The execution constraints for Jobs created under this schedule. 
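// A minimal sketch of the recurrence rules described above: one Job per day,
// abandoned for that interval if it cannot be created within an hour of
// falling due. Durations are ISO 8601 strings; all times are UTC. The dates
// here are illustrative.
const dailySchedule: Schedule = {
  doNotRunUntil: new Date("2024-02-01T00:00:00Z"),
  recurrenceInterval: "P1D", // at most one active Job under the schedule
  startWindow: "PT1H", // the 'opportunity' lapses after one hour
};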
*/ + constraints?: JobConstraints; + /** The details of a Job Manager Task to be launched when a Job is started under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. */ + jobManagerTask?: JobManagerTask; + /** The Job Preparation Task for Jobs created under this schedule. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. */ + jobPreparationTask?: JobPreparationTask; + /** The Job Release Task for Jobs created under this schedule. The primary purpose of the Job Release Task is to undo changes to Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes that have run the Job Preparation Task. */ + jobReleaseTask?: JobReleaseTask; + /** A list of common environment variable settings. These environment variables are set for all Tasks in Jobs created under this schedule (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */ + commonEnvironmentSettings?: EnvironmentSetting[]; + /** The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. */ + poolInfo: PoolInformation; + /** A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ + metadata?: MetadataItem[]; +} + +/** The action the Batch service should take when all Tasks in the Job are in the completed state. */ +/** "noaction", "terminatejob" */ +export type OnAllTasksComplete = string; +/** OnTaskFailure enums */ +/** "noaction", "performexitoptionsjobaction" */ +export type OnTaskFailure = string; + +/** The network configuration for the Job. */ +export interface JobNetworkConfiguration { + /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. 
If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */ + subnetId: string; +} + +/** The execution constraints for a Job. */ +export interface JobConstraints { + /** The maximum elapsed time that the Job may run, measured from the time the Job is created. If the Job does not complete within the time limit, the Batch service terminates it and any Tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If this property is not specified, there is no time limit on how long the Job may run. */ + maxWallClockTime?: string; + /** The maximum number of times each Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try each Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry Tasks. If the maximum retry count is -1, the Batch service retries Tasks without limit. The default value is 0 (no retries). */ + maxTaskRetryCount?: number; } -/** AccessScope enums */ -/** "job" */ -export type AccessScope = string; +/** + * Specifies details of a Job Manager Task. + * The Job Manager Task is automatically started when the Job is created. The + * Batch service tries to schedule the Job Manager Task before any other Tasks in + * the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where + * Job Manager Tasks are running for as long as possible (that is, Compute Nodes + * running 'normal' Tasks are removed before Compute Nodes running Job Manager + * Tasks). When a Job Manager Task fails and needs to be restarted, the system + * tries to schedule it at the highest priority. If there are no idle Compute + * Nodes available, the system may terminate one of the running Tasks in the Pool + * and return it to the queue in order to make room for the Job Manager Task to + * restart. Note that a Job Manager Task in one Job does not have priority over + * Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For + * example, if a Job Manager in a priority 0 Job needs to be restarted, it will + * not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery + * operation is triggered on a Node. Examples of recovery operations include (but + * are not limited to) when an unhealthy Node is rebooted or a Compute Node + * disappeared due to host failure. Retries due to recovery operations are + * independent of and are not counted against the maxTaskRetryCount. Even if the + * maxTaskRetryCount is 0, an internal retry due to a recovery operation may + * occur. Because of this, all Tasks should be idempotent. This means Tasks need + * to tolerate being interrupted and restarted without causing any corruption or + * duplicate data. The best practice for long running Tasks is to use some form of + * checkpointing. 
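// A minimal sketch of the retry arithmetic in JobConstraints above: with
// maxTaskRetryCount 3, Batch makes up to 4 attempts per Task (one initial
// try plus 3 retries); maxWallClockTime is an ISO 8601 duration measured
// from Job creation.
const nightlyJobConstraints: JobConstraints = {
  maxWallClockTime: "PT8H",
  maxTaskRetryCount: 3,
};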
+ */ +export interface JobManagerTask { + /** A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. */ + id: string; + /** The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + displayName?: string; + /** The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ + commandLine: string; + /** The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ + containerSettings?: TaskContainerSettings; + /** A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ + resourceFiles?: ResourceFile[]; + /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ + outputFiles?: OutputFile[]; + /** A list of environment variable settings for the Job Manager Task. */ + environmentSettings?: EnvironmentSetting[]; + /** Constraints that apply to the Job Manager Task. */ + constraints?: TaskConstraints; + /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this property is not supported and must not be specified. */ + requiredSlots?: number; + /** Whether completion of the Job Manager Task signifies completion of the entire Job. If true, when the Job Manager Task completes, the Batch service marks the Job as complete. If any Tasks are still running at this time (other than Job Release), those Tasks are terminated. If false, the completion of the Job Manager Task does not affect the Job status. 
In this case, you should either use the onAllTasksComplete attribute to terminate the Job, or have a client or user terminate the Job explicitly. An example of this is if the Job Manager creates a set of Tasks but then takes no further role in their execution. The default value is true. If you are using the onAllTasksComplete and onTaskFailure attributes to control Job lifetime, and using the Job Manager Task only to create the Tasks for the Job (not to monitor progress), then it is important to set killJobOnCompletion to false. */
+  killJobOnCompletion?: boolean;
+  /** The user identity under which the Job Manager Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */
+  userIdentity?: UserIdentity;
+  /** Whether the Job Manager Task requires exclusive use of the Compute Node where it runs. If true, no other Tasks will run on the same Node for as long as the Job Manager is running. If false, other Tasks can run simultaneously with the Job Manager on a Compute Node. The Job Manager Task counts normally against the Compute Node's concurrent Task limit, so this is only relevant if the Compute Node allows multiple concurrent Tasks. The default value is true. */
+  runExclusive?: boolean;
+  /**
+   * A list of Application Packages that the Batch service will deploy to the
+   * Compute Node before running the command line. Application Packages are
+   * downloaded and deployed to a shared directory, not the Task working
+   * directory. Therefore, if a referenced Application Package is already
+   * on the Compute Node, and is up to date, then it is not re-downloaded;
+   * the existing copy on the Compute Node is used. If a referenced Application
+   * Package cannot be installed, for example because the package has been deleted
+   * or because download failed, the Task fails.
+   */
+  applicationPackageReferences?: ApplicationPackageReference[];
+  /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */
+  authenticationTokenSettings?: AuthenticationTokenSettings;
+  /** Whether the Job Manager Task may run on a Spot/Low-priority Compute Node. The default value is true. */
+  allowLowPriorityNode?: boolean;
+}
 
 /**
  * A Job Preparation Task to run before any Tasks of the Job on any given Compute Node.
@@ -1418,455 +1351,357 @@ export interface PoolSpecification {
   targetNodeCommunicationMode?: NodeCommunicationMode;
 }
 
-/** The action the Batch service should take when all Tasks in the Job are in the completed state. */
-/** "noaction", "terminatejob" */
-export type OnAllTasksComplete = string;
-/** OnTaskFailure enums */
-/** "noaction", "performexitoptionsjobaction" */
-export type OnTaskFailure = string;
-
-/** The network configuration for the Job. */
-export interface JobNetworkConfiguration {
-  /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. 
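// A minimal sketch of the lifetime guidance above: a Job Manager Task that
// only seeds Tasks sets killJobOnCompletion to false and leaves termination
// to the Job's onAllTasksComplete ("terminatejob"). The ID and command line
// are illustrative.
const seedOnlyJobManager: JobManagerTask = {
  id: "jobmanager",
  commandLine: "/bin/sh -c './create_tasks.sh'",
  killJobOnCompletion: false,
};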
 
 /**
  * A Job Preparation Task to run before any Tasks of the Job on any given Compute Node.
@@ -1418,455 +1351,357 @@ export interface PoolSpecification {
   targetNodeCommunicationMode?: NodeCommunicationMode;
 }
 
-/** The action the Batch service should take when all Tasks in the Job are in the completed state. */
-/** "noaction", "terminatejob" */
-export type OnAllTasksComplete = string;
-/** OnTaskFailure enums */
-/** "noaction", "performexitoptionsjobaction" */
-export type OnTaskFailure = string;
-
-/** The network configuration for the Job. */
-export interface JobNetworkConfiguration {
-  /** The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
-  subnetId: string;
-}
-
-/** Contains information about the execution of a Job in the Azure Batch service. */
-export interface JobExecutionInformation {
-  /** The start time of the Job. This is the time at which the Job was created. */
-  startTime: Date;
-  /** The completion time of the Job. This property is set only if the Job is in the completed state. */
-  endTime?: Date;
-  /** The ID of the Pool to which this Job is assigned. This element contains the actual Pool where the Job is assigned. When you get Job details from the service, they also contain a poolInfo element, which contains the Pool configuration data from when the Job was added or updated. That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. If it does not, it means the Job ran on an auto Pool, and this property contains the ID of that auto Pool. */
-  poolId?: string;
-  /** Details of any error encountered by the service in starting the Job. This property is not set if there was no error starting the Job. */
-  schedulingError?: JobSchedulingError;
-  /** A string describing the reason the Job ended. This property is set only if the Job is in the completed state. If the Batch service terminates the Job, it sets the reason as follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in the Job failed with an exit condition that specified a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to the 'Terminate a Job' operation. */
-  terminateReason?: string;
-}
-
-/** An error encountered by the Batch service when scheduling a Job. 
*/ -export interface JobSchedulingError { - /** The category of the Job scheduling error. */ - category: ErrorCategory; - /** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */ - code?: string; - /** A message describing the Job scheduling error, intended to be suitable for display in a user interface. */ - message?: string; - /** A list of additional error details related to the scheduling error. */ - details?: NameValuePair[]; -} - -/** ErrorCategory enums */ -/** "usererror", "servererror" */ -export type ErrorCategory = string; - -/** Resource usage statistics for a Job. */ -export interface JobStatistics { - /** The URL of the statistics. */ - url: string; - /** The start time of the time range covered by the statistics. */ - startTime: Date; - /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ - lastUpdateTime: Date; - /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. */ - userCPUTime: string; - /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. */ - kernelCPUTime: string; - /** The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */ - wallClockTime: string; - /** The total number of disk read operations made by all Tasks in the Job. */ - readIOps: number; - /** The total number of disk write operations made by all Tasks in the Job. */ - writeIOps: number; - /** The total amount of data in GiB read from disk by all Tasks in the Job. */ - readIOGiB: number; - /** The total amount of data in GiB written to disk by all Tasks in the Job. */ - writeIOGiB: number; - /** The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. */ - numSucceededTasks: number; - /** The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. */ - numFailedTasks: number; - /** The total number of retries on all the Tasks in the Job during the given time range. */ - numTaskRetries: number; - /** The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */ - waitTime: string; -} - -/** Options for updating an Azure Batch Job. */ -export interface BatchJobUpdateOptions { - /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. */ - priority?: number; - /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. 
You can update a job's allowTaskPreemption after it has been created using the update job API. */ - allowTaskPreemption?: boolean; - /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */ - maxParallelTasks?: number; - /** The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. */ - constraints?: JobConstraints; - /** The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. */ - poolInfo?: PoolInformation; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ - onAllTasksComplete?: OnAllTasksComplete; - /** A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. */ - metadata?: MetadataItem[]; -} - -/** Options for disabling an Azure Batch Job. */ -export interface BatchJobDisableOptions { - /** What to do with active Tasks associated with the Job. */ - disableTasks: DisableJobOption; -} - -/** DisableJobOption enums */ -/** "requeue", "terminate", "wait" */ -export type DisableJobOption = string; - -/** Options for terminating an Azure Batch Job. */ -export interface BatchJobTerminateOptions { - /** The text you want to appear as the Job's TerminateReason. The default is 'UserTerminate'. */ - terminateReason?: string; +/** + * The configuration for Compute Nodes in a Pool based on the Azure Cloud Services + * platform. + */ +export interface CloudServiceConfiguration { + /** + * Possible values are: + * 2 - OS Family 2, equivalent to Windows Server 2008 R2 + * SP1. + * 3 - OS Family 3, equivalent to Windows Server 2012. + * 4 - OS Family 4, + * equivalent to Windows Server 2012 R2. + * 5 - OS Family 5, equivalent to Windows + * Server 2016. + * 6 - OS Family 6, equivalent to Windows Server 2019. For more + * information, see Azure Guest OS Releases + * (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). + */ + osFamily: string; + /** The Azure Guest OS version to be installed on the virtual machines in the Pool. The default value is * which specifies the latest operating system version for the specified OS family. */ + osVersion?: string; } -/** Options for creating an Azure Batch Job. */ -export interface BatchJobCreateOptions { - /** A string that uniquely identifies the Job within the Account. 
The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */ - id: string; - /** The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ - displayName?: string; - /** Whether Tasks in the Job can define dependencies on each other. The default is false. */ - usesTaskDependencies?: boolean; - /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */ - priority?: number; - /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */ - allowTaskPreemption?: boolean; - /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */ - maxParallelTasks?: number; - /** The execution constraints for the Job. */ - constraints?: JobConstraints; - /** Details of a Job Manager Task to be launched when the Job is started. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and perform whatever actions are required for the Job.) For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents. */ - jobManagerTask?: JobManagerTask; - /** The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. */ - jobPreparationTask?: JobPreparationTask; - /** The Job Release Task. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. */ - jobReleaseTask?: JobReleaseTask; - /** The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). 
Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */ - commonEnvironmentSettings?: EnvironmentSetting[]; - /** The Pool on which the Batch service runs the Job's Tasks. */ - poolInfo: PoolInformation; - /** The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */ - onAllTasksComplete?: OnAllTasksComplete; - /** The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */ - onTaskFailure?: OnTaskFailure; - /** The network configuration for the Job. */ - networkConfiguration?: JobNetworkConfiguration; - /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ - metadata?: MetadataItem[]; +/** + * The configuration for Compute Nodes in a Pool based on the Azure Virtual + * Machines infrastructure. + */ +export interface VirtualMachineConfiguration { + /** A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. */ + imageReference: ImageReference; + /** The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. */ + nodeAgentSKUId: string; + /** Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. */ + windowsConfiguration?: WindowsConfiguration; + /** The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. 
*/
+  dataDisks?: DataDisk[];
+  /**
+   * This only applies to Images that contain the Windows operating system, and
+   * should only be used when you hold valid on-premises licenses for the Compute
+   * Nodes which will be deployed. If omitted, no on-premises licensing discount is
+   * applied. Values are:
+   *
+   * Windows_Server - The on-premises license is for Windows
+   * Server.
+   * Windows_Client - The on-premises license is for Windows Client.
+   *
+   */
+  licenseType?: string;
+  /** The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job Manager Tasks that run on this Pool must specify the containerSettings property, and all other Tasks may specify it. */
+  containerConfiguration?: ContainerConfiguration;
+  /** The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. */
+  diskEncryptionConfiguration?: DiskEncryptionConfiguration;
+  /** The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated. */
+  nodePlacementConfiguration?: NodePlacementConfiguration;
+  /** The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node. */
+  extensions?: VMExtension[];
+  /** Settings for the operating system disk of the Virtual Machine. */
+  osDisk?: OSDisk;
+}
 
-/** The result of listing the Jobs in an Account. */
-export interface BatchJobListResult {
-  /** The list of Jobs. */
-  value?: BatchJob[];
-  /** The URL to get the next set of results. */
-  "odata.nextLink"?: string;
+/** Windows operating system settings to apply to the virtual machine. */
+export interface WindowsConfiguration {
+  /** Whether automatic updates are enabled on the virtual machine. If omitted, the default value is true. */
+  enableAutomaticUpdates?: boolean;
 }
 
 /**
- * The result of listing the status of the Job Preparation and Job Release Tasks
- * for a Job.
+ * Settings which will be used by the data disks associated with Compute Nodes in
+ * the Pool. When using attached data disks, you need to mount and format the
+ * disks from within a VM to use them.
  */
-export interface BatchJobListPreparationAndReleaseTaskStatusResult {
-  /** A list of Job Preparation and Job Release Task execution information. */
-  value?: JobPreparationAndReleaseTaskExecutionInformation[];
-  /** The URL to get the next set of results. */
-  "odata.nextLink"?: string;
+export interface DataDisk {
+  /** The logical unit number. The lun is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct lun. The value must be between 0 and 63, inclusive. */
+  lun: number;
+  /** The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. */
+  caching?: CachingType;
+  /** The initial disk size in gigabytes. */
+  diskSizeGB: number;
+  /** The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". */
+  storageAccountType?: StorageAccountType;
 }
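+
+// Illustrative usage sketch (not generated code): a VirtualMachineConfiguration
+// with a single empty data disk. The ImageReference fields (publisher, offer,
+// sku) and the node agent SKU value are assumptions based on typical Azure
+// Marketplace images; ImageReference itself is declared elsewhere in this file.
+const exampleVmConfiguration: VirtualMachineConfiguration = {
+  imageReference: {
+    publisher: "canonical",
+    offer: "0001-com-ubuntu-server-jammy",
+    sku: "22_04-lts",
+  },
+  nodeAgentSKUId: "batch.node.ubuntu 22.04",
+  // Each node gets its own empty disk; it must be formatted from within the VM.
+  dataDisks: [
+    { lun: 0, diskSizeGB: 128, caching: "readwrite", storageAccountType: "standard_lrs" },
+  ],
+};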
 
-/** The status of the Job Preparation and Job Release Tasks on a Compute Node. */
-export interface JobPreparationAndReleaseTaskExecutionInformation {
-  /** The ID of the Pool containing the Compute Node to which this entry refers. */
-  poolId?: string;
-  /** The ID of the Compute Node to which this entry refers. */
-  nodeId?: string;
-  /** The URL of the Compute Node to which this entry refers. */
-  nodeUrl?: string;
-  /** Information about the execution status of the Job Preparation Task on this Compute Node. */
-  jobPreparationTaskExecutionInfo?: JobPreparationTaskExecutionInformation;
-  /** Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node. */
-  jobReleaseTaskExecutionInfo?: JobReleaseTaskExecutionInformation;
+/** CachingType enums */
+/** "none", "readonly", "readwrite" */
+export type CachingType = string;
+/** StorageAccountType enums */
+/** "standard_lrs", "premium_lrs" */
+export type StorageAccountType = string;
+
+/** The configuration for container-enabled Pools. */
+export interface ContainerConfiguration {
+  /** The container technology to be used. */
+  type: ContainerType;
+  /** The collection of container Image names. This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. */
+  containerImageNames?: string[];
+  /** Additional private registries from which containers can be pulled. If any Images must be downloaded from a private registry which requires credentials, then those credentials must be provided here. */
+  containerRegistries?: ContainerRegistry[];
 }
 
+/** ContainerType enums */
+/** "dockerCompatible", "criCompatible" */
+export type ContainerType = string;
+
 /**
- * Contains information about the execution of a Job Preparation Task on a Compute
- * Node.
+ * The disk encryption configuration applied on compute nodes in the pool. Disk
+ * encryption configuration is not supported on Linux pool created with Shared
+ * Image Gallery Image.
  */
-export interface JobPreparationTaskExecutionInformation {
-  /** The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. */
-  startTime: Date;
-  /** The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state. */
-  endTime?: Date;
-  /** The current state of the Job Preparation Task on the Compute Node. */
-  state: JobPreparationTaskState;
-  /** The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */
-  taskRootDirectory?: string;
-  /** The URL to the root directory of the Job Preparation Task on the Compute Node. */
-  taskRootDirectoryUrl?: string;
-  /** The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. */
-  exitCode?: number;
-  /** Information about the container under which the Task is executing. 
This property is set only if the Task runs in a container context. */ - containerInfo?: TaskContainerExecutionInformation; - /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ - failureInfo?: TaskFailureInformation; - /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */ - retryCount: number; - /** The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ - lastRetryTime?: Date; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ - result?: TaskExecutionResult; +export interface DiskEncryptionConfiguration { + /** The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. */ + targets?: DiskEncryptionTarget[]; } -/** JobPreparationTaskState enums */ -/** "running", "completed" */ -export type JobPreparationTaskState = string; +/** DiskEncryptionTarget enums */ +/** "osdisk", "temporarydisk" */ +export type DiskEncryptionTarget = string; -/** Contains information about the container which a Task is executing. */ -export interface TaskContainerExecutionInformation { - /** The ID of the container. */ - containerId?: string; - /** The state of the container. This is the state of the container according to the Docker service. It is equivalent to the status field returned by "docker inspect". */ - state?: string; - /** Detailed error information about the container. This is the detailed error string from the Docker service, if available. It is equivalent to the error field returned by "docker inspect". */ - error?: string; +/** + * For regional placement, nodes in the pool will be allocated in the same region. + * For zonal placement, nodes in the pool will be spread across different zones + * with best effort balancing. + */ +export interface NodePlacementConfiguration { + /** Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. */ + policy?: NodePlacementPolicyType; } -/** Information about a Task failure. */ -export interface TaskFailureInformation { - /** The category of the Task error. */ - category: ErrorCategory; - /** An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. */ - code?: string; - /** A message describing the Task error, intended to be suitable for display in a user interface. 
*/
-  message?: string;
-  /** A list of additional details related to the error. */
-  details?: NameValuePair[];
-}
 
-/** TaskExecutionResult enums */
-/** "success", "failure" */
-export type TaskExecutionResult = string;
+/** NodePlacementPolicyType enums */
+/** "regional", "zonal" */
+export type NodePlacementPolicyType = string;
 
-/**
- * Contains information about the execution of a Job Release Task on a Compute
- * Node.
- */
-export interface JobReleaseTaskExecutionInformation {
-  /** The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. */
-  startTime: Date;
-  /** The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. */
-  endTime?: Date;
-  /** The current state of the Job Release Task on the Compute Node. */
-  state: JobReleaseTaskState;
-  /** The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */
-  taskRootDirectory?: string;
-  /** The URL to the root directory of the Job Release Task on the Compute Node. */
-  taskRootDirectoryUrl?: string;
-  /** The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. */
-  exitCode?: number;
-  /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */
-  containerInfo?: TaskContainerExecutionInformation;
-  /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */
-  failureInfo?: TaskFailureInformation;
-  /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */
-  result?: TaskExecutionResult;
-}
+/** Settings for the operating system disk of the compute node (VM). */
+export interface OSDisk {
+  /** Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). */
+  ephemeralOSDiskSettings?: DiffDiskSettings;
+}
 
+/**
+ * Specifies the ephemeral Disk Settings for the operating system disk used by the
+ * compute node (VM).
+ */
+export interface DiffDiskSettings {
+  /** Specifies the ephemeral disk placement for the operating system disk for all VMs in the pool. This property can be used by the user in the request to choose the location, e.g. cache disk space, for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. */
+  placement?: DiffDiskPlacement;
+}
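+
+// Illustrative usage sketch (not generated code): opting a Pool's VMs into an
+// ephemeral OS disk. "cachedisk" is the only placement value documented below.
+const exampleOsDisk: OSDisk = {
+  ephemeralOSDiskSettings: { placement: "cachedisk" },
+};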
 
-/** JobReleaseTaskState enums */
-/** "running", "completed" */
-export type JobReleaseTaskState = string;
+/** DiffDiskPlacement enums */
+/** "cachedisk" */
+export type DiffDiskPlacement = string;
 
-/** The Task and TaskSlot counts for a Job. */
-export interface TaskCountsResult {
-  /** The number of Tasks per state. */
-  taskCounts: TaskCounts;
-  /** The number of TaskSlots required by Tasks per state. */
-  taskSlotCounts: TaskSlotCounts;
+/** Specifies how Tasks should be distributed across Compute Nodes. */
+export interface TaskSchedulingPolicy {
+  /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */
+  nodeFillType: BatchNodeFillType;
 }
 
-/** The Task counts for a Job. */
-export interface TaskCounts {
-  /** The number of Tasks in the active state. */
-  active: number;
-  /** The number of Tasks in the running or preparing state. */
-  running: number;
-  /** The number of Tasks in the completed state. */
-  completed: number;
-  /** The number of Tasks which succeeded. A Task succeeds if its result (found in the executionInfo property) is 'success'. */
-  succeeded: number;
-  /** The number of Tasks which failed. A Task fails if its result (found in the executionInfo property) is 'failure'. */
-  failed: number;
+/** BatchNodeFillType enums */
+/** "spread", "pack" */
+export type BatchNodeFillType = string;
+
+/** The network configuration for a Pool. */
+export interface NetworkConfiguration {
+  /** The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, but for Pools created with cloudServiceConfiguration both ARM and classic virtual networks are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. For Pools created with a cloud service configuration, enable ports 10100, 20100, and 30100. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. */
+  subnetId?: string;
+  /** The scope of dynamic vnet assignment. 
*/
+  dynamicVNetAssignmentScope?: DynamicVNetAssignmentScope;
+  /** The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. */
+  endpointConfiguration?: PoolEndpointConfiguration;
+  /** The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. */
+  publicIPAddressConfiguration?: PublicIpAddressConfiguration;
+  /** Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. */
+  enableAcceleratedNetworking?: boolean;
+}
+
+/** DynamicVNetAssignmentScope enums */
+/** "none", "job" */
+export type DynamicVNetAssignmentScope = string;
+
+/** The endpoint configuration for a Pool. */
+export interface PoolEndpointConfiguration {
+  /** A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. */
+  inboundNATPools: InboundNATPool[];
+}
+
+/**
+ * An inbound NAT Pool that can be used to address specific ports on Compute Nodes
+ * in a Batch Pool externally.
+ */
+export interface InboundNATPool {
+  /** The name of the endpoint. The name must be unique within a Batch Pool and can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. */
+  name: string;
+  /** The protocol of the endpoint. */
+  protocol: InboundEndpointProtocol;
+  /** The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. */
+  backendPort: number;
+  /** The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */
+  frontendPortRangeStart: number;
+  /** The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. */
+  frontendPortRangeEnd: number;
+  /** A list of network security group rules that will be applied to the endpoint. 
The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400. */
+  networkSecurityGroupRules?: NetworkSecurityGroupRule[];
+}
+
+/** A network security group rule to apply to an inbound endpoint. */
+export interface NetworkSecurityGroupRule {
+  /** The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. */
+  priority: number;
+  /** The action that should be taken for a specified IP address, subnet range or tag. */
+  access: NetworkSecurityGroupRuleAccess;
+  /** The source address prefix or tag to match for the rule. Valid values are a single IP address (e.g. 10.10.10.10), an IP subnet (e.g. 192.168.1.0/24), a default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. */
+  sourceAddressPrefix: string;
+  /** The source port ranges to match for the rule. Valid values are '*' (for all ports 0 - 65535), a specific port (e.g. 22), or a port range (e.g. 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). If any other values are provided the request fails with HTTP status code 400. The default value is '*'. */
+  sourcePortRanges?: string[];
 }
 
-/** The TaskSlot counts for a Job. */
-export interface TaskSlotCounts {
-  /** The number of TaskSlots for active Tasks. */
-  active: number;
-  /** The number of TaskSlots for running Tasks. */
-  running: number;
-  /** The number of TaskSlots for completed Tasks. */
-  completed: number;
-  /** The number of TaskSlots for succeeded Tasks. */
-  succeeded: number;
-  /** The number of TaskSlots for failed Tasks. */
-  failed: number;
+/** NetworkSecurityGroupRuleAccess enums */
+/** "allow", "deny" */
+export type NetworkSecurityGroupRuleAccess = string;
+
+/** The public IP Address configuration of the networking configuration of a Pool. */
+export interface PublicIpAddressConfiguration {
+  /** The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. */
+  provision?: IPAddressProvisioningType;
+  /** The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. */
+  ipAddressIds?: string[];
}
 
+/** IPAddressProvisioningType enums */
+/** "batchmanaged", "usermanaged", "nopublicipaddresses" */
+export type IPAddressProvisioningType = string;
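+
+// Illustrative usage sketch (not generated code): a Pool NetworkConfiguration
+// that exposes a service on each node through an inbound NAT pool and restricts
+// access with a single NSG rule. The subscription, resource group, and VNet
+// segments of subnetId are placeholders; "tcp" assumes the
+// InboundEndpointProtocol values declared elsewhere in this file.
+const exampleNetworkConfiguration: NetworkConfiguration = {
+  subnetId:
+    "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/default",
+  endpointConfiguration: {
+    inboundNATPools: [
+      {
+        name: "ssh",
+        protocol: "tcp",
+        backendPort: 2222, // 22 itself is reserved, per the doc comment above
+        frontendPortRangeStart: 15000,
+        frontendPortRangeEnd: 15100, // a range must contain at least 40 ports
+        networkSecurityGroupRules: [
+          { priority: 150, access: "allow", sourceAddressPrefix: "10.0.0.0/24" },
+        ],
+      },
+    ],
+  },
+};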
 
 /**
- * A Certificate that can be installed on Compute Nodes and can be used to
- * authenticate operations on the machine.
+ * Properties used to create a user used to execute Tasks on an Azure Batch
+ * Compute Node.
  */
-export interface BatchCertificate {
-  /** The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). */
-  thumbprint: string;
-  /** The algorithm used to derive the thumbprint. This must be sha1. */
-  thumbprintAlgorithm: string;
-  /** The URL of the Certificate. */
-  readonly url?: string;
-  /** The state of the Certificate. */
-  readonly state?: CertificateState;
-  /** The time at which the Certificate entered its current state. */
-  readonly stateTransitionTime?: Date;
-  /** The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. */
-  readonly previousState?: CertificateState;
-  /** The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. */
-  readonly previousStateTransitionTime?: Date;
-  /** The public part of the Certificate as a base-64 encoded .cer file. */
-  readonly publicData?: Uint8Array;
-  /** The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. */
-  readonly deleteCertificateError?: DeleteCertificateError;
-  /** The base64-encoded contents of the Certificate. The maximum size is 10KB. */
-  data: Uint8Array;
-  /** The format of the Certificate data. */
-  certificateFormat?: CertificateFormat;
-  /** The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. */
-  password?: string;
+export interface UserAccount {
+  /** The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. */
+  name: string;
+  /** The password for the user Account. */
+  password: string;
+  /** The elevation level of the user Account. The default value is nonAdmin. */
+  elevationLevel?: ElevationLevel;
+  /** The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options. */
+  linuxUserConfiguration?: LinuxUserConfiguration;
+  /** The Windows-specific user configuration for the user Account. This property can only be specified if the user is on a Windows Pool. If not specified and on a Windows Pool, the user is created with the default options. */
+  windowsUserConfiguration?: WindowsUserConfiguration;
 }
 
-/** CertificateState enums */
-/** "active", "deleting", "deletefailed" */
-export type CertificateState = string;
-
-/** An error encountered by the Batch service when deleting a Certificate. */
-export interface DeleteCertificateError {
-  /** An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. */
-  code?: string;
-  /** A message describing the Certificate deletion error, intended to be suitable for display in a user interface. */
-  message?: string;
-  /** A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. */
-  values?: NameValuePair[];
+/** Properties used to create a user Account on a Linux Compute Node. */
+export interface LinuxUserConfiguration {
+  /** The user ID of the user Account. 
The uid and gid properties must be specified together or not at all. If not specified, the underlying operating system picks the uid. */
+  uid?: number;
+  /** The group ID for the user Account. The uid and gid properties must be specified together or not at all. If not specified, the underlying operating system picks the gid. */
+  gid?: number;
+  /** The SSH private key for the user Account. The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between Compute Nodes (no modification of the user's .ssh directory is done). */
+  sshPrivateKey?: string;
+}
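+
+// Illustrative usage sketch (not generated code): a non-admin Linux user with a
+// fixed uid/gid pair (the two must be set together, per the doc comments above).
+// The "nonadmin" value assumes the ElevationLevel enum declared elsewhere in
+// this file; the password is a placeholder.
+const exampleUserAccount: UserAccount = {
+  name: "batchuser",
+  password: "<placeholder>",
+  elevationLevel: "nonadmin",
+  linuxUserConfiguration: { uid: 1001, gid: 1001 },
+};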
 
-/** CertificateFormat enums */
-/** "pfx", "cer" */
-export type CertificateFormat = string;
-
-/** The result of listing the Certificates in the Account. */
-export interface CertificateListResult {
-  /** The list of Certificates. */
-  value?: BatchCertificate[];
-  /** The URL to get the next set of results. */
-  "odata.nextLink"?: string;
+/** Properties used to create a user Account on a Windows Compute Node. */
+export interface WindowsUserConfiguration {
+  /** The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch' and for CloudServiceConfiguration Pools is 'interactive'. */
+  loginMode?: LoginMode;
 }
 
+/** LoginMode enums */
+/** "batch", "interactive" */
+export type LoginMode = string;
+
 /**
- * A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a
- * specification used to create each Job.
+ * The Batch service does not assign any meaning to this metadata; it is solely
+ * for the use of user code.
  */
-export interface BatchJobSchedule {
-  /** A string that uniquely identifies the schedule within the Account. */
-  readonly id?: string;
-  /** The display name for the schedule. */
-  readonly displayName?: string;
-  /** The URL of the Job Schedule. */
-  readonly url?: string;
-  /** The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can be pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime. */
-  readonly eTag?: string;
-  /** The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state. */
-  readonly lastModified?: Date;
-  /** The creation time of the Job Schedule. */
-  readonly creationTime?: Date;
-  /** The current state of the Job Schedule. */
-  readonly state?: JobScheduleState;
-  /** The time at which the Job Schedule entered the current state. */
-  readonly stateTransitionTime?: Date;
-  /** The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state. */
-  readonly previousState?: JobScheduleState;
-  /** The time at which the Job Schedule entered its previous state. This property is not present if the Job Schedule is in its initial active state. */
-  readonly previousStateTransitionTime?: Date;
-  /** The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. */
-  schedule: Schedule;
-  /** The details of the Jobs to be created on this schedule. */
-  jobSpecification: JobSpecification;
-  /** Information about Jobs that have been and will be run under this schedule. */
-  readonly executionInfo?: JobScheduleExecutionInformation;
-  /** A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
-  metadata?: MetadataItem[];
-  /** The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */
-  readonly stats?: JobScheduleStatistics;
+export interface MetadataItem {
+  /** The name of the metadata item. */
+  name: string;
+  /** The value of the metadata item. */
+  value: string;
 }
 
-/** JobScheduleState enums */
-/** "active", "completed", "disabled", "terminating", "deleting" */
-export type JobScheduleState = string;
+/** The file system to mount on each node. */
+export interface MountConfiguration {
+  /** The Azure Storage Container to mount using blob FUSE on each node. This property is mutually exclusive with all other properties. */
+  azureBlobFileSystemConfiguration?: AzureBlobFileSystemConfiguration;
+  /** The NFS file system to mount on each node. This property is mutually exclusive with all other properties. */
+  nfsMountConfiguration?: NfsMountConfiguration;
+  /** The CIFS/SMB file system to mount on each node. This property is mutually exclusive with all other properties. */
+  cifsMountConfiguration?: CifsMountConfiguration;
+  /** The Azure File Share to mount on each node. This property is mutually exclusive with all other properties. */
+  azureFileShareConfiguration?: AzureFileShareConfiguration;
+}
 
-/**
- * The schedule according to which Jobs will be created. All times are fixed
- * respective to UTC and are not impacted by daylight saving time.
- */
-export interface Schedule {
-  /** The earliest time at which any Job may be created under this Job Schedule. If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs immediately. */
-  doNotRunUntil?: Date;
-  /** A time after which no Job will be created under this Job Schedule. The schedule will move to the completed state as soon as this deadline is past and there is no active Job under this Job Schedule. If you do not specify a doNotRunAfter time, and you are creating a recurring Job Schedule, the Job Schedule will remain active until you explicitly terminate it. */
-  doNotRunAfter?: Date;
-  /** The time interval, starting from the time at which the schedule indicates a Job should be created, within which a Job must be created. If a Job is not created within the startWindow interval, then the 'opportunity' is lost; no Job will be created until the next recurrence of the schedule. If the schedule is recurring, and the startWindow is longer than the recurrence interval, then this is equivalent to an infinite startWindow, because the Job that is 'due' in one recurrenceInterval is not carried forward into the next recurrence interval. The default is infinite. The minimum value is 1 minute. 
If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
-  startWindow?: string;
-  /** The time interval between the start times of two successive Jobs under the Job Schedule. A Job Schedule can have at most one active Job under it at any given time. Because a Job Schedule can have at most one active Job under it at any given time, if it is time to create a new Job under a Job Schedule, but the previous Job is still running, the Batch service will not create the new Job until the previous Job finishes. If the previous Job does not finish within the startWindow period of the new recurrenceInterval, then no new Job will be scheduled for that interval. For recurring Jobs, you should normally specify a jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you will need an external process to monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs ready for the next recurrence. The default is that the schedule does not recur: one Job is created, within the startWindow after the doNotRunUntil time, and the schedule is complete as soon as that Job finishes. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
-  recurrenceInterval?: string;
+/** Information used to connect to an Azure Storage Container using Blobfuse. */
+export interface AzureBlobFileSystemConfiguration {
+  /** The Azure Storage Account name. */
+  accountName: string;
+  /** The Azure Blob Storage Container name. */
+  containerName: string;
+  /** The Azure Storage Account key. This property is mutually exclusive with both sasKey and identity; exactly one must be specified. */
+  accountKey?: string;
+  /** The Azure Storage SAS token. This property is mutually exclusive with both accountKey and identity; exactly one must be specified. */
+  sasKey?: string;
+  /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */
+  blobfuseOptions?: string;
+  /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */
+  relativeMountPath: string;
+  /** The reference to the user assigned identity to use to access containerName. This property is mutually exclusive with both accountKey and sasKey; exactly one must be specified. */
+  identityReference?: BatchNodeIdentityReference;
 }
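+
+// Illustrative usage sketch (not generated code): mounting a blob container on
+// every node via blobfuse, authenticating with a SAS token (exactly one of
+// accountKey, sasKey, or identityReference may be set). The account, container,
+// and token values are placeholders.
+const exampleMount: MountConfiguration = {
+  azureBlobFileSystemConfiguration: {
+    accountName: "mystorageaccount",
+    containerName: "inputs",
+    sasKey: "<sas-token>",
+    relativeMountPath: "inputs", // mounted under AZ_BATCH_NODE_MOUNTS_DIR
+  },
+};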
 
-/** Specifies details of the Jobs to be created on a schedule. */
-export interface JobSpecification {
-  /** The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created using by using the update Job API. */
-  priority?: number;
-  /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
-  allowTaskPreemption?: boolean;
-  /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
-  maxParallelTasks?: number;
-  /** The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
-  displayName?: string;
-  /** Whether Tasks in the Job can define dependencies on each other. The default is false. */
-  usesTaskDependencies?: boolean;
-  /** The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */
-  onAllTasksComplete?: OnAllTasksComplete;
-  /** The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */
-  onTaskFailure?: OnTaskFailure;
-  /** The network configuration for the Job. */
-  networkConfiguration?: JobNetworkConfiguration;
-  /** The execution constraints for Jobs created under this schedule. */
-  constraints?: JobConstraints;
-  /** The details of a Job Manager Task to be launched when a Job is started under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. */
-  jobManagerTask?: JobManagerTask;
-  /** The Job Preparation Task for Jobs created under this schedule. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. */
-  jobPreparationTask?: JobPreparationTask;
-  /** The Job Release Task for Jobs created under this schedule. The primary purpose of the Job Release Task is to undo changes to Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes that have run the Job Preparation Task. */
-  jobReleaseTask?: JobReleaseTask;
-  /** A list of common environment variable settings. These environment variables are set for all Tasks in Jobs created under this schedule (including the Job Manager, Job Preparation and Job Release Tasks). 
Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */ - commonEnvironmentSettings?: EnvironmentSetting[]; - /** The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. */ - poolInfo: PoolInformation; - /** A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ - metadata?: MetadataItem[]; +/** Information used to connect to an NFS file system. */ +export interface NfsMountConfiguration { + /** The URI of the file system to mount. */ + source: string; + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ + relativeMountPath: string; + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ + mountOptions?: string; +} + +/** Information used to connect to a CIFS file system. */ +export interface CifsMountConfiguration { + /** The user to use for authentication against the CIFS file system. */ + username: string; + /** The URI of the file system to mount. */ + source: string; + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ + relativeMountPath: string; + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ + mountOptions?: string; + /** The password to use for authentication against the CIFS file system. */ + password: string; +} + +/** Information used to connect to an Azure Fileshare. */ +export interface AzureFileShareConfiguration { + /** The Azure Storage account name. */ + accountName: string; + /** The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. */ + azureFileUrl: string; + /** The Azure Storage account key. */ + accountKey: string; + /** The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. */ + relativeMountPath: string; + /** Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. */ + mountOptions?: string; } +/** NodeCommunicationMode enums */ +/** "default", "classic", "simplified" */ +export type NodeCommunicationMode = string; + /** * Contains information about Jobs that have been and will be run under a Job * Schedule. @@ -1952,455 +1787,518 @@ export interface BatchJobScheduleListResult { "odata.nextLink"?: string; } -/** Options for creating an Azure Batch Task. */ -export interface BatchTaskCreateOptions { - /** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). */ - id: string; - /** A display name for the Task. 
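The mount configuration interfaces above are plain data shapes that a pool definition embeds directly. A minimal sketch of two entries, using a hypothetical storage account, SAS token, and NFS export (the pool-level wrapper that carries these entries is not shown in this hunk):

const blobMount: AzureBlobFileSystemConfiguration = {
  accountName: "examplestorage",     // hypothetical Storage Account name
  containerName: "input-data",
  sasKey: "<sas-token>",             // exactly one of accountKey, sasKey, or identity
  relativeMountPath: "blobdata",     // resolved under AZ_BATCH_NODE_MOUNTS_DIR on the node
};

const nfsMount: NfsMountConfiguration = {
  source: "10.0.0.4:/exports/share", // hypothetical NFS export
  relativeMountPath: "nfsdata",
  mountOptions: "-o vers=3",         // passed straight to 'mount' on Linux
};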
The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ - displayName?: string; - /** How the Batch service should respond when the Task completes. */ - exitConditions?: ExitConditions; - /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */ - commandLine: string; - /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */ - containerSettings?: TaskContainerSettings; - /** A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ - resourceFiles?: ResourceFile[]; - /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ - outputFiles?: OutputFile[]; - /** A list of environment variable settings for the Task. */ - environmentSettings?: EnvironmentSetting[]; - /** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. */ - affinityInfo?: AffinityInformation; - /** The execution constraints that apply to this Task. If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days. */ - constraints?: TaskConstraints; - /** The number of scheduling slots that the Task required to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */ - requiredSlots?: number; - /** The user identity under which the Task runs. 
If omitted, the Task runs as a non-administrative user unique to the Task. */ - userIdentity?: UserIdentity; - /** An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. */ - multiInstanceSettings?: MultiInstanceSettings; - /** The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob. */ - dependsOn?: TaskDependencies; - /** A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. */ - applicationPackageReferences?: ApplicationPackageReference[]; - /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */ - authenticationTokenSettings?: AuthenticationTokenSettings; -} - -/** Specifies how the Batch service should respond when the Task completes. */ -export interface ExitConditions { - /** A list of individual Task exit codes and how the Batch service should respond to them. */ - exitCodes?: ExitCodeMapping[]; - /** A list of Task exit code ranges and how the Batch service should respond to them. */ - exitCodeRanges?: ExitCodeRangeMapping[]; - /** How the Batch service should respond if the Task fails to start due to an error. */ - preProcessingError?: ExitOptions; - /** How the Batch service should respond if a file upload error occurs. If the Task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file upload error, then the action specified by the exit code takes precedence. */ - fileUploadError?: ExitOptions; - /** How the Batch service should respond if the Task fails with an exit condition not covered by any of the other properties. This value is used if the Task exits with any nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want non-default behavior on exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection. */ - default?: ExitOptions; -} - -/** - * How the Batch service should respond if a Task exits with a particular exit - * code. - */ -export interface ExitCodeMapping { - /** A process exit code. 
*/ - code: number; - /** How the Batch service should respond if the Task exits with this exit code. */ - exitOptions: ExitOptions; -} - -/** Specifies how the Batch service responds to a particular exit condition. */ -export interface ExitOptions { - /** An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ - jobAction?: JobAction; - /** An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. */ - dependencyAction?: DependencyAction; -} - -/** JobAction enums */ -/** "none", "disable", "terminate" */ -export type JobAction = string; -/** DependencyAction enums */ -/** "satisfy", "block" */ -export type DependencyAction = string; - -/** - * A range of exit codes and how the Batch service should respond to exit codes - * within that range. - */ -export interface ExitCodeRangeMapping { - /** The first exit code in the range. */ - start: number; - /** The last exit code in the range. */ - end: number; - /** How the Batch service should respond if the Task exits with an exit code in the range start to end (inclusive). */ - exitOptions: ExitOptions; -} - /** - * A locality hint that can be used by the Batch service to select a Compute Node - * on which to start a Task. + * A Certificate that can be installed on Compute Nodes and can be used to + * authenticate operations on the machine. */ -export interface AffinityInformation { - /** An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */ - affinityId: string; +export interface BatchCertificate { + /** The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). */ + thumbprint: string; + /** The algorithm used to derive the thumbprint. This must be sha1. */ + thumbprintAlgorithm: string; + /** The URL of the Certificate. */ + readonly url?: string; + /** The state of the Certificate. */ + readonly state?: CertificateState; + /** The time at which the Certificate entered its current state. */ + readonly stateTransitionTime?: Date; + /** The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. */ + readonly previousState?: CertificateState; + /** The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. */ + readonly previousStateTransitionTime?: Date; + /** The public part of the Certificate as a base-64 encoded .cer file. */ + readonly publicData?: Uint8Array; + /** The error that occurred on the last attempt to delete this Certificate. 
This property is set only if the Certificate is in the DeleteFailed state. */ + readonly deleteCertificateError?: DeleteCertificateError; + /** The base64-encoded contents of the Certificate. The maximum size is 10KB. */ + data: Uint8Array; + /** The format of the Certificate data. */ + certificateFormat?: CertificateFormat; + /** The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. */ + password?: string; } -/** - * Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, - * if any of the subtasks fail (for example due to exiting with a non-zero exit - * code) the entire multi-instance Task fails. The multi-instance Task is then - * terminated and retried, up to its retry limit. - */ -export interface MultiInstanceSettings { - /** The number of Compute Nodes required by the Task. If omitted, the default is 1. */ - numberOfInstances?: number; - /** The command line to run on all the Compute Nodes to enable them to coordinate when the primary runs the main Task command. A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages. */ - coordinationCommandLine: string; - /** A list of files that the Batch service will download before running the coordination command line. The difference between common resource files and Task resource files is that common resource files are downloaded for all subtasks including the primary, whereas Task resource files are downloaded only for the primary. Also note that these resource files are not downloaded to the Task working directory, but instead are downloaded to the Task root directory (one directory above the working directory). There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. */ - commonResourceFiles?: ResourceFile[]; -} +/** CertificateState enums */ +/** "active", "deleting", "deletefailed" */ +export type CertificateState = string; -/** - * Specifies any dependencies of a Task. Any Task that is explicitly specified or - * within a dependency range must complete before the dependant Task will be - * scheduled. - */ -export interface TaskDependencies { - /** The list of Task IDs that this Task depends on. All Tasks in this list must complete successfully before the dependent Task can be scheduled. The taskIds collection is limited to 64000 characters total (i.e. the combined length of all Task IDs). If the taskIds collection exceeds the maximum length, the Add Task request fails with error code TaskDependencyListTooLong. In this case consider using Task ID ranges instead. */ - taskIds?: string[]; - /** The list of Task ID ranges that this Task depends on. All Tasks in all ranges must complete successfully before the dependent Task can be scheduled. */ - taskIdRanges?: TaskIdRange[]; +/** An error encountered by the Batch service when deleting a Certificate. */ +export interface DeleteCertificateError { + /** An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the Certificate deletion error, intended to be suitable for display in a user interface. 
 */
+  message?: string;
+  /** A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. */
+  values?: NameValuePair[];
 }

-/**
- * The start and end of the range are inclusive. For example, if a range has start
- * 9 and end 12, then it represents Tasks '9', '10', '11' and '12'.
- */
-export interface TaskIdRange {
-  /** The first Task ID in the range. */
-  start: number;
-  /** The last Task ID in the range. */
-  end: number;
-}
+/** CertificateFormat enums */
+/** "pfx", "cer" */
+export type CertificateFormat = string;

-/** The result of listing the Tasks in a Job. */
-export interface BatchTaskListResult {
-  /** The list of Tasks. */
-  value?: BatchTask[];
+/** The result of listing the Certificates in the Account. */
+export interface CertificateListResult {
+  /** The list of Certificates. */
+  value?: BatchCertificate[];
   /** The URL to get the next set of results. */
   "odata.nextLink"?: string;
 }
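On the add side, only the writable members of BatchCertificate are supplied; url, state, publicData and the other readonly members are populated by the service. A minimal sketch with placeholder values (the bytes would be read from a real .pfx file):

const pfxBytes = new Uint8Array(0); // stand-in for the actual .pfx contents
const certificate: BatchCertificate = {
  thumbprint: "0123456789abcdef0123456789abcdef01234567", // up to 40 hex digits
  thumbprintAlgorithm: "sha1",      // the only algorithm the service accepts
  data: pfxBytes,
  certificateFormat: "pfx",
  password: "placeholder-password", // must be omitted when certificateFormat is "cer"
};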
-/**
- * Batch will retry Tasks when a recovery operation is triggered on a Node.
- * Examples of recovery operations include (but are not limited to) when an
- * unhealthy Node is rebooted or a Compute Node disappeared due to host failure.
- * Retries due to recovery operations are independent of and are not counted
- * against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal
- * retry due to a recovery operation may occur. Because of this, all Tasks should
- * be idempotent. This means Tasks need to tolerate being interrupted and
- * restarted without causing any corruption or duplicate data. The best practice
- * for long running Tasks is to use some form of checkpointing.
- */
-export interface BatchTask {
-  /** A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. */
+/** An Azure Batch Job. */
+export interface BatchJob {
+  /** A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */
   readonly id?: string;
-  /** A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
+  /** The display name for the Job. */
   readonly displayName?: string;
-  /** The URL of the Task. */
+  /** Whether Tasks in the Job can define dependencies on each other. The default is false. */
+  readonly usesTaskDependencies?: boolean;
+  /** The URL of the Job. */
   readonly url?: string;
-  /** The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can be pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. */
+  /** The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime. */
   readonly eTag?: string;
-  /** The last modified time of the Task. */
+  /** The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state. */
   readonly lastModified?: Date;
-  /** The creation time of the Task. */
+  /** The creation time of the Job. */
   readonly creationTime?: Date;
-  /** How the Batch service should respond when the Task completes. */
-  readonly exitConditions?: ExitConditions;
-  /** The current state of the Task. */
-  readonly state?: TaskState;
-  /** The time at which the Task entered its current state. */
+  /** The current state of the Job. */
+  readonly state?: JobState;
+  /** The time at which the Job entered its current state. */
   readonly stateTransitionTime?: Date;
-  /** The previous state of the Task. This property is not set if the Task is in its initial Active state. */
-  readonly previousState?: TaskState;
-  /** The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. */
+  /** The previous state of the Job. This property is not set if the Job is in its initial Active state. */
+  readonly previousState?: JobState;
+  /** The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state. */
   readonly previousStateTransitionTime?: Date;
-  /** The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). */
-  readonly commandLine?: string;
-  /** The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. */
-  readonly containerSettings?: TaskContainerSettings;
-  /** A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.
*/ - readonly resourceFiles?: ResourceFile[]; - /** A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. */ - readonly outputFiles?: OutputFile[]; - /** A list of environment variable settings for the Task. */ - readonly environmentSettings?: EnvironmentSetting[]; - /** A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. */ - readonly affinityInfo?: AffinityInformation; - /** The execution constraints that apply to this Task. */ - constraints?: TaskConstraints; - /** The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. */ - readonly requiredSlots?: number; - /** The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. */ - readonly userIdentity?: UserIdentity; - /** Information about the execution of the Task. */ - readonly executionInfo?: TaskExecutionInformation; - /** Information about the Compute Node on which the Task ran. */ - readonly nodeInfo?: BatchNodeInformation; - /** An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. */ - readonly multiInstanceSettings?: MultiInstanceSettings; - /** Resource usage statistics for the Task. */ - readonly stats?: TaskStatistics; - /** The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. */ - readonly dependsOn?: TaskDependencies; - /** A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. */ - readonly applicationPackageReferences?: ApplicationPackageReference[]; - /** The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. */ - readonly authenticationTokenSettings?: AuthenticationTokenSettings; + /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */ + priority?: number; + /** Whether Tasks in this job can be preempted by other high priority jobs. 
If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+  allowTaskPreemption?: boolean;
+  /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+  maxParallelTasks?: number;
+  /** The execution constraints for the Job. */
+  constraints?: JobConstraints;
+  /** Details of a Job Manager Task to be launched when the Job is started. */
+  readonly jobManagerTask?: JobManagerTask;
+  /** The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job. */
+  readonly jobPreparationTask?: JobPreparationTask;
+  /** The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job. */
+  readonly jobReleaseTask?: JobReleaseTask;
+  /** The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */
+  readonly commonEnvironmentSettings?: EnvironmentSetting[];
+  /** The Pool settings associated with the Job. */
+  poolInfo: PoolInformation;
+  /** The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. */
+  onAllTasksComplete?: OnAllTasksComplete;
+  /** The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */
+  readonly onTaskFailure?: OnTaskFailure;
+  /** The network configuration for the Job. */
+  readonly networkConfiguration?: JobNetworkConfiguration;
+  /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
+  metadata?: MetadataItem[];
+  /** The execution information for the Job. */
+  readonly executionInfo?: JobExecutionInformation;
+  /** Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */
+  readonly stats?: JobStatistics;
 }

-/** TaskState enums */
-/** "active", "preparing", "running", "completed" */
-export type TaskState = string;
+/** JobState enums */
+/** "active", "disabling", "disabled", "enabling", "terminating", "completed", "deleting" */
+export type JobState = string;

-/** Information about the execution of a Task. */
-export interface TaskExecutionInformation {
-  /** The time at which the Task started running.
'Running' corresponds to the running state, so if the Task specifies resource files or Packages, then the start time reflects the time at which the Task started downloading or deploying these. If the Task has been restarted or retried, this is the most recent time at which the Task started running. This property is present only for Tasks that are in the running or completed state. */ - startTime?: Date; - /** The time at which the Task completed. This property is set only if the Task is in the Completed state. */ +/** Contains information about the execution of a Job in the Azure Batch service. */ +export interface JobExecutionInformation { + /** The start time of the Job. This is the time at which the Job was created. */ + startTime: Date; + /** The completion time of the Job. This property is set only if the Job is in the completed state. */ endTime?: Date; - /** The exit code of the program specified on the Task command line. This property is set only if the Task is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the Task (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ - exitCode?: number; - /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ - containerInfo?: TaskContainerExecutionInformation; - /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ - failureInfo?: TaskFailureInformation; - /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */ - retryCount: number; - /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ - lastRetryTime?: Date; - /** The number of times the Task has been requeued by the Batch service as the result of a user request. When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be requeued for execution. This count tracks how many times the Task has been requeued for these reasons. */ - requeueCount: number; - /** The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero. */ - lastRequeueTime?: Date; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ - result?: TaskExecutionResult; + /** The ID of the Pool to which this Job is assigned. This element contains the actual Pool where the Job is assigned. 
When you get Job details from the service, they also contain a poolInfo element, which contains the Pool configuration data from when the Job was added or updated. That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. If it does not, it means the Job ran on an auto Pool, and this property contains the ID of that auto Pool. */ + poolId?: string; + /** Details of any error encountered by the service in starting the Job. This property is not set if there was no error starting the Job. */ + schedulingError?: JobSchedulingError; + /** A string describing the reason the Job ended. This property is set only if the Job is in the completed state. If the Batch service terminates the Job, it sets the reason as follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in the Job failed with an exit condition that specified a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to the 'Terminate a Job' operation. */ + terminateReason?: string; } -/** Information about the Compute Node on which a Task ran. */ -export interface BatchNodeInformation { - /** An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. */ - affinityId?: string; - /** The URL of the Compute Node on which the Task ran. */ - nodeUrl?: string; - /** The ID of the Pool on which the Task ran. */ - poolId?: string; - /** The ID of the Compute Node on which the Task ran. */ - nodeId?: string; - /** The root directory of the Task on the Compute Node. */ - taskRootDirectory?: string; - /** The URL to the root directory of the Task on the Compute Node. */ - taskRootDirectoryUrl?: string; +/** An error encountered by the Batch service when scheduling a Job. */ +export interface JobSchedulingError { + /** The category of the Job scheduling error. */ + category: ErrorCategory; + /** An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the Job scheduling error, intended to be suitable for display in a user interface. */ + message?: string; + /** A list of additional error details related to the scheduling error. */ + details?: NameValuePair[]; } -/** Resource usage statistics for a Task. */ -export interface TaskStatistics { +/** Resource usage statistics for a Job. */ +export interface JobStatistics { /** The URL of the statistics. */ url: string; /** The start time of the time range covered by the statistics. */ startTime: Date; /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ lastUpdateTime: Date; - /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. */ + /** The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. */ userCPUTime: string; - /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. 
*/ + /** The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. */ kernelCPUTime: string; - /** The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. */ + /** The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. */ wallClockTime: string; - /** The total number of disk read operations made by the Task. */ + /** The total number of disk read operations made by all Tasks in the Job. */ readIOps: number; - /** The total number of disk write operations made by the Task. */ + /** The total number of disk write operations made by all Tasks in the Job. */ writeIOps: number; - /** The total gibibytes read from disk by the Task. */ + /** The total amount of data in GiB read from disk by all Tasks in the Job. */ readIOGiB: number; - /** The total gibibytes written to disk by the Task. */ + /** The total amount of data in GiB written to disk by all Tasks in the Job. */ writeIOGiB: number; - /** The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). */ + /** The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. */ + numSucceededTasks: number; + /** The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. */ + numFailedTasks: number; + /** The total number of retries on all the Tasks in the Job during the given time range. */ + numTaskRetries: number; + /** The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. */ waitTime: string; } -/** A collection of Azure Batch Tasks to add. */ -export interface BatchTaskCollection { - /** The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. */ - value: BatchTaskCreateOptions[]; +/** Options for updating an Azure Batch Job. */ +export interface BatchJobUpdateOptions { + /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. */ + priority?: number; + /** Whether Tasks in this job can be preempted by other high priority jobs. 
If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+  allowTaskPreemption?: boolean;
+  /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+  maxParallelTasks?: number;
+  /** The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. */
+  constraints?: JobConstraints;
+  /** The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. */
+  poolInfo?: PoolInformation;
+  /** The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */
+  onAllTasksComplete?: OnAllTasksComplete;
+  /** A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. */
+  metadata?: MetadataItem[];
 }

-/** The result of adding a collection of Tasks to a Job. */
-export interface TaskAddCollectionResult {
-  /** The results of the add Task collection operation. */
-  value?: TaskAddResult[];
+/** Options for disabling an Azure Batch Job. */
+export interface BatchJobDisableOptions {
+  /** What to do with active Tasks associated with the Job. */
+  disableTasks: DisableJobOption;
 }

-/** Result for a single Task added as part of an add Task collection operation. */
-export interface TaskAddResult {
-  /** The status of the add Task request. */
-  status: TaskAddStatus;
-  /** The ID of the Task for which this is the result. */
-  taskId: string;
-  /** The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can be pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime. */
-  eTag?: string;
-  /** The last modified time of the Task. */
-  lastModified?: Date;
-  /** The URL of the Task, if the Task was successfully added. */
-  location?: string;
-  /** The error encountered while attempting to add the Task. */
-  error?: BatchError;
-}
+/** DisableJobOption enums */
+/** "requeue", "terminate", "wait" */
+export type DisableJobOption = string;
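Because DisableJobOption is an open string union, a disable request body reduces to a single field; a small sketch (the client method that would send it is outside this file):

// 'requeue' returns running Tasks to the queue so they rerun when the Job is re-enabled;
// 'terminate' stops them immediately; 'wait' lets them finish before the Job is disabled.
const disableOptions: BatchJobDisableOptions = { disableTasks: "requeue" };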
+/** Options for terminating an Azure Batch Job. */
+export interface BatchJobTerminateOptions {
+  /** The text you want to appear as the Job's TerminateReason. The default is 'UserTerminate'. */
+  terminateReason?: string;
+}
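The reason given here later surfaces as JobExecutionInformation.terminateReason; a one-line sketch with a made-up reason string:

const terminateOptions: BatchJobTerminateOptions = {
  terminateReason: "NoMoreWork", // user-defined; defaults to 'UserTerminate' when omitted
};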
+/** Options for creating an Azure Batch Job. */
+export interface BatchJobCreateOptions {
+  /** A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */
+  id: string;
+  /** The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */
+  displayName?: string;
+  /** Whether Tasks in the Job can define dependencies on each other. The default is false. */
+  usesTaskDependencies?: boolean;
+  /** The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. */
+  priority?: number;
+  /** Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. */
+  allowTaskPreemption?: boolean;
+  /** The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. */
+  maxParallelTasks?: number;
+  /** The execution constraints for the Job. */
+  constraints?: JobConstraints;
+  /** Details of a Job Manager Task to be launched when the Job is started. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and can perform whatever actions are required for the Job.) For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents. */
+  jobManagerTask?: JobManagerTask;
+  /** The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. */
+  jobPreparationTask?: JobPreparationTask;
+  /** The Job Release Task. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. */
+  jobReleaseTask?: JobReleaseTask;
+  /** The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. */
+  commonEnvironmentSettings?: EnvironmentSetting[];
+  /** The Pool on which the Batch service runs the Job's Tasks. */
+  poolInfo: PoolInformation;
+  /** The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. */
+  onAllTasksComplete?: OnAllTasksComplete;
+  /** The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. */
+  onTaskFailure?: OnTaskFailure;
+  /** The network configuration for the Job. */
+  networkConfiguration?: JobNetworkConfiguration;
+  /** A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */
+  metadata?: MetadataItem[];
+}
+
+/** The result of listing the Jobs in an Account. */
+export interface BatchJobListResult {
+  /** The list of Jobs. */
+  value?: BatchJob[];
+  /** The URL to get the next set of results. */
+  "odata.nextLink"?: string;
+}
+
+/**
+ * The result of listing the status of the Job Preparation and Job Release Tasks
+ * for a Job.
+ */
+export interface BatchJobListPreparationAndReleaseTaskStatusResult {
+  /** A list of Job Preparation and Job Release Task execution information. */
+  value?: JobPreparationAndReleaseTaskExecutionInformation[];
+  /** The URL to get the next set of results. */
+  "odata.nextLink"?: string;
+}
+
+/** The status of the Job Preparation and Job Release Tasks on a Compute Node. */
+export interface JobPreparationAndReleaseTaskExecutionInformation {
+  /** The ID of the Pool containing the Compute Node to which this entry refers. */
+  poolId?: string;
+  /** The ID of the Compute Node to which this entry refers. */
+  nodeId?: string;
+  /** The URL of the Compute Node to which this entry refers. */
+  nodeUrl?: string;
+  /** Information about the execution status of the Job Preparation Task on this Compute Node. */
+  jobPreparationTaskExecutionInfo?: JobPreparationTaskExecutionInformation;
+  /** Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node. */
+  jobReleaseTaskExecutionInfo?: JobReleaseTaskExecutionInformation;
+}
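The onAllTasksComplete documentation above spells out the recommended two-step pattern for automatic termination without a Job Manager. A sketch of both halves, assuming an existing pool ID and that PoolInformation carries a poolId field:

// Step 1: create the Job with termination off so it cannot complete
// before any Tasks have been added.
const createOptions: BatchJobCreateOptions = {
  id: "nightly-batch",            // hypothetical Job ID
  poolInfo: { poolId: "pool-1" }, // assumed PoolInformation shape
  onAllTasksComplete: "noaction",
};

// Step 2: once the last Task is added, switch to terminatejob. This is a
// one-way latch: the service rejects changing terminatejob back to noaction.
const patchOptions: BatchJobUpdateOptions = {
  onAllTasksComplete: "terminatejob",
};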
+/**
+ * Contains information about the execution of a Job Preparation Task on a Compute
+ * Node.
+ */
+export interface JobPreparationTaskExecutionInformation {
+  /** The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. */
+  startTime: Date;
+  /** The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state. */
+  endTime?: Date;
+  /** The current state of the Job Preparation Task on the Compute Node. */
+  state: JobPreparationTaskState;
+  /** The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */
+  taskRootDirectory?: string;
+  /** The URL to the root directory of the Job Preparation Task on the Compute Node. */
+  taskRootDirectoryUrl?: string;
+  /** The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. */
+  exitCode?: number;
+  /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */
+  containerInfo?: TaskContainerExecutionInformation;
+  /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */
+  failureInfo?: TaskFailureInformation;
+  /** The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */
+  retryCount: number;
+  /** The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */
+  lastRetryTime?: Date;
+  /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */
+  result?: TaskExecutionResult;
 }

-/** TaskAddStatus enums */
-/** "Success", "clienterror", "servererror" */
-export type TaskAddStatus = string;
-
-/** The result of listing the subtasks of a Task. */
-export interface BatchTaskListSubtasksResult {
-  /** The list of subtasks. */
-  value?: SubtaskInformation[];
-}
+/** JobPreparationTaskState enums */
+/** "running", "completed" */
+export type JobPreparationTaskState = string;

-/** Information about an Azure Batch subtask. */
-export interface SubtaskInformation {
-  /** The ID of the subtask. */
-  id?: number;
-  /** Information about the Compute Node on which the subtask ran.
*/ - nodeInfo?: BatchNodeInformation; - /** The time at which the subtask started running. If the subtask has been restarted or retried, this is the most recent time at which the subtask started running. */ - startTime?: Date; - /** The time at which the subtask completed. This property is set only if the subtask is in the Completed state. */ +/** + * Contains information about the execution of a Job Release Task on a Compute + * Node. + */ +export interface JobReleaseTaskExecutionInformation { + /** The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. */ + startTime: Date; + /** The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. */ endTime?: Date; - /** The exit code of the program specified on the subtask command line. This property is set only if the subtask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the subtask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ + /** The current state of the Job Release Task on the Compute Node. */ + state: JobReleaseTaskState; + /** The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. */ + taskRootDirectory?: string; + /** The URL to the root directory of the Job Release Task on the Compute Node. */ + taskRootDirectoryUrl?: string; + /** The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. */ exitCode?: number; /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ containerInfo?: TaskContainerExecutionInformation; /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ failureInfo?: TaskFailureInformation; - /** The current state of the subtask. */ - state?: SubtaskState; - /** The time at which the subtask entered its current state. */ - stateTransitionTime?: Date; - /** The previous state of the subtask. This property is not set if the subtask is in its initial running state. */ - previousState?: SubtaskState; - /** The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial running state. */ - previousStateTransitionTime?: Date; /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. 
*/ result?: TaskExecutionResult; } -/** SubtaskState enums */ -/** "preparing", "running", "completed" */ -export type SubtaskState = string; +/** JobReleaseTaskState enums */ +/** "running", "completed" */ +export type JobReleaseTaskState = string; -/** - * The result of listing the files on a Compute Node, or the files associated with - * a Task on a Compute Node. - */ -export interface NodeFileListResult { - /** The list of files. */ - value?: NodeFile[]; +/** The Task and TaskSlot counts for a Job. */ +export interface TaskCountsResult { + /** The number of Tasks per state. */ + taskCounts: TaskCounts; + /** The number of TaskSlots required by Tasks per state. */ + taskSlotCounts: TaskSlotCounts; +} + +/** The Task counts for a Job. */ +export interface TaskCounts { + /** The number of Tasks in the active state. */ + active: number; + /** The number of Tasks in the running or preparing state. */ + running: number; + /** The number of Tasks in the completed state. */ + completed: number; + /** The number of Tasks which succeeded. A Task succeeds if its result (found in the executionInfo property) is 'success'. */ + succeeded: number; + /** The number of Tasks which failed. A Task fails if its result (found in the executionInfo property) is 'failure'. */ + failed: number; +} + +/** The TaskSlot counts for a Job. */ +export interface TaskSlotCounts { + /** The number of TaskSlots for active Tasks. */ + active: number; + /** The number of TaskSlots for running Tasks. */ + running: number; + /** The number of TaskSlots for completed Tasks. */ + completed: number; + /** The number of TaskSlots for succeeded Tasks. */ + succeeded: number; + /** The number of TaskSlots for failed Tasks. */ + failed: number; +} + +/** The result of listing the supported Virtual Machine Images. */ +export interface AccountListSupportedImagesResult { + /** The list of supported Virtual Machine Images. */ + value?: ImageInformation[]; /** The URL to get the next set of results. */ "odata.nextLink"?: string; } -/** Information about a file or directory on a Compute Node. */ -export interface NodeFile { - /** The file path. */ - name?: string; - /** The URL of the file. */ - url?: string; - /** Whether the object represents a directory. */ - isDirectory?: boolean; - /** The file properties. */ - properties?: FileProperties; +/** + * A reference to the Azure Virtual Machines Marketplace Image and additional + * information about the Image. + */ +export interface ImageInformation { + /** The ID of the Compute Node agent SKU which the Image supports. */ + nodeAgentSKUId: string; + /** The reference to the Azure Virtual Machine's Marketplace Image. */ + imageReference: ImageReference; + /** The type of operating system (e.g. Windows or Linux) of the Image. */ + osType: OSType; + /** The capabilities or features which the Image supports. Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. */ + capabilities?: string[]; + /** The time when the Azure Batch service will stop accepting create Pool requests for the Image. */ + batchSupportEndOfLife?: Date; + /** Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. */ + verificationType: VerificationType; } -/** The properties of a file on a Compute Node. */ -export interface FileProperties { - /** The file creation time. 
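A small illustrative helper (not generated code; import path assumed) showing how the Task and TaskSlot counts above are typically combined:

import { TaskCountsResult } from "./models/models.js";

// A Job still has pending work while any Tasks are active or running.
function jobHasPendingWork(counts: TaskCountsResult): boolean {
  const { active, running } = counts.taskCounts;
  return active > 0 || running > 0;
}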
The creation time is not returned for files on Linux Compute Nodes. */ - creationTime?: Date; - /** The time at which the file was last modified. */ - lastModified: Date; - /** The length of the file. */ - contentLength: number; - /** The content type of the file. */ - contentType?: string; - /** The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes. */ - fileMode?: string; +/** OSType enums */ +/** "linux", "windows" */ +export type OSType = string; +/** VerificationType enums */ +/** "verified", "unverified" */ +export type VerificationType = string; + +/** The result of listing the Compute Node counts in the Account. */ +export interface PoolNodeCountsListResult { + /** A list of Compute Node counts by Pool. */ + value?: PoolNodeCounts[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; } -/** Options for creating a user account for RDP or SSH access on an Azure Batch Compute Node. */ -export interface BatchNodeUserCreateOptions { - /** The user name of the Account. */ - name: string; - /** Whether the Account should be an administrator on the Compute Node. The default value is false. */ - isAdmin?: boolean; - /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */ - expiryTime?: Date; - /** The password of the Account. The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. */ - password?: string; - /** The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ - sshPublicKey?: string; +/** The number of Compute Nodes in each state for a Pool. */ +export interface PoolNodeCounts { + /** The ID of the Pool. */ + poolId: string; + /** The number of dedicated Compute Nodes in each state. */ + dedicated?: NodeCounts; + /** The number of Spot/Low-priority Compute Nodes in each state. */ + lowPriority?: NodeCounts; } -/** Options for updating a user account for RDP or SSH access on an Azure Batch Compute Node. */ -export interface BatchNodeUserUpdateOptions { - /** The password of the Account. The password is required for Windows Compute Nodes (those created with 'cloudServiceConfiguration', or created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. */ - password?: string; - /** The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. */ - expiryTime?: Date; - /** The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. 
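An illustrative filter over ImageInformation, assuming only the import path and the enum strings documented above ("linux"/"windows" for OSType, "verified"/"unverified" for VerificationType):

import { ImageInformation } from "./models/models.js";

// Keep verified Linux images that Batch still accepts for new Pools.
function usableLinuxImages(images: ImageInformation[], now = new Date()): ImageInformation[] {
  return images.filter(
    (img) =>
      img.osType === "linux" &&
      img.verificationType === "verified" &&
      (img.batchSupportEndOfLife === undefined || img.batchSupportEndOfLife > now),
  );
}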
If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is removed. */ - sshPublicKey?: string; +/** The number of Compute Nodes in each Compute Node state. */ +export interface NodeCounts { + /** The number of Compute Nodes in the creating state. */ + creating: number; + /** The number of Compute Nodes in the idle state. */ + idle: number; + /** The number of Compute Nodes in the offline state. */ + offline: number; + /** The number of Compute Nodes in the preempted state. */ + preempted: number; + /** The count of Compute Nodes in the rebooting state. */ + rebooting: number; + /** The number of Compute Nodes in the reimaging state. */ + reimaging: number; + /** The number of Compute Nodes in the running state. */ + running: number; + /** The number of Compute Nodes in the starting state. */ + starting: number; + /** The number of Compute Nodes in the startTaskFailed state. */ + startTaskFailed: number; + /** The number of Compute Nodes in the leavingPool state. */ + leavingPool: number; + /** The number of Compute Nodes in the unknown state. */ + unknown: number; + /** The number of Compute Nodes in the unusable state. */ + unusable: number; + /** The number of Compute Nodes in the waitingForStartTask state. */ + waitingForStartTask: number; + /** The total number of Compute Nodes. */ + total: number; } -/** A Compute Node in the Batch service. */ -export interface BatchNode { - /** The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. */ - id?: string; - /** The URL of the Compute Node. */ - url?: string; - /** The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. */ - state?: BatchNodeState; - /** Whether the Compute Node is available for Task scheduling. */ - schedulingState?: SchedulingState; - /** The time at which the Compute Node entered its current state. */ - stateTransitionTime?: Date; - /** The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable. */ - lastBootTime?: Date; - /** The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. */ - allocationTime?: Date; - /** The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. */ - ipAddress?: string; - /** An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. */ - affinityId?: string; - /** The size of the virtual machine hosting the Compute Node. 
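A sketch (not generated code) that folds the dedicated and Spot/Low-priority NodeCounts above into a single per-Pool figure:

import { PoolNodeCounts } from "./models/models.js";

// Total idle nodes across both allocation classes of a Pool.
function idleNodes(counts: PoolNodeCounts): number {
  return (counts.dedicated?.idle ?? 0) + (counts.lowPriority?.idle ?? 0);
}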
For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ - vmSize?: string; - /** The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ - totalTasksRun?: number; - /** The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ - runningTasksCount?: number; - /** The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ - runningTaskSlotsCount?: number; - /** The total number of Job Tasks which completed successfully (with exitCode 0) on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. */ - totalTasksSucceeded?: number; - /** A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. */ - recentTasks?: TaskInformation[]; - /** The Task specified to run on the Compute Node as it joins the Pool. */ +/** The result of listing the usage metrics for an Account. */ +export interface PoolListUsageMetricsResult { + /** The Pool usage metrics data. */ + value?: PoolUsageMetrics[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; +}
+
+/** Usage metrics for a Pool across an aggregation interval. */ +export interface PoolUsageMetrics { + /** The ID of the Pool whose metrics are aggregated in this entry. */ + poolId: string; + /** The start time of the aggregation interval covered by this entry. */ + startTime: Date; + /** The end time of the aggregation interval covered by this entry. */ + endTime: Date; + /** The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ + vmSize: string; + /** The total core hours used in the Pool during this aggregation interval. */ + totalCoreHours: number; +}
+
+/** Options for creating an Azure Batch Pool. */ +export interface BatchPoolCreateOptions { + /** A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). */ + id: string; + /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + displayName?: string; + /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines for Cloud Services Pools (pools created with cloudServiceConfiguration), see Sizes for Cloud Services (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and A2V2. 
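A hypothetical aggregation over the usage-metrics entries above (import path assumed):

import { PoolUsageMetrics } from "./models/models.js";

// Sum core hours per Pool across all returned aggregation intervals.
function coreHoursByPool(entries: PoolUsageMetrics[]): Map<string, number> {
  const totals = new Map<string, number>();
  for (const entry of entries) {
    totals.set(entry.poolId, (totals.get(entry.poolId) ?? 0) + entry.totalCoreHours);
  }
  return totals;
}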
For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). */ + vmSize: string; + /** The cloud service configuration for the Pool. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */ + cloudServiceConfiguration?: CloudServiceConfiguration; + /** The virtual machine configuration for the Pool. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. */ + virtualMachineConfiguration?: VirtualMachineConfiguration; + /** The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + resizeTimeout?: string; + /** The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ + targetDedicatedNodes?: number; + /** The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. */ + targetLowPriorityNodes?: number; + /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. */ + enableAutoScale?: boolean; + /** A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). */ + autoScaleFormula?: string; + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). 
*/ + autoScaleEvaluationInterval?: string; + /** Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. */ + enableInterNodeCommunication?: boolean; + /** The network configuration for the Pool. */ + networkConfiguration?: NetworkConfiguration; + /** A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. */ startTask?: StartTask; - /** Runtime information about the execution of the StartTask on the Compute Node. */ - startTaskInfo?: StartTaskInformation; /** * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. @@ -2408,224 +2306,326 @@ export interface BatchNode { * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. */ certificateReferences?: CertificateReference[]; - /** The list of errors that are currently being encountered by the Compute Node. */ - errors?: BatchNodeError[]; - /** Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. */ - isDedicated?: boolean; - /** The endpoint configuration for the Compute Node. */ - endpointConfiguration?: BatchNodeEndpointConfiguration; - /** Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. */ - nodeAgentInfo?: NodeAgentInformation; - /** Info about the current state of the virtual machine. */ - virtualMachineInfo?: VirtualMachineInfo; + /** The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */ + applicationPackageReferences?: ApplicationPackageReference[]; + /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. */ + applicationLicenses?: string[]; + /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ + taskSlotsPerNode?: number; + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + taskSchedulingPolicy?: TaskSchedulingPolicy; + /** The list of user Accounts to be created on each Compute Node in the Pool. 
*/ + userAccounts?: UserAccount[]; + /** A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. */ + metadata?: MetadataItem[]; + /** Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. */ + mountConfiguration?: MountConfiguration[]; + /** The desired node communication mode for the pool. If omitted, the default value is Default. */ + targetNodeCommunicationMode?: NodeCommunicationMode; } -/** BatchNodeState enums */ -/** "idle", "rebooting", "reimaging", "running", "unusable", "creating", "starting", "waitingforstarttask", "starttaskfailed", "unknown", "leavingpool", "offline", "preempted" */ -export type BatchNodeState = string; -/** SchedulingState enums */ -/** "enabled", "disabled" */ -export type SchedulingState = string; - -/** Information about a Task running on a Compute Node. */ -export interface TaskInformation { - /** The URL of the Task. */ - taskUrl?: string; - /** The ID of the Job to which the Task belongs. */ - jobId?: string; - /** The ID of the Task. */ - taskId?: string; - /** The ID of the subtask if the Task is a multi-instance Task. */ - subtaskId?: number; - /** The current state of the Task. */ - taskState: TaskState; - /** Information about the execution of the Task. */ - executionInfo?: TaskExecutionInformation; +/** The result of listing the Pools in an Account. */ +export interface BatchPoolListResult { + /** The list of Pools. */ + value?: BatchPool[]; + /** The URL to get the next set of results. */ + "odata.nextLink"?: string; } -/** Information about a StartTask running on a Compute Node. */ -export interface StartTaskInformation { - /** The state of the StartTask on the Compute Node. */ - state: StartTaskState; - /** The time at which the StartTask started running. This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running). */ - startTime: Date; - /** The time at which the StartTask stopped running. This is the end time of the most recent run of the StartTask, if that run has completed (even if that run failed and a retry is pending). This element is not present if the StartTask is currently running. */ - endTime?: Date; - /** The exit code of the program specified on the StartTask command line. This property is set only if the StartTask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the StartTask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. */ - exitCode?: number; - /** Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. */ - containerInfo?: TaskContainerExecutionInformation; - /** Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. */ - failureInfo?: TaskFailureInformation; - /** The number of times the Task has been retried by the Batch service. 
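An illustrative fixed-size create request shaped by BatchPoolCreateOptions above. The VM size and image values are placeholders; the VirtualMachineConfiguration and ImageReference shapes are defined elsewhere in this file; and resizeTimeout uses an ISO 8601 duration string, which is how these generated models serialize durations:

import { BatchPoolCreateOptions } from "./models/models.js";

const createPool: BatchPoolCreateOptions = {
  id: "example-pool", // unique within the Account, at most 64 characters
  vmSize: "standard_d2s_v3", // placeholder size
  virtualMachineConfiguration: {
    imageReference: {
      publisher: "canonical",
      offer: "0001-com-ubuntu-server-jammy",
      sku: "22_04-lts",
      version: "latest",
    },
    nodeAgentSKUId: "batch.node.ubuntu 22.04",
  },
  targetDedicatedNodes: 2, // fixed scale, so enableAutoScale stays false
  resizeTimeout: "PT15M",
  taskSlotsPerNode: 1,
};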
Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. */ - retryCount: number; - /** The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. */ - lastRetryTime?: Date; - /** The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. */ - result?: TaskExecutionResult; }
+/** A Pool in the Azure Batch service. */ +export interface BatchPool { + /** A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). */ + readonly id?: string; + /** The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. */ + readonly displayName?: string; + /** The URL of the Pool. */ + readonly url?: string; + /** The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. */ + readonly eTag?: string; + /** The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoScale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. */ + readonly lastModified?: Date; + /** The creation time of the Pool. */ + readonly creationTime?: Date; + /** The current state of the Pool. */ + readonly state?: PoolState; + /** The time at which the Pool entered its current state. */ + readonly stateTransitionTime?: Date; + /** Whether the Pool is resizing. */ + readonly allocationState?: AllocationState; + /** The time at which the Pool entered its current allocation state. */ + readonly allocationStateTransitionTime?: Date; + /** The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). */ + readonly vmSize?: string; + /** The cloud service configuration for the Pool. This property and virtualMachineConfiguration are mutually exclusive and one of the properties must be specified. This property cannot be specified if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. */ + readonly cloudServiceConfiguration?: CloudServiceConfiguration; + /** The virtual machine configuration for the Pool. This property and cloudServiceConfiguration are mutually exclusive and one of the properties must be specified. 
 */ + readonly virtualMachineConfiguration?: VirtualMachineConfiguration; + /** The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. */ + readonly resizeTimeout?: string; + /** A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. */ + readonly resizeErrors?: ResizeError[]; + /** The number of dedicated Compute Nodes currently in the Pool. */ + readonly currentDedicatedNodes?: number; + /** The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. */ + readonly currentLowPriorityNodes?: number; + /** The desired number of dedicated Compute Nodes in the Pool. */ + readonly targetDedicatedNodes?: number; + /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ + readonly targetLowPriorityNodes?: number; + /** Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. */ + readonly enableAutoScale?: boolean; + /** A formula for the desired number of Compute Nodes in the Pool. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ + readonly autoScaleFormula?: string; + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ + readonly autoScaleEvaluationInterval?: string; + /** The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. */ + readonly autoScaleRun?: AutoScaleRun; + /** Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes being allocated in the Pool. */ + readonly enableInterNodeCommunication?: boolean; + /** The network configuration for the Pool. */ + readonly networkConfiguration?: NetworkConfiguration; + /** A Task specified to run on each Compute Node as it joins the Pool. */ + startTask?: StartTask; + /** + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + */ + readonly certificateReferences?: CertificateReference[]; + /** The list of Packages to be installed on each Compute Node in the Pool. 
Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. */ + readonly applicationPackageReferences?: ApplicationPackageReference[]; + /** The list of application licenses the Batch service will make available on each Compute Node in the Pool. The list of application licenses must be a subset of available Batch service application licenses. If a license is requested which is not supported, Pool creation will fail. */ + readonly applicationLicenses?: string[]; + /** The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. */ + readonly taskSlotsPerNode?: number; + /** How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. */ + readonly taskSchedulingPolicy?: TaskSchedulingPolicy; + /** The list of user Accounts to be created on each Compute Node in the Pool. */ + readonly userAccounts?: UserAccount[]; + /** A list of name-value pairs associated with the Pool as metadata. */ + readonly metadata?: MetadataItem[]; + /** Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. */ + readonly stats?: PoolStatistics; + /** A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. */ + readonly mountConfiguration?: MountConfiguration[]; + /** The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ + readonly identity?: BatchPoolIdentity; + /** The desired node communication mode for the pool. If omitted, the default value is Default. */ + targetNodeCommunicationMode?: NodeCommunicationMode; + /** The current state of the pool communication mode. */ + readonly currentNodeCommunicationMode?: NodeCommunicationMode; } -/** StartTaskState enums */ -/** "running", "completed" */ -export type StartTaskState = string; +/** PoolState enums */ +/** "active", "deleting" */ +export type PoolState = string; +/** AllocationState enums */ +/** "steady", "resizing", "stopping" */ +export type AllocationState = string; -/** An error encountered by a Compute Node. */ -export interface BatchNodeError { - /** An identifier for the Compute Node error. Codes are invariant and are intended to be consumed programmatically. */ +/** An error that occurred when resizing a Pool. */ +export interface ResizeError { + /** An identifier for the Pool resize error. Codes are invariant and are intended to be consumed programmatically. */ code?: string; - /** A message describing the Compute Node error, intended to be suitable for display in a user interface. */ + /** A message describing the Pool resize error, intended to be suitable for display in a user interface. 
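A sketch (not generated code) of how the read-only Pool status fields above are commonly consumed; "steady" is one of the AllocationState values documented in this file:

import { BatchPool } from "./models/models.js";

// Report the outcome of the most recent resize on a Pool.
function resizeStatus(pool: BatchPool): string {
  if (pool.allocationState !== "steady") {
    return `Pool ${pool.id} is currently ${pool.allocationState}`;
  }
  const errors = pool.resizeErrors ?? [];
  return errors.length === 0
    ? `Pool ${pool.id} is steady with ${pool.currentDedicatedNodes ?? 0} dedicated nodes`
    : `Last resize reported: ${errors.map((e) => e.code ?? "unknown").join(", ")}`;
}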
*/ message?: string; - /** The list of additional error details related to the Compute Node error. */ - errorDetails?: NameValuePair[]; -} - -/** The endpoint configuration for the Compute Node. */ -export interface BatchNodeEndpointConfiguration { - /** The list of inbound endpoints that are accessible on the Compute Node. */ - inboundEndpoints: InboundEndpoint[]; -} - -/** An inbound endpoint on a Compute Node. */ -export interface InboundEndpoint { - /** The name of the endpoint. */ - name: string; - /** The protocol of the endpoint. */ - protocol: InboundEndpointProtocol; - /** The public IP address of the Compute Node. */ - publicIPAddress?: string; - /** The public fully qualified domain name for the Compute Node. */ - publicFQDN?: string; - /** The public port number of the endpoint. */ - frontendPort: number; - /** The backend port number of the endpoint. */ - backendPort: number; -} - -/** - * The Batch Compute Node agent is a program that runs on each Compute Node in the - * Pool and provides Batch capability on the Compute Node. - */ -export interface NodeAgentInformation { - /** The version of the Batch Compute Node agent running on the Compute Node. This version number can be checked against the Compute Node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. */ - version: string; - /** The time when the Compute Node agent was updated on the Compute Node. This is the most recent time that the Compute Node agent was updated to a new version. */ - lastUpdateTime: Date; + /** A list of additional error details related to the Pool resize error. */ + values?: NameValuePair[]; } -/** Info about the current state of the virtual machine. */ -export interface VirtualMachineInfo { - /** The reference to the Azure Virtual Machine's Marketplace Image. */ - imageReference?: ImageReference; +/** The results and errors from an execution of a Pool autoscale formula. */ +export interface AutoScaleRun { + /** The time at which the autoscale formula was last evaluated. */ + timestamp: Date; + /** The final values of all variables used in the evaluation of the autoscale formula. Each variable value is returned in the form $variable=value, and variables are separated by semicolons. */ + results?: string; + /** Details of the error encountered evaluating the autoscale formula on the Pool, if the evaluation was unsuccessful. */ + error?: AutoScaleRunError; } -/** Options for rebooting an Azure Batch Compute Node. */ -export interface NodeRebootOptions { - /** When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. */ - nodeRebootOption?: BatchNodeRebootOption; +/** An error that occurred when executing or evaluating a Pool autoscale formula. */ +export interface AutoScaleRunError { + /** An identifier for the autoscale error. Codes are invariant and are intended to be consumed programmatically. */ + code?: string; + /** A message describing the autoscale error, intended to be suitable for display in a user interface. */ + message?: string; + /** A list of additional error details related to the autoscale error. */ + values?: NameValuePair[]; } -/** BatchNodeRebootOption enums */ -/** "requeue", "terminate", "taskcompletion", "retaineddata" */ -export type BatchNodeRebootOption = string; +/** Contains utilization and resource usage statistics for the lifetime of a Pool. */ +export interface PoolStatistics { + /** The URL for the statistics. 
 */ + url: string; + /** The start time of the time range covered by the statistics. */ + startTime: Date; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: Date; + /** Statistics related to Pool usage, such as the amount of core-time used. */ + usageStats?: UsageStatistics; + /** Statistics related to resource consumption by Compute Nodes in the Pool. */ + resourceStats?: ResourceStatistics; +}
-/** Options for reimaging an Azure Batch Compute Node. */ -export interface NodeReimageOptions { - /** When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue. */ - nodeReimageOption?: BatchNodeReimageOption; +/** Statistics related to Pool usage information. */ +export interface UsageStatistics { + /** The start time of the time range covered by the statistics. */ + startTime: Date; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: Date; + /** The aggregated wall-clock time of the dedicated Compute Node cores being part of the Pool. */ + dedicatedCoreTime: string; }
-/** BatchNodeReimageOption enums */ -/** "requeue", "terminate", "taskcompletion", "retaineddata" */ -export type BatchNodeReimageOption = string; +/** Statistics related to resource consumption by Compute Nodes in a Pool. */ +export interface ResourceStatistics { + /** The start time of the time range covered by the statistics. */ + startTime: Date; + /** The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. */ + lastUpdateTime: Date; + /** The average CPU usage across all Compute Nodes in the Pool (percentage per node). */ + avgCPUPercentage: number; + /** The average memory usage in GiB across all Compute Nodes in the Pool. */ + avgMemoryGiB: number; + /** The peak memory usage in GiB across all Compute Nodes in the Pool. */ + peakMemoryGiB: number; + /** The average used disk space in GiB across all Compute Nodes in the Pool. */ + avgDiskGiB: number; + /** The peak used disk space in GiB across all Compute Nodes in the Pool. */ + peakDiskGiB: number; + /** The total number of disk read operations across all Compute Nodes in the Pool. */ + diskReadIOps: number; + /** The total number of disk write operations across all Compute Nodes in the Pool. */ + diskWriteIOps: number; + /** The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. */ + diskReadGiB: number; + /** The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. */ + diskWriteGiB: number; + /** The total amount of data in GiB of network reads across all Compute Nodes in the Pool. */ + networkReadGiB: number; + /** The total amount of data in GiB of network writes across all Compute Nodes in the Pool. */ + networkWriteGiB: number; +}
-/** Options for disabling scheduling on an Azure Batch Compute Node. */ -export interface NodeDisableSchedulingOptions { - /** What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. */ - nodeDisableSchedulingOption?: DisableBatchNodeSchedulingOption; +/** The identity of the Batch pool, if configured. */ +export interface BatchPoolIdentity { + /** The identity of the Batch pool, if configured. 
 */ + type: PoolIdentityType; + /** The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. */ + userAssignedIdentities?: UserAssignedIdentity[]; }
-/** DisableBatchNodeSchedulingOption enums */ -/** "requeue", "terminate", "taskcompletion" */ -export type DisableBatchNodeSchedulingOption = string; +/** PoolIdentityType enums */ +/** "UserAssigned", "None" */ +export type PoolIdentityType = string;
-/** The remote login settings for a Compute Node. */ -export interface BatchNodeRemoteLoginSettingsResult { - /** The IP address used for remote login to the Compute Node. */ - remoteLoginIPAddress: string; - /** The port used for remote login to the Compute Node. */ - remoteLoginPort: number; +/** The user assigned Identity */ +export interface UserAssignedIdentity { + /** The ARM resource id of the user assigned identity. */ + resourceId: string; + /** The client id of the user assigned identity. */ + readonly clientId?: string; + /** The principal id of the user assigned identity. */ + readonly principalId?: string; }
-/** The Azure Batch service log files upload options for a Compute Node. */ -export interface UploadBatchServiceLogsOptions { - /** The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. */ - containerUrl: string; - /** The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. */ - startTime: Date; - /** The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. */ - endTime?: Date; - /** The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. */ - identityReference?: BatchNodeIdentityReference; +/** Options for updating an Azure Batch Pool. */ +export interface BatchPoolUpdateOptions { + /** A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. 
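A hypothetical value conforming to the identity models above; the ARM resource ID placeholder is taken verbatim from the documented format:

import { BatchPoolIdentity } from "./models/models.js";

const identity: BatchPoolIdentity = {
  type: "UserAssigned", // "None" disables managed identity
  userAssignedIdentities: [
    {
      resourceId:
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}",
    },
  ],
};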
 */ + startTask?: StartTask; + /** + * If this element is present, it replaces any existing Certificate references configured on the Pool. + * If omitted, any existing Certificate references are left unchanged. + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + */ + certificateReferences?: CertificateReference[]; + /** A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. */ + applicationPackageReferences?: ApplicationPackageReference[]; + /** A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. */ + metadata?: MetadataItem[]; + /** The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, the existing setting is left unchanged. */ + targetNodeCommunicationMode?: NodeCommunicationMode; }
-/** The result of uploading Batch service log files from a specific Compute Node. */ -export interface UploadBatchServiceLogsResult { - /** The virtual directory within Azure Blob Storage container to which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the blob name for each log file uploaded, and it is built based poolId, nodeId and a unique identifier. */ - virtualDirectoryName: string; - /** The number of log files which will be uploaded. */ - numberOfFilesUploaded: number; +/** Options for enabling automatic scaling on an Azure Batch Pool. */ +export interface BatchPoolEnableAutoScaleOptions { + /** The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ + autoScaleFormula?: string; + /** The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. 
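A sketch contrasting the patch semantics documented on BatchPoolUpdateOptions above — an omitted field leaves existing state untouched, while an empty array clears it:

import { BatchPoolUpdateOptions } from "./models/models.js";

const update: BatchPoolUpdateOptions = {
  metadata: [], // empty collection: removes all metadata from the Pool
  // startTask, certificateReferences and applicationPackageReferences are
  // omitted, so the Pool's existing values are left unchanged.
};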
If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. */ + autoScaleEvaluationInterval?: string; }
-/** The result of listing the Compute Nodes in a Pool. */ -export interface BatchNodeListResult { - /** The list of Compute Nodes. */ - value?: BatchNode[]; - /** The URL to get the next set of results. */ - "odata.nextLink"?: string; +/** Options for evaluating an automatic scaling formula on an Azure Batch Pool. */ +export interface BatchPoolEvaluateAutoScaleOptions { + /** The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, use the 'Enable automatic scaling on a Pool' operation. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). */ + autoScaleFormula: string; }
-/** The configuration for virtual machine extension instance view. */ -export interface NodeVMExtension { - /** The provisioning state of the virtual machine extension. */ - provisioningState?: string; - /** The virtual machine extension. */ - vmExtension?: VMExtension; - /** The vm extension instance view. */ - instanceView?: VMExtensionInstanceView; +/** Options for changing the size of an Azure Batch Pool. */ +export interface BatchPoolResizeOptions { + /** The desired number of dedicated Compute Nodes in the Pool. */ + targetDedicatedNodes?: number; + /** The desired number of Spot/Low-priority Compute Nodes in the Pool. */ + targetLowPriorityNodes?: number; + /** The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + resizeTimeout?: string; + /** Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue. */ + nodeDeallocationOption?: BatchNodeDeallocationOption; }
-/** The vm extension instance view. */ -export interface VMExtensionInstanceView { - /** The name of the vm extension instance view. */ - name?: string; - /** The resource status information. */ - statuses?: InstanceViewStatus[]; - /** The resource status information. */ - subStatuses?: InstanceViewStatus[]; -} +/** BatchNodeDeallocationOption enums */ +/** "requeue", "terminate", "taskcompletion", "retaineddata" */ +export type BatchNodeDeallocationOption = string;
-/** The instance view status. */ -export interface InstanceViewStatus { - /** The status code. */ - code?: string; - /** The localized label for the status. */ - displayStatus?: string; - /** Level code. */ - level?: StatusLevelTypes; - /** The detailed status message. */ - message?: string; - /** The time of the status. */ - time?: string; +/** Options for replacing properties on an Azure Batch Pool. */ +export interface BatchPoolReplaceOptions { + /** A Task to run on each Compute Node as it joins the Pool. 
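An illustrative enable-autoscale request; the formula is a deliberately trivial constant assignment in the Batch autoscale language, and the interval is an ISO 8601 duration inside the documented 5-minute to 168-hour range:

import { BatchPoolEnableAutoScaleOptions } from "./models/models.js";

const enableAutoScale: BatchPoolEnableAutoScaleOptions = {
  autoScaleFormula: "$TargetDedicatedNodes = 2; $TargetLowPriorityNodes = 0;",
  autoScaleEvaluationInterval: "PT15M",
};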
The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. */ + startTask?: StartTask; + /** + * This list replaces any existing Certificate references configured on the Pool. + * If you specify an empty collection, any existing Certificate references are removed from the Pool. + * For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + * For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + * For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + * Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + */ + certificateReferences: CertificateReference[]; + /** The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Package references are removed from the Pool. */ + applicationPackageReferences: ApplicationPackageReference[]; + /** A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. */ + metadata: MetadataItem[]; + /** The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. */ + targetNodeCommunicationMode?: NodeCommunicationMode; }
-/** Level code. */ -/** "Error", "Info", "Warning" */ -export type StatusLevelTypes = string;
+/** Options for removing nodes from an Azure Batch Pool. */ +export interface NodeRemoveOptions { + /** A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. */ + nodeList: string[]; + /** The timeout for removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). */ + resizeTimeout?: string; + /** Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. */ + nodeDeallocationOption?: BatchNodeDeallocationOption; }
-/** The result of listing the Compute Node extensions in a Node. */ -export interface NodeVMExtensionList { - /** The list of Compute Node extensions. */ - value?: NodeVMExtension[]; +/** The result of listing the applications available in an Account. 
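A hypothetical shrink request built from NodeRemoveOptions above; the node IDs are placeholders and "taskcompletion" is one of the documented BatchNodeDeallocationOption values:

import { NodeRemoveOptions } from "./models/models.js";

const removeNodes: NodeRemoveOptions = {
  nodeList: ["tvmps_node_1", "tvmps_node_2"], // at most 100 IDs per request
  resizeTimeout: "PT10M",
  nodeDeallocationOption: "taskcompletion", // wait for running tasks to finish
};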
*/ +export interface ApplicationListResult { + /** The list of applications available in the Account. */ + value?: BatchApplication[]; /** The URL to get the next set of results. */ "odata.nextLink"?: string; } + +/** Contains information about an application in an Azure Batch Account. */ +export interface BatchApplication { + /** A string that uniquely identifies the application within the Account. */ + id: string; + /** The display name for the application. */ + displayName: string; + /** The list of available versions of the application. */ + versions: string[]; +} From 72a146f58f23f8a92ee10798f995c22c9e464d3c Mon Sep 17 00:00:00 2001 From: Mary Gao Date: Tue, 30 Jan 2024 16:34:44 +0800 Subject: [PATCH 5/6] Update the changes --- .../typespec-ts/src/ContentSafetyClient.ts | 8 +- .../typespec-ts/src/api/operations.ts | 8 +- .../generated/typespec-ts/src/index.ts | 24 +-- .../generated/typespec-ts/src/models/index.ts | 24 +-- .../typespec-ts/src/models/models.ts | 146 +++++++++--------- 5 files changed, 105 insertions(+), 105 deletions(-) diff --git a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/ContentSafetyClient.ts b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/ContentSafetyClient.ts index f77641f419..77489434e1 100644 --- a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/ContentSafetyClient.ts +++ b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/ContentSafetyClient.ts @@ -4,15 +4,15 @@ import { TokenCredential, KeyCredential } from "@azure/core-auth"; import { Pipeline } from "@azure/core-rest-pipeline"; import { - AnalyzeTextOptions, - AnalyzeTextResult, - AnalyzeImageOptions, - AnalyzeImageResult, TextBlocklist, AddOrUpdateBlockItemsOptions, AddOrUpdateBlockItemsResult, TextBlockItem, RemoveBlockItemsOptions, + AnalyzeImageOptions, + AnalyzeImageResult, + AnalyzeTextOptions, + AnalyzeTextResult, } from "./models/models.js"; import { AnalyzeTextRequestOptions, diff --git a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/api/operations.ts b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/api/operations.ts index b65d907fef..c6c0deaa5e 100644 --- a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/api/operations.ts +++ b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/api/operations.ts @@ -2,15 +2,15 @@ // Licensed under the MIT license. 
import { - AnalyzeTextOptions, - AnalyzeTextResult, - AnalyzeImageOptions, - AnalyzeImageResult, TextBlocklist, AddOrUpdateBlockItemsOptions, AddOrUpdateBlockItemsResult, TextBlockItem, RemoveBlockItemsOptions, + AnalyzeImageOptions, + AnalyzeImageResult, + AnalyzeTextOptions, + AnalyzeTextResult, PagedTextBlocklist, PagedTextBlockItem, } from "../models/models.js"; diff --git a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/index.ts b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/index.ts index 83fb4b3902..9786cc8e6a 100644 --- a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/index.ts +++ b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/index.ts @@ -6,24 +6,24 @@ export { ContentSafetyClientOptions, } from "./ContentSafetyClient.js"; export { - AnalyzeTextOptions, - TextCategory, - AnalyzeTextOutputType, - AnalyzeTextResult, - TextBlocklistMatchResult, - TextAnalyzeSeverityResult, - AnalyzeImageOptions, - ImageData, - ImageCategory, - AnalyzeImageOutputType, - AnalyzeImageResult, - ImageAnalyzeSeverityResult, TextBlocklist, AddOrUpdateBlockItemsOptions, TextBlockItemInfo, AddOrUpdateBlockItemsResult, TextBlockItem, RemoveBlockItemsOptions, + AnalyzeImageOptions, + ImageData, + ImageCategory, + AnalyzeImageOutputType, + AnalyzeImageResult, + ImageAnalyzeSeverityResult, + AnalyzeTextOptions, + TextCategory, + AnalyzeTextOutputType, + AnalyzeTextResult, + TextBlocklistMatchResult, + TextAnalyzeSeverityResult, PagedTextBlocklist, PagedTextBlockItem, AnalyzeTextRequestOptions, diff --git a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/models/index.ts b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/models/index.ts index 7e794eecae..18b2d927e9 100644 --- a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/models/index.ts +++ b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/models/index.ts @@ -2,24 +2,24 @@ // Licensed under the MIT license. export { - AnalyzeTextOptions, - TextCategory, - AnalyzeTextOutputType, - AnalyzeTextResult, - TextBlocklistMatchResult, - TextAnalyzeSeverityResult, - AnalyzeImageOptions, - ImageData, - ImageCategory, - AnalyzeImageOutputType, - AnalyzeImageResult, - ImageAnalyzeSeverityResult, TextBlocklist, AddOrUpdateBlockItemsOptions, TextBlockItemInfo, AddOrUpdateBlockItemsResult, TextBlockItem, RemoveBlockItemsOptions, + AnalyzeImageOptions, + ImageData, + ImageCategory, + AnalyzeImageOutputType, + AnalyzeImageResult, + ImageAnalyzeSeverityResult, + AnalyzeTextOptions, + TextCategory, + AnalyzeTextOutputType, + AnalyzeTextResult, + TextBlocklistMatchResult, + TextAnalyzeSeverityResult, PagedTextBlocklist, PagedTextBlockItem, } from "./models.js"; diff --git a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/models/models.ts b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/models/models.ts index 83befbaf53..9cebdc9b56 100644 --- a/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/models/models.ts +++ b/packages/typespec-test/test/contentsafety_modular/generated/typespec-ts/src/models/models.ts @@ -1,51 +1,48 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -/** The analysis request of the text. */ -export interface AnalyzeTextOptions { - /** The text needs to be scanned. 
We support at most 10k Unicode characters (unicode code points) in text of one request. */
- text: string;
- /** The categories will be analyzed. If not assigned, a default set of the categories' analysis results will be returned. */
- categories?: TextCategory[];
- /** The names of blocklists. */
- blocklistNames?: string[];
- /** When set to true, further analyses of harmful content will not be performed in cases where blocklists are hit. When set to false, all analyses of harmful content will be performed, whether or not blocklists are hit. */
- breakByBlocklists?: boolean;
- /** The type of text analysis output. If not assigned, the default value is "FourLevels". */
- outputType?: AnalyzeTextOutputType;
+/** Text Blocklist. */
+export interface TextBlocklist {
+ /** Text blocklist name. */
+ readonly blocklistName: string;
+ /** Text blocklist description. */
+ description?: string;
}

-/** Text analyze category */
-/** "Hate", "SelfHarm", "Sexual", "Violence" */
-export type TextCategory = string;
-/** The type of text analysis output. */
-/** "FourLevels", "EightLevels" */
-export type AnalyzeTextOutputType = string;
+/** The request to add blockItems to a text blocklist. */
+export interface AddOrUpdateBlockItemsOptions {
+ /** Array of blockItemInfo to add. */
+ blockItems: TextBlockItemInfo[];
}

-/** The analysis response of the text */
-export interface AnalyzeTextResult {
- /** The details of blocklist match. */
- blocklistsMatchResults?: TextBlocklistMatchResult[];
- /** Analysis result for categories. */
- analyzeResults: TextAnalyzeSeverityResult[];
+/** Block item info in a text blocklist. */
+export interface TextBlockItemInfo {
+ /** Block item description. */
+ description?: string;
+ /** Block item content. */
+ text: string;
}

-/** The result of blocklist match. */
-export interface TextBlocklistMatchResult {
- /** The name of matched blocklist. */
- blocklistName: string;
- /** The id of matched item. */
- blockItemId: string;
- /** The content of matched item. */
- blockItemText: string;
+/** The response from adding blockItems to a text blocklist. */
+export interface AddOrUpdateBlockItemsResult {
+ /** Array of blockItems added. */
+ value?: TextBlockItem[];
}

-/** Text analysis result. */
-export interface TextAnalyzeSeverityResult {
- /** The text category. */
- category: TextCategory;
- /** This field is decided by outputType in request, if choose "FourLevels", the value could be 0,2,4,6. The higher the severity of input content, the larger this value is. */
- severity?: number;
+/** Item in TextBlocklist. */
+export interface TextBlockItem {
+ /** Block Item ID. It will be a UUID. */
+ readonly blockItemId: string;
+ /** Block item description. */
+ description?: string;
+ /** Block item content. */
+ text: string;
+}
+
+/** The request to remove blockItems from a text blocklist. */
+export interface RemoveBlockItemsOptions {
+ /** Array of blockItemIds to remove. */
+ blockItemIds: string[];
}

/** The analysis request of the image. */
@@ -87,48 +84,51 @@ export interface ImageAnalyzeSeverityResult {
severity?: number;
}

-/** Text Blocklist. */
-export interface TextBlocklist {
- /** Text blocklist name. */
- readonly blocklistName: string;
- /** Text blocklist description. */
- description?: string;
-}
-
-/** The request of adding blockItems to text blocklist. */
-export interface AddOrUpdateBlockItemsOptions {
- /** Array of blockItemInfo to add. */
- blockItems: TextBlockItemInfo[];
-}
-
-/** Block item info in text blocklist. 
*/
-export interface TextBlockItemInfo {
- /** Block item description. */
- description?: string;
- /** Block item content. */
+/** The analysis request of the text. */
+export interface AnalyzeTextOptions {
+ /** The text to be scanned. We support at most 10k Unicode characters (Unicode code points) in the text of a single request. */
text: string;
+ /** The categories to be analyzed. If not assigned, analysis results for a default set of categories will be returned. */
+ categories?: TextCategory[];
+ /** The names of blocklists. */
+ blocklistNames?: string[];
+ /** When set to true, further analyses of harmful content will not be performed in cases where blocklists are hit. When set to false, all analyses of harmful content will be performed, whether or not blocklists are hit. */
+ breakByBlocklists?: boolean;
+ /** The type of text analysis output. If not assigned, the default value is "FourLevels". */
+ outputType?: AnalyzeTextOutputType;
}
-/** The response of adding blockItems to text blocklist. */
-export interface AddOrUpdateBlockItemsResult {
- /** Array of blockItems added. */
- value?: TextBlockItem[];
+/** Text analysis category. */
+/** "Hate", "SelfHarm", "Sexual", "Violence" */
+export type TextCategory = string;
+/** The type of text analysis output. */
+/** "FourLevels", "EightLevels" */
+export type AnalyzeTextOutputType = string;
+
+/** The analysis response of the text. */
+export interface AnalyzeTextResult {
+ /** The details of the blocklist match. */
+ blocklistsMatchResults?: TextBlocklistMatchResult[];
+ /** Analysis result for categories. */
+ analyzeResults: TextAnalyzeSeverityResult[];
}
-/** Item in TextBlocklist. */
-export interface TextBlockItem {
- /** Block Item Id. It will be uuid. */
- readonly blockItemId: string;
- /** Block item description. */
- description?: string;
- /** Block item content. */
- text: string;
+/** The result of a blocklist match. */
+export interface TextBlocklistMatchResult {
+ /** The name of the matched blocklist. */
+ blocklistName: string;
+ /** The ID of the matched item. */
+ blockItemId: string;
+ /** The content of the matched item. */
+ blockItemText: string;
}
-/** The request of removing blockItems from text blocklist. */
-export interface RemoveBlockItemsOptions {
- /** Array of blockItemIds to remove. */
- blockItemIds: string[];
+/** Text analysis result. */
+export interface TextAnalyzeSeverityResult {
+ /** The text category. */
+ category: TextCategory;
+ /** This field is determined by the outputType in the request; if "FourLevels" is chosen, the value can be 0, 2, 4, or 6. The higher the severity of the input content, the larger this value is. 
*/ + severity?: number; } /** Paged collection of TextBlocklist items */ From 84cffce9ae185b31a9d7acc0efb120f0bb203ea4 Mon Sep 17 00:00:00 2001 From: Mary Gao Date: Tue, 30 Jan 2024 17:34:39 +0800 Subject: [PATCH 6/6] update the changes --- .../src/api/audio/transcriptions/index.ts | 2 +- .../src/api/audio/translations/index.ts | 2 +- .../src/api/chat/completions/index.ts | 2 +- .../typespec-ts/src/api/files/index.ts | 2 +- .../src/api/fineTuning/jobs/index.ts | 2 +- .../src/classic/audio/transcriptions/index.ts | 2 +- .../src/classic/audio/translations/index.ts | 2 +- .../src/classic/chat/completions/index.ts | 2 +- .../typespec-ts/src/classic/files/index.ts | 2 +- .../src/classic/fineTuning/jobs/index.ts | 2 +- .../generated/typespec-ts/src/index.ts | 66 +- .../generated/typespec-ts/src/models/index.ts | 66 +- .../typespec-ts/src/models/models.ts | 1062 ++++++++--------- .../src/api/audio/transcriptions/index.ts | 2 +- .../src/api/audio/translations/index.ts | 2 +- .../src/api/chat/completions/index.ts | 2 +- .../typespec-ts/src/api/files/index.ts | 2 +- .../src/api/fineTuning/jobs/index.ts | 2 +- .../src/classic/audio/transcriptions/index.ts | 2 +- .../src/classic/audio/translations/index.ts | 2 +- .../src/classic/chat/completions/index.ts | 2 +- .../typespec-ts/src/classic/files/index.ts | 2 +- .../src/classic/fineTuning/jobs/index.ts | 2 +- .../generated/typespec-ts/src/index.ts | 66 +- .../generated/typespec-ts/src/models/index.ts | 66 +- .../typespec-ts/src/models/models.ts | 1062 ++++++++--------- .../propertyTypes/generated/src/index.ts | 58 +- .../generated/src/models/index.ts | 58 +- .../generated/src/models/models.ts | 182 +-- .../generated/unions/src/index.ts | 8 +- .../generated/unions/src/models/index.ts | 8 +- .../generated/unions/src/models/models.ts | 58 +- 32 files changed, 1400 insertions(+), 1400 deletions(-) diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/audio/transcriptions/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/audio/transcriptions/index.ts index 4c80cdf07d..fb3d7e7398 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/audio/transcriptions/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/audio/transcriptions/index.ts @@ -2,8 +2,8 @@ // Licensed under the MIT license. import { - CreateTranscriptionResponse, CreateTranscriptionRequest, + CreateTranscriptionResponse, } from "../../../models/models.js"; import { AudioTranscriptionsCreate200Response, diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/audio/translations/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/audio/translations/index.ts index 9a3f5f4fae..2ad10522b6 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/audio/translations/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/audio/translations/index.ts @@ -2,8 +2,8 @@ // Licensed under the MIT license. 
import { - CreateTranslationResponse, CreateTranslationRequest, + CreateTranslationResponse, } from "../../../models/models.js"; import { AudioTranslationsCreate200Response, diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/chat/completions/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/chat/completions/index.ts index aa9c91cd03..17b40dba9c 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/chat/completions/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/chat/completions/index.ts @@ -2,8 +2,8 @@ // Licensed under the MIT license. import { - CreateChatCompletionResponse, CreateChatCompletionRequest, + CreateChatCompletionResponse, } from "../../../models/models.js"; import { ChatCompletionsCreate200Response, diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/files/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/files/index.ts index 2ac9dea864..63a775801c 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/files/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/files/index.ts @@ -2,8 +2,8 @@ // Licensed under the MIT license. import { - ListFilesResponse, OpenAIFile, + ListFilesResponse, CreateFileRequest, DeleteFileResponse, } from "../../models/models.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/fineTuning/jobs/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/fineTuning/jobs/index.ts index 106500ce0e..7ca317f852 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/fineTuning/jobs/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/api/fineTuning/jobs/index.ts @@ -2,8 +2,8 @@ // Licensed under the MIT license. 
import { - FineTuningJob, CreateFineTuningJobRequest, + FineTuningJob, ListPaginatedFineTuningJobsResponse, ListFineTuningJobEventsResponse, } from "../../../models/models.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/audio/transcriptions/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/audio/transcriptions/index.ts index 74e3c3df0b..ce2ffeb654 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/audio/transcriptions/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/audio/transcriptions/index.ts @@ -3,8 +3,8 @@ import { OpenAIContext } from "../../../api/OpenAIContext.js"; import { - CreateTranscriptionResponse, CreateTranscriptionRequest, + CreateTranscriptionResponse, } from "../../../models/models.js"; import { create } from "../../../api/audio/transcriptions/index.js"; import { AudioTranscriptionsCreateOptions } from "../../../models/options.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/audio/translations/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/audio/translations/index.ts index 4e8c4688f8..bc54ab855a 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/audio/translations/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/audio/translations/index.ts @@ -3,8 +3,8 @@ import { OpenAIContext } from "../../../api/OpenAIContext.js"; import { - CreateTranslationResponse, CreateTranslationRequest, + CreateTranslationResponse, } from "../../../models/models.js"; import { create } from "../../../api/audio/translations/index.js"; import { AudioTranslationsCreateOptions } from "../../../models/options.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/chat/completions/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/chat/completions/index.ts index 0b5da3b12b..1fe629deec 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/chat/completions/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/chat/completions/index.ts @@ -3,8 +3,8 @@ import { OpenAIContext } from "../../../api/OpenAIContext.js"; import { - CreateChatCompletionResponse, CreateChatCompletionRequest, + CreateChatCompletionResponse, } from "../../../models/models.js"; import { create } from "../../../api/chat/completions/index.js"; import { ChatCompletionsCreateOptions } from "../../../models/options.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/files/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/files/index.ts index 6f9426f885..18de510bd8 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/files/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/files/index.ts @@ -3,8 +3,8 @@ import { OpenAIContext } from "../../api/OpenAIContext.js"; import { - ListFilesResponse, OpenAIFile, + ListFilesResponse, CreateFileRequest, DeleteFileResponse, } from "../../models/models.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/fineTuning/jobs/index.ts 
b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/fineTuning/jobs/index.ts index e07381ed72..6966788fc6 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/fineTuning/jobs/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/classic/fineTuning/jobs/index.ts @@ -3,8 +3,8 @@ import { OpenAIContext } from "../../../api/OpenAIContext.js"; import { - FineTuningJob, CreateFineTuningJobRequest, + FineTuningJob, ListPaginatedFineTuningJobsResponse, ListFineTuningJobEventsResponse, } from "../../../models/models.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/index.ts index ef32c5012e..ada475f0e6 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/index.ts @@ -3,49 +3,49 @@ export { OpenAIClient, OpenAIClientOptions } from "./OpenAIClient.js"; export { - CreateCompletionRequest, - CreateCompletionResponse, - CompletionUsage, + CreateModerationRequest, + CreateModerationResponse, Error, - CreateEditRequest, - CreateEditResponse, - CreateEmbeddingRequest, - CreateEmbeddingResponse, - Embedding, - ListFilesResponse, - OpenAIFile, - CreateFileRequest, - DeleteFileResponse, - CreateFineTuneRequest, - FineTune, - FineTuneEvent, - ListFineTunesResponse, - ListFineTuneEventsResponse, - ListModelsResponse, - Model, - DeleteModelResponse, CreateImageRequest, ImagesResponse, Image, CreateImageEditRequest, CreateImageVariationRequest, - CreateModerationRequest, - CreateModerationResponse, - CreateTranscriptionResponse, - CreateTranscriptionRequest, - CreateTranslationResponse, - CreateTranslationRequest, - CreateChatCompletionResponse, - ChatCompletionResponseMessage, - CreateChatCompletionRequest, - ChatCompletionRequestMessage, - ChatCompletionFunctions, - ChatCompletionFunctionCallOption, - FineTuningJob, + ListModelsResponse, + Model, + DeleteModelResponse, + CreateFineTuneRequest, + FineTune, + OpenAIFile, + FineTuneEvent, + ListFineTunesResponse, + ListFineTuneEventsResponse, + ListFilesResponse, + CreateFileRequest, + DeleteFileResponse, + CreateEmbeddingRequest, + CreateEmbeddingResponse, + Embedding, + CreateEditRequest, + CreateEditResponse, + CompletionUsage, + CreateCompletionRequest, + CreateCompletionResponse, CreateFineTuningJobRequest, + FineTuningJob, ListPaginatedFineTuningJobsResponse, ListFineTuningJobEventsResponse, FineTuningJobEvent, + CreateChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionFunctions, + ChatCompletionFunctionCallOption, + CreateChatCompletionResponse, + ChatCompletionResponseMessage, + CreateTranslationRequest, + CreateTranslationResponse, + CreateTranscriptionRequest, + CreateTranscriptionResponse, Prompt, Stop, AudioTranscriptionsCreateOptions, diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/index.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/index.ts index bbea915aa4..7f0a2a2385 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/index.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/index.ts @@ -2,49 +2,49 @@ // Licensed under the MIT license. 
export { - CreateCompletionRequest, - CreateCompletionResponse, - CompletionUsage, + CreateModerationRequest, + CreateModerationResponse, Error, - CreateEditRequest, - CreateEditResponse, - CreateEmbeddingRequest, - CreateEmbeddingResponse, - Embedding, - ListFilesResponse, - OpenAIFile, - CreateFileRequest, - DeleteFileResponse, - CreateFineTuneRequest, - FineTune, - FineTuneEvent, - ListFineTunesResponse, - ListFineTuneEventsResponse, - ListModelsResponse, - Model, - DeleteModelResponse, CreateImageRequest, ImagesResponse, Image, CreateImageEditRequest, CreateImageVariationRequest, - CreateModerationRequest, - CreateModerationResponse, - CreateTranscriptionResponse, - CreateTranscriptionRequest, - CreateTranslationResponse, - CreateTranslationRequest, - CreateChatCompletionResponse, - ChatCompletionResponseMessage, - CreateChatCompletionRequest, - ChatCompletionRequestMessage, - ChatCompletionFunctions, - ChatCompletionFunctionCallOption, - FineTuningJob, + ListModelsResponse, + Model, + DeleteModelResponse, + CreateFineTuneRequest, + FineTune, + OpenAIFile, + FineTuneEvent, + ListFineTunesResponse, + ListFineTuneEventsResponse, + ListFilesResponse, + CreateFileRequest, + DeleteFileResponse, + CreateEmbeddingRequest, + CreateEmbeddingResponse, + Embedding, + CreateEditRequest, + CreateEditResponse, + CompletionUsage, + CreateCompletionRequest, + CreateCompletionResponse, CreateFineTuningJobRequest, + FineTuningJob, ListPaginatedFineTuningJobsResponse, ListFineTuningJobEventsResponse, FineTuningJobEvent, + CreateChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionFunctions, + ChatCompletionFunctionCallOption, + CreateChatCompletionResponse, + ChatCompletionResponseMessage, + CreateTranslationRequest, + CreateTranslationResponse, + CreateTranscriptionRequest, + CreateTranscriptionResponse, Prompt, Stop, } from "./models.js"; diff --git a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/models.ts b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/models.ts index 6fc35e9b46..402857cf87 100644 --- a/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/models.ts +++ b/packages/typespec-test/test/openai_generic/generated/typespec-ts/src/models/models.ts @@ -1,160 +1,54 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -export interface CreateCompletionRequest { - /** - * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - * see all of your available models, or see our [Model overview](/docs/models/overview) for - * descriptions of them. - */ - model: - | string - | "babbage-002" - | "davinci-002" - | "text-davinci-003" - | "text-davinci-002" - | "text-davinci-001" - | "code-davinci-002" - | "text-curie-001" - | "text-babbage-001" - | "text-ada-001"; - /** - * The prompt(s) to generate completions for, encoded as a string, array of strings, array of - * tokens, or array of token arrays. - * - * Note that <|endoftext|> is the document separator that the model sees during training, so if a - * prompt is not specified the model will generate as if from the beginning of a new document. - */ - prompt: Prompt; - /** The suffix that comes after a completion of inserted text. */ - suffix?: string | null; - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - * more random, while lower values like 0.2 will make it more focused and deterministic. 
- * - * We generally recommend altering this or `top_p` but not both. - */ - temperature?: number | null; - /** - * An alternative to sampling with temperature, called nucleus sampling, where the model considers - * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - * the top 10% probability mass are considered. - * - * We generally recommend altering this or `temperature` but not both. - */ - topP?: number | null; - /** - * How many completions to generate for each prompt. - * **Note:** Because this parameter generates many completions, it can quickly consume your token - * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - */ - n?: number | null; - /** - * The maximum number of [tokens](/tokenizer) to generate in the completion. - * - * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - * for counting tokens. - */ - maxTokens?: number | null; - /** Up to 4 sequences where the API will stop generating further tokens. */ - stop?: Stop; - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - * in the text so far, increasing the model's likelihood to talk about new topics. - * - * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - */ - presencePenalty?: number | null; - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing - * frequency in the text so far, decreasing the model's likelihood to repeat the same line - * verbatim. - * - * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - */ - frequencyPenalty?: number | null; - /** - * Modify the likelihood of specified tokens appearing in the completion. - * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an - * associated bias value from -100 to 100. Mathematically, the bias is added to the logits - * generated by the model prior to sampling. The exact effect will vary per model, but values - * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 - * should result in a ban or exclusive selection of the relevant token. - */ - logitBias?: Record; - /** - * A unique identifier representing your end-user, which can help OpenAI to monitor and detect - * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - */ - user?: string; - /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` message. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). - */ - stream?: boolean | null; - /** - * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. - * For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The - * API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` - * elements in the response. - * - * The maximum value for `logprobs` is 5. 
- */
- logprobs?: number | null;
- /** Echo back the prompt in addition to the completion */
- echo?: boolean | null;
+export interface CreateModerationRequest {
+ /** The input text to classify */
+ input: string | string[];
/**
- * Generates `best_of` completions server-side and returns the "best" (the one with the highest
- * log probability per token). Results cannot be streamed.
- *
- * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies
- * how many to return – `best_of` must be greater than `n`.
- *
- * **Note:** Because this parameter generates many completions, it can quickly consume your token
- * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+ * Two content moderation models are available: `text-moderation-stable` and
+ * `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically
+ * upgraded over time. This ensures you are always using our most accurate model. If you use
+ * `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy
+ * of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
*/
- bestOf?: number | null;
+ model?: string | "text-moderation-latest" | "text-moderation-stable";
}

-/**
- * Represents a completion response from the API. Note: both the streamed and non-streamed response
- * objects share the same shape (unlike the chat endpoint).
- */
-export interface CreateCompletionResponse {
- /** A unique identifier for the completion. */
+export interface CreateModerationResponse {
+ /** The unique identifier for the moderation request. */
id: string;
- /** The object type, which is always `text_completion`. */
- object: string;
- /** The Unix timestamp (in seconds) of when the completion was created. */
- created: Date;
- /** The model used for the completion. */
+ /** The model used to generate the moderation results. */
model: string;
- /** The list of completion choices the model generated for the input. */
- choices: {
- index: number;
- text: string;
- logprobs: {
- tokens: string[];
- tokenLogprobs: number[];
- topLogprobs: Record[];
- textOffset: number[];
- } | null;
- finishReason: "stop" | "length" | "content_filter";
+ /** A list of moderation objects. */
+ results: {
+ flagged: boolean;
+ categories: {
+ hate: boolean;
+ "hate/threatening": boolean;
+ harassment: boolean;
+ "harassment/threatening": boolean;
+ selfHarm: boolean;
+ "selfHarm/intent": boolean;
+ "selfHarm/instructive": boolean;
+ sexual: boolean;
+ "sexual/minors": boolean;
+ violence: boolean;
+ "violence/graphic": boolean;
+ };
+ categoryScores: {
+ hate: number;
+ "hate/threatening": number;
+ harassment: number;
+ "harassment/threatening": number;
+ selfHarm: number;
+ "selfHarm/intent": number;
+ "selfHarm/instructive": number;
+ sexual: number;
+ "sexual/minors": number;
+ violence: number;
+ "violence/graphic": number;
+ };
}[];
- usage?: CompletionUsage;
-}
-
-/** Usage statistics for the completion request. */
-export interface CompletionUsage {
- /** Number of tokens in the prompt. */
- promptTokens: number;
- /** Number of tokens in the generated completion */
- completionTokens: number;
- /** Total number of tokens used in the request (prompt + completion). */
- totalTokens: number;
}

export interface Error {
@@ -164,136 +58,87 @@
code: string | null;
}

-export interface CreateEditRequest {
- /**
- * ID of the model to use. 
You can use the `text-davinci-edit-001` or `code-davinci-edit-001` - * model with this endpoint. - */ - model: string | "text-davinci-edit-001" | "code-davinci-edit-001"; - /** The input text to use as a starting point for the edit. */ - input?: string | null; - /** The instruction that tells the model how to edit the prompt. */ - instruction: string; - /** How many edits to generate for the input and instruction. */ +export interface CreateImageRequest { + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; + /** The number of images to generate. Must be between 1 and 10. */ n?: number | null; - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - * more random, while lower values like 0.2 will make it more focused and deterministic. - * - * We generally recommend altering this or `top_p` but not both. - */ - temperature?: number | null; - /** - * An alternative to sampling with temperature, called nucleus sampling, where the model considers - * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - * the top 10% probability mass are considered. - * - * We generally recommend altering this or `temperature` but not both. - */ - topP?: number | null; -} - -export interface CreateEditResponse { - /** The object type, which is always `edit`. */ - object: "edit"; - /** The Unix timestamp (in seconds) of when the edit was created. */ - created: Date; - /** description: A list of edit choices. Can be more than one if `n` is greater than 1. */ - choices: { text: string; index: number; finishReason: "stop" | "length" }[]; - usage: CompletionUsage; -} - -export interface CreateEmbeddingRequest { - /** ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. */ - model: string | "text-embedding-ada-002"; - /** - * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a - * single request, pass an array of strings or array of token arrays. Each input must not exceed - * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - * for counting tokens. - */ - input: string | string[] | number[] | number[][]; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024"; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + responseFormat?: "url" | "b64_json"; user?: string; } -export interface CreateEmbeddingResponse { - /** The object type, which is always "embedding". */ - object: "embedding"; - /** The name of the model used to generate the embedding. */ - model: string; - /** The list of embeddings generated by the model. */ - data: Embedding[]; - /** The usage information for the request. */ - usage: { promptTokens: number; totalTokens: number }; -} - -/** Represents an embedding vector returned by embedding endpoint. */ -export interface Embedding { - /** The index of the embedding in the list of embeddings. */ - index: number; - /** The object type, which is always "embedding". */ - object: "embedding"; - /** - * The embedding vector, which is a list of floats. 
The length of vector depends on the model as\ - * listed in the [embedding guide](/docs/guides/embeddings). - */ - embedding: number[]; +export interface ImagesResponse { + created: Date; + data: Image[]; } -export interface ListFilesResponse { - object: string; - data: OpenAIFile[]; +/** Represents the url or the content of an image generated by the OpenAI API. */ +export interface Image { + /** The URL of the generated image, if `response_format` is `url` (default). */ + url?: string; + /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ + b64Json?: Uint8Array; } - -/** The `File` object represents a document that has been uploaded to OpenAI. */ -export interface OpenAIFile { - /** The file identifier, which can be referenced in the API endpoints. */ - id: string; - /** The object type, which is always "file". */ - object: "file"; - /** The size of the file in bytes. */ - bytes: number; - /** The Unix timestamp (in seconds) for when the file was created. */ - createdAt: Date; - /** The name of the file. */ - filename: string; - /** The intended purpose of the file. Currently, only "fine-tune" is supported. */ - purpose: string; + +export interface CreateImageEditRequest { + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; /** - * The current status of the file, which can be either `uploaded`, `processed`, `pending`, - * `error`, `deleting` or `deleted`. + * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + * provided, image must have transparency, which will be used as the mask. */ - status: - | "uploaded" - | "processed" - | "pending" - | "error" - | "deleting" - | "deleted"; + image: Uint8Array; /** - * Additional details about the status of the file. If the file is in the `error` state, this will - * include a message describing the error. + * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + * `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions + * as `image`. */ - statusDetails?: string | null; + mask?: Uint8Array; + /** The number of images to generate. Must be between 1 and 10. */ + n?: number | null; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024"; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + responseFormat?: "url" | "b64_json"; + user?: string; } -export interface CreateFileRequest { - /** - * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. - * - * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. - */ - file: Uint8Array; +export interface CreateImageVariationRequest { /** - * The intended purpose of the uploaded documents. Use "fine-tune" for - * [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the - * uploaded file. + * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + * and square. */ - purpose: string; + image: Uint8Array; + /** The number of images to generate. Must be between 1 and 10. */ + n?: number | null; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024"; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
*/ + responseFormat?: "url" | "b64_json"; + user?: string; } -export interface DeleteFileResponse { +export interface ListModelsResponse { + object: string; + data: Model[]; +} + +/** Describes an OpenAI model offering that can be used with the API. */ +export interface Model { + /** The model identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "model". */ + object: "model"; + /** The Unix timestamp (in seconds) when the model was created. */ + created: Date; + /** The organization that owns the model. */ + ownedBy: string; +} + +export interface DeleteModelResponse { id: string; object: string; deleted: boolean; @@ -453,6 +298,38 @@ export interface FineTune { events?: FineTuneEvent[]; } +/** The `File` object represents a document that has been uploaded to OpenAI. */ +export interface OpenAIFile { + /** The file identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "file". */ + object: "file"; + /** The size of the file in bytes. */ + bytes: number; + /** The Unix timestamp (in seconds) for when the file was created. */ + createdAt: Date; + /** The name of the file. */ + filename: string; + /** The intended purpose of the file. Currently, only "fine-tune" is supported. */ + purpose: string; + /** + * The current status of the file, which can be either `uploaded`, `processed`, `pending`, + * `error`, `deleting` or `deleted`. + */ + status: + | "uploaded" + | "processed" + | "pending" + | "error" + | "deleting" + | "deleted"; + /** + * Additional details about the status of the file. If the file is in the `error` state, this will + * include a message describing the error. + */ + statusDetails?: string | null; +} + export interface FineTuneEvent { object: string; createdAt: Date; @@ -470,236 +347,387 @@ export interface ListFineTuneEventsResponse { data: FineTuneEvent[]; } -export interface ListModelsResponse { +export interface ListFilesResponse { object: string; - data: Model[]; + data: OpenAIFile[]; } -/** Describes an OpenAI model offering that can be used with the API. */ -export interface Model { - /** The model identifier, which can be referenced in the API endpoints. */ - id: string; - /** The object type, which is always "model". */ - object: "model"; - /** The Unix timestamp (in seconds) when the model was created. */ - created: Date; - /** The organization that owns the model. */ - ownedBy: string; +export interface CreateFileRequest { + /** + * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. + * + * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + */ + file: Uint8Array; + /** + * The intended purpose of the uploaded documents. Use "fine-tune" for + * [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the + * uploaded file. + */ + purpose: string; } -export interface DeleteModelResponse { +export interface DeleteFileResponse { id: string; object: string; deleted: boolean; } -export interface CreateImageRequest { - /** A text description of the desired image(s). The maximum length is 1000 characters. */ - prompt: string; - /** The number of images to generate. Must be between 1 and 10. */ - n?: number | null; - /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ - size?: "256x256" | "512x512" | "1024x1024"; - /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
*/ - responseFormat?: "url" | "b64_json"; +export interface CreateEmbeddingRequest { + /** ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. */ + model: string | "text-embedding-ada-002"; + /** + * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + * single request, pass an array of strings or array of token arrays. Each input must not exceed + * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. + */ + input: string | string[] | number[] | number[][]; user?: string; } -export interface ImagesResponse { +export interface CreateEmbeddingResponse { + /** The object type, which is always "embedding". */ + object: "embedding"; + /** The name of the model used to generate the embedding. */ + model: string; + /** The list of embeddings generated by the model. */ + data: Embedding[]; + /** The usage information for the request. */ + usage: { promptTokens: number; totalTokens: number }; +} + +/** Represents an embedding vector returned by embedding endpoint. */ +export interface Embedding { + /** The index of the embedding in the list of embeddings. */ + index: number; + /** The object type, which is always "embedding". */ + object: "embedding"; + /** + * The embedding vector, which is a list of floats. The length of vector depends on the model as\ + * listed in the [embedding guide](/docs/guides/embeddings). + */ + embedding: number[]; +} + +export interface CreateEditRequest { + /** + * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` + * model with this endpoint. + */ + model: string | "text-davinci-edit-001" | "code-davinci-edit-001"; + /** The input text to use as a starting point for the edit. */ + input?: string | null; + /** The instruction that tells the model how to edit the prompt. */ + instruction: string; + /** How many edits to generate for the input and instruction. */ + n?: number | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: number | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + topP?: number | null; +} + +export interface CreateEditResponse { + /** The object type, which is always `edit`. */ + object: "edit"; + /** The Unix timestamp (in seconds) of when the edit was created. */ created: Date; - data: Image[]; + /** description: A list of edit choices. Can be more than one if `n` is greater than 1. */ + choices: { text: string; index: number; finishReason: "stop" | "length" }[]; + usage: CompletionUsage; } -/** Represents the url or the content of an image generated by the OpenAI API. */ -export interface Image { - /** The URL of the generated image, if `response_format` is `url` (default). 
*/ - url?: string; - /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ - b64Json?: Uint8Array; +/** Usage statistics for the completion request. */ +export interface CompletionUsage { + /** Number of tokens in the prompt. */ + promptTokens: number; + /** Number of tokens in the generated completion */ + completionTokens: number; + /** Total number of tokens used in the request (prompt + completion). */ + totalTokens: number; } -export interface CreateImageEditRequest { - /** A text description of the desired image(s). The maximum length is 1000 characters. */ - prompt: string; +export interface CreateCompletionRequest { + /** + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + * see all of your available models, or see our [Model overview](/docs/models/overview) for + * descriptions of them. + */ + model: + | string + | "babbage-002" + | "davinci-002" + | "text-davinci-003" + | "text-davinci-002" + | "text-davinci-001" + | "code-davinci-002" + | "text-curie-001" + | "text-babbage-001" + | "text-ada-001"; + /** + * The prompt(s) to generate completions for, encoded as a string, array of strings, array of + * tokens, or array of token arrays. + * + * Note that <|endoftext|> is the document separator that the model sees during training, so if a + * prompt is not specified the model will generate as if from the beginning of a new document. + */ + prompt: Prompt; + /** The suffix that comes after a completion of inserted text. */ + suffix?: string | null; /** - * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not - * provided, image must have transparency, which will be used as the mask. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. */ - image: Uint8Array; + temperature?: number | null; /** - * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where - * `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions - * as `image`. + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. */ - mask?: Uint8Array; - /** The number of images to generate. Must be between 1 and 10. */ - n?: number | null; - /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ - size?: "256x256" | "512x512" | "1024x1024"; - /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ - responseFormat?: "url" | "b64_json"; - user?: string; -} - -export interface CreateImageVariationRequest { + topP?: number | null; /** - * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, - * and square. + * How many completions to generate for each prompt. + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. */ - image: Uint8Array; - /** The number of images to generate. Must be between 1 and 10. 
*/ n?: number | null; - /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ - size?: "256x256" | "512x512" | "1024x1024"; - /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ - responseFormat?: "url" | "b64_json"; + /** + * The maximum number of [tokens](/tokenizer) to generate in the completion. + * + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. + */ + maxTokens?: number | null; + /** Up to 4 sequences where the API will stop generating further tokens. */ + stop?: Stop; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + * in the text so far, increasing the model's likelihood to talk about new topics. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + presencePenalty?: number | null; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + * frequency in the text so far, decreasing the model's likelihood to repeat the same line + * verbatim. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + frequencyPenalty?: number | null; + /** + * Modify the likelihood of specified tokens appearing in the completion. + * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an + * associated bias value from -100 to 100. Mathematically, the bias is added to the logits + * generated by the model prior to sampling. The exact effect will vary per model, but values + * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + * should result in a ban or exclusive selection of the relevant token. + */ + logitBias?: Record; + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ user?: string; -} - -export interface CreateModerationRequest { - /** The input text to classify */ - input: string | string[]; /** - * Two content moderations models are available: `text-moderation-stable` and - * `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically - * upgraded over time. This ensures you are always using our most accurate model. If you use - * `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy - * of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). */ - model?: string | "text-moderation-latest" | "text-moderation-stable"; + stream?: boolean | null; + /** + * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. + * For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The + * API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + * elements in the response. + * + * The maximum value for `logprobs` is 5. + */ + logprobs?: number | null; + /** Echo back the prompt in addition to the completion */ + echo?: boolean | null; + /** + * Generates `best_of` completions server-side and returns the "best" (the one with the highest + * log probability per token). Results cannot be streamed. + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + * how many to return – `best_of` must be greater than `n`. + * + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + */ + bestOf?: number | null; } -export interface CreateModerationResponse { - /** The unique identifier for the moderation request. */ +/** + * Represents a completion response from the API. Note: both the streamed and non-streamed response + * objects share the same shape (unlike the chat endpoint). + */ +export interface CreateCompletionResponse { + /** A unique identifier for the completion. */ id: string; - /** The model used to generate the moderation results. */ + /** The object type, which is always `text_completion`. */ + object: string; + /** The Unix timestamp (in seconds) of when the completion was created. */ + created: Date; + /** The model used for the completion. */ model: string; - /** A list of moderation objects. */ - results: { - flagged: boolean; - categories: { - hate: boolean; - "hate/threatening": boolean; - harassment: boolean; - "harassment/threatening": boolean; - selfHarm: boolean; - "selfHarm/intent": boolean; - "selfHarm/instructive": boolean; - sexual: boolean; - "sexual/minors": boolean; - violence: boolean; - "violence/graphic": boolean; - }; - categoryScores: { - hate: number; - "hate/threatening": number; - harassment: number; - "harassment/threatening": number; - selfHarm: number; - "selfHarm/intent": number; - "selfHarm/instructive": number; - sexual: number; - "sexual/minors": number; - violence: number; - "violence/graphic": number; - }; + /** The list of completion choices the model generated for the input. */ + choices: { + index: number; + text: string; + logprobs: { + tokens: string[]; + tokenLogprobs: number[]; + topLogprobs: Record[]; + textOffset: number[]; + } | null; + finishReason: "stop" | "length" | "content_filter"; }[]; + usage?: CompletionUsage; } -export interface CreateTranscriptionResponse { - text: string; +export interface CreateFineTuningJobRequest { + /** + * The ID of an uploaded file that contains training data. + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + * the purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + trainingFile: string; + /** + * The ID of an uploaded file that contains validation data. + * + * If you provide this file, the data is used to generate validation metrics periodically during + * fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + * not be present in both train and validation files. + * + * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + * `fine-tune`. 
+ * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + validationFile?: string | null; + /** + * The name of the model to fine-tune. You can select one of the + * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + */ + model: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo"; + /** The hyperparameters used for the fine-tuning job. */ + hyperparameters?: { nEpochs?: "auto" | number }; + /** + * A string of up to 18 characters that will be added to your fine-tuned model name. + * + * For example, a `suffix` of "custom-model-name" would produce a model name like + * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + */ + suffix?: string | null; } -export interface CreateTranscriptionRequest { +export interface FineTuningJob { + /** The object identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "fine_tuning.job". */ + object: "fine_tuning.job"; + /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */ + createdAt: Date; /** - * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, - * mpeg, mpga, m4a, ogg, wav, or webm. + * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + * null if the fine-tuning job is still running. */ - file: Uint8Array; - /** ID of the model to use. Only `whisper-1` is currently available. */ - model: string | "whisper-1"; + finishedAt: Date | null; + /** The base model that is being fine-tuned. */ + model: string; /** - * An optional text to guide the model's style or continue a previous audio segment. The - * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + * The name of the fine-tuned model that is being created. The value will be null if the + * fine-tuning job is still running. */ - prompt?: string; + fineTunedModel: string | null; + /** The organization that owns the fine-tuning job. */ + organizationId: string; /** - * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - * vtt. + * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, + * `succeeded`, `failed`, or `cancelled`. */ - responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; - /** - * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more - * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, - * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to - * automatically increase the temperature until certain thresholds are hit. + status: + | "created" + | "pending" + | "running" + | "succeeded" + | "failed" + | "cancelled"; + /** + * The hyperparameters used for the fine-tuning job. See the + * [fine-tuning guide](/docs/guides/fine-tuning) for more details. */ - temperature?: number; + hyperparameters: { nEpochs?: "auto" | number }; /** - * The language of the input audio. Supplying the input language in - * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy - * and latency. + * The file ID used for training. You can retrieve the training data with the + * [Files API](/docs/api-reference/files/retrieve-contents). 
*/ - language?: string; -} - -export interface CreateTranslationResponse { - text: string; -} - -export interface CreateTranslationRequest { + trainingFile: string; /** - * The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, - * mpeg, mpga, m4a, ogg, wav, or webm. + * The file ID used for validation. You can retrieve the validation results with the + * [Files API](/docs/api-reference/files/retrieve-contents). */ - file: Uint8Array; - /** ID of the model to use. Only `whisper-1` is currently available. */ - model: string | "whisper-1"; + validationFile: string | null; /** - * An optional text to guide the model's style or continue a previous audio segment. The - * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + * The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the + * [Files API](/docs/api-reference/files/retrieve-contents). */ - prompt?: string; + resultFiles: string[]; /** - * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or - * vtt. + * The total number of billable tokens processed by this fine tuning job. The value will be null + * if the fine-tuning job is still running. */ - responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; + trainedTokens: number | null; /** - * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more - * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, - * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to - * automatically increase the temperature until certain thresholds are hit. + * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the + * failure. */ - temperature?: number; + error: { message?: string; code?: string; param?: string | null } | null; } -/** Represents a chat completion response returned by model, based on the provided input. */ -export interface CreateChatCompletionResponse { - /** A unique identifier for the chat completion. */ - id: string; - /** The object type, which is always `chat.completion`. */ +export interface ListPaginatedFineTuningJobsResponse { object: string; - /** The Unix timestamp (in seconds) of when the chat completion was created. */ - created: Date; - /** The model used for the chat completion. */ - model: string; - /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */ - choices: { - index: number; - message: ChatCompletionResponseMessage; - finishReason: "stop" | "length" | "function_call" | "content_filter"; - }[]; - usage?: CompletionUsage; + data: FineTuningJob[]; + hasMore: boolean; } -export interface ChatCompletionResponseMessage { - /** The role of the author of this message. */ - role: "system" | "user" | "assistant" | "function"; - /** The contents of the message. */ - content: string | null; - /** The name and arguments of a function that should be called, as generated by the model. 
*/ - functionCall?: { name: string; arguments: string }; +export interface ListFineTuningJobEventsResponse { + object: string; + data: FineTuningJobEvent[]; +} + +export interface FineTuningJobEvent { + id: string; + object: string; + createdAt: Date; + level: "info" | "warn" | "error"; + message: string; } export interface CreateChatCompletionRequest { @@ -848,128 +876,100 @@ export interface ChatCompletionFunctionCallOption { name: string; } -export interface FineTuningJob { - /** The object identifier, which can be referenced in the API endpoints. */ +/** Represents a chat completion response returned by model, based on the provided input. */ +export interface CreateChatCompletionResponse { + /** A unique identifier for the chat completion. */ id: string; - /** The object type, which is always "fine_tuning.job". */ - object: "fine_tuning.job"; - /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */ - createdAt: Date; - /** - * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be - * null if the fine-tuning job is still running. - */ - finishedAt: Date | null; - /** The base model that is being fine-tuned. */ + /** The object type, which is always `chat.completion`. */ + object: string; + /** The Unix timestamp (in seconds) of when the chat completion was created. */ + created: Date; + /** The model used for the chat completion. */ model: string; + /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */ + choices: { + index: number; + message: ChatCompletionResponseMessage; + finishReason: "stop" | "length" | "function_call" | "content_filter"; + }[]; + usage?: CompletionUsage; +} + +export interface ChatCompletionResponseMessage { + /** The role of the author of this message. */ + role: "system" | "user" | "assistant" | "function"; + /** The contents of the message. */ + content: string | null; + /** The name and arguments of a function that should be called, as generated by the model. */ + functionCall?: { name: string; arguments: string }; +} + +export interface CreateTranslationRequest { /** - * The name of the fine-tuned model that is being created. The value will be null if the - * fine-tuning job is still running. - */ - fineTunedModel: string | null; - /** The organization that owns the fine-tuning job. */ - organizationId: string; - /** - * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, - * `succeeded`, `failed`, or `cancelled`. - */ - status: - | "created" - | "pending" - | "running" - | "succeeded" - | "failed" - | "cancelled"; - /** - * The hyperparameters used for the fine-tuning job. See the - * [fine-tuning guide](/docs/guides/fine-tuning) for more details. - */ - hyperparameters: { nEpochs?: "auto" | number }; - /** - * The file ID used for training. You can retrieve the training data with the - * [Files API](/docs/api-reference/files/retrieve-contents). - */ - trainingFile: string; - /** - * The file ID used for validation. You can retrieve the validation results with the - * [Files API](/docs/api-reference/files/retrieve-contents). + * The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, + * mpeg, mpga, m4a, ogg, wav, or webm. */ - validationFile: string | null; + file: Uint8Array; + /** ID of the model to use. Only `whisper-1` is currently available. */ + model: string | "whisper-1"; /** - * The compiled results file ID(s) for the fine-tuning job. 
You can retrieve the results with the - * [Files API](/docs/api-reference/files/retrieve-contents). + * An optional text to guide the model's style or continue a previous audio segment. The + * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. */ - resultFiles: string[]; + prompt?: string; /** - * The total number of billable tokens processed by this fine tuning job. The value will be null - * if the fine-tuning job is still running. + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + * vtt. */ - trainedTokens: number | null; + responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; /** - * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the - * failure. + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + * automatically increase the temperature until certain thresholds are hit. */ - error: { message?: string; code?: string; param?: string | null } | null; + temperature?: number; } -export interface CreateFineTuningJobRequest { +export interface CreateTranslationResponse { + text: string; +} + +export interface CreateTranscriptionRequest { /** - * The ID of an uploaded file that contains training data. - * - * See [upload file](/docs/api-reference/files/upload) for how to upload a file. - * - * Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with - * the purpose `fine-tune`. - * - * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, + * mpeg, mpga, m4a, ogg, wav, or webm. */ - trainingFile: string; + file: Uint8Array; + /** ID of the model to use. Only `whisper-1` is currently available. */ + model: string | "whisper-1"; /** - * The ID of an uploaded file that contains validation data. - * - * If you provide this file, the data is used to generate validation metrics periodically during - * fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should - * not be present in both train and validation files. - * - * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose - * `fine-tune`. - * - * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + * An optional text to guide the model's style or continue a previous audio segment. The + * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. */ - validationFile?: string | null; + prompt?: string; /** - * The name of the model to fine-tune. You can select one of the - * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or + * vtt. */ - model: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo"; - /** The hyperparameters used for the fine-tuning job. */ - hyperparameters?: { nEpochs?: "auto" | number }; + responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt"; /** - * A string of up to 18 characters that will be added to your fine-tuned model name. 
- * - * For example, a `suffix` of "custom-model-name" would produce a model name like - * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, + * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to + * automatically increase the temperature until certain thresholds are hit. */ - suffix?: string | null; -} - -export interface ListPaginatedFineTuningJobsResponse { - object: string; - data: FineTuningJob[]; - hasMore: boolean; -} - -export interface ListFineTuningJobEventsResponse { - object: string; - data: FineTuningJobEvent[]; + temperature?: number; + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy + * and latency. + */ + language?: string; } -export interface FineTuningJobEvent { - id: string; - object: string; - createdAt: Date; - level: "info" | "warn" | "error"; - message: string; +export interface CreateTranscriptionResponse { + text: string; } /** Alias for Prompt */ diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/audio/transcriptions/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/audio/transcriptions/index.ts index e47bf7b7ff..b13ef97ebd 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/audio/transcriptions/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/audio/transcriptions/index.ts @@ -1,8 +1,8 @@ // Licensed under the MIT license. import { - CreateTranscriptionResponse, CreateTranscriptionRequest, + CreateTranscriptionResponse, } from "../../../models/models.js"; import { AudioTranscriptionsCreate200Response, diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/audio/translations/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/audio/translations/index.ts index 4582aef35e..85c84915d2 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/audio/translations/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/audio/translations/index.ts @@ -1,8 +1,8 @@ // Licensed under the MIT license. import { - CreateTranslationResponse, CreateTranslationRequest, + CreateTranslationResponse, } from "../../../models/models.js"; import { AudioTranslationsCreate200Response, diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/chat/completions/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/chat/completions/index.ts index 2335d41421..2f9e4b65d8 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/chat/completions/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/chat/completions/index.ts @@ -1,8 +1,8 @@ // Licensed under the MIT license. 
import { - CreateChatCompletionResponse, CreateChatCompletionRequest, + CreateChatCompletionResponse, } from "../../../models/models.js"; import { ChatCompletionsCreate200Response, diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/files/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/files/index.ts index 7287c8c084..5f8a72d523 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/files/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/files/index.ts @@ -1,8 +1,8 @@ // Licensed under the MIT license. import { - ListFilesResponse, OpenAIFile, + ListFilesResponse, CreateFileRequest, DeleteFileResponse, } from "../../models/models.js"; diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/fineTuning/jobs/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/fineTuning/jobs/index.ts index 8413270de6..375ad45286 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/fineTuning/jobs/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/api/fineTuning/jobs/index.ts @@ -1,8 +1,8 @@ // Licensed under the MIT license. import { - FineTuningJob, CreateFineTuningJobRequest, + FineTuningJob, ListPaginatedFineTuningJobsResponse, ListFineTuningJobEventsResponse, } from "../../../models/models.js"; diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/audio/transcriptions/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/audio/transcriptions/index.ts index ac844455e8..021e44baf2 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/audio/transcriptions/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/audio/transcriptions/index.ts @@ -2,8 +2,8 @@ import { OpenAIContext } from "../../../api/OpenAIContext.js"; import { - CreateTranscriptionResponse, CreateTranscriptionRequest, + CreateTranscriptionResponse, } from "../../../models/models.js"; import { create } from "../../../api/audio/transcriptions/index.js"; import { AudioTranscriptionsCreateOptions } from "../../../models/options.js"; diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/audio/translations/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/audio/translations/index.ts index ab9ada512b..00f8ee19f4 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/audio/translations/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/audio/translations/index.ts @@ -2,8 +2,8 @@ import { OpenAIContext } from "../../../api/OpenAIContext.js"; import { - CreateTranslationResponse, CreateTranslationRequest, + CreateTranslationResponse, } from "../../../models/models.js"; import { create } from "../../../api/audio/translations/index.js"; import { AudioTranslationsCreateOptions } from "../../../models/options.js"; diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/chat/completions/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/chat/completions/index.ts index 5aa2f97ffc..d102600eda 100644 --- 
a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/chat/completions/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/chat/completions/index.ts @@ -2,8 +2,8 @@ import { OpenAIContext } from "../../../api/OpenAIContext.js"; import { - CreateChatCompletionResponse, CreateChatCompletionRequest, + CreateChatCompletionResponse, } from "../../../models/models.js"; import { create } from "../../../api/chat/completions/index.js"; import { ChatCompletionsCreateOptions } from "../../../models/options.js"; diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/files/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/files/index.ts index 62a7943099..2e5401e880 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/files/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/files/index.ts @@ -2,8 +2,8 @@ import { OpenAIContext } from "../../api/OpenAIContext.js"; import { - ListFilesResponse, OpenAIFile, + ListFilesResponse, CreateFileRequest, DeleteFileResponse, } from "../../models/models.js"; diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/fineTuning/jobs/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/fineTuning/jobs/index.ts index 6d3f311bb8..b9a9f1f0af 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/fineTuning/jobs/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/classic/fineTuning/jobs/index.ts @@ -2,8 +2,8 @@ import { OpenAIContext } from "../../../api/OpenAIContext.js"; import { - FineTuningJob, CreateFineTuningJobRequest, + FineTuningJob, ListPaginatedFineTuningJobsResponse, ListFineTuningJobEventsResponse, } from "../../../models/models.js"; diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/index.ts index 05db99c190..473adfd3af 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/index.ts @@ -2,49 +2,49 @@ export { OpenAIClient, OpenAIClientOptions } from "./OpenAIClient.js"; export { - CreateCompletionRequest, - CreateCompletionResponse, - CompletionUsage, + CreateModerationRequest, + CreateModerationResponse, Error, - CreateEditRequest, - CreateEditResponse, - CreateEmbeddingRequest, - CreateEmbeddingResponse, - Embedding, - ListFilesResponse, - OpenAIFile, - CreateFileRequest, - DeleteFileResponse, - CreateFineTuneRequest, - FineTune, - FineTuneEvent, - ListFineTunesResponse, - ListFineTuneEventsResponse, - ListModelsResponse, - Model, - DeleteModelResponse, CreateImageRequest, ImagesResponse, Image, CreateImageEditRequest, CreateImageVariationRequest, - CreateModerationRequest, - CreateModerationResponse, - CreateTranscriptionResponse, - CreateTranscriptionRequest, - CreateTranslationResponse, - CreateTranslationRequest, - CreateChatCompletionResponse, - ChatCompletionResponseMessage, - CreateChatCompletionRequest, - ChatCompletionRequestMessage, - ChatCompletionFunctions, - ChatCompletionFunctionCallOption, - FineTuningJob, + ListModelsResponse, + Model, + DeleteModelResponse, + CreateFineTuneRequest, + FineTune, + OpenAIFile, + FineTuneEvent, 
+ ListFineTunesResponse, + ListFineTuneEventsResponse, + ListFilesResponse, + CreateFileRequest, + DeleteFileResponse, + CreateEmbeddingRequest, + CreateEmbeddingResponse, + Embedding, + CreateEditRequest, + CreateEditResponse, + CompletionUsage, + CreateCompletionRequest, + CreateCompletionResponse, CreateFineTuningJobRequest, + FineTuningJob, ListPaginatedFineTuningJobsResponse, ListFineTuningJobEventsResponse, FineTuningJobEvent, + CreateChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionFunctions, + ChatCompletionFunctionCallOption, + CreateChatCompletionResponse, + ChatCompletionResponseMessage, + CreateTranslationRequest, + CreateTranslationResponse, + CreateTranscriptionRequest, + CreateTranscriptionResponse, Prompt, Stop, AudioTranscriptionsCreateOptions, diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/models/index.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/models/index.ts index 007ec1cc60..b816237bf8 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/models/index.ts +++ b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/models/index.ts @@ -1,49 +1,49 @@ // Licensed under the MIT license. export { - CreateCompletionRequest, - CreateCompletionResponse, - CompletionUsage, + CreateModerationRequest, + CreateModerationResponse, Error, - CreateEditRequest, - CreateEditResponse, - CreateEmbeddingRequest, - CreateEmbeddingResponse, - Embedding, - ListFilesResponse, - OpenAIFile, - CreateFileRequest, - DeleteFileResponse, - CreateFineTuneRequest, - FineTune, - FineTuneEvent, - ListFineTunesResponse, - ListFineTuneEventsResponse, - ListModelsResponse, - Model, - DeleteModelResponse, CreateImageRequest, ImagesResponse, Image, CreateImageEditRequest, CreateImageVariationRequest, - CreateModerationRequest, - CreateModerationResponse, - CreateTranscriptionResponse, - CreateTranscriptionRequest, - CreateTranslationResponse, - CreateTranslationRequest, - CreateChatCompletionResponse, - ChatCompletionResponseMessage, - CreateChatCompletionRequest, - ChatCompletionRequestMessage, - ChatCompletionFunctions, - ChatCompletionFunctionCallOption, - FineTuningJob, + ListModelsResponse, + Model, + DeleteModelResponse, + CreateFineTuneRequest, + FineTune, + OpenAIFile, + FineTuneEvent, + ListFineTunesResponse, + ListFineTuneEventsResponse, + ListFilesResponse, + CreateFileRequest, + DeleteFileResponse, + CreateEmbeddingRequest, + CreateEmbeddingResponse, + Embedding, + CreateEditRequest, + CreateEditResponse, + CompletionUsage, + CreateCompletionRequest, + CreateCompletionResponse, CreateFineTuningJobRequest, + FineTuningJob, ListPaginatedFineTuningJobsResponse, ListFineTuningJobEventsResponse, FineTuningJobEvent, + CreateChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionFunctions, + ChatCompletionFunctionCallOption, + CreateChatCompletionResponse, + ChatCompletionResponseMessage, + CreateTranslationRequest, + CreateTranslationResponse, + CreateTranscriptionRequest, + CreateTranscriptionResponse, Prompt, Stop, } from "./models.js"; diff --git a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/models/models.ts b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/models/models.ts index 3ba3926c78..6a7b23468a 100644 --- a/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/models/models.ts +++ 
b/packages/typespec-test/test/openai_non_branded/generated/typespec-ts/src/models/models.ts @@ -1,159 +1,53 @@ // Licensed under the MIT license. -export interface CreateCompletionRequest { - /** - * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to - * see all of your available models, or see our [Model overview](/docs/models/overview) for - * descriptions of them. - */ - model: - | string - | "babbage-002" - | "davinci-002" - | "text-davinci-003" - | "text-davinci-002" - | "text-davinci-001" - | "code-davinci-002" - | "text-curie-001" - | "text-babbage-001" - | "text-ada-001"; - /** - * The prompt(s) to generate completions for, encoded as a string, array of strings, array of - * tokens, or array of token arrays. - * - * Note that <|endoftext|> is the document separator that the model sees during training, so if a - * prompt is not specified the model will generate as if from the beginning of a new document. - */ - prompt: Prompt; - /** The suffix that comes after a completion of inserted text. */ - suffix?: string | null; - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - * more random, while lower values like 0.2 will make it more focused and deterministic. - * - * We generally recommend altering this or `top_p` but not both. - */ - temperature?: number | null; - /** - * An alternative to sampling with temperature, called nucleus sampling, where the model considers - * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - * the top 10% probability mass are considered. - * - * We generally recommend altering this or `temperature` but not both. - */ - topP?: number | null; - /** - * How many completions to generate for each prompt. - * **Note:** Because this parameter generates many completions, it can quickly consume your token - * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - */ - n?: number | null; - /** - * The maximum number of [tokens](/tokenizer) to generate in the completion. - * - * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - * for counting tokens. - */ - maxTokens?: number | null; - /** Up to 4 sequences where the API will stop generating further tokens. */ - stop?: Stop; - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear - * in the text so far, increasing the model's likelihood to talk about new topics. - * - * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - */ - presencePenalty?: number | null; - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing - * frequency in the text so far, decreasing the model's likelihood to repeat the same line - * verbatim. - * - * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) - */ - frequencyPenalty?: number | null; - /** - * Modify the likelihood of specified tokens appearing in the completion. - * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an - * associated bias value from -100 to 100. Mathematically, the bias is added to the logits - * generated by the model prior to sampling. 
The exact effect will vary per model, but values - * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 - * should result in a ban or exclusive selection of the relevant token. - */ - logitBias?: Record<string, number>; - /** - * A unique identifier representing your end-user, which can help OpenAI to monitor and detect - * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - */ - user?: string; - /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` message. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). - */ - stream?: boolean | null; - /** - * Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. - * For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The - * API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` - * elements in the response. - * - * The maximum value for `logprobs` is 5. - */ - logprobs?: number | null; - /** Echo back the prompt in addition to the completion */ - echo?: boolean | null; +export interface CreateModerationRequest { + /** The input text to classify */ + input: string | string[]; /** - * Generates `best_of` completions server-side and returns the "best" (the one with the highest - * log probability per token). Results cannot be streamed. - * - * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies - * how many to return – `best_of` must be greater than `n`. - * - * **Note:** Because this parameter generates many completions, it can quickly consume your token - * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + * Two content moderation models are available: `text-moderation-stable` and + * `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically + * upgraded over time. This ensures you are always using our most accurate model. If you use + * `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy + * of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. */ - bestOf?: number | null; + model?: string | "text-moderation-latest" | "text-moderation-stable"; } -/** - * Represents a completion response from the API. Note: both the streamed and non-streamed response - * objects share the same shape (unlike the chat endpoint). - */ -export interface CreateCompletionResponse { - /** A unique identifier for the completion. */ +export interface CreateModerationResponse { + /** The unique identifier for the moderation request. */ id: string; - /** The object type, which is always `text_completion`. */ - object: string; - /** The Unix timestamp (in seconds) of when the completion was created. */ - created: Date; - /** The model used for the completion. */ + /** The model used to generate the moderation results. */ model: string; - /** The list of completion choices the model generated for the input. */ - choices: { - index: number; - text: string; - logprobs: { - tokens: string[]; - tokenLogprobs: number[]; - topLogprobs: Record<string, number>[]; - textOffset: number[]; - } | null; - finishReason: "stop" | "length" | "content_filter"; + /** A list of moderation objects. */ + results: { + flagged: boolean; + categories: { + hate: boolean; + "hate/threatening": boolean; + harassment: boolean; + "harassment/threatening": boolean; + selfHarm: boolean; + "selfHarm/intent": boolean; + "selfHarm/instructive": boolean; + sexual: boolean; + "sexual/minors": boolean; + violence: boolean; + "violence/graphic": boolean; + }; + categoryScores: { + hate: number; + "hate/threatening": number; + harassment: number; + "harassment/threatening": number; + selfHarm: number; + "selfHarm/intent": number; + "selfHarm/instructive": number; + sexual: number; + "sexual/minors": number; + violence: number; + "violence/graphic": number; + }; }[]; - usage?: CompletionUsage; -} - -/** Usage statistics for the completion request. */ -export interface CompletionUsage { - /** Number of tokens in the prompt. */ - promptTokens: number; - /** Number of tokens in the generated completion */ - completionTokens: number; - /** Total number of tokens used in the request (prompt + completion). */ - totalTokens: number; } export interface Error { @@ -163,136 +57,87 @@ code: string | null; } -export interface CreateEditRequest { - /** - * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` - * model with this endpoint. - */ - model: string | "text-davinci-edit-001" | "code-davinci-edit-001"; - /** The input text to use as a starting point for the edit. */ - input?: string | null; - /** The instruction that tells the model how to edit the prompt. */ - instruction: string; - /** How many edits to generate for the input and instruction. */ +export interface CreateImageRequest { + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; + /** The number of images to generate. Must be between 1 and 10. */ n?: number | null; - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - * more random, while lower values like 0.2 will make it more focused and deterministic. - * - * We generally recommend altering this or `top_p` but not both. - */ - temperature?: number | null; - /** - * An alternative to sampling with temperature, called nucleus sampling, where the model considers - * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising - * the top 10% probability mass are considered. - * - * We generally recommend altering this or `temperature` but not both. - */ - topP?: number | null; -} - -export interface CreateEditResponse { - /** The object type, which is always `edit`. */ - object: "edit"; - /** The Unix timestamp (in seconds) of when the edit was created. */ - created: Date; - /** A list of edit choices. Can be more than one if `n` is greater than 1. */ - choices: { text: string; index: number; finishReason: "stop" | "length" }[]; - usage: CompletionUsage; -} - -export interface CreateEmbeddingRequest { - /** ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. */ - model: string | "text-embedding-ada-002"; - /** - * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a - *
To embed multiple inputs in a - * single request, pass an array of strings or array of token arrays. Each input must not exceed - * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. - * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) - * for counting tokens. - */ - input: string | string[] | number[] | number[][]; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024"; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + responseFormat?: "url" | "b64_json"; user?: string; } -export interface CreateEmbeddingResponse { - /** The object type, which is always "embedding". */ - object: "embedding"; - /** The name of the model used to generate the embedding. */ - model: string; - /** The list of embeddings generated by the model. */ - data: Embedding[]; - /** The usage information for the request. */ - usage: { promptTokens: number; totalTokens: number }; -} - -/** Represents an embedding vector returned by embedding endpoint. */ -export interface Embedding { - /** The index of the embedding in the list of embeddings. */ - index: number; - /** The object type, which is always "embedding". */ - object: "embedding"; - /** - * The embedding vector, which is a list of floats. The length of vector depends on the model as\ - * listed in the [embedding guide](/docs/guides/embeddings). - */ - embedding: number[]; +export interface ImagesResponse { + created: Date; + data: Image[]; } -export interface ListFilesResponse { - object: string; - data: OpenAIFile[]; +/** Represents the url or the content of an image generated by the OpenAI API. */ +export interface Image { + /** The URL of the generated image, if `response_format` is `url` (default). */ + url?: string; + /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ + b64Json?: Uint8Array; } - -/** The `File` object represents a document that has been uploaded to OpenAI. */ -export interface OpenAIFile { - /** The file identifier, which can be referenced in the API endpoints. */ - id: string; - /** The object type, which is always "file". */ - object: "file"; - /** The size of the file in bytes. */ - bytes: number; - /** The Unix timestamp (in seconds) for when the file was created. */ - createdAt: Date; - /** The name of the file. */ - filename: string; - /** The intended purpose of the file. Currently, only "fine-tune" is supported. */ - purpose: string; + +export interface CreateImageEditRequest { + /** A text description of the desired image(s). The maximum length is 1000 characters. */ + prompt: string; /** - * The current status of the file, which can be either `uploaded`, `processed`, `pending`, - * `error`, `deleting` or `deleted`. + * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not + * provided, image must have transparency, which will be used as the mask. */ - status: - | "uploaded" - | "processed" - | "pending" - | "error" - | "deleting" - | "deleted"; + image: Uint8Array; /** - * Additional details about the status of the file. If the file is in the `error` state, this will - * include a message describing the error. + * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where + * `image` should be edited. 
Must be a valid PNG file, less than 4MB, and have the same dimensions + * as `image`. */ - statusDetails?: string | null; + mask?: Uint8Array; + /** The number of images to generate. Must be between 1 and 10. */ + n?: number | null; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024"; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + responseFormat?: "url" | "b64_json"; + user?: string; } -export interface CreateFileRequest { - /** - * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. - * - * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. - */ - file: Uint8Array; +export interface CreateImageVariationRequest { /** - * The intended purpose of the uploaded documents. Use "fine-tune" for - * [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the - * uploaded file. + * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, + * and square. */ - purpose: string; + image: Uint8Array; + /** The number of images to generate. Must be between 1 and 10. */ + n?: number | null; + /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ + size?: "256x256" | "512x512" | "1024x1024"; + /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ + responseFormat?: "url" | "b64_json"; + user?: string; } -export interface DeleteFileResponse { +export interface ListModelsResponse { + object: string; + data: Model[]; +} + +/** Describes an OpenAI model offering that can be used with the API. */ +export interface Model { + /** The model identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "model". */ + object: "model"; + /** The Unix timestamp (in seconds) when the model was created. */ + created: Date; + /** The organization that owns the model. */ + ownedBy: string; +} + +export interface DeleteModelResponse { id: string; object: string; deleted: boolean; @@ -452,6 +297,38 @@ export interface FineTune { events?: FineTuneEvent[]; } +/** The `File` object represents a document that has been uploaded to OpenAI. */ +export interface OpenAIFile { + /** The file identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "file". */ + object: "file"; + /** The size of the file in bytes. */ + bytes: number; + /** The Unix timestamp (in seconds) for when the file was created. */ + createdAt: Date; + /** The name of the file. */ + filename: string; + /** The intended purpose of the file. Currently, only "fine-tune" is supported. */ + purpose: string; + /** + * The current status of the file, which can be either `uploaded`, `processed`, `pending`, + * `error`, `deleting` or `deleted`. + */ + status: + | "uploaded" + | "processed" + | "pending" + | "error" + | "deleting" + | "deleted"; + /** + * Additional details about the status of the file. If the file is in the `error` state, this will + * include a message describing the error. 
+ */ + statusDetails?: string | null; +} + export interface FineTuneEvent { object: string; createdAt: Date; @@ -469,236 +346,387 @@ export interface ListFineTuneEventsResponse { data: FineTuneEvent[]; } -export interface ListModelsResponse { +export interface ListFilesResponse { object: string; - data: Model[]; + data: OpenAIFile[]; } -/** Describes an OpenAI model offering that can be used with the API. */ -export interface Model { - /** The model identifier, which can be referenced in the API endpoints. */ - id: string; - /** The object type, which is always "model". */ - object: "model"; - /** The Unix timestamp (in seconds) when the model was created. */ - created: Date; - /** The organization that owns the model. */ - ownedBy: string; +export interface CreateFileRequest { + /** + * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. + * + * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + */ + file: Uint8Array; + /** + * The intended purpose of the uploaded documents. Use "fine-tune" for + * [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the + * uploaded file. + */ + purpose: string; } -export interface DeleteModelResponse { +export interface DeleteFileResponse { id: string; object: string; deleted: boolean; } -export interface CreateImageRequest { - /** A text description of the desired image(s). The maximum length is 1000 characters. */ - prompt: string; - /** The number of images to generate. Must be between 1 and 10. */ - n?: number | null; - /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ - size?: "256x256" | "512x512" | "1024x1024"; - /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ - responseFormat?: "url" | "b64_json"; +export interface CreateEmbeddingRequest { + /** ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. */ + model: string | "text-embedding-ada-002"; + /** + * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a + * single request, pass an array of strings or array of token arrays. Each input must not exceed + * the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. + */ + input: string | string[] | number[] | number[][]; user?: string; } -export interface ImagesResponse { +export interface CreateEmbeddingResponse { + /** The object type, which is always "embedding". */ + object: "embedding"; + /** The name of the model used to generate the embedding. */ + model: string; + /** The list of embeddings generated by the model. */ + data: Embedding[]; + /** The usage information for the request. */ + usage: { promptTokens: number; totalTokens: number }; +} + +/** Represents an embedding vector returned by embedding endpoint. */ +export interface Embedding { + /** The index of the embedding in the list of embeddings. */ + index: number; + /** The object type, which is always "embedding". */ + object: "embedding"; + /** + * The embedding vector, which is a list of floats. 
The length of vector depends on the model as + * listed in the [embedding guide](/docs/guides/embeddings). + */ + embedding: number[]; +} + +export interface CreateEditRequest { + /** + * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` + * model with this endpoint. + */ + model: string | "text-davinci-edit-001" | "code-davinci-edit-001"; + /** The input text to use as a starting point for the edit. */ + input?: string | null; + /** The instruction that tells the model how to edit the prompt. */ + instruction: string; + /** How many edits to generate for the input and instruction. */ + n?: number | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + temperature?: number | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + topP?: number | null; +} + +export interface CreateEditResponse { + /** The object type, which is always `edit`. */ + object: "edit"; + /** The Unix timestamp (in seconds) of when the edit was created. */ created: Date; - data: Image[]; + /** A list of edit choices. Can be more than one if `n` is greater than 1. */ + choices: { text: string; index: number; finishReason: "stop" | "length" }[]; + usage: CompletionUsage; } -/** Represents the url or the content of an image generated by the OpenAI API. */ -export interface Image { - /** The URL of the generated image, if `response_format` is `url` (default). */ - url?: string; - /** The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. */ - b64Json?: Uint8Array; +/** Usage statistics for the completion request. */ +export interface CompletionUsage { + /** Number of tokens in the prompt. */ + promptTokens: number; + /** Number of tokens in the generated completion */ + completionTokens: number; + /** Total number of tokens used in the request (prompt + completion). */ + totalTokens: number; } -export interface CreateImageEditRequest { - /** A text description of the desired image(s). The maximum length is 1000 characters. */ - prompt: string; +export interface CreateCompletionRequest { + /** + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to + * see all of your available models, or see our [Model overview](/docs/models/overview) for + * descriptions of them. + */ + model: + | string + | "babbage-002" + | "davinci-002" + | "text-davinci-003" + | "text-davinci-002" + | "text-davinci-001" + | "code-davinci-002" + | "text-curie-001" + | "text-babbage-001" + | "text-ada-001"; + /** + * The prompt(s) to generate completions for, encoded as a string, array of strings, array of + * tokens, or array of token arrays. + * + * Note that <|endoftext|> is the document separator that the model sees during training, so if a + * prompt is not specified the model will generate as if from the beginning of a new document. + */ + prompt: Prompt; + /** The suffix that comes after a completion of inserted text. */ + suffix?: string | null; /** - * The image to edit.
Must be a valid PNG file, less than 4MB, and square. If mask is not - * provided, image must have transparency, which will be used as the mask. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. + * + * We generally recommend altering this or `top_p` but not both. */ - image: Uint8Array; + temperature?: number | null; /** - * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where - * `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions - * as `image`. + * An alternative to sampling with temperature, called nucleus sampling, where the model considers + * the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising + * the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. */ - mask?: Uint8Array; - /** The number of images to generate. Must be between 1 and 10. */ - n?: number | null; - /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ - size?: "256x256" | "512x512" | "1024x1024"; - /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ - responseFormat?: "url" | "b64_json"; - user?: string; -} - -export interface CreateImageVariationRequest { + topP?: number | null; /** - * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, - * and square. + * How many completions to generate for each prompt. + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. */ - image: Uint8Array; - /** The number of images to generate. Must be between 1 and 10. */ n?: number | null; - /** The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ - size?: "256x256" | "512x512" | "1024x1024"; - /** The format in which the generated images are returned. Must be one of `url` or `b64_json`. */ - responseFormat?: "url" | "b64_json"; + /** + * The maximum number of [tokens](/tokenizer) to generate in the completion. + * + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + * for counting tokens. + */ + maxTokens?: number | null; + /** Up to 4 sequences where the API will stop generating further tokens. */ + stop?: Stop; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear + * in the text so far, increasing the model's likelihood to talk about new topics. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + presencePenalty?: number | null; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + * frequency in the text so far, decreasing the model's likelihood to repeat the same line + * verbatim. + * + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + */ + frequencyPenalty?: number | null; + /** + * Modify the likelihood of specified tokens appearing in the completion. 
+ * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an + * associated bias value from -100 to 100. Mathematically, the bias is added to the logits + * generated by the model prior to sampling. The exact effect will vary per model, but values + * between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 + * should result in a ban or exclusive selection of the relevant token. + */ + logitBias?: Record<string, number>; + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect + * abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + */ user?: string; - /** - * Two content moderation models are available: `text-moderation-stable` and - * `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically - * upgraded over time. This ensures you are always using our most accurate model. If you use - * `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy - * of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available, with the stream terminated by a `data: [DONE]` message. + * [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). */ - model?: string | "text-moderation-latest" | "text-moderation-stable"; + stream?: boolean | null; + /** + * Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. + * For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The + * API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` + * elements in the response. + * + * The maximum value for `logprobs` is 5. + */ + logprobs?: number | null; + /** Echo back the prompt in addition to the completion */ + echo?: boolean | null; + /** + * Generates `best_of` completions server-side and returns the "best" (the one with the highest + * log probability per token). Results cannot be streamed. + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies + * how many to return – `best_of` must be greater than `n`. + * + * **Note:** Because this parameter generates many completions, it can quickly consume your token + * quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + */ + bestOf?: number | null; } -export interface CreateModerationResponse { - /** The unique identifier for the moderation request. */ +/** + * Represents a completion response from the API. Note: both the streamed and non-streamed response + * objects share the same shape (unlike the chat endpoint). + */ +export interface CreateCompletionResponse { + /** A unique identifier for the completion. */ id: string; - /** The model used to generate the moderation results. */ + /** The object type, which is always `text_completion`. */ + object: string; + /** The Unix timestamp (in seconds) of when the completion was created. */ + created: Date; + /** The model used for the completion. */ model: string; - /** A list of moderation objects. */ - results: { - flagged: boolean; - categories: { - hate: boolean; - "hate/threatening": boolean; - harassment: boolean; - "harassment/threatening": boolean; - selfHarm: boolean; - "selfHarm/intent": boolean; - "selfHarm/instructive": boolean; - sexual: boolean; - "sexual/minors": boolean; - violence: boolean; - "violence/graphic": boolean; - }; - categoryScores: { - hate: number; - "hate/threatening": number; - harassment: number; - "harassment/threatening": number; - selfHarm: number; - "selfHarm/intent": number; - "selfHarm/instructive": number; - sexual: number; - "sexual/minors": number; - violence: number; - "violence/graphic": number; - }; + /** The list of completion choices the model generated for the input. */ + choices: { + index: number; + text: string; + logprobs: { + tokens: string[]; + tokenLogprobs: number[]; + topLogprobs: Record<string, number>[]; + textOffset: number[]; + } | null; + finishReason: "stop" | "length" | "content_filter"; }[]; + usage?: CompletionUsage; } -export interface CreateTranscriptionResponse { - text: string; +export interface CreateFineTuningJobRequest { + /** + * The ID of an uploaded file that contains training data. + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with + * the purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + trainingFile: string; + /** + * The ID of an uploaded file that contains validation data. + * + * If you provide this file, the data is used to generate validation metrics periodically during + * fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should + * not be present in both train and validation files. + * + * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose + * `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + validationFile?: string | null; + /** + * The name of the model to fine-tune. You can select one of the + * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + */ + model: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo"; + /** The hyperparameters used for the fine-tuning job. */ + hyperparameters?: { nEpochs?: "auto" | number }; + /** + * A string of up to 18 characters that will be added to your fine-tuned model name. + * + * For example, a `suffix` of "custom-model-name" would produce a model name like + * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + */ + suffix?: string | null; } -export interface CreateTranscriptionRequest { +export interface FineTuningJob { + /** The object identifier, which can be referenced in the API endpoints. */ + id: string; + /** The object type, which is always "fine_tuning.job". */ + object: "fine_tuning.job"; + /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */ + createdAt: Date; /** - * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, - * mpeg, mpga, m4a, ogg, wav, or webm. + * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be + * null if the fine-tuning job is still running. */ - file: Uint8Array; - /** ID of the model to use. Only `whisper-1` is currently available.
-export interface CreateTranscriptionResponse {
-  text: string;
+export interface CreateFineTuningJobRequest {
+  /**
+   * The ID of an uploaded file that contains training data.
+   *
+   * See [upload file](/docs/api-reference/files/upload) for how to upload a file.
+   *
+   * Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with
+   * the purpose `fine-tune`.
+   *
+   * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+   */
+  trainingFile: string;
+  /**
+   * The ID of an uploaded file that contains validation data.
+   *
+   * If you provide this file, the data is used to generate validation metrics periodically during
+   * fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should
+   * not be present in both train and validation files.
+   *
+   * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose
+   * `fine-tune`.
+   *
+   * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+   */
+  validationFile?: string | null;
+  /**
+   * The name of the model to fine-tune. You can select one of the
+   * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+   */
+  model: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo";
+  /** The hyperparameters used for the fine-tuning job. */
+  hyperparameters?: { nEpochs?: "auto" | number };
+  /**
+   * A string of up to 18 characters that will be added to your fine-tuned model name.
+   *
+   * For example, a `suffix` of "custom-model-name" would produce a model name like
+   * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+   */
+  suffix?: string | null;
 }
 
-export interface CreateTranscriptionRequest {
+export interface FineTuningJob {
+  /** The object identifier, which can be referenced in the API endpoints. */
+  id: string;
+  /** The object type, which is always "fine_tuning.job". */
+  object: "fine_tuning.job";
+  /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */
+  createdAt: Date;
   /**
-   * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4,
-   * mpeg, mpga, m4a, ogg, wav, or webm.
+   * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be
+   * null if the fine-tuning job is still running.
    */
-  file: Uint8Array;
-  /** ID of the model to use. Only `whisper-1` is currently available. */
-  model: string | "whisper-1";
+  finishedAt: Date | null;
+  /** The base model that is being fine-tuned. */
+  model: string;
   /**
-   * An optional text to guide the model's style or continue a previous audio segment. The
-   * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
+   * The name of the fine-tuned model that is being created. The value will be null if the
+   * fine-tuning job is still running.
    */
-  prompt?: string;
+  fineTunedModel: string | null;
+  /** The organization that owns the fine-tuning job. */
+  organizationId: string;
   /**
-   * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or
-   * vtt.
+   * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`,
+   * `succeeded`, `failed`, or `cancelled`.
    */
-  responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt";
-  /**
-   * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
-   * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
-   * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to
-   * automatically increase the temperature until certain thresholds are hit.
+  status:
+    | "created"
+    | "pending"
+    | "running"
+    | "succeeded"
+    | "failed"
+    | "cancelled";
+  /**
+   * The hyperparameters used for the fine-tuning job. See the
+   * [fine-tuning guide](/docs/guides/fine-tuning) for more details.
    */
-  temperature?: number;
+  hyperparameters: { nEpochs?: "auto" | number };
   /**
-   * The language of the input audio. Supplying the input language in
-   * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy
-   * and latency.
+   * The file ID used for training. You can retrieve the training data with the
+   * [Files API](/docs/api-reference/files/retrieve-contents).
    */
-  language?: string;
-}
-
-export interface CreateTranslationResponse {
-  text: string;
-}
-
-export interface CreateTranslationRequest {
+  trainingFile: string;
   /**
-   * The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4,
-   * mpeg, mpga, m4a, ogg, wav, or webm.
+   * The file ID used for validation. You can retrieve the validation results with the
+   * [Files API](/docs/api-reference/files/retrieve-contents).
    */
-  file: Uint8Array;
-  /** ID of the model to use. Only `whisper-1` is currently available. */
-  model: string | "whisper-1";
+  validationFile: string | null;
   /**
-   * An optional text to guide the model's style or continue a previous audio segment. The
-   * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
+   * The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the
+   * [Files API](/docs/api-reference/files/retrieve-contents).
    */
-  prompt?: string;
+  resultFiles: string[];
   /**
-   * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or
-   * vtt.
+   * The total number of billable tokens processed by this fine-tuning job. The value will be null
+   * if the fine-tuning job is still running.
    */
-  responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt";
+  trainedTokens: number | null;
   /**
-   * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
-   * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
-   * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to
-   * automatically increase the temperature until certain thresholds are hit.
+   * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the
+   * failure.
    */
-  temperature?: number;
+  error: { message?: string; code?: string; param?: string | null } | null;
 }
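 
The two fine-tuning shapes above pair naturally: the request names previously uploaded JSONL files, and the job object reports lifecycle state. A hedged sketch (the file IDs, model choice, and import path are assumptions for illustration):

import type { CreateFineTuningJobRequest, FineTuningJob } from "./models.js";

const jobRequest: CreateFineTuningJobRequest = {
  trainingFile: "file-abc123", // must already be uploaded with purpose `fine-tune`
  validationFile: null,
  model: "gpt-3.5-turbo",
  hyperparameters: { nEpochs: "auto" },
  suffix: "custom-model-name",
};

function isTerminal(job: FineTuningJob): boolean {
  // finishedAt and trainedTokens stay null until one of these states is reached.
  return job.status === "succeeded" || job.status === "failed" || job.status === "cancelled";
}
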
-/** Represents a chat completion response returned by model, based on the provided input. */
-export interface CreateChatCompletionResponse {
-  /** A unique identifier for the chat completion. */
-  id: string;
-  /** The object type, which is always `chat.completion`. */
+export interface ListPaginatedFineTuningJobsResponse {
   object: string;
-  /** The Unix timestamp (in seconds) of when the chat completion was created. */
-  created: Date;
-  /** The model used for the chat completion. */
-  model: string;
-  /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */
-  choices: {
-    index: number;
-    message: ChatCompletionResponseMessage;
-    finishReason: "stop" | "length" | "function_call" | "content_filter";
-  }[];
-  usage?: CompletionUsage;
+  data: FineTuningJob[];
+  hasMore: boolean;
 }
 
-export interface ChatCompletionResponseMessage {
-  /** The role of the author of this message. */
-  role: "system" | "user" | "assistant" | "function";
-  /** The contents of the message. */
-  content: string | null;
-  /** The name and arguments of a function that should be called, as generated by the model. */
-  functionCall?: { name: string; arguments: string };
+export interface ListFineTuningJobEventsResponse {
+  object: string;
+  data: FineTuningJobEvent[];
+}
+
+export interface FineTuningJobEvent {
+  id: string;
+  object: string;
+  createdAt: Date;
+  level: "info" | "warn" | "error";
+  message: string;
 }
 
 export interface CreateChatCompletionRequest {
@@ -847,128 +875,100 @@ export interface ChatCompletionFunctionCallOption {
   name: string;
 }
 
-export interface FineTuningJob {
-  /** The object identifier, which can be referenced in the API endpoints. */
+/** Represents a chat completion response returned by the model, based on the provided input. */
+export interface CreateChatCompletionResponse {
+  /** A unique identifier for the chat completion. */
   id: string;
-  /** The object type, which is always "fine_tuning.job". */
-  object: "fine_tuning.job";
-  /** The Unix timestamp (in seconds) for when the fine-tuning job was created. */
-  createdAt: Date;
-  /**
-   * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be
-   * null if the fine-tuning job is still running.
-   */
-  finishedAt: Date | null;
-  /** The base model that is being fine-tuned. */
+  /** The object type, which is always `chat.completion`. */
+  object: string;
+  /** The Unix timestamp (in seconds) of when the chat completion was created. */
+  created: Date;
+  /** The model used for the chat completion. */
   model: string;
+  /** A list of chat completion choices. Can be more than one if `n` is greater than 1. */
+  choices: {
+    index: number;
+    message: ChatCompletionResponseMessage;
+    finishReason: "stop" | "length" | "function_call" | "content_filter";
+  }[];
+  usage?: CompletionUsage;
+}
+
+export interface ChatCompletionResponseMessage {
+  /** The role of the author of this message. */
+  role: "system" | "user" | "assistant" | "function";
+  /** The contents of the message. */
+  content: string | null;
+  /** The name and arguments of a function that should be called, as generated by the model. */
+  functionCall?: { name: string; arguments: string };
+}
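 
Because `content` is null when the model opts to call a function, consumers branch on `functionCall` before reading text; its `arguments` field is a JSON-encoded string. A minimal sketch (the import path is assumed from this diff's generated layout):

import type { ChatCompletionResponseMessage } from "./models.js";

function handleMessage(message: ChatCompletionResponseMessage): void {
  if (message.functionCall) {
    const args = JSON.parse(message.functionCall.arguments) as Record<string, unknown>;
    console.log(`model requested ${message.functionCall.name}`, args);
  } else {
    console.log(message.content ?? "");
  }
}
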
+
+export interface CreateTranslationRequest {
   /**
-   * The name of the fine-tuned model that is being created. The value will be null if the
-   * fine-tuning job is still running.
-   */
-  fineTunedModel: string | null;
-  /** The organization that owns the fine-tuning job. */
-  organizationId: string;
-  /**
-   * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`,
-   * `succeeded`, `failed`, or `cancelled`.
-   */
-  status:
-    | "created"
-    | "pending"
-    | "running"
-    | "succeeded"
-    | "failed"
-    | "cancelled";
-  /**
-   * The hyperparameters used for the fine-tuning job. See the
-   * [fine-tuning guide](/docs/guides/fine-tuning) for more details.
-   */
-  hyperparameters: { nEpochs?: "auto" | number };
-  /**
-   * The file ID used for training. You can retrieve the training data with the
-   * [Files API](/docs/api-reference/files/retrieve-contents).
-   */
-  trainingFile: string;
-  /**
-   * The file ID used for validation. You can retrieve the validation results with the
-   * [Files API](/docs/api-reference/files/retrieve-contents).
+   * The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4,
+   * mpeg, mpga, m4a, ogg, wav, or webm.
    */
-  validationFile: string | null;
+  file: Uint8Array;
+  /** ID of the model to use. Only `whisper-1` is currently available. */
+  model: string | "whisper-1";
   /**
-   * The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the
-   * [Files API](/docs/api-reference/files/retrieve-contents).
+   * An optional text to guide the model's style or continue a previous audio segment. The
+   * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
    */
-  resultFiles: string[];
+  prompt?: string;
   /**
-   * The total number of billable tokens processed by this fine tuning job. The value will be null
-   * if the fine-tuning job is still running.
+   * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or
+   * vtt.
    */
-  trainedTokens: number | null;
+  responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt";
   /**
-   * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the
-   * failure.
+   * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
+   * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
+   * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+   * automatically increase the temperature until certain thresholds are hit.
    */
-  error: { message?: string; code?: string; param?: string | null } | null;
+  temperature?: number;
 }
 
-export interface CreateFineTuningJobRequest {
+export interface CreateTranslationResponse {
+  text: string;
+}
+
+export interface CreateTranscriptionRequest {
   /**
-   * The ID of an uploaded file that contains training data.
-   *
-   * See [upload file](/docs/api-reference/files/upload) for how to upload a file.
-   *
-   * Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with
-   * the purpose `fine-tune`.
-   *
-   * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+   * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4,
+   * mpeg, mpga, m4a, ogg, wav, or webm.
    */
-  trainingFile: string;
+  file: Uint8Array;
+  /** ID of the model to use. Only `whisper-1` is currently available. */
+  model: string | "whisper-1";
   /**
-   * The ID of an uploaded file that contains validation data.
-   *
-   * If you provide this file, the data is used to generate validation metrics periodically during
-   * fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should
-   * not be present in both train and validation files.
-   *
-   * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose
-   * `fine-tune`.
-   *
-   * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+   * An optional text to guide the model's style or continue a previous audio segment. The
+   * [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
    */
-  validationFile?: string | null;
+  prompt?: string;
   /**
-   * The name of the model to fine-tune. You can select one of the
-   * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+   * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or
+   * vtt.
    */
-  model: string | "babbage-002" | "davinci-002" | "gpt-3.5-turbo";
-  /** The hyperparameters used for the fine-tuning job. */
-  hyperparameters?: { nEpochs?: "auto" | number };
+  responseFormat?: "json" | "text" | "srt" | "verbose_json" | "vtt";
   /**
-   * A string of up to 18 characters that will be added to your fine-tuned model name.
-   *
-   * For example, a `suffix` of "custom-model-name" would produce a model name like
-   * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+   * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
+   * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
+   * the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+   * automatically increase the temperature until certain thresholds are hit.
    */
-  suffix?: string | null;
-}
-
-export interface ListPaginatedFineTuningJobsResponse {
-  object: string;
-  data: FineTuningJob[];
-  hasMore: boolean;
-}
-
-export interface ListFineTuningJobEventsResponse {
-  object: string;
-  data: FineTuningJobEvent[];
+  temperature?: number;
+  /**
+   * The language of the input audio. Supplying the input language in
+   * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy
+   * and latency.
+   */
+  language?: string;
 }
 
-export interface FineTuningJobEvent {
-  id: string;
-  object: string;
-  createdAt: Date;
-  level: "info" | "warn" | "error";
-  message: string;
+export interface CreateTranscriptionResponse {
+  text: string;
 }
 
 /** Alias for Prompt */
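A usage sketch for the transcription request above: it takes raw audio bytes rather than a file name, so the file is read into a `Uint8Array` first. The import paths follow this diff's generated layout and are otherwise assumptions:

import { readFile } from "node:fs/promises";
import type { CreateTranscriptionRequest } from "./models.js";

async function buildTranscriptionRequest(path: string): Promise<CreateTranscriptionRequest> {
  return {
    file: new Uint8Array(await readFile(path)),
    model: "whisper-1",
    responseFormat: "verbose_json",
    language: "en", // ISO-639-1 hint; improves accuracy and latency
  };
}
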
diff --git a/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/index.ts b/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/index.ts
index ff766bf951..ff0685b8a2 100644
--- a/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/index.ts
+++ b/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/index.ts
@@ -6,37 +6,37 @@ export {
   ValueTypesClientOptions,
 } from "./ValueTypesClient.js";
 export {
-  BooleanProperty,
-  StringProperty,
-  BytesProperty,
-  IntProperty,
-  FloatProperty,
-  DecimalProperty,
-  Decimal128Property,
-  DatetimeProperty,
-  DurationProperty,
-  EnumProperty,
-  FixedInnerEnum,
-  ExtensibleEnumProperty,
-  InnerEnum,
-  ModelProperty,
+  UnionFloatLiteralProperty,
+  UnionIntLiteralProperty,
+  UnionStringLiteralProperty,
+  BooleanLiteralProperty,
+  FloatLiteralProperty,
+  IntLiteralProperty,
+  StringLiteralProperty,
+  UnknownArrayProperty,
+  UnknownDictProperty,
+  UnknownIntProperty,
+  UnknownStringProperty,
+  NeverProperty,
+  DictionaryStringProperty,
+  CollectionsModelProperty,
   InnerModel,
-  CollectionsStringProperty,
   CollectionsIntProperty,
-  CollectionsModelProperty,
-  DictionaryStringProperty,
-  NeverProperty,
-  UnknownStringProperty,
-  UnknownIntProperty,
-  UnknownDictProperty,
-  UnknownArrayProperty,
-  StringLiteralProperty,
-  IntLiteralProperty,
-  FloatLiteralProperty,
-  BooleanLiteralProperty,
-  UnionStringLiteralProperty,
-  UnionIntLiteralProperty,
-  UnionFloatLiteralProperty,
+  CollectionsStringProperty,
+  ModelProperty,
+  ExtensibleEnumProperty,
+  InnerEnum,
+  EnumProperty,
+  FixedInnerEnum,
+  DurationProperty,
+  DatetimeProperty,
+  Decimal128Property,
+  DecimalProperty,
+  FloatProperty,
+  IntProperty,
+  BytesProperty,
+  StringProperty,
+  BooleanProperty,
   BooleanGetOptions,
   BooleanPutOptions,
   StringGetOptions,
diff --git a/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/models/index.ts b/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/models/index.ts
index 5281668999..c9faea2903 100644
--- a/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/models/index.ts
+++ b/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/models/index.ts
@@ -2,37 +2,37 @@
 // Licensed under the MIT license.
 
 export {
-  BooleanProperty,
-  StringProperty,
-  BytesProperty,
-  IntProperty,
-  FloatProperty,
-  DecimalProperty,
-  Decimal128Property,
-  DatetimeProperty,
-  DurationProperty,
-  EnumProperty,
-  FixedInnerEnum,
-  ExtensibleEnumProperty,
-  InnerEnum,
-  ModelProperty,
+  UnionFloatLiteralProperty,
+  UnionIntLiteralProperty,
+  UnionStringLiteralProperty,
+  BooleanLiteralProperty,
+  FloatLiteralProperty,
+  IntLiteralProperty,
+  StringLiteralProperty,
+  UnknownArrayProperty,
+  UnknownDictProperty,
+  UnknownIntProperty,
+  UnknownStringProperty,
+  NeverProperty,
+  DictionaryStringProperty,
+  CollectionsModelProperty,
   InnerModel,
-  CollectionsStringProperty,
   CollectionsIntProperty,
-  CollectionsModelProperty,
-  DictionaryStringProperty,
-  NeverProperty,
-  UnknownStringProperty,
-  UnknownIntProperty,
-  UnknownDictProperty,
-  UnknownArrayProperty,
-  StringLiteralProperty,
-  IntLiteralProperty,
-  FloatLiteralProperty,
-  BooleanLiteralProperty,
-  UnionStringLiteralProperty,
-  UnionIntLiteralProperty,
-  UnionFloatLiteralProperty,
+  CollectionsStringProperty,
+  ModelProperty,
+  ExtensibleEnumProperty,
+  InnerEnum,
+  EnumProperty,
+  FixedInnerEnum,
+  DurationProperty,
+  DatetimeProperty,
+  Decimal128Property,
+  DecimalProperty,
+  FloatProperty,
+  IntProperty,
+  BytesProperty,
+  StringProperty,
+  BooleanProperty,
 } from "./models.js";
 export {
   BooleanGetOptions,
diff --git a/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/models/models.ts b/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/models/models.ts
index ffbccf8d21..cd0ae4406a 100644
--- a/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/models/models.ts
+++ b/packages/typespec-ts/test/modularIntegration/generated/models/propertyTypes/generated/src/models/models.ts
@@ -1,84 +1,85 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT license.
 
-/** Model with a boolean property */
-export interface BooleanProperty {
+/** Model with a union of float literal as property. */
+export interface UnionFloatLiteralProperty {
   /** Property */
-  property: boolean;
+  property: 42.42 | 43.43;
 }
 
-/** Model with a string property */
-export interface StringProperty {
+/** Model with a union of int literal as property. */
+export interface UnionIntLiteralProperty {
   /** Property */
-  property: string;
+  property: 42 | 43;
 }
 
-/** Model with a bytes property */
-export interface BytesProperty {
+/** Model with a union of string literal as property. */
+export interface UnionStringLiteralProperty {
   /** Property */
-  property: Uint8Array;
+  property: "hello" | "world";
 }
 
-/** Model with a int property */
-export interface IntProperty {
+/** Model with a boolean literal property. */
+export interface BooleanLiteralProperty {
   /** Property */
-  property: number;
+  property: true;
 }
 
-/** Model with a float property */
-export interface FloatProperty {
+/** Model with a float literal property. */
+export interface FloatLiteralProperty {
   /** Property */
-  property: number;
+  property: 42.42;
 }
 
-/** Model with a decimal property */
-export interface DecimalProperty {
+/** Model with an int literal property. */
+export interface IntLiteralProperty {
   /** Property */
-  property: number;
+  property: 42;
 }
 
-/** Model with a decimal128 property */
-export interface Decimal128Property {
+/** Model with a string literal property. */
+export interface StringLiteralProperty {
   /** Property */
-  property: number;
+  property: "hello";
 }
 
-/** Model with a datetime property */
-export interface DatetimeProperty {
+/** Model with a property unknown, and the data is an array. */
+export interface UnknownArrayProperty {
   /** Property */
-  property: Date;
+  property: unknown;
 }
 
-/** Model with a duration property */
-export interface DurationProperty {
+/** Model with a property unknown, and the data is a dictionary. */
+export interface UnknownDictProperty {
   /** Property */
-  property: string;
+  property: unknown;
 }
 
-/** Model with enum properties */
-export interface EnumProperty {
+/** Model with a property unknown, and the data is an int32. */
+export interface UnknownIntProperty {
   /** Property */
-  property: FixedInnerEnum;
+  property: unknown;
 }
 
-/** Enum that will be used as a property for model EnumProperty. Non-extensible. */
-/** */
-export type FixedInnerEnum = "ValueOne" | "ValueTwo";
-
-/** Model with extensible enum properties */
-export interface ExtensibleEnumProperty {
+/** Model with a property unknown, and the data is a string. */
+export interface UnknownStringProperty {
   /** Property */
-  property: InnerEnum;
+  property: unknown;
 }
 
-/** Enum that will be used as a property for model EnumProperty. Non-extensible. */
-/** "ValueOne", "ValueTwo" */
-export type InnerEnum = string;
+/** Model with a property never. (This property should not be included). */
+export interface NeverProperty {}
 
-/** Model with model properties */
-export interface ModelProperty {
+/** Model with dictionary string properties */
+export interface DictionaryStringProperty {
   /** Property */
-  property: InnerModel;
+  property: Record<string, string>;
+}
+
+/** Model with collection model properties */
+export interface CollectionsModelProperty {
+  /** Property */
+  property: InnerModel[];
 }
 
 /** Inner model. Will be a property type for ModelWithModelProperties */
@@ -87,95 +88,94 @@ export interface InnerModel {
   property: string;
 }
 
-/** Model with collection string properties */
-export interface CollectionsStringProperty {
-  /** Property */
-  property: string[];
-}
-
 /** Model with collection int properties */
 export interface CollectionsIntProperty {
   /** Property */
   property: number[];
 }
 
-/** Model with collection model properties */
-export interface CollectionsModelProperty {
+/** Model with collection string properties */
+export interface CollectionsStringProperty {
   /** Property */
-  property: InnerModel[];
+  property: string[];
 }
 
-/** Model with dictionary string properties */
-export interface DictionaryStringProperty {
+/** Model with model properties */
+export interface ModelProperty {
   /** Property */
-  property: Record<string, string>;
+  property: InnerModel;
 }
 
-/** Model with a property never. (This property should not be included). */
-export interface NeverProperty {}
-
-/** Model with a property unknown, and the data is a string. */
-export interface UnknownStringProperty {
+/** Model with extensible enum properties */
+export interface ExtensibleEnumProperty {
   /** Property */
-  property: unknown;
+  property: InnerEnum;
 }
 
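The extensible enum referenced by `ExtensibleEnumProperty` is emitted as a plain `string` alias, so the documented values are hints rather than a closed set. A short sketch of what that permits (imports assumed from this diff's layout):

import type { InnerEnum, ExtensibleEnumProperty } from "./models.js";

const known: ExtensibleEnumProperty = { property: "ValueOne" };
const futureProof: InnerEnum = "ValueThree"; // still compiles: values outside the documented set are allowed
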
-/** Model with a property unknown, and the data is a int32. */
-export interface UnknownIntProperty {
+/** Enum that will be used as a property for model EnumProperty. Non-extensible. */
+/** "ValueOne", "ValueTwo" */
+export type InnerEnum = string;
+
+/** Model with enum properties */
+export interface EnumProperty {
   /** Property */
-  property: unknown;
+  property: FixedInnerEnum;
 }
 
-/** Model with a property unknown, and the data is a dictionnary. */
-export interface UnknownDictProperty {
+/** Enum that will be used as a property for model EnumProperty. Non-extensible. */
+/** */
+export type FixedInnerEnum = "ValueOne" | "ValueTwo";
+
+/** Model with a duration property */
+export interface DurationProperty {
   /** Property */
-  property: unknown;
+  property: string;
 }
 
-/** Model with a property unknown, and the data is an array. */
-export interface UnknownArrayProperty {
+/** Model with a datetime property */
+export interface DatetimeProperty {
   /** Property */
-  property: unknown;
+  property: Date;
 }
 
-/** Model with a string literal property. */
-export interface StringLiteralProperty {
+/** Model with a decimal128 property */
+export interface Decimal128Property {
   /** Property */
-  property: "hello";
+  property: number;
 }
 
-/** Model with a int literal property. */
-export interface IntLiteralProperty {
+/** Model with a decimal property */
+export interface DecimalProperty {
   /** Property */
-  property: 42;
+  property: number;
 }
 
-/** Model with a float literal property. */
-export interface FloatLiteralProperty {
+/** Model with a float property */
+export interface FloatProperty {
   /** Property */
-  property: 42.42;
+  property: number;
 }
 
-/** Model with a boolean literal property. */
-export interface BooleanLiteralProperty {
+/** Model with an int property */
+export interface IntProperty {
   /** Property */
-  property: true;
+  property: number;
 }
 
-/** Model with a union of string literal as property. */
-export interface UnionStringLiteralProperty {
+/** Model with a bytes property */
+export interface BytesProperty {
   /** Property */
-  property: "hello" | "world";
+  property: Uint8Array;
 }
 
-/** Model with a union of int literal as property. */
-export interface UnionIntLiteralProperty {
+/** Model with a string property */
+export interface StringProperty {
   /** Property */
-  property: 42 | 43;
+  property: string;
 }
 
-/** Model with a union of float literal as property. */
-export interface UnionFloatLiteralProperty {
+/** Model with a boolean property */
+export interface BooleanProperty {
   /** Property */
-  property: 42.42 | 43.43;
+  property: boolean;
 }
diff --git a/packages/typespec-ts/test/modularIntegration/generated/unions/src/index.ts b/packages/typespec-ts/test/modularIntegration/generated/unions/src/index.ts
index b3a0c27ec8..0a2a889f79 100644
--- a/packages/typespec-ts/test/modularIntegration/generated/unions/src/index.ts
+++ b/packages/typespec-ts/test/modularIntegration/generated/unions/src/index.ts
@@ -3,14 +3,14 @@
 export { UnionClient, UnionClientOptions } from "./UnionClient.js";
 export {
+  MixedTypesCases,
   Cat,
-  Dog,
+  MixedLiteralsCases,
+  StringAndArrayCases,
   EnumsOnlyCases,
   LR,
   UD,
-  StringAndArrayCases,
-  MixedLiteralsCases,
-  MixedTypesCases,
+  Dog,
   StringExtensibleNamedUnion,
   StringsOnlyGetOptions,
   StringsOnlySendOptions,
diff --git a/packages/typespec-ts/test/modularIntegration/generated/unions/src/models/index.ts b/packages/typespec-ts/test/modularIntegration/generated/unions/src/models/index.ts
index 0ab4524192..b52c63d0c5 100644
--- a/packages/typespec-ts/test/modularIntegration/generated/unions/src/models/index.ts
+++ b/packages/typespec-ts/test/modularIntegration/generated/unions/src/models/index.ts
@@ -2,14 +2,14 @@
 // Licensed under the MIT license.
 
 export {
+  MixedTypesCases,
   Cat,
-  Dog,
+  MixedLiteralsCases,
+  StringAndArrayCases,
   EnumsOnlyCases,
   LR,
   UD,
-  StringAndArrayCases,
-  MixedLiteralsCases,
-  MixedTypesCases,
+  Dog,
   StringExtensibleNamedUnion,
 } from "./models.js";
 export {
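The reordered exports above surface the mixed-union models defined in the next hunk; because every field of `MixedTypesCases` shares one union type, callers have to narrow at runtime to recover the intended variant. A sketch (the import path is assumed from this diff's layout):

import type { MixedTypesCases } from "./models.js";

function describeModel(cases: MixedTypesCases): string {
  const value = cases.model;
  // The union is Cat | "a" | number | boolean, so typeof checks fully discriminate it.
  if (typeof value === "object") return `cat named ${value.name}`;
  if (typeof value === "string") return `literal ${value}`;
  if (typeof value === "number") return `int ${value}`;
  return `boolean ${value}`;
}
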
diff --git a/packages/typespec-ts/test/modularIntegration/generated/unions/src/models/models.ts b/packages/typespec-ts/test/modularIntegration/generated/unions/src/models/models.ts
index 9da3e5c3ad..bed2cd9dcc 100644
--- a/packages/typespec-ts/test/modularIntegration/generated/unions/src/models/models.ts
+++ b/packages/typespec-ts/test/modularIntegration/generated/unions/src/models/models.ts
@@ -1,12 +1,37 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT license.
 
+export interface MixedTypesCases {
+  /** This should receive/send the Cat variant */
+  model: Cat | "a" | number | boolean;
+  /** This should receive/send the "a" variant */
+  literal: Cat | "a" | number | boolean;
+  /** This should receive/send the int variant */
+  int: Cat | "a" | number | boolean;
+  /** This should receive/send the boolean variant */
+  boolean: Cat | "a" | number | boolean;
+}
+
 export interface Cat {
   name: string;
 }
 
-export interface Dog {
-  bark: string;
+export interface MixedLiteralsCases {
+  /** This should receive/send the "a" variant */
+  stringLiteral: "a" | 2 | 3.3 | true;
+  /** This should receive/send the 2 variant */
+  intLiteral: "a" | 2 | 3.3 | true;
+  /** This should receive/send the 3.3 variant */
+  floatLiteral: "a" | 2 | 3.3 | true;
+  /** This should receive/send the true variant */
+  booleanLiteral: "a" | 2 | 3.3 | true;
+}
+
+export interface StringAndArrayCases {
+  /** This should receive/send the string variant */
+  string: string | string[];
+  /** This should receive/send the array variant */
+  array: string | string[];
 }
 
 export interface EnumsOnlyCases {
@@ -21,33 +46,8 @@ export type LR = string;
 /** "up", "down" */
 export type UD = string;
 
-export interface StringAndArrayCases {
-  /** This should be receive/send the string variant */
-  string: string | string[];
-  /** This should be receive/send the array variant */
-  array: string | string[];
-}
-
-export interface MixedLiteralsCases {
-  /** This should be receive/send the "a" variant */
-  stringLiteral: "a" | 2 | 3.3 | true;
-  /** This should be receive/send the 2 variant */
-  intLiteral: "a" | 2 | 3.3 | true;
-  /** This should be receive/send the 3.3 variant */
-  floatLiteral: "a" | 2 | 3.3 | true;
-  /** This should be receive/send the true variant */
-  booleanLiteral: "a" | 2 | 3.3 | true;
-}
-
-export interface MixedTypesCases {
-  /** This should be receive/send the Cat variant */
-  model: Cat | "a" | number | boolean;
-  /** This should be receive/send the "a" variant */
-  literal: Cat | "a" | number | boolean;
-  /** This should be receive/send the int variant */
-  int: Cat | "a" | number | boolean;
-  /** This should be receive/send the boolean variant */
-  boolean: Cat | "a" | number | boolean;
+export interface Dog {
+  bark: string;
 }
 
 /** Alias for StringExtensibleNamedUnion */