author (int64, 658 to 755k) | date (stringlengths, 19 to 19) | timezone (int64, -46,800 to 43.2k) | hash (stringlengths, 40 to 40) | message (stringlengths, 5 to 490) | mods (list) | language (stringclasses, 20 values) | license (stringclasses, 3 values) | repo (stringlengths, 5 to 68) | original_message (stringlengths, 12 to 491) |
---|---|---|---|---|---|---|---|---|---|
305,159 | 19.09.2022 07:56:42 | -7,200 | 577453e0b5153a68f2cf101620465b974c6d3aa2 | chore(apis): repair code blocks also in generated types | [
{
"change_type": "MODIFY",
"old_path": "packages/apis/package.json",
"new_path": "packages/apis/package.json",
"diff": "\"fetchSwaggerFiles\": \"node ./scripts/fetchSwaggerFiles.js\",\n\"generate\": \"yarn generate:clean && yarn generate:types && yarn generate:apis\",\n\"generate:clean\": \"rm -rf src/generated/*.ts\",\n- \"generate:types\": \"oats -i 'types' --storeOperations resources/operations.json --patchScript $PWD/scripts/patchSwagger.js resources/oss.yml resources/invocable-scripts.yml > src/generated/types.ts\",\n+ \"generate:types\": \"oats -i 'types' --storeOperations resources/operations.json --patchScript $PWD/scripts/patchSwagger.js resources/oss.yml resources/invocable-scripts.yml > src/generated/types.ts && node ../../scripts/repair-doc-code-blocks.js src/generated/types.ts\",\n\"generate:apis\": \"yarn esr generator && yarn prettier --write src/generated/*.ts\"\n},\n\"main\": \"dist/index.js\",\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(apis): repair code blocks also in generated types |
305,159 | 19.09.2022 08:06:40 | -7,200 | 68a168120d29a9de4ec1a69601cca66db8a7439b | feat(apis): do not allow eslint warnings in generated code | [
{
"change_type": "MODIFY",
"old_path": "packages/apis/package.json",
"new_path": "packages/apis/package.json",
"diff": "\"test:unit\": \"mocha --require esbuild-runner/register 'test/unit/**/*.test.ts' --exit\",\n\"test:ci\": \"yarn run typecheck && yarn run lint:ci && yarn run test:unit --reporter mocha-junit-reporter --reporter-options mochaFile=../../reports/apis_mocha/test-results.xml\",\n\"typecheck\": \"tsc --noEmit --pretty\",\n- \"lint\": \"eslint 'src/**/*.ts'\",\n+ \"lint\": \"eslint --max-warnings 0 'src/**/*.ts'\",\n\"lint:ci\": \"yarn run lint --format junit --output-file ../../reports/apis_eslint/eslint.xml\",\n\"lint:fix\": \"eslint --fix 'src/**/*.ts'\",\n\"regenerate\": \"yarn fetchSwaggerFiles && yarn generate && yarn test\",\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(apis): do not allow eslint warnings in generated code |
305,159 | 19.09.2022 11:54:31 | -7,200 | 62effb86efda332339b4c2e68896c03af187b81d | fix(core): repair deserialization of a missing boolean value | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/FluxTableColumn.ts",
"new_path": "packages/core/src/results/FluxTableColumn.ts",
"diff": "@@ -56,7 +56,7 @@ const identity = (x: string): any => x\n* See {@link https://docs.influxdata.com/influxdb/latest/reference/syntax/annotated-csv/#data-types }\n*/\nexport const typeSerializers: Record<ColumnType, (val: string) => any> = {\n- boolean: (x: string): any => x === 'true',\n+ boolean: (x: string): any => (x === '' ? null : x === 'true'),\nunsignedLong: (x: string): any => (x === '' ? null : +x),\nlong: (x: string): any => (x === '' ? null : +x),\ndouble(x: string): any {\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/results/FluxTableMetaData.test.ts",
"new_path": "packages/core/test/unit/results/FluxTableMetaData.test.ts",
"diff": "@@ -115,6 +115,7 @@ describe('FluxTableMetaData', () => {\nconst serializationTable: Array<[ColumnType | undefined, string, any]> = [\n['boolean', 'false', false],\n['boolean', 'true', true],\n+ ['boolean', '', null],\n['unsignedLong', '1', 1],\n['unsignedLong', '', null],\n['long', '1', 1],\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | fix(core): repair deserialization of a missing boolean value |
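The one-character diff above is easy to misread; a standalone sketch (a reconstruction of the two states, not the library source) shows why an empty annotated-CSV cell must map to `null`:

```typescript
// Before the fix: an empty cell (a missing value in annotated CSV)
// was coerced to false, indistinguishable from a real `false`.
const brokenBoolean = (x: string): any => x === 'true'

// After the fix: empty input means "no value" and becomes null,
// mirroring the existing long/unsignedLong serializers.
const fixedBoolean = (x: string): any => (x === '' ? null : x === 'true')

console.log(brokenBoolean('')) // false  <- the bug
console.log(fixedBoolean('')) // null
console.log(fixedBoolean('true'), fixedBoolean('false')) // true false
```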
305,159 | 20.09.2022 09:36:33 | -7,200 | e27b6b886861c5b6184ab89b2d232c6bc74d8236 | chore(docs): repair doc links to point to latest API docs | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -16,7 +16,7 @@ This repository contains the reference JavaScript client for InfluxDB 2.x. This\nThis section contains links to the client library documentation.\n-- [Product documentation](https://docs.influxdata.com/influxdb/v2.1/api-guide/client-libraries/nodejs/), [Getting Started](#usage)\n+- [Product documentation](https://docs.influxdata.com/influxdb/latest/api-guide/client-libraries/nodejs/), [Getting Started](#usage)\n- [Examples](examples#influxdb-client-examples)\n- [API Reference](https://influxdata.github.io/influxdb-client-js/influxdb-client.html)\n- [Changelog](CHANGELOG.md)\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/README.md",
"new_path": "examples/README.md",
"diff": "@@ -11,9 +11,9 @@ This directory contains javascript and typescript examples for node.js, browser,\n- [write.mjs](./write.mjs)\nWrite data points to InfluxDB.\n- [query.ts](./query.ts)\n- Query InfluxDB with [Flux](https://docs.influxdata.com/influxdb/v2.1/get-started/).\n+ Query InfluxDB with [Flux](https://docs.influxdata.com/influxdb/latest/get-started/).\n- [queryWithParams.ts](./queryWithParams.ts)\n- Supply parameters to a [Flux](https://docs.influxdata.com/influxdb/v2.1/get-started/) query.\n+ Supply parameters to a [Flux](https://docs.influxdata.com/influxdb/latest/get-started/) query.\n- [ping.mjs](./ping.mjs)\nCheck status of InfluxDB server.\n- [createBucket.mjs](./createBucket.mjs)\n@@ -23,7 +23,7 @@ This directory contains javascript and typescript examples for node.js, browser,\n- [influxdb-1.8.ts](./influxdb-1.8.ts)\nHow to use forward compatibility APIs from InfluxDB 1.8.\n- [rxjs-query.ts](./rxjs-query.ts)\n- Use [RxJS](https://rxjs.dev/) to query InfluxDB with [Flux](https://docs.influxdata.com/influxdb/v2.1/get-started/).\n+ Use [RxJS](https://rxjs.dev/) to query InfluxDB with [Flux](https://docs.influxdata.com/influxdb/latest/get-started/).\n- [writeAdvanced.mjs](./writeAdvanced.mjs)\nShows how to control the way of how data points are written to InfluxDB.\n- [follow-redirects.mjs](./follow-redirects.mjs)\n@@ -37,5 +37,5 @@ This directory contains javascript and typescript examples for node.js, browser,\nThe local HTTP server serves all files from this git repository and also proxies requests\nto a configured influxDB database, see [scripts/server.js](./scripts/server.js) for details.\n- Deno examples\n- - [query.deno.ts](./query.deno.ts) shows how to query InfluxDB with [Flux](https://docs.influxdata.com/influxdb/v2.1/get-started/).\n+ - [query.deno.ts](./query.deno.ts) shows how to query InfluxDB with [Flux](https://docs.influxdata.com/influxdb/latest/get-started/).\nIt is almost the same as node's [query.ts](./query.ts) example, the difference is the import statement that works in [deno](https://deno.land) and built-in typescript support.\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/ping.mjs",
"new_path": "examples/ping.mjs",
"diff": "This example shows how to check state InfluxDB instance.\nInfluxDB OSS APIs are available through '@influxdata/influxdb-client-apis' package.\n-See https://docs.influxdata.com/influxdb/v2.1/api/\n+See https://docs.influxdata.com/influxdb/latest/api/\n*/\nimport {InfluxDB} from '@influxdata/influxdb-client'\nimport {PingAPI} from '@influxdata/influxdb-client-apis'\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/apis/DEVELOPMENT.md",
"new_path": "packages/apis/DEVELOPMENT.md",
"diff": "# influxdb-client-apis\n-Contains generated client APIs for InfluxDB v2.1. See https://github.com/influxdata/influxdb-client-js to know more.\n+Contains generated client APIs for InfluxDB v2.x. See https://github.com/influxdata/influxdb-client-js to know more.\n## Build\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/apis/README.md",
"new_path": "packages/apis/README.md",
"diff": "# @influxdata/influxdb-client-apis\n-Contains client APIs for InfluxDB v2.1. See https://github.com/influxdata/influxdb-client-js to know more.\n+Contains client APIs for InfluxDB v2.x. See https://github.com/influxdata/influxdb-client-js to know more.\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(docs): repair doc links to point to latest API docs |
305,159 | 29.09.2022 03:43:32 | -7,200 | 7ccdd26aa5c7123ac4d70a983d3683cefadcbce4 | feat(examples): import browser configuration from a module | [
{
"change_type": "DELETE",
"old_path": "examples/env_browser.js",
"new_path": null,
"diff": "-/*\n- * The following configuration is used in the browser example.\n- */\n-// eslint-disable-next-line no-undef\n-window.INFLUX_ENV = {\n- /** InfluxDB v2 URL, '/influxdb' relies upon proxy to forward to the target influxDB */\n- url: '/influx', //'http://localhost:8086',\n- /** InfluxDB authorization token */\n- token: 'my-token',\n- /** InfluxDB organization */\n- org: 'my-org',\n- /** InfluxDB bucket used for onboarding and write requests. */\n- bucket: 'my-bucket',\n-\n- /** The following properties are used ONLY in the onboarding example */\n- /** InfluxDB user */\n- username: 'my-user',\n- /** InfluxDB password */\n- password: 'my-password',\n-}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "examples/env_browser.mjs",
"diff": "+/* This file contains InfluxDB configuration for the browser example. */\n+\n+/** InfluxDB v2 URL, '/influxdb' relies upon proxy to forward to the target influxDB */\n+export const url = '/influx' //'http://localhost:8086',\n+/** InfluxDB authorization token */\n+export const token = 'my-token'\n+/** InfluxDB organization */\n+export const org = 'my-org'\n+/** InfluxDB bucket used for onboarding and write requests. */\n+export const bucket = 'my-bucket'\n+\n+/** The following properties are used ONLY in the onboarding example */\n+/** InfluxDB user */\n+export const username = 'my-user'\n+/** InfluxDB password */\n+export const password = 'my-password'\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/index.html",
"new_path": "examples/index.html",
"diff": "// import {PingAPI, SetupAPI} from '../packages/apis/dist/index.browser.mjs'\n/**\n- * Import the configuration from ./env_browser.js.\n- * The property INFLUX_ENV.bucket is only used in onboardingExample() and writeExample().\n+ * Import InfluxDB configuration from ./env_browser.js.\n+ * The `bucket` property is only used in onboardingExample() and writeExample().\n* To prevent SQL injection attacks, the variable is not used within the Flux query examples.\n* The query examples assume your InfluxDB bucket is named \"my-bucket\".\n*/\n- import './env_browser.js'\n- const {url, token, org, bucket, username, password} = window.INFLUX_ENV\n+ import {url, token, org, bucket, username, password} from './env_browser.mjs'\nconst influxDB = new InfluxDB({url, token})\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(examples): import browser configuration from a module |
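A short usage sketch of the module-based configuration introduced above (the query call is illustrative; the point of the commit is the import style):

```typescript
// With env_browser.mjs exporting plain bindings, consumers import the
// configuration directly instead of reading a window.INFLUX_ENV global.
import {url, token, org} from './env_browser.mjs'
import {InfluxDB} from '@influxdata/influxdb-client'

const queryApi = new InfluxDB({url, token}).getQueryApi(org)
queryApi.queryRows('from(bucket:"my-bucket") |> range(start: -1d)', {
  next: (row, tableMeta) => console.log(tableMeta.toObject(row)),
  error: (e) => console.error(e),
  complete: () => console.log('done'),
})
```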
305,159 | 29.09.2022 03:44:34 | -7,200 | 56e54317e68b1e78f54fc7536a0107a220df99c4 | fix(examples): repair import in the browser example | [
{
"change_type": "MODIFY",
"old_path": "examples/index.html",
"new_path": "examples/index.html",
"diff": "import {\nPingAPI,\nSetupAPI,\n- } from 'https://unpkg.com/@influxdata/influxdb-client-apis/dist/index.browser.mjs'\n+ } from 'https://unpkg.com/@influxdata/influxdb-client-apis/dist/index.mjs'\n// or use the following imports to use local builds\n// import {InfluxDB, Point} from '../packages/core/dist/index.browser.mjs'\n// import {PingAPI, SetupAPI} from '../packages/apis/dist/index.browser.mjs'\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | fix(examples): repair import in the browser example |
305,159 | 29.09.2022 03:46:09 | -7,200 | 537d772df7f3c929b207c5d6d8667fd4376d0d0a | chore(examples): adjust readme to use env_browser.mjs | [
{
"change_type": "MODIFY",
"old_path": "examples/README.md",
"new_path": "examples/README.md",
"diff": "@@ -31,7 +31,7 @@ This directory contains javascript and typescript examples for node.js, browser,\n- [delete.ts](./delete.ts)\nShows how to delete data from a bucket.\n- Browser examples\n- - Change `token, org, bucket, username, password` variables in [./env_browser.js](env_browser.js) to match your InfluxDB instance\n+ - Change `token, org, bucket, username, password` variables in [./env_browser.mjs](env_browser.mjs) to match your InfluxDB instance\n- Run `npm run browser`\nIt starts a local HTTP server and opens [index.html](./index.html) that contains client examples.\nThe local HTTP server serves all files from this git repository and also proxies requests\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(examples): adjust readme to use env_browser.mjs |
305,159 | 29.09.2022 04:12:02 | -7,200 | 2766c4dc7ef8258d56add043c2a398509aa8e9eb | feat(examples): setup query according to configured bucket | [
{
"change_type": "MODIFY",
"old_path": "examples/index.html",
"new_path": "examples/index.html",
"diff": "import {\nInfluxDB,\nPoint,\n+ flux\n} from 'https://unpkg.com/@influxdata/influxdb-client/dist/index.browser.mjs'\nimport {\nPingAPI,\n// import {PingAPI, SetupAPI} from '../packages/apis/dist/index.browser.mjs'\n/**\n- * Import InfluxDB configuration from ./env_browser.js.\n- * The `bucket` property is only used in onboardingExample() and writeExample().\n- * To prevent SQL injection attacks, the variable is not used within the Flux query examples.\n- * The query examples assume your InfluxDB bucket is named \"my-bucket\".\n+ * Import InfluxDB configuration rather than inlining it.\n*/\nimport {url, token, org, bucket, username, password} from './env_browser.mjs'\nelse writeExample(number)\n})\nconst queryInput = document.getElementById('query')\n- const fluxQueryParam = new URLSearchParams(window.location.search).get(\n- 'fluxQuery'\n- )\n- if (fluxQueryParam) {\n- queryInput.value = fluxQueryParam\n- }\ndocument.getElementById('queryButton').addEventListener('click', () => {\nqueryExample(queryInput.value)\n})\ndocument.getElementById('pingButton').addEventListener('click', () => {\npingExample()\n})\n+ document.addEventListener('DOMContentLoaded', () => {\n+ const fluxQueryParam = new URLSearchParams(window.location.search).get('fluxQuery')\n+ if (fluxQueryParam) {\n+ queryInput.value = fluxQueryParam\n+ } else {\n+ queryInput.value =\n+ flux`from(bucket:${bucket}) |> range(start: -1d) |> filter(fn: (r) => r._measurement == \"temperature\")`.toString()\n+ }\n+ })\n</script>\n</head>\n<h1>InfluxDB JavaScript Client Examples</h1>\n</div>\n<hr />\n<div style=\"display: flex; margin-bottom: 10px\">\n- <textarea id=\"query\" style=\"flex: 1\" rows=\"2\">\n-from(bucket:\"my-bucket\") |> range(start: -1d) |> filter(fn: (r) => r._measurement == \"temperature\")</textarea\n- >\n+ <textarea id=\"query\" style=\"flex: 1\" rows=\"2\"></textarea>\n</div>\n<button id=\"queryButton\">Query InfluxDB</button>\n<hr />\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(examples): setup query according to configured bucket |
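The reworked example builds the default query with the `flux` tagged template instead of inlining the bucket name; a minimal sketch of what that buys (output shown as a comment, assuming the library's standard escaping of interpolated values):

```typescript
import {flux} from '@influxdata/influxdb-client'

const bucket = 'my-bucket'
// Interpolated values are escaped as Flux literals, so a configured
// bucket name cannot break out of the generated query string.
const query = flux`from(bucket:${bucket}) |> range(start: -1d)`
console.log(query.toString())
// from(bucket:"my-bucket") |> range(start: -1d)
```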
305,171 | 29.09.2022 10:55:23 | 18,000 | 5998f43dee7dcc2c92486391d57462dfcbcf3527 | chore(release): publish v1.30.0 [skip CI] | [
{
"change_type": "MODIFY",
"old_path": "lerna.json",
"new_path": "lerna.json",
"diff": "{\n- \"version\": \"1.29.0\",\n+ \"version\": \"1.30.0\",\n\"npmClient\": \"yarn\",\n\"packages\": [\n\"packages/*\"\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/apis/package.json",
"new_path": "packages/apis/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client-apis\",\n- \"version\": \"1.29.0\",\n+ \"version\": \"1.30.0\",\n\"description\": \"InfluxDB 2.x generated APIs\",\n\"scripts\": {\n\"apidoc:extract\": \"api-extractor run\",\n\"@influxdata/influxdb-client\": \"*\"\n},\n\"devDependencies\": {\n- \"@influxdata/influxdb-client\": \"^1.29.0\",\n+ \"@influxdata/influxdb-client\": \"^1.30.0\",\n\"@influxdata/oats\": \"^0.7.0\",\n\"@microsoft/api-extractor\": \"^7.31.0\",\n\"@types/mocha\": \"^9.1.1\",\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core-browser/package.json",
"new_path": "packages/core-browser/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client-browser\",\n- \"version\": \"1.29.0\",\n+ \"version\": \"1.30.0\",\n\"description\": \"InfluxDB 2.x client for browser\",\n\"scripts\": {\n\"apidoc:extract\": \"echo \\\"Nothing to do\\\"\",\n},\n\"license\": \"MIT\",\n\"devDependencies\": {\n- \"@influxdata/influxdb-client\": \"^1.29.0\",\n+ \"@influxdata/influxdb-client\": \"^1.30.0\",\n\"cpr\": \"^3.0.1\",\n\"rimraf\": \"^3.0.0\"\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/package.json",
"new_path": "packages/core/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client\",\n- \"version\": \"1.29.0\",\n+ \"version\": \"1.30.0\",\n\"description\": \"InfluxDB 2.x client\",\n\"scripts\": {\n\"apidoc:extract\": \"api-extractor run\",\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/giraffe/package.json",
"new_path": "packages/giraffe/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client-giraffe\",\n- \"version\": \"1.29.0\",\n+ \"version\": \"1.30.0\",\n\"description\": \"InfluxDB 2.x client - giraffe integration\",\n\"scripts\": {\n\"apidoc:extract\": \"api-extractor run\",\n\"license\": \"MIT\",\n\"devDependencies\": {\n\"@influxdata/giraffe\": \"*\",\n- \"@influxdata/influxdb-client\": \"^1.29.0\",\n+ \"@influxdata/influxdb-client\": \"^1.30.0\",\n\"@microsoft/api-extractor\": \"^7.31.0\",\n\"@types/chai\": \"^4.2.5\",\n\"@types/mocha\": \"^9.1.1\",\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(release): publish v1.30.0 [skip CI] |
305,171 | 29.09.2022 11:30:33 | 18,000 | bb3a52bbac84a5b35947afd998d09fd870748555 | chore: Undo package.json changes | [
{
"change_type": "MODIFY",
"old_path": "package.json",
"new_path": "package.json",
"diff": "},\n\"license\": \"MIT\",\n\"devDependencies\": {\n- \"@microsoft/api-documenter\": \"^7.19.12\",\n\"@types/node\": \"^18\",\n- \"@types/react\": \"^18.0.21\",\n+ \"@microsoft/api-documenter\": \"^7.19.12\",\n\"gh-pages\": \"^4.0.0\",\n\"lerna\": \"^5.0.0\",\n\"prettier\": \"^2.7.1\",\n\"rimraf\": \"^3.0.0\"\n- },\n- \"dependencies\": {\n- \"@influxdata/giraffe\": \"^2.36.1\",\n- \"@influxdata/influxdb-client\": \"^1.30.0\",\n- \"@microsoft/api-extractor\": \"^7.32.0\",\n- \"apidoc\": \"^0.53.0\",\n- \"cpr\": \"^3.0.1\",\n- \"react\": \"^18.2.0\",\n- \"tsup\": \"^6.2.3\"\n}\n}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore: Undo package.json changes |
305,159 | 21.09.2022 12:48:21 | -7,200 | e49e0298a4ed9701fdff2c4a43eda49aa5bb0e46 | feat(core): allow to pause and resume result observer | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/completeCommunicationObserver.ts",
"new_path": "packages/core/src/impl/completeCommunicationObserver.ts",
"diff": "@@ -2,7 +2,7 @@ import {CommunicationObserver, Headers} from '../results'\nexport default function completeCommunicationObserver(\ncallbacks: Partial<CommunicationObserver<any>> = {}\n-): Omit<Required<CommunicationObserver<any>>, 'useCancellable'> {\n+): Omit<Required<CommunicationObserver<any>>, 'useCancellable' | 'useResume'> {\nlet state = 0\nconst retVal = {\nnext: (data: any): void => {\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/CommunicationObserver.ts",
"new_path": "packages/core/src/results/CommunicationObserver.ts",
"diff": "@@ -22,8 +22,10 @@ export interface CommunicationObserver<T> {\n/**\n* Data chunk received, can be called multiple times.\n* @param data - data\n+ * @returns when `false` value is returned and {@link CommunicationObserver#useResume} is defined,\n+ * future calls to `next` are temporarily paused.\n*/\n- next(data: T): void\n+ next(data: T): void | false\n/**\n* Communication ended with an error.\n*/\n@@ -40,4 +42,11 @@ export interface CommunicationObserver<T> {\n* Setups cancelllable for this communication.\n*/\nuseCancellable?: (cancellable: Cancellable) => void\n+ /**\n+ * Setups a callback that resumes reading of next data, it is called whenever\n+ * {@link CommunicationObserver#next} returns `false`.\n+ *\n+ * @param resume - a function that will resume reading of next data when called\n+ */\n+ useResume?: (resume: () => void) => void\n}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): allow to pause and resume result observer |
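In consumer terms, the new contract looks roughly like this (a sketch against the eventual `void | boolean` signature adopted later in this series; `HIGH_WATER_MARK` and the buffer are illustrative names, not part of the library):

```typescript
import type {CommunicationObserver} from '@influxdata/influxdb-client'

const HIGH_WATER_MARK = 16 // illustrative threshold
const buffered: Uint8Array[] = []
let resume: (() => void) | undefined

const observer: CommunicationObserver<Uint8Array> = {
  next(chunk: Uint8Array): boolean {
    buffered.push(chunk)
    // returning false asks the transport to pause until resume() is called
    return buffered.length < HIGH_WATER_MARK
  },
  error: (e) => console.error('communication failed', e),
  complete: () => console.log('all chunks received'),
  useResume: (r) => (resume = r),
}
// `observer` would be handed to a transport send(...) call

// called by the slow consumer once `buffered` has been drained:
function onDrained(): void {
  const r = resume
  resume = undefined
  r?.()
}
```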
305,159 | 21.09.2022 12:50:19 | -7,200 | c327b8a012bccf737d3d04170da2f1f26c1e630b | feat(core): add resume/pause to node transport | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"new_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"diff": "@@ -26,8 +26,13 @@ const emptyBuffer = Buffer.allocUnsafe(0)\nclass CancellableImpl implements Cancellable {\nprivate cancelled = false\n+ public resume?: () => void\ncancel(): void {\nthis.cancelled = true\n+ if (this.resume) {\n+ this.resume()\n+ this.resume = undefined\n+ }\n}\nisCancelled(): boolean {\nreturn this.cancelled\n@@ -329,7 +334,16 @@ export class NodeHttpTransport implements Transport {\nif (cancellable.isCancelled()) {\nres.resume()\n} else {\n- listeners.next(data)\n+ if (listeners.next(data) === false && callbacks?.useResume) {\n+ // pause processing, the consumer signalizes that\n+ // it is not able to receive more data\n+ res.pause()\n+ const resume = () => {\n+ res.resume()\n+ }\n+ cancellable.resume = resume\n+ callbacks.useResume(resume)\n+ }\n}\n})\nresponseData.on('end', listeners.complete)\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): add resume/pause to node transport |
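The transport change leans on plain Node stream semantics; a self-contained sketch of the same pattern outside the library (the function name is mine):

```typescript
import type {Readable} from 'stream'

// pause() stops 'data' events until resume() is called, which is exactly
// the lever the transport pulls when a consumer's next() returns false.
function readWithBackpressure(
  res: Readable,
  next: (chunk: Buffer) => void | boolean,
  useResume: (resume: () => void) => void
): void {
  res.on('data', (chunk: Buffer) => {
    if (next(chunk) === false) {
      res.pause() // no more 'data' events for now
      useResume(() => res.resume()) // hand the consumer the resume lever
    }
  })
  res.on('end', () => console.log('stream fully read'))
}
```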
305,159 | 21.09.2022 12:51:08 | -7,200 | 8fd31e0e80166d457f5e67c1af524e631a26f139 | feat(core): add pause/resume to fetch transport | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/browser/FetchTransport.ts",
"new_path": "packages/core/src/impl/browser/FetchTransport.ts",
"diff": "@@ -65,12 +65,19 @@ export default class FetchTransport implements Transport {\nconst observer = completeCommunicationObserver(callbacks)\nlet cancelled = false\nlet signal = (options as any).signal\n+ let pausePromise: Promise<void> | undefined\n+ const resumeQuickly = () => {}\n+ let resume = resumeQuickly\nif (callbacks && callbacks.useCancellable) {\nconst controller = new AbortController()\nif (!signal) {\nsignal = controller.signal\noptions = {...(options as object), ...signal} as SendOptions\n}\n+ // resume data reading so that it can exit on abort signal\n+ signal.addEventListener('abort', () => {\n+ resume()\n+ })\ncallbacks.useCancellable({\ncancel() {\ncancelled = true\n@@ -126,8 +133,25 @@ export default class FetchTransport implements Transport {\nconst reader = response.body.getReader()\nlet chunk: ReadableStreamReadResult<Uint8Array>\ndo {\n+ if (pausePromise) {\n+ await pausePromise\n+ }\n+ if (cancelled) {\n+ break\n+ }\nchunk = await reader.read()\n- observer.next(chunk.value)\n+ if (\n+ observer.next(chunk.value) === false &&\n+ callbacks?.useResume\n+ ) {\n+ pausePromise = new Promise((resolve) => {\n+ resume = () => {\n+ resolve()\n+ pausePromise = undefined\n+ resume = resumeQuickly\n+ }\n+ })\n+ }\n} while (!chunk.done)\n} else if (response.arrayBuffer) {\nconst buffer = await response.arrayBuffer()\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): add pause/resume to fetch transport |
305,159 | 21.09.2022 13:11:24 | -7,200 | 24df4d18cd1fdaa5daf16dc788dd956d343959e4 | feat(core): adjust existing fetch transport test for extended signal handling | [
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/browser/FetchTransport.test.ts",
"new_path": "packages/core/test/unit/impl/browser/FetchTransport.test.ts",
"diff": "import FetchTransport from '../../../../src/impl/browser/FetchTransport'\nimport {expect} from 'chai'\n-import {removeFetchApi, emulateFetchApi} from './emulateBrowser'\n+import {\n+ removeFetchApi,\n+ emulateFetchApi,\n+ AbortController,\n+} from './emulateBrowser'\nimport sinon from 'sinon'\nimport {SendOptions, Cancellable} from '../../../../src'\nimport {CollectedLogs, collectLogging} from '../../../util'\n@@ -346,7 +350,7 @@ describe('FetchTransport', () => {\n{\nurl: 'customNext_cancelledWithSignal',\nbody: [Buffer.from('a'), Buffer.from('b')],\n- signal: {aborted: true},\n+ signal: new AbortController(true).signal,\ncallbacks: ((): void => {\nconst overriden = fakeCallbacks()\noverriden.useCancellable = sinon.spy((c: Cancellable): void => {\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/browser/emulateBrowser.ts",
"new_path": "packages/core/test/unit/impl/browser/emulateBrowser.ts",
"diff": "@@ -79,6 +79,23 @@ let beforeEmulation:\n| {fetch: any; abortController: any; textEncoder: any}\n| undefined\n+export class AbortController {\n+ private listeners: Array<() => void> = []\n+ signal = {\n+ aborted: false,\n+ addEventListener: (type: string, listener: () => void) => {\n+ this.listeners.push(listener)\n+ },\n+ }\n+ constructor(aborted = false) {\n+ this.signal.aborted = aborted\n+ }\n+ abort(): void {\n+ this.signal.aborted = true\n+ this.listeners.forEach((x) => x())\n+ }\n+}\n+\nexport function emulateFetchApi(\nspec: ResponseSpec,\nonRequest?: (options: any) => void\n@@ -89,15 +106,6 @@ export function emulateFetchApi(\n? Promise.reject(new Error(url))\n: Promise.resolve(createResponse(spec))\n}\n- class AbortController {\n- signal = {\n- aborted: false,\n- }\n- abort(): void {\n- this.signal.aborted = true\n- }\n- }\n-\nclass TextEncoder {\nencode(s: string): Uint8Array {\nreturn Buffer.from(s)\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): adjust existing fetch transport test for extended signal handling |
305,159 | 21.09.2022 13:12:31 | -7,200 | 5b1fe415454c62873114ce7215a04935ce7a1c28 | feat(core): assign useCancelable only if required | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/chunksToLines.ts",
"new_path": "packages/core/src/results/chunksToLines.ts",
"diff": "@@ -52,7 +52,7 @@ export function chunksToLines(\n}\n}\n- return {\n+ const retVal: CommunicationObserver<Uint8Array> = {\nnext(chunk: Uint8Array): void {\nif (finished) return\ntry {\n@@ -76,21 +76,22 @@ export function chunksToLines(\ntarget.complete()\n}\n},\n- useCancellable(cancellable: Cancellable): void {\n+ }\nif (target.useCancellable) {\n- // eslint-disable-next-line @typescript-eslint/no-this-alias\n- const self = this\n+ retVal.useCancellable = (cancellable: Cancellable) => {\n+ target.useCancellable &&\ntarget.useCancellable({\ncancel(): void {\ncancellable.cancel()\nprevious = undefined // do not emit more lines\n- self.complete()\n+ retVal.complete()\n},\nisCancelled(): boolean {\nreturn cancellable.isCancelled()\n},\n})\n}\n- },\n}\n+\n+ return retVal\n}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): assign useCancelable only if required |
305,159 | 21.09.2022 17:16:52 | -7,200 | 8d7f3cf5ee32a127db7dd6a5ceef7eec38c68a28 | feat(core): fail tests when conditions cannot be awaited | [
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/util/waitForCondition.ts",
"new_path": "packages/core/test/unit/util/waitForCondition.ts",
"diff": "@@ -20,6 +20,5 @@ export async function waitForCondition(\n}\nif (condition()) return\n}\n- // eslint-disable-next-line no-console\n- console.error(`WARN:waitForCondition: ${message}`)\n+ return Promise.reject(`WARN:waitForCondition: ${message}`)\n}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): fail tests when conditions cannot be awaited |
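Only the tail of the helper appears in the diff above; reconstructed in full under assumed retry parameters, the polling helper now rejects (failing the awaiting test) instead of merely logging:

```typescript
// Assumed overall shape (retry counts are illustrative); the diff above
// changes only the final lines: reject instead of console.error.
export async function waitForCondition(
  condition: () => unknown,
  message = 'condition not fulfilled',
  attempts = 100,
  intervalMillis = 10
): Promise<void> {
  for (let i = 0; i < attempts; i++) {
    await new Promise((resolve) => setTimeout(resolve, intervalMillis))
    if (condition()) return
  }
  return Promise.reject(`WARN:waitForCondition: ${message}`)
}
```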
305,159 | 21.09.2022 17:17:47 | -7,200 | a0ed79b6b2c0e0b8a47dd3b54e1d62dfdef5cdb7 | feat(core): forward useCancellable and useResume to make it complete | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/completeCommunicationObserver.ts",
"new_path": "packages/core/src/impl/completeCommunicationObserver.ts",
"diff": "import {CommunicationObserver, Headers} from '../results'\n+type CompleteObserver = Omit<\n+ Required<CommunicationObserver<any>>,\n+ 'useCancellable' | 'useResume'\n+> &\n+ Pick<CommunicationObserver<any>, 'useResume' | 'useCancellable'>\n+\nexport default function completeCommunicationObserver(\ncallbacks: Partial<CommunicationObserver<any>> = {}\n-): Omit<Required<CommunicationObserver<any>>, 'useCancellable' | 'useResume'> {\n+): CompleteObserver {\nlet state = 0\n- const retVal = {\n- next: (data: any): void => {\n+ const retVal: CompleteObserver = {\n+ next: (data: any): void | false => {\nif (\nstate === 0 &&\ncallbacks.next &&\ndata !== null &&\ndata !== undefined\n) {\n- callbacks.next(data)\n+ return callbacks.next(data)\n}\n},\nerror: (error: Error): void => {\n@@ -35,5 +41,11 @@ export default function completeCommunicationObserver(\ncallbacks.responseStarted(headers, statusCode)\n},\n}\n+ if (callbacks.useCancellable) {\n+ retVal.useCancellable = callbacks.useCancellable.bind(callbacks)\n+ }\n+ if (callbacks.useResume) {\n+ retVal.useResume = callbacks.useResume.bind(callbacks)\n+ }\nreturn retVal\n}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): forward useCancellable and useResume to make it complete |
305,159 | 21.09.2022 17:18:13 | -7,200 | d98b75735fb114e146d79388935e85611e43e054 | feat(core): test backpressure in node transport | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"new_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"diff": "@@ -334,15 +334,22 @@ export class NodeHttpTransport implements Transport {\nif (cancellable.isCancelled()) {\nres.resume()\n} else {\n- if (listeners.next(data) === false && callbacks?.useResume) {\n+ if (listeners.next(data) === false) {\n// pause processing, the consumer signalizes that\n// it is not able to receive more data\n+ if (!listeners.useResume) {\n+ listeners.error(\n+ new Error('Unable to pause, useResume is not configured!')\n+ )\n+ res.resume()\n+ return\n+ }\nres.pause()\nconst resume = () => {\nres.resume()\n}\ncancellable.resume = resume\n- callbacks.useResume(resume)\n+ listeners.useResume(resume)\n}\n}\n})\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"new_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"diff": "@@ -3,7 +3,7 @@ import nock from 'nock' // WARN: nock must be imported before NodeHttpTransport,\nimport NodeHttpTransport from '../../../../src/impl/node/NodeHttpTransport'\nimport {ConnectionOptions} from '../../../../src/options'\nimport {SendOptions} from '../../../../src/transport'\n-import {Cancellable} from '../../../../src/results'\n+import {Cancellable, CommunicationObserver} from '../../../../src/results'\nimport * as http from 'http'\nimport * as https from 'https'\nimport sinon from 'sinon'\n@@ -11,6 +11,8 @@ import {Readable} from 'stream'\nimport zlib from 'zlib'\nimport {CLIENT_LIB_VERSION} from '../../../../src/impl/version'\nimport {CollectedLogs, collectLogging} from '../../../util'\n+import {waitForCondition} from '../../util/waitForCondition'\n+import {AddressInfo} from 'net'\nfunction sendTestData(\nconnectionOptions: ConnectionOptions,\n@@ -500,6 +502,156 @@ describe('NodeHttpTransport', () => {\n})\n})\n})\n+ describe('send.backpressure', () => {\n+ let server: http.Server\n+ let url = ''\n+ before(async () => {\n+ await new Promise<void>((resolve) => {\n+ server = http.createServer()\n+ server.listen(() => {\n+ const addr = server.address() as AddressInfo\n+ url = `http://${addr.address}:${addr.port}`\n+ resolve()\n+ })\n+ })\n+ })\n+ after(() => {\n+ server.close()\n+ })\n+ afterEach(async () => {\n+ server.removeAllListeners('request')\n+ })\n+\n+ it(`it throws an error when paused and useResume is not set`, async () => {\n+ server.on('request', async (_req, res) => {\n+ res.setHeader('content-type', 'application/csv')\n+ res.writeHead(200)\n+ const writeUntilFull = () => {\n+ while (res.write('.'));\n+ }\n+ writeUntilFull()\n+ res.once('drain', () => {\n+ res.write('.')\n+ res.end()\n+ })\n+ })\n+ const observer: CommunicationObserver<Uint8Array> = {\n+ next(_chunk: Uint8Array) {\n+ // do not receive more than 1 chunk, but still\n+ // there is no useResume callback!\n+ return false\n+ },\n+ error() {},\n+ complete(): void {},\n+ }\n+ const spy = sinon.spy(observer)\n+\n+ new NodeHttpTransport({url, timeout: 10000}).send(\n+ '/test',\n+ '',\n+ {\n+ method: 'GET',\n+ },\n+ spy\n+ )\n+ // wait for resume being called\n+ await waitForCondition(() => spy.error.callCount === 1)\n+ expect(spy.next.callCount).equals(1)\n+ expect(spy.error.getCall(0).args[0]?.message).contains(\n+ 'useResume is not configured!'\n+ )\n+ })\n+\n+ it(`is paused after the first chunk, then cancelled`, async () => {\n+ let cancellable: Cancellable | undefined\n+ let resume: () => void | undefined\n+\n+ server.on('request', async (_req, res) => {\n+ res.setHeader('content-type', 'application/csv')\n+ res.writeHead(200)\n+ const writeUntilFull = () => {\n+ while (res.write('.'));\n+ }\n+ writeUntilFull()\n+ res.once('drain', () => writeUntilFull())\n+ res.once('drain', () => res.end())\n+ })\n+ const observer: CommunicationObserver<Uint8Array> = {\n+ next(_chunk: Uint8Array) {\n+ return false // do not receive more than 1 chunk\n+ },\n+ error() {},\n+ complete(): void {},\n+ useCancellable(c: Cancellable) {\n+ cancellable = c\n+ },\n+ useResume(r) {\n+ resume = r\n+ },\n+ }\n+ const spy = sinon.spy(observer)\n+\n+ new NodeHttpTransport({url, timeout: 10000}).send(\n+ '/test',\n+ '',\n+ {\n+ method: 'GET',\n+ },\n+ spy\n+ )\n+ // wait for resume being called\n+ await waitForCondition(() => cancellable && resume)\n+ expect(spy.next.callCount).equals(1)\n+ cancellable?.cancel()\n+ })\n+ it(`is paused after the second chunk and then read fully`, async () => {\n+ let resume: (() => void) | 
undefined\n+ let chunkNumber = 0\n+\n+ server.on('request', async (_req, res) => {\n+ res.setHeader('content-type', 'application/csv')\n+ res.writeHead(200)\n+ const writeUntilFull = () => {\n+ while (res.write('.'));\n+ }\n+ writeUntilFull()\n+ res.once('drain', () => {\n+ res.write('.')\n+ res.end()\n+ })\n+ })\n+ const observer: CommunicationObserver<Uint8Array> = {\n+ next(_chunk: Uint8Array) {\n+ return ++chunkNumber === 2 ? false : undefined // pause at 2nd chunk\n+ },\n+ error() {},\n+ complete(): void {},\n+ useResume(r) {\n+ resume = r\n+ },\n+ }\n+ const spy = sinon.spy(observer)\n+\n+ new NodeHttpTransport({url, timeout: 10000}).send(\n+ '/test',\n+ '',\n+ {\n+ method: 'GET',\n+ },\n+ spy\n+ )\n+ // wait for resume being called\n+ await waitForCondition(() => resume, 'resume callback is set')\n+ expect(spy.next.callCount).equals(2)\n+ expect(resume).is.not.null\n+ if (resume) resume()\n+ await waitForCondition(\n+ () => spy.complete.callCount === 1,\n+ 'response is fully read'\n+ )\n+ expect(spy.next.callCount).is.greaterThan(2)\n+ })\n+ })\ndescribe('request', () => {\nbeforeEach(() => {\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): test backpressure in node transport |
305,159 | 21.09.2022 17:33:58 | -7,200 | 60a74ae76790ece4fedf3625e7916a90ce0d3f5e | feat(core): throw error when pausing without resume | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/browser/FetchTransport.ts",
"new_path": "packages/core/src/impl/browser/FetchTransport.ts",
"diff": "@@ -140,10 +140,13 @@ export default class FetchTransport implements Transport {\nbreak\n}\nchunk = await reader.read()\n- if (\n- observer.next(chunk.value) === false &&\n- callbacks?.useResume\n- ) {\n+ if (observer.next(chunk.value) === false) {\n+ if (!observer.useResume) {\n+ await reader.cancel()\n+ return Promise.reject(\n+ new Error('Unable to pause, useResume is not configured!')\n+ )\n+ }\npausePromise = new Promise((resolve) => {\nresume = () => {\nresolve()\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): throw error when pausing without resume |
305,159 | 22.09.2022 06:00:50 | -7,200 | e94dc9b0daf412bc5f2e241fe3def008a4f5daca | feat(core): test backpressure in fetch transport | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/browser/FetchTransport.ts",
"new_path": "packages/core/src/impl/browser/FetchTransport.ts",
"diff": "@@ -141,11 +141,11 @@ export default class FetchTransport implements Transport {\n}\nchunk = await reader.read()\nif (observer.next(chunk.value) === false) {\n- if (!observer.useResume) {\n- await reader.cancel()\n- return Promise.reject(\n- new Error('Unable to pause, useResume is not configured!')\n- )\n+ const useResume = observer.useResume\n+ if (!useResume) {\n+ const msg = 'Unable to pause, useResume is not configured!'\n+ await reader.cancel(msg)\n+ return Promise.reject(new Error(msg))\n}\npausePromise = new Promise((resolve) => {\nresume = () => {\n@@ -153,6 +153,7 @@ export default class FetchTransport implements Transport {\npausePromise = undefined\nresume = resumeQuickly\n}\n+ useResume(resume)\n})\n}\n} while (!chunk.done)\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/browser/FetchTransport.test.ts",
"new_path": "packages/core/test/unit/impl/browser/FetchTransport.test.ts",
"diff": "@@ -6,8 +6,9 @@ import {\nAbortController,\n} from './emulateBrowser'\nimport sinon from 'sinon'\n-import {SendOptions, Cancellable} from '../../../../src'\n+import {SendOptions, Cancellable, CommunicationObserver} from '../../../../src'\nimport {CollectedLogs, collectLogging} from '../../../util'\n+import {waitForCondition} from '../../util/waitForCondition'\ndescribe('FetchTransport', () => {\nafterEach(() => {\n@@ -524,6 +525,108 @@ describe('FetchTransport', () => {\nexpect(request?.credentials).is.deep.equal('my-val')\n})\n})\n+ describe('send.backpressure', () => {\n+ it(`it throws an error when paused and useResume is not set`, async () => {\n+ emulateFetchApi({body: 'abc'.split('').map(Buffer.from)})\n+ const observer: CommunicationObserver<Uint8Array> = {\n+ next(_chunk: Uint8Array) {\n+ // do not receive more than 1 chunk, but still\n+ // there is no useResume callback!\n+ return false\n+ },\n+ error() {},\n+ complete(): void {},\n+ }\n+ const spy = sinon.spy(observer)\n+ new FetchTransport({url: '/test'}).send(\n+ '/test',\n+ '',\n+ {\n+ method: 'GET',\n+ },\n+ spy\n+ )\n+ // wait for error being called\n+ await waitForCondition(() => spy.error.callCount === 1)\n+ expect(spy.next.callCount).equals(1)\n+ expect(spy.error.getCall(0).args[0]?.message).contains(\n+ 'useResume is not configured!'\n+ )\n+ })\n+ it(`is paused after the first chunk, then cancelled`, async () => {\n+ let cancellable: Cancellable | undefined\n+ let resume: () => void | undefined\n+\n+ emulateFetchApi({body: 'abc'.split('').map(Buffer.from)})\n+ const observer: CommunicationObserver<Uint8Array> = {\n+ next(_chunk: Uint8Array) {\n+ return false // do not receive more than 1 chunk\n+ },\n+ error() {},\n+ complete(): void {},\n+ useCancellable(c: Cancellable) {\n+ cancellable = c\n+ },\n+ useResume(r) {\n+ resume = r\n+ },\n+ }\n+ const spy = sinon.spy(observer)\n+\n+ new FetchTransport({url: '/test'}).send(\n+ '/test',\n+ '',\n+ {\n+ method: 'GET',\n+ },\n+ spy\n+ )\n+ // wait for resume being called\n+ await waitForCondition(() => cancellable && resume)\n+ expect(spy.next.callCount).equals(1)\n+ cancellable?.cancel()\n+ })\n+ it.only(`is paused after the second chunk and then read fully`, async () => {\n+ let resume: (() => void) | undefined\n+ let chunkNumber = 0\n+ const responseBody = 'abcd'\n+\n+ emulateFetchApi({body: responseBody.split('').map(Buffer.from)})\n+ const observer: CommunicationObserver<Uint8Array> = {\n+ next(_chunk: Uint8Array) {\n+ return ++chunkNumber === 2 ? false : undefined // pause at 2nd chunk\n+ },\n+ error() {},\n+ complete(): void {},\n+ useResume(r) {\n+ resume = r\n+ },\n+ }\n+ const spy = sinon.spy(observer)\n+\n+ new FetchTransport({url: '/test'}).send(\n+ '/test',\n+ '',\n+ {\n+ method: 'GET',\n+ },\n+ spy\n+ )\n+ // wait for useResume being called\n+ await waitForCondition(() => resume, 'resume callback is set')\n+ expect(spy.next.callCount).equals(2)\n+ expect(resume).is.not.null\n+ if (resume) resume()\n+ await waitForCondition(\n+ () => spy.complete.callCount === 1,\n+ 'response is fully read'\n+ )\n+ expect(spy.next.callCount).equals(responseBody.length)\n+ expect(\n+ spy.next.args.reduce((acc, [body]) => acc + body.toString(), '')\n+ ).equals(responseBody)\n+ })\n+ })\ndescribe('chunkCombiner', () => {\nconst options = {url: 'http://test:8086'}\nconst chunkCombiner = new FetchTransport(options).chunkCombiner\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/browser/emulateBrowser.ts",
"new_path": "packages/core/test/unit/impl/browser/emulateBrowser.ts",
"diff": "@@ -68,6 +68,9 @@ function createResponse({\n})\n}\n},\n+ cancel(_msg = '') {\n+ /* read cancelled with an optional message*/\n+ },\n}\n},\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"new_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"diff": "@@ -554,7 +554,7 @@ describe('NodeHttpTransport', () => {\n},\nspy\n)\n- // wait for resume being called\n+ // wait for error being called\nawait waitForCondition(() => spy.error.callCount === 1)\nexpect(spy.next.callCount).equals(1)\nexpect(spy.error.getCall(0).args[0]?.message).contains(\n@@ -640,7 +640,7 @@ describe('NodeHttpTransport', () => {\n},\nspy\n)\n- // wait for resume being called\n+ // wait for useResume being called\nawait waitForCondition(() => resume, 'resume callback is set')\nexpect(spy.next.callCount).equals(2)\nexpect(resume).is.not.null\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): test backpressure in fetch transport |
305,159 | 26.09.2022 06:35:41 | -7,200 | 2f32c4af28e504599518fad25192357ce59c2149 | feat(core): change method signature to return boolean | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/completeCommunicationObserver.ts",
"new_path": "packages/core/src/impl/completeCommunicationObserver.ts",
"diff": "@@ -11,7 +11,7 @@ export default function completeCommunicationObserver(\n): CompleteObserver {\nlet state = 0\nconst retVal: CompleteObserver = {\n- next: (data: any): void | false => {\n+ next: (data: any): void | boolean => {\nif (\nstate === 0 &&\ncallbacks.next &&\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/CommunicationObserver.ts",
"new_path": "packages/core/src/results/CommunicationObserver.ts",
"diff": "@@ -23,9 +23,9 @@ export interface CommunicationObserver<T> {\n* Data chunk received, can be called multiple times.\n* @param data - data\n* @returns when `false` value is returned and {@link CommunicationObserver#useResume} is defined,\n- * future calls to `next` are temporarily paused.\n+ * future calls to `next` are paused until resume is called.\n*/\n- next(data: T): void | false\n+ next(data: T): void | boolean\n/**\n* Communication ended with an error.\n*/\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/browser/FetchTransport.test.ts",
"new_path": "packages/core/test/unit/impl/browser/FetchTransport.test.ts",
"diff": "@@ -586,7 +586,7 @@ describe('FetchTransport', () => {\nexpect(spy.next.callCount).equals(1)\ncancellable?.cancel()\n})\n- it.only(`is paused after the second chunk and then read fully`, async () => {\n+ it(`is paused after the second chunk and then read fully`, async () => {\nlet resume: (() => void) | undefined\nlet chunkNumber = 0\nconst responseBody = 'abcd'\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): change method signature to return boolean |
305,159 | 26.09.2022 13:56:13 | -7,200 | aa6b4b40d2ffb87e11353f997b9bead274fcb9f4 | feat(core): add pause/resume to lines transformation | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/chunksToLines.ts",
"new_path": "packages/core/src/results/chunksToLines.ts",
"diff": "@@ -17,13 +17,17 @@ export function chunksToLines(\nlet previous: Uint8Array | undefined\nlet finished = false\nlet quoted = false\n+ let paused = false\n+ let resumeChunks: (() => void) | undefined\nfunction bufferReceived(chunk: Uint8Array): void {\nlet index: number\nlet start = 0\nif (previous) {\n+ // inspect the whole remaining data upon empty chunk\n+ // empty chunk signalizes to restart of receiving\n+ index = chunk.length === 0 ? 0 : (previous as Uint8Array).length\nchunk = chunks.concat(previous, chunk)\n- index = (previous as Buffer).length\n} else {\nindex = 0\n}\n@@ -37,29 +41,50 @@ export function chunksToLines(\nif (finished) {\nreturn\n}\n- target.next(chunks.toUtf8String(chunk, start, end))\n+ paused = target.next(chunks.toUtf8String(chunk, start, end)) === false\nstart = index + 1\n+ if (paused) {\n+ break\n+ }\n}\n} else if (c === 34 /* \" */) {\nquoted = !quoted\n}\nindex++\n}\n- if (start < index) {\n- previous = chunks.copy(chunk, start, index)\n+ if (start < chunk.length) {\n+ previous = chunks.copy(chunk, start, chunk.length)\n} else {\nprevious = undefined\n}\n+ if (paused) {\n+ if (target.useResume) {\n+ target.useResume(() => {\n+ paused = false\n+ bufferReceived(new Uint8Array(0))\n+ })\n+ return\n+ }\n+ retVal.error(new Error('Unable to pause, useResume is not configured!'))\n+ paused = false // consume remaining data\n+ }\n+ if (resumeChunks) {\n+ resumeChunks()\n+ resumeChunks = undefined\n+ }\n}\nconst retVal: CommunicationObserver<Uint8Array> = {\n- next(chunk: Uint8Array): void {\n- if (finished) return\n+ next(chunk: Uint8Array): boolean {\n+ if (!finished) {\ntry {\nbufferReceived(chunk)\n+ return !paused\n} catch (e) {\nthis.error(e as Error)\n}\n+ }\n+ return true\n},\nerror(error: Error): void {\nif (!finished) {\n@@ -92,6 +117,11 @@ export function chunksToLines(\n})\n}\n}\n+ if (target.useResume) {\n+ retVal.useResume = (x: () => void) => {\n+ resumeChunks = x\n+ }\n+ }\nreturn retVal\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/results/chunksToLines.test.ts",
"new_path": "packages/core/test/unit/results/chunksToLines.test.ts",
"diff": "@@ -88,4 +88,82 @@ describe('chunksToLines', () => {\nexpect((cancellable as any)?.isCancelled()).equals(true)\nexpect(target.complete.callCount).equals(1)\n})\n+ it('can pause and resume processing', () => {\n+ let lines: string[] = []\n+ let chunksResume = sinon.spy((): void => {})\n+ const target = {\n+ next(line: string): boolean {\n+ lines.push(line)\n+ const paused = line.includes('pause')\n+ return !paused\n+ },\n+ error: sinon.fake(),\n+ complete: sinon.fake(),\n+ useResume: sinon.spy((_x: () => void): void => {}),\n+ }\n+ const subject = chunksToLines(target, nodeChunkCombiner)\n+ let nextVal = subject.next(Buffer.from('a\\npause\\nb\\nd', 'utf8'))\n+ subject?.useResume?.((chunksResume = sinon.spy((): void => {})))\n+ expect(nextVal).equals(false)\n+ expect(lines).deep.equals(['a', 'pause'])\n+ expect(target.error.callCount).equals(0)\n+ expect(target.complete.callCount).equals(0)\n+ expect(target.useResume.callCount).equals(1)\n+ expect(chunksResume.callCount).equals(0)\n+ target.useResume.args[0][0]() // resume\n+ expect(lines).deep.equals(['a', 'pause', 'b'])\n+ expect(target.error.callCount).equals(0)\n+ expect(target.complete.callCount).equals(0)\n+ expect(target.useResume.callCount).equals(1)\n+ expect(chunksResume.callCount).equals(1)\n+ lines = []\n+ nextVal = subject.next(Buffer.from('pause\\npause\\nf', 'utf8'))\n+ expect(nextVal).equals(false)\n+ subject?.useResume?.((chunksResume = sinon.spy((): void => {})))\n+ expect(lines).deep.equals(['dpause'])\n+ expect(target.error.callCount).equals(0)\n+ expect(target.complete.callCount).equals(0)\n+ expect(target.useResume.callCount).equals(2)\n+ expect(chunksResume.callCount).equals(0)\n+ target.useResume.args[1][0]() // resume\n+ expect(lines).deep.equals(['dpause', 'pause'])\n+ expect(target.error.callCount).equals(0)\n+ expect(target.complete.callCount).equals(0)\n+ expect(target.useResume.callCount).equals(3)\n+ expect(chunksResume.callCount).equals(0)\n+ target.useResume.args[2][0]() // resume\n+ expect(lines).deep.equals(['dpause', 'pause'])\n+ expect(target.error.callCount).equals(0)\n+ expect(target.complete.callCount).equals(0)\n+ expect(target.useResume.callCount).equals(3)\n+ expect(chunksResume.callCount).equals(1)\n+ subject.complete()\n+ expect(lines).deep.equals(['dpause', 'pause', 'f'])\n+ expect(target.error.callCount).equals(0)\n+ expect(target.complete.callCount).equals(1)\n+ expect(target.useResume.callCount).equals(3)\n+ })\n+ it('requires useResume', () => {\n+ const lines: string[] = []\n+ const target = {\n+ next(line: string): boolean {\n+ lines.push(line)\n+ const paused = line.includes('pause')\n+ return !paused\n+ },\n+ error: sinon.fake(),\n+ complete: sinon.fake(),\n+ }\n+ const subject = chunksToLines(target, nodeChunkCombiner)\n+ const nextVal = subject.next(Buffer.from('a\\npause\\nb\\nd', 'utf8'))\n+ expect(nextVal).equals(true)\n+ expect(lines).deep.equals(['a', 'pause'])\n+ expect(target.error.callCount).equals(1)\n+ expect(target.complete.callCount).equals(0)\n+ subject.next(Buffer.from('whatever'))\n+ subject.complete()\n+ expect(lines).deep.equals(['a', 'pause'])\n+ expect(target.error.callCount).equals(1)\n+ expect(target.complete.callCount).equals(0)\n+ })\n})\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): add pause/resume to lines transformation |
305,159 | 26.09.2022 14:55:54 | -7,200 | 9c9bc24c1cc3b1881d68c5862b6f9a70999c6cb2 | feat(core): add pause/resume to table transformation | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/FluxResultObserver.ts",
"new_path": "packages/core/src/results/FluxResultObserver.ts",
"diff": "@@ -7,8 +7,12 @@ import {FluxTableMetaData} from './FluxTableMetaData'\nexport interface FluxResultObserver<T> {\n/**\n* Inform about a next record in a table.\n+ * @param row - flux result\n+ * @param tableMeta - actual table metata for the row supplied\n+ * @returns when `false` value is returned and {@link FluxResultObserver#useResume} is defined,\n+ * future calls to `next` are paused until resume is called.\n*/\n- next(row: T, tableMeta: FluxTableMetaData): void\n+ next(row: T, tableMeta: FluxTableMetaData): void | boolean\n/**\n* Signalizes processing error.\n*/\n@@ -21,4 +25,12 @@ export interface FluxResultObserver<T> {\n* Setups cancellable that can abort flux result processing.\n*/\nuseCancellable?: (cancellable: Cancellable) => void\n+\n+ /**\n+ * Setups a callback that resumes reading of next data, it is called whenever\n+ * {@link FluxResultObserver#next} returns `false`.\n+ *\n+ * @param resume - a function that will resume reading of next data when called\n+ */\n+ useResume?: (resume: () => void) => void\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/linesToTables.ts",
"new_path": "packages/core/src/results/linesToTables.ts",
"diff": "import {CommunicationObserver} from './CommunicationObserver'\n-import {Cancellable} from './Cancellable'\nimport {LineSplitter} from './LineSplitter'\nimport {FluxResultObserver} from './FluxResultObserver'\nimport {\n@@ -21,11 +20,11 @@ export function linesToTables(\nlet expectMeta = true\nlet firstColumnIndex = 0\nlet lastMeta: FluxTableMetaData\n- return {\n+ const retVal: CommunicationObserver<string> = {\nerror(error: Error): void {\nconsumer.error(error)\n},\n- next(line: string): void {\n+ next(line: string): void | boolean {\nif (line === '') {\nexpectMeta = true\ncolumns = undefined\n@@ -67,15 +66,20 @@ export function linesToTables(\n}\n}\n} else {\n- consumer.next(values.slice(firstColumnIndex, size), lastMeta)\n+ return consumer.next(values.slice(firstColumnIndex, size), lastMeta)\n}\n}\n+ return true\n},\ncomplete(): void {\nconsumer.complete()\n},\n- useCancellable(cancellable: Cancellable): void {\n- if (consumer.useCancellable) consumer.useCancellable(cancellable)\n- },\n}\n+ if (consumer.useCancellable) {\n+ retVal.useCancellable = consumer.useCancellable.bind(consumer)\n+ }\n+ if (consumer.useResume) {\n+ retVal.useResume = consumer.useResume.bind(consumer)\n+ }\n+ return retVal\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/results/linesToTables.test.ts",
"new_path": "packages/core/test/unit/results/linesToTables.test.ts",
"diff": "@@ -6,6 +6,7 @@ import {\ncreateTextDecoderCombiner,\nlinesToTables,\n} from '../../../src/results'\n+import sinon from 'sinon'\ndescribe('linesToTables', () => {\nconst chunkCombiner = createTextDecoderCombiner()\n@@ -39,4 +40,40 @@ describe('linesToTables', () => {\ninput.error(new Error())\nexpect(target.failed).equals(1)\n})\n+ it('can pause and resume processing', () => {\n+ const rows: string[][] = []\n+ let chunksResume = sinon.spy((): void => {})\n+ const target = {\n+ next(row: string[]): boolean {\n+ rows.push(row)\n+ const paused = row[0].includes('pause')\n+ return !paused\n+ },\n+ error: sinon.fake(),\n+ complete: sinon.fake(),\n+ useResume: sinon.spy((_x: () => void): void => {}),\n+ }\n+ const subject = linesToTables(target)\n+ let nextVal = subject.next('a,b\\n')\n+ expect(nextVal).equals(true)\n+ nextVal = subject.next('pause,1')\n+ expect(nextVal).equals(false)\n+ subject?.useResume?.((chunksResume = sinon.spy((): void => {})))\n+ expect(nextVal).equals(false)\n+ expect(rows).deep.equals([['pause', '1']])\n+ expect(target.error.callCount).equals(0)\n+ expect(target.complete.callCount).equals(0)\n+ expect(target.useResume.callCount).equals(1)\n+ expect(chunksResume.callCount).equals(0)\n+ target.useResume.args[0][0]() // resume\n+ expect(chunksResume.callCount).equals(1)\n+ subject.next('ok,2')\n+ subject.complete()\n+ expect(rows).deep.equals([\n+ ['pause', '1'],\n+ ['ok', '2'],\n+ ])\n+ expect(target.error.callCount).equals(0)\n+ expect(target.complete.callCount).equals(1)\n+ })\n})\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): add pause/resume to table transformation |
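End to end, this commit lets a row consumer throttle a query; a sketch of the user-facing shape (connection values are placeholders and the flow-control threshold is mine, not a library constant):

```typescript
import {InfluxDB} from '@influxdata/influxdb-client'
import type {
  FluxResultObserver,
  FluxTableMetaData,
} from '@influxdata/influxdb-client'

const queryApi = new InfluxDB({
  url: 'http://localhost:8086', // placeholder connection values
  token: 'my-token',
}).getQueryApi('my-org')

let inFlight = 0
let resume: (() => void) | undefined
const consumer: FluxResultObserver<string[]> = {
  next(row: string[], tableMeta: FluxTableMetaData): boolean {
    handleSlowly(tableMeta.toObject(row))
    return ++inFlight < 100 // false pauses row delivery at 100 in flight
  },
  error: (e) => console.error(e),
  complete: () => console.log('query finished'),
  useResume: (r) => (resume = r),
}

function handleSlowly(record: Record<string, any>): void {
  setTimeout(() => {
    // work done; release backpressure once everything drained
    if (--inFlight === 0) {
      resume?.()
      resume = undefined
    }
  }, 10)
}

queryApi.queryRows('from(bucket:"my-bucket") |> range(start: -1d)', consumer)
```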
305,159 | 28.09.2022 06:56:15 | -7,200 | 7d56b7133357e8435bedb68842f66eb57927c9f3 | chore(core): improve test | [
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"new_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"diff": "@@ -573,7 +573,6 @@ describe('NodeHttpTransport', () => {\nwhile (res.write('.'));\n}\nwriteUntilFull()\n- res.once('drain', () => writeUntilFull())\nres.once('drain', () => res.end())\n})\nconst observer: CommunicationObserver<Uint8Array> = {\n@@ -603,6 +602,7 @@ describe('NodeHttpTransport', () => {\nawait waitForCondition(() => cancellable && resume)\nexpect(spy.next.callCount).equals(1)\ncancellable?.cancel()\n+ await waitForCondition(() => spy.complete.callCount == 1)\n})\nit(`is paused after the second chunk and then read fully`, async () => {\nlet resume: (() => void) | undefined\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(core): improve test |
305,159 | 30.09.2022 12:35:16 | -7,200 | f129a2ca3dccedefa0042863ccf1d712196b676a | fix(core): repair tsdoc links | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/CommunicationObserver.ts",
"new_path": "packages/core/src/results/CommunicationObserver.ts",
"diff": "@@ -22,7 +22,7 @@ export interface CommunicationObserver<T> {\n/**\n* Data chunk received, can be called multiple times.\n* @param data - data\n- * @returns when `false` value is returned and {@link CommunicationObserver#useResume} is defined,\n+ * @returns when `false` value is returned and {@link CommunicationObserver.useResume} is defined,\n* future calls to `next` are paused until resume is called.\n*/\nnext(data: T): void | boolean\n@@ -44,7 +44,7 @@ export interface CommunicationObserver<T> {\nuseCancellable?: (cancellable: Cancellable) => void\n/**\n* Setups a callback that resumes reading of next data, it is called whenever\n- * {@link CommunicationObserver#next} returns `false`.\n+ * {@link CommunicationObserver.next} returns `false`.\n*\n* @param resume - a function that will resume reading of next data when called\n*/\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/FluxResultObserver.ts",
"new_path": "packages/core/src/results/FluxResultObserver.ts",
"diff": "@@ -9,7 +9,7 @@ export interface FluxResultObserver<T> {\n* Inform about a next record in a table.\n* @param row - flux result\n* @param tableMeta - actual table metata for the row supplied\n- * @returns when `false` value is returned and {@link FluxResultObserver#useResume} is defined,\n+ * @returns when `false` value is returned and {@link FluxResultObserver.useResume} is defined,\n* future calls to `next` are paused until resume is called.\n*/\nnext(row: T, tableMeta: FluxTableMetaData): void | boolean\n@@ -28,7 +28,7 @@ export interface FluxResultObserver<T> {\n/**\n* Setups a callback that resumes reading of next data, it is called whenever\n- * {@link FluxResultObserver#next} returns `false`.\n+ * {@link FluxResultObserver.next} returns `false`.\n*\n* @param resume - a function that will resume reading of next data when called\n*/\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | fix(core): repair tsdoc links |
305,159 | 03.10.2022 06:26:44 | -7,200 | 05cdda7e51280b3caddf9381c0bdad640fb727be | chore(core): repair flickering test | [
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"new_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"diff": "@@ -615,6 +615,9 @@ describe('NodeHttpTransport', () => {\nwhile (res.write('.'));\n}\nwriteUntilFull()\n+ res.once('drain', () => {\n+ writeUntilFull()\n+ })\nres.once('drain', () => {\nres.write('.')\nres.end()\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(core): repair flickering test |
305,159 | 03.10.2022 09:22:34 | -7,200 | 43259bd37fda752a2b747cdfd1d775f07e4350bf | feat(core): add test utility to validate unhandled promise rejections | [
{
"change_type": "MODIFY",
"old_path": "packages/core/test/util.ts",
"new_path": "packages/core/test/util.ts",
"diff": "@@ -47,3 +47,21 @@ export const collectLogging = {\n}\n},\n}\n+\n+let rejections: Array<any> = []\n+function addRejection(e: any) {\n+ rejections.push(e)\n+}\n+\n+/**\n+ * A simple guerd used by tests to check no unhandled promise rejection occurs.\n+ */\n+export const unhandledRejections = {\n+ before(): void {\n+ rejections = []\n+ process.on('unhandledRejection', addRejection)\n+ },\n+ after(): void {\n+ process.off('unhandledRejection', addRejection)\n+ },\n+}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): add test utility to validate unhandled promise rejections |
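A sketch of how this guard is intended to be wired into a mocha suite (a hypothetical test file; the relative import path and the failing assertion in `after()` come from the follow-up commits below):

```typescript
// Hypothetical mocha usage of the unhandledRejections guard added above;
// mocha's describe/beforeEach/afterEach/it globals are assumed.
import {unhandledRejections} from '../util'

describe('async feature', () => {
  // start listening for process-level 'unhandledRejection' events
  beforeEach(() => unhandledRejections.before())
  // stop listening (and, after the follow-up commit, fail on any rejection)
  afterEach(() => unhandledRejections.after())

  it('handles every rejected promise', async () => {
    // a rejection with an attached catch handler does not trip the guard
    await Promise.reject(new Error('boom')).catch(() => undefined)
  })
})
```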
305,159 | 03.10.2022 09:28:05 | -7,200 | 67461c08312cd803faa236b954ffbc092ec1cf67 | feat(core): check unhandled rejections in write tests | [
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/WriteApi.test.ts",
"new_path": "packages/core/test/unit/WriteApi.test.ts",
"diff": "@@ -11,7 +11,7 @@ import {\nDEFAULT_WriteOptions,\nPointSettings,\n} from '../../src'\n-import {collectLogging, CollectedLogs} from '../util'\n+import {collectLogging, CollectedLogs, unhandledRejections} from '../util'\nimport {Log} from '../../src/util/logger'\nimport {waitForCondition} from './util/waitForCondition'\nimport zlib from 'zlib'\n@@ -61,10 +61,12 @@ function createWriteCounters(): WriteListeners {\ndescribe('WriteApi', () => {\nbeforeEach(() => {\nnock.disableNetConnect()\n+ unhandledRejections.before()\n})\nafterEach(() => {\nnock.cleanAll()\nnock.enableNetConnect()\n+ unhandledRejections.after()\n})\ndescribe('simple', () => {\nlet subject: WriteApi\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/RetryBuffer.test.ts",
"new_path": "packages/core/test/unit/impl/RetryBuffer.test.ts",
"diff": "import {expect} from 'chai'\nimport RetryBuffer from '../../../src/impl/RetryBuffer'\n-import {CollectedLogs, collectLogging} from '../../util'\n+import {CollectedLogs, collectLogging, unhandledRejections} from '../../util'\nimport {waitForCondition} from '../util/waitForCondition'\ndescribe('RetryBuffer', () => {\n@@ -8,9 +8,11 @@ describe('RetryBuffer', () => {\nbeforeEach(() => {\nlogs = collectLogging.decorate()\n// logs = collectLogging.replace()\n+ unhandledRejections.before()\n})\nafterEach(async () => {\ncollectLogging.after()\n+ unhandledRejections.after()\n})\nit('stores lines for future retry', async () => {\nconst input = [] as Array<[string[], number]>\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/util.ts",
"new_path": "packages/core/test/util.ts",
"diff": "+import {expect} from 'chai'\nimport {setLogger} from '../src/util/logger'\nlet previous: any\n@@ -54,7 +55,7 @@ function addRejection(e: any) {\n}\n/**\n- * A simple guerd used by tests to check no unhandled promise rejection occurs.\n+ * Used by unit tests to check that no unhandled promise rejection occurs.\n*/\nexport const unhandledRejections = {\nbefore(): void {\n@@ -63,5 +64,6 @@ export const unhandledRejections = {\n},\nafter(): void {\nprocess.off('unhandledRejection', addRejection)\n+ expect(rejections, 'Unhandled Promise rejections detected').deep.equals([])\n},\n}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): check unhandled rejections in write tests |
305,159 | 03.10.2022 09:34:28 | -7,200 | 14fdf59a06ffab84d1de8ca72f758eda7e655041 | fix(core): fix write retry to not throw unhandled promise rejection | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/RetryBuffer.ts",
"new_path": "packages/core/src/impl/RetryBuffer.ts",
"diff": "@@ -130,11 +130,11 @@ export default class RetryBuffer {\nthis._timeoutHandle = setTimeout(() => {\nconst toRetry = this.removeLines()\nif (toRetry) {\n- this.retryLines(\n- toRetry.lines,\n- toRetry.retryCount,\n- toRetry.expires\n- ).finally(() => {\n+ this.retryLines(toRetry.lines, toRetry.retryCount, toRetry.expires)\n+ .catch(() => {\n+ /* error is already logged */\n+ })\n+ .finally(() => {\n// schedule next retry execution\nif (this.first) {\nthis.scheduleRetry(this.first.retryTime - Date.now())\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | fix(core): fix write retry to not throw unhandled promise rejection |
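Reduced to a standalone sketch, the fix relies on the fact that chaining only `.finally()` onto a promise does not mark its rejection as handled, so a no-op `.catch()` must come first (the names below are illustrative stand-ins for the RetryBuffer internals):

```typescript
// Minimal sketch of the pattern applied in RetryBuffer above; retryLines and
// scheduleNextRetry stand in for the real methods.
function retryThenReschedule(
  retryLines: () => Promise<void>,
  scheduleNextRetry: () => void
): void {
  retryLines()
    .catch(() => {
      /* the error is already logged by the retry path; swallowing it here
         prevents an 'unhandledRejection' process event */
    })
    .finally(() => {
      scheduleNextRetry() // continue the schedule whether the retry failed or not
    })
}
```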
305,159 | 30.09.2022 07:35:06 | -7,200 | a1b2282fd0410619f529ac92b5863e37af356036 | chore: simplify code with stream pipeline | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"new_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"diff": "@@ -17,6 +17,7 @@ import zlib from 'zlib'\nimport completeCommunicationObserver from '../completeCommunicationObserver'\nimport {CLIENT_LIB_VERSION} from '../version'\nimport {Log} from '../../util/logger'\n+import {pipeline} from 'stream'\nconst zlibOptions = {\nflush: zlib.constants.Z_SYNC_FLUSH,\n@@ -298,8 +299,11 @@ export class NodeHttpTransport implements Transport {\nlet responseData\nif (contentEncoding === 'gzip') {\nresponseData = zlib.createGunzip(zlibOptions)\n- responseData.on('error', listeners.error)\n- res.pipe(responseData)\n+ responseData = pipeline(\n+ res,\n+ responseData,\n+ (e) => e && listeners.error(e)\n+ )\n} else {\nresponseData = res\n}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore: simplify code with stream pipeline |
305,159 | 30.09.2022 11:59:00 | -7,200 | e0dcb7a5e93a177b29d8410f807976dc0601c447 | feat(core): add async iterator to node transport | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"new_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"diff": "@@ -17,7 +17,7 @@ import zlib from 'zlib'\nimport completeCommunicationObserver from '../completeCommunicationObserver'\nimport {CLIENT_LIB_VERSION} from '../version'\nimport {Log} from '../../util/logger'\n-import {pipeline} from 'stream'\n+import {pipeline, Readable} from 'stream'\nconst zlibOptions = {\nflush: zlib.constants.Z_SYNC_FLUSH,\n@@ -138,7 +138,10 @@ export class NodeHttpTransport implements Transport {\nconst cancellable = new CancellableImpl()\nif (callbacks && callbacks.useCancellable)\ncallbacks.useCancellable(cancellable)\n- this.createRequestMessage(path, body, options).then(\n+ this.createRequestMessage(\n+ path,\n+ body,\n+ options,\n(message: {[key: string]: any}) => {\nthis._request(message, cancellable, callbacks)\n},\n@@ -214,6 +217,50 @@ export class NodeHttpTransport implements Transport {\n})\n}\n+ async *iterate(\n+ path: string,\n+ body: string,\n+ options: SendOptions\n+ ): AsyncIterableIterator<Uint8Array> {\n+ let terminationError: Error | undefined = undefined\n+ let nestedReject: (e: Error) => void\n+ function wrapReject(error: Error) {\n+ terminationError = error\n+ nestedReject(error)\n+ }\n+ const requestMessage = await new Promise<Record<string, any>>(\n+ (resolve, reject) => {\n+ nestedReject = reject\n+ this.createRequestMessage(path, body, options, resolve, wrapReject)\n+ }\n+ )\n+ if (requestMessage.signal?.addEventListener) {\n+ ;(requestMessage.signal as AbortSignal).addEventListener('abort', () => {\n+ wrapReject(new AbortError())\n+ })\n+ }\n+ const response = await new Promise<http.IncomingMessage>(\n+ (resolve, reject) => {\n+ nestedReject = reject\n+ const req = this.requestApi(requestMessage, resolve)\n+ req.on('timeout', () => wrapReject(new RequestTimedOutError()))\n+ req.on('error', wrapReject)\n+\n+ req.write(requestMessage.body)\n+ req.end()\n+ }\n+ )\n+ const res = await new Promise<Readable>((resolve, reject) => {\n+ nestedReject = reject\n+ this._prepareResponse(response, resolve, wrapReject)\n+ })\n+ for await (const chunk of res) {\n+ if (terminationError) {\n+ throw terminationError\n+ }\n+ yield chunk\n+ }\n+ }\n/**\n* Creates configuration for a specific request.\n*\n@@ -226,8 +273,10 @@ export class NodeHttpTransport implements Transport {\nprivate createRequestMessage(\npath: string,\nbody: string,\n- sendOptions: SendOptions\n- ): Promise<{[key: string]: any}> {\n+ sendOptions: SendOptions,\n+ resolve: (req: http.RequestOptions) => void,\n+ reject: (err: Error) => void\n+ ): void {\nconst bodyBuffer = Buffer.from(body, 'utf-8')\nconst headers: {[key: string]: any} = {\n'content-type': 'application/json; charset=utf-8',\n@@ -236,7 +285,6 @@ export class NodeHttpTransport implements Transport {\nif (this.token) {\nheaders.authorization = 'Token ' + this.token\n}\n- let bodyPromise = Promise.resolve(bodyBuffer)\nconst options: {[key: string]: any} = {\n...this.defaultOptions,\npath: this.contextPath + path,\n@@ -250,60 +298,38 @@ export class NodeHttpTransport implements Transport {\nsendOptions.gzipThreshold !== undefined &&\nsendOptions.gzipThreshold < bodyBuffer.length\n) {\n- bodyPromise = bodyPromise.then((body) => {\n- return new Promise((resolve, reject) => {\n- zlib.gzip(body, (err, res) => {\n+ zlib.gzip(bodyBuffer, (err, res) => {\n/* istanbul ignore next - hard to simulate failure, manually reviewed */\nif (err) {\nreturn reject(err)\n}\noptions.headers['content-encoding'] = 'gzip'\n- return resolve(res)\n- })\n+ options.body = res\n+ resolve(options)\n})\n- })\n- }\n-\n- return 
bodyPromise.then((bodyBuffer) => {\n+ } else {\n options.body = bodyBuffer\n- options.headers['content-length'] = bodyBuffer.length\n- return options\n- })\n+ options.headers['content-length'] = options.body.length\n+ resolve(options)\n}\n-\n- private _request(\n- requestMessage: {[key: string]: any},\n- cancellable: CancellableImpl,\n- callbacks?: Partial<CommunicationObserver<any>>\n- ): void {\n- const listeners = completeCommunicationObserver(callbacks)\n- if (cancellable.isCancelled()) {\n- listeners.complete()\n- return\n- }\n- const req = this.requestApi(requestMessage, (res: http.IncomingMessage) => {\n- /* istanbul ignore next - hard to simulate failure, manually reviewed */\n- if (cancellable.isCancelled()) {\n- res.resume()\n- listeners.complete()\n- return\n}\n+\n+ private _prepareResponse(\n+ res: http.IncomingMessage,\n+ resolve: (res: Readable) => void,\n+ reject: (err: Error) => void\n+ ) {\nres.on('aborted', () => {\n- listeners.error(new AbortError())\n+ reject(new AbortError())\n})\n- res.on('error', listeners.error)\n- listeners.responseStarted(res.headers, res.statusCode)\n+ res.on('error', reject)\n/* istanbul ignore next statusCode is optional in http.IncomingMessage */\nconst statusCode = res.statusCode ?? 600\nconst contentEncoding = res.headers['content-encoding']\nlet responseData\nif (contentEncoding === 'gzip') {\nresponseData = zlib.createGunzip(zlibOptions)\n- responseData = pipeline(\n- res,\n- responseData,\n- (e) => e && listeners.error(e)\n- )\n+ responseData = pipeline(res, responseData, (e) => e && reject(e))\n} else {\nresponseData = res\n}\n@@ -323,7 +349,7 @@ export class NodeHttpTransport implements Transport {\nif (body === '' && !!res.headers['x-influxdb-error']) {\nbody = res.headers['x-influxdb-error'].toString()\n}\n- listeners.error(\n+ reject(\nnew HttpError(\nstatusCode,\nres.statusMessage,\n@@ -334,6 +360,36 @@ export class NodeHttpTransport implements Transport {\n)\n})\n} else {\n+ resolve(responseData)\n+ }\n+ }\n+\n+ private _request(\n+ requestMessage: {[key: string]: any},\n+ cancellable: CancellableImpl,\n+ callbacks?: Partial<CommunicationObserver<any>>\n+ ): void {\n+ const listeners = completeCommunicationObserver(callbacks)\n+ if (cancellable.isCancelled()) {\n+ listeners.complete()\n+ return\n+ }\n+ if (requestMessage.signal?.addEventListener) {\n+ ;(requestMessage.signal as AbortSignal).addEventListener('abort', () => {\n+ listeners.error(new AbortError())\n+ })\n+ }\n+ const req = this.requestApi(requestMessage, (res: http.IncomingMessage) => {\n+ /* istanbul ignore next - hard to simulate failure, manually reviewed */\n+ if (cancellable.isCancelled()) {\n+ res.resume()\n+ listeners.complete()\n+ return\n+ }\n+ listeners.responseStarted(res.headers, res.statusCode)\n+ this._prepareResponse(\n+ res,\n+ (responseData) => {\nresponseData.on('data', (data) => {\nif (cancellable.isCancelled()) {\nres.resume()\n@@ -358,7 +414,9 @@ export class NodeHttpTransport implements Transport {\n}\n})\nresponseData.on('end', listeners.complete)\n- }\n+ },\n+ listeners.error\n+ )\n})\n// Support older Nodes which don't allow `timeout` in the\n// request options\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"new_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"diff": "@@ -13,6 +13,7 @@ import {CLIENT_LIB_VERSION} from '../../../../src/impl/version'\nimport {CollectedLogs, collectLogging} from '../../../util'\nimport {waitForCondition} from '../../util/waitForCondition'\nimport {AddressInfo} from 'net'\n+import {AbortController} from '../browser/emulateBrowser'\nfunction sendTestData(\nconnectionOptions: ConnectionOptions,\n@@ -40,6 +41,20 @@ function sendTestData(\n})\n})\n}\n+async function iterateTestData(\n+ connectionOptions: ConnectionOptions,\n+ sendOptions: SendOptions\n+): Promise<string> {\n+ let data = ''\n+ for await (const chunk of new NodeHttpTransport(connectionOptions).iterate(\n+ '/test',\n+ '',\n+ sendOptions\n+ )) {\n+ data += chunk.toString()\n+ }\n+ return data\n+}\nconst TEST_URL = 'http://test:8086'\ndescribe('NodeHttpTransport', () => {\n@@ -413,6 +428,35 @@ describe('NodeHttpTransport', () => {\nexpect(e).property('message').to.include('aborted')\n})\n})\n+ it(`is aborted by a signal before response arrives`, async () => {\n+ let remainingChunks = 2\n+ const ac = new AbortController()\n+ nock(transportOptions.url)\n+ .get('/test')\n+ .reply((_uri, _requestBody) => [\n+ 200,\n+ new Readable({\n+ read(): any {\n+ remainingChunks--\n+ if (!remainingChunks) {\n+ ac.abort()\n+ }\n+ this.push(remainingChunks < 0 ? null : '.')\n+ },\n+ }),\n+ ])\n+ .persist()\n+ await sendTestData(\n+ {...transportOptions, transportOptions: {signal: ac.signal}},\n+ {method: 'GET'}\n+ )\n+ .then((_data) => {\n+ expect.fail('not expected!')\n+ })\n+ .catch((e: any) => {\n+ expect(e).property('message').to.include('aborted')\n+ })\n+ })\nit(`signalizes error upon request's error'`, async () => {\nlet remainingChunks = 2\nlet req: any\n@@ -655,6 +699,301 @@ describe('NodeHttpTransport', () => {\nexpect(spy.next.callCount).is.greaterThan(2)\n})\n})\n+ describe('iterate', () => {\n+ beforeEach(() => {\n+ nock.disableNetConnect()\n+ })\n+ afterEach(() => {\n+ nock.cleanAll()\n+ nock.enableNetConnect()\n+ })\n+ describe('positive', () => {\n+ const transportOptions = {\n+ url: TEST_URL,\n+ timeout: 100,\n+ }\n+ const extraOptions = [\n+ {},\n+ {\n+ token: 'a',\n+ },\n+ {\n+ headers: {\n+ 'accept-encoding': 'gzip',\n+ },\n+ },\n+ {contextPath: '/context'},\n+ ]\n+ for (let i = 0; i < extraOptions.length; i++) {\n+ const extras = extraOptions[i]\n+ const responseData = 'yes'\n+ it(`works with options ${JSON.stringify(extras)}`, async () => {\n+ let responseRead = false\n+ const context = nock(transportOptions.url)\n+ .post((extras.contextPath ?? '') + '/test')\n+ .reply((_uri, _requestBody) => [\n+ 200,\n+ new Readable({\n+ read(): any {\n+ const encode = !!(extras.headers ?? {})['accept-encoding']\n+ if (encode) {\n+ this.push(responseRead ? null : zlib.gzipSync(responseData))\n+ } else {\n+ this.push(responseRead ? null : responseData)\n+ }\n+ responseRead = true\n+ },\n+ }),\n+ {\n+ 'content-encoding': (\n+ _req: any,\n+ _res: any,\n+ _body: any\n+ ): string =>\n+ (extras.headers ?? {})['accept-encoding'] ?? 'identity',\n+ },\n+ ])\n+ .persist()\n+ if (extras.token) {\n+ context.matchHeader('authorization', 'Token ' + extras.token)\n+ }\n+ context.matchHeader(\n+ 'User-Agent',\n+ `influxdb-client-js/${CLIENT_LIB_VERSION}`\n+ )\n+ const transport = new NodeHttpTransport({\n+ ...extras,\n+ ...transportOptions,\n+ url: transportOptions.url + (extras.contextPath ?? 
''),\n+ })\n+ try {\n+ let result = ''\n+ let resultAppended = 0\n+ const iterable = transport.iterate('/test', '', {\n+ ...extras,\n+ method: 'POST',\n+ })\n+ for await (const data of iterable) {\n+ result += data.toString()\n+ resultAppended++\n+ }\n+ expect(resultAppended).equals(1)\n+ expect(result).to.equal(responseData)\n+ } catch (e) {\n+ expect.fail(e?.toString())\n+ }\n+ })\n+ }\n+ })\n+ describe('negative', () => {\n+ const transportOptions = {\n+ url: TEST_URL,\n+ timeout: 100,\n+ }\n+ it(`fails on server error`, async () => {\n+ nock(transportOptions.url).get('/test').reply(500, 'not ok')\n+ await iterateTestData(transportOptions, {method: 'GET'})\n+ .then(() => {\n+ expect.fail('must not succeed')\n+ })\n+ .catch((e) => {\n+ expect(e).property('statusCode').to.equal(500)\n+ })\n+ })\n+ it(`fails on decoding error`, async () => {\n+ let responseRead = false\n+ nock(transportOptions.url)\n+ .get('/test')\n+ .reply((_uri, _requestBody) => [\n+ 200,\n+ new Readable({\n+ read(): any {\n+ this.push(responseRead ? null : 'no')\n+ responseRead = true\n+ },\n+ }),\n+ {\n+ 'content-encoding': 'gzip',\n+ },\n+ ])\n+ .persist()\n+ await iterateTestData(transportOptions, {method: 'GET'})\n+ .then(() => {\n+ expect.fail('must not succeed')\n+ })\n+ .catch((e) => {\n+ expect(e).property('message').is.not.equal('must not succeed')\n+ expect(e.toString()).does.not.include('time') // not timeout\n+ })\n+ })\n+ it(`fails on connection timeout`, async () => {\n+ nock(transportOptions.url)\n+ .get('/test')\n+ .delayConnection(2000)\n+ .reply(200, 'ok')\n+ await iterateTestData(\n+ {...transportOptions, timeout: 100},\n+ {method: 'GET'}\n+ )\n+ .then(() => {\n+ throw new Error('must not succeed')\n+ })\n+ .catch((e) => {\n+ expect(e.toString()).to.include('timed')\n+ })\n+ })\n+ it(`fails on response timeout`, async () => {\n+ nock(transportOptions.url).get('/test').delay(2000).reply(200, 'ok')\n+ await iterateTestData(\n+ {...transportOptions, timeout: 100},\n+ {method: 'GET'}\n+ )\n+ .then(() => {\n+ throw new Error('must not succeed')\n+ })\n+ .catch((e) => {\n+ expect(e.toString()).to.include('timed')\n+ })\n+ })\n+ it(`truncates error messages`, async () => {\n+ let bigMessage = 'this is a big error message'\n+ while (bigMessage.length < 1001) bigMessage += bigMessage\n+ nock(transportOptions.url).get('/test').reply(500, bigMessage)\n+ await iterateTestData(transportOptions, {method: 'GET'})\n+ .then(() => {\n+ throw new Error('must not succeed')\n+ })\n+ .catch((e: any) => {\n+ expect(e).property('body').to.length(1000)\n+ })\n+ })\n+ it(`parses error responses`, async () => {\n+ let bigMessage = ',\"this is a big error message\"'\n+ while (bigMessage.length < 1001) bigMessage += bigMessage\n+ bigMessage = `{\"code\":\"mc\",\"message\":\"mymsg\",\"details\":[\"\"${bigMessage}]}`\n+ nock(transportOptions.url)\n+ .get('/test')\n+ .reply(400, bigMessage, {'content-type': 'application/json'})\n+ await iterateTestData(transportOptions, {method: 'GET'}).then(\n+ () => {\n+ throw new Error('must not succeed')\n+ },\n+ (e: any) => {\n+ expect(e).property('body').to.length(bigMessage.length)\n+ expect(e).property('json').deep.equals(JSON.parse(bigMessage))\n+ expect(e).property('code').equals('mc')\n+ expect(e).property('message').equals('mymsg')\n+ }\n+ )\n+ })\n+ it(`uses X-Influxdb-Error header when no body is returned`, async () => {\n+ const errorMessage = 'this is a header error message'\n+ nock(transportOptions.url)\n+ .get('/test')\n+ .reply(500, '', {'X-Influxdb-Error': errorMessage})\n+ 
await iterateTestData(transportOptions, {method: 'GET'})\n+ .then(() => {\n+ throw new Error('must not succeed')\n+ })\n+ .catch((e: any) => {\n+ expect(e).property('body').equals(errorMessage)\n+ })\n+ })\n+ it(`is aborted before the whole response arrives`, async () => {\n+ let remainingChunks = 2\n+ let res: any\n+ nock(transportOptions.url)\n+ .get('/test')\n+ .reply((_uri, _requestBody) => [\n+ 200,\n+ new Readable({\n+ read(): any {\n+ remainingChunks--\n+ if (!remainingChunks) {\n+ res.emit('aborted')\n+ }\n+ this.push(remainingChunks < 0 ? null : '.')\n+ },\n+ }),\n+ {\n+ 'X-Whatever': (_req: any, _res: any, _body: any): string => {\n+ res = _res\n+ return '1'\n+ },\n+ },\n+ ])\n+ .persist()\n+ await iterateTestData(transportOptions, {method: 'GET'})\n+ .then((_data) => {\n+ expect.fail('not expected!')\n+ })\n+ .catch((e: any) => {\n+ expect(e).property('message').to.include('aborted')\n+ })\n+ })\n+ it(`is aborted with a signal before the whole response arrives`, async () => {\n+ let remainingChunks = 2\n+ const ac = new AbortController()\n+ nock(transportOptions.url)\n+ .get('/test')\n+ .reply((_uri, _requestBody) => [\n+ 200,\n+ new Readable({\n+ read(): any {\n+ remainingChunks--\n+ if (!remainingChunks) {\n+ ac.abort()\n+ }\n+ this.push(remainingChunks < 0 ? null : '.')\n+ },\n+ }),\n+ ])\n+ .persist()\n+ await iterateTestData(\n+ {...transportOptions, transportOptions: {signal: ac.signal}},\n+ {method: 'GET'}\n+ )\n+ .then((_data) => {\n+ expect.fail('not expected!')\n+ })\n+ .catch((e: any) => {\n+ expect(e).property('message').to.include('aborted')\n+ })\n+ })\n+ it(`signalizes error upon request's error'`, async () => {\n+ let remainingChunks = 2\n+ let req: any\n+ nock(transportOptions.url)\n+ .get('/test')\n+ .reply((_uri, _requestBody) => [\n+ 200,\n+ new Readable({\n+ read(): any {\n+ remainingChunks--\n+ if (!remainingChunks) {\n+ req.emit('error', new Error('request failed'))\n+ }\n+ this.push(remainingChunks < 0 ? null : '.')\n+ },\n+ }),\n+ {\n+ 'X-Whatever': (_req: any, _res: any, _body: any): string => {\n+ req = _req\n+ return '1'\n+ },\n+ },\n+ ])\n+ .persist()\n+ await iterateTestData(transportOptions, {method: 'GET'})\n+ .then((_data) => {\n+ expect.fail('not expected!')\n+ })\n+ .catch((e: any) => {\n+ expect(e).property('message').to.include('request failed')\n+ })\n+ })\n+ })\n+ })\ndescribe('request', () => {\nbeforeEach(() => {\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): add async iterator to node transport |
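Consumer-side sketch of the new async iterator (the deep import path mirrors the test file above; URL, endpoint path, and options are placeholders):

```typescript
// Illustrative consumption of the NodeHttpTransport.iterate method added above.
import {NodeHttpTransport} from './src/impl/node/NodeHttpTransport'

async function readBody(): Promise<string> {
  const transport = new NodeHttpTransport({url: 'http://localhost:8086'})
  let data = ''
  // each yielded chunk is a Uint8Array read from the HTTP response stream
  for await (const chunk of transport.iterate('/ping', '', {method: 'GET'})) {
    data += Buffer.from(chunk).toString('utf8')
  }
  return data
}
```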
305,159 | 30.09.2022 12:28:29 | -7,200 | 1b20b7c8e566435b81e054de3fa107bb9dba92ce | feat(core): allow to use AbortSignal in both node and fetch transport | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/browser/FetchTransport.ts",
"new_path": "packages/core/src/impl/browser/FetchTransport.ts",
"diff": "@@ -72,7 +72,7 @@ export default class FetchTransport implements Transport {\nconst controller = new AbortController()\nif (!signal) {\nsignal = controller.signal\n- options = {...(options as object), ...signal} as SendOptions\n+ options = {...options, signal}\n}\n// resume data reading so that it can exit on abort signal\nsignal.addEventListener('abort', () => {\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"new_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"diff": "@@ -294,6 +294,9 @@ export class NodeHttpTransport implements Transport {\n...sendOptions.headers,\n},\n}\n+ if (sendOptions.signal) {\n+ options.signal = sendOptions.signal\n+ }\nif (\nsendOptions.gzipThreshold !== undefined &&\nsendOptions.gzipThreshold < bodyBuffer.length\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/transport.ts",
"new_path": "packages/core/src/transport.ts",
"diff": "@@ -10,6 +10,8 @@ export interface SendOptions {\nheaders?: {[key: string]: string}\n/** When specified, message body larger than the treshold is gzipped */\ngzipThreshold?: number\n+ /** Abort signal */\n+ signal?: AbortSignal\n}\n/**\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"new_path": "packages/core/test/unit/impl/node/NodeHttpTransport.test.ts",
"diff": "@@ -13,7 +13,6 @@ import {CLIENT_LIB_VERSION} from '../../../../src/impl/version'\nimport {CollectedLogs, collectLogging} from '../../../util'\nimport {waitForCondition} from '../../util/waitForCondition'\nimport {AddressInfo} from 'net'\n-import {AbortController} from '../browser/emulateBrowser'\nfunction sendTestData(\nconnectionOptions: ConnectionOptions,\n@@ -446,10 +445,7 @@ describe('NodeHttpTransport', () => {\n}),\n])\n.persist()\n- await sendTestData(\n- {...transportOptions, transportOptions: {signal: ac.signal}},\n- {method: 'GET'}\n- )\n+ await sendTestData(transportOptions, {method: 'GET', signal: ac.signal})\n.then((_data) => {\nexpect.fail('not expected!')\n})\n@@ -931,7 +927,7 @@ describe('NodeHttpTransport', () => {\nexpect(e).property('message').to.include('aborted')\n})\n})\n- it(`is aborted with a signal before the whole response arrives`, async () => {\n+ it(`is aborted by a signal before the whole response arrives`, async () => {\nlet remainingChunks = 2\nconst ac = new AbortController()\nnock(transportOptions.url)\n@@ -949,10 +945,10 @@ describe('NodeHttpTransport', () => {\n}),\n])\n.persist()\n- await iterateTestData(\n- {...transportOptions, transportOptions: {signal: ac.signal}},\n- {method: 'GET'}\n- )\n+ await iterateTestData(transportOptions, {\n+ method: 'GET',\n+ signal: ac.signal,\n+ })\n.then((_data) => {\nexpect.fail('not expected!')\n})\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): allow to use AbortSignal in both node and fetch transport |
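A sketch of aborting an in-flight request through the new `SendOptions.signal` field; the transport, URL, and endpoint are placeholders, and the iterator throws an AbortError once the signal fires:

```typescript
// Illustrative use of SendOptions.signal introduced above.
import {NodeHttpTransport} from './src/impl/node/NodeHttpTransport'

async function countBytesWithDeadline(body: string): Promise<number> {
  const transport = new NodeHttpTransport({url: 'http://localhost:8086'})
  const ac = new AbortController()
  const timer = setTimeout(() => ac.abort(), 1000) // give up after one second
  let bytes = 0
  try {
    for await (const chunk of transport.iterate('/api/v2/query', body, {
      method: 'POST',
      signal: ac.signal, // honored by both the node and the fetch transport
    })) {
      bytes += chunk.length
    }
  } finally {
    clearTimeout(timer)
  }
  return bytes
}
```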
305,159 | 30.09.2022 12:29:11 | -7,200 | 73a9b089ce03194bdfa6ceaf4348d8bddd38da99 | feat(core): upgrade target to es2018 to support async iterables | [
{
"change_type": "MODIFY",
"old_path": "packages/core/tsup.config.ts",
"new_path": "packages/core/tsup.config.ts",
"diff": "@@ -14,7 +14,7 @@ export default defineConfig({\ndts: true,\nformat: ['cjs', 'esm'],\nminify,\n- target: ['es2015'],\n+ target: ['es2018'],\nplatform: 'node',\nsplitting: false,\nesbuildOptions(options, {format}) {\n"
},
{
"change_type": "MODIFY",
"old_path": "tsconfig.base.json",
"new_path": "tsconfig.base.json",
"diff": "{\n\"compilerOptions\": {\n- \"target\": \"es2015\",\n- \"lib\": [\"es2015\", \"es2017\"],\n+ \"target\": \"es2018\",\n+ \"lib\": [\"es2018\"],\n\"allowJs\": false,\n\"declaration\": true,\n\"declarationMap\": true,\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): upgrade target to es2018 to support async iterables |
305,159 | 30.09.2022 13:33:48 | -7,200 | 5f505c2296d6f40ee17eb0b336bc9b530f3e658d | feat: change typescript target/lib to es2018 | [
{
"change_type": "MODIFY",
"old_path": "packages/apis/tsconfig.json",
"new_path": "packages/apis/tsconfig.json",
"diff": "\"extends\": \"../../tsconfig.base.json\",\n\"compilerOptions\": {\n\"resolveJsonModule\": true,\n- \"lib\": [\"es2015\"]\n+ \"lib\": [\"es2018\"]\n},\n\"include\": [\"src/**/*.ts\", \"generator/*.ts\"],\n\"exclude\": [\"**/*.js\"]\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/apis/tsup.config.browser.ts",
"new_path": "packages/apis/tsup.config.browser.ts",
"diff": "@@ -16,7 +16,7 @@ export default defineConfig({\nglobalName: 'influxdbApis',\ndts: false,\nminify,\n- target: ['es2015'],\n+ target: ['es2018'],\nplatform: 'browser',\nsplitting: false,\nesbuildOptions(options, {format}) {\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/apis/tsup.config.ts",
"new_path": "packages/apis/tsup.config.ts",
"diff": "@@ -14,7 +14,7 @@ export default defineConfig({\ndts: true,\nformat: ['cjs', 'esm'],\nminify,\n- target: ['es2015'],\n+ target: ['es2018'],\nplatform: 'node',\nsplitting: false,\nesbuildOptions(options, {format}) {\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/browser/tsconfig.json",
"new_path": "packages/core/src/impl/browser/tsconfig.json",
"diff": "{\n\"extends\": \"../../../tsconfig.json\",\n\"compilerOptions\": {\n- \"lib\": [\"DOM\", \"es2015\", \"es2017\"]\n+ \"lib\": [\"DOM\", \"es2018\"]\n},\n\"include\": [\"*.ts\"]\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/tsup.config.browser.ts",
"new_path": "packages/core/tsup.config.browser.ts",
"diff": "@@ -18,7 +18,7 @@ export default defineConfig({\nglobalName: 'influxdb',\ndts: false,\nminify,\n- target: ['es2015'],\n+ target: ['es2018'],\nplatform: 'browser',\nsplitting: false,\ndefine: {\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/giraffe/tsconfig.json",
"new_path": "packages/giraffe/tsconfig.json",
"diff": "\"extends\": \"../../tsconfig.base.json\",\n\"compilerOptions\": {\n\"resolveJsonModule\": true,\n- \"lib\": [\"es2015\"]\n+ \"lib\": [\"es2018\"]\n},\n\"include\": [\"src/**/*.ts\", \"test/**/*.ts\"],\n\"exclude\": [\"*.js\"]\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/giraffe/tsup.config.ts",
"new_path": "packages/giraffe/tsup.config.ts",
"diff": "@@ -16,7 +16,7 @@ export default defineConfig({\nglobalName: 'g',\ndts: true,\nminify,\n- target: ['es2015'],\n+ target: ['es2018'],\nplatform: 'browser',\nsplitting: false,\nesbuildOptions(options, {format}) {\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat: change typescript target/lib to es2018 |
305,159 | 30.09.2022 15:25:44 | -7,200 | 8d085ab191479972c117aa4d3b58a67c6a430759 | feat(core): add async iterator to browser transport | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/browser/FetchTransport.ts",
"new_path": "packages/core/src/impl/browser/FetchTransport.ts",
"diff": "import {Transport, SendOptions} from '../../transport'\nimport {ConnectionOptions} from '../../options'\n-import {HttpError} from '../../errors'\n+import {AbortError, HttpError} from '../../errors'\nimport completeCommunicationObserver from '../completeCommunicationObserver'\nimport {Log} from '../../util/logger'\nimport {\n@@ -96,39 +96,7 @@ export default class FetchTransport implements Transport {\nresponse.status\n)\n}\n- if (response.status >= 300) {\n- return response\n- .text()\n- .then((text: string) => {\n- if (!text) {\n- const headerError = response.headers.get('x-influxdb-error')\n- if (headerError) {\n- text = headerError\n- }\n- }\n- observer.error(\n- new HttpError(\n- response.status,\n- response.statusText,\n- text,\n- response.headers.get('retry-after'),\n- response.headers.get('content-type')\n- )\n- )\n- })\n- .catch((e: Error) => {\n- Log.warn('Unable to receive error body', e)\n- observer.error(\n- new HttpError(\n- response.status,\n- response.statusText,\n- undefined,\n- response.headers.get('retry-after'),\n- response.headers.get('content-type')\n- )\n- )\n- })\n- } else {\n+ await this.throwOnErrorResponse(response)\nif (response.body) {\nconst reader = response.body.getReader()\nlet chunk: ReadableStreamReadResult<Uint8Array>\n@@ -164,7 +132,6 @@ export default class FetchTransport implements Transport {\nconst text = await response.text()\nobserver.next(new TextEncoder().encode(text))\n}\n- }\n})\n.catch((e) => {\nif (!cancelled) {\n@@ -173,6 +140,65 @@ export default class FetchTransport implements Transport {\n})\n.finally(() => observer.complete())\n}\n+ private async throwOnErrorResponse(response: Response): Promise<void> {\n+ if (response.status >= 300) {\n+ let text = ''\n+ try {\n+ text = await response.text()\n+ if (!text) {\n+ const headerError = response.headers.get('x-influxdb-error')\n+ if (headerError) {\n+ text = headerError\n+ }\n+ }\n+ } catch (e) {\n+ Log.warn('Unable to receive error body', e)\n+ throw new HttpError(\n+ response.status,\n+ response.statusText,\n+ undefined,\n+ response.headers.get('retry-after'),\n+ response.headers.get('content-type')\n+ )\n+ }\n+ throw new HttpError(\n+ response.status,\n+ response.statusText,\n+ text,\n+ response.headers.get('retry-after'),\n+ response.headers.get('content-type')\n+ )\n+ }\n+ }\n+\n+ async *iterate(\n+ path: string,\n+ body: string,\n+ options: SendOptions\n+ ): AsyncIterableIterator<Uint8Array> {\n+ const response = await this.fetch(path, body, options)\n+ await this.throwOnErrorResponse(response)\n+ if (response.body) {\n+ const reader = response.body.getReader()\n+ for (;;) {\n+ const {value, done} = await reader.read()\n+ if (done) {\n+ break\n+ }\n+ if (options.signal?.aborted) {\n+ throw new AbortError()\n+ }\n+ yield value\n+ }\n+ } else if (response.arrayBuffer) {\n+ const buffer = await response.arrayBuffer()\n+ yield new Uint8Array(buffer)\n+ } else {\n+ const text = await response.text()\n+ yield new TextEncoder().encode(text)\n+ }\n+ }\n+\nasync request(\npath: string,\nbody: any,\n@@ -180,28 +206,13 @@ export default class FetchTransport implements Transport {\nresponseStarted?: ResponseStartedFn\n): Promise<any> {\nconst response = await this.fetch(path, body, options)\n- const {status, headers} = response\n+ const {headers} = response\nconst responseContentType = headers.get('content-type') || ''\nif (responseStarted) {\nresponseStarted(getResponseHeaders(response), response.status)\n}\n- if (status >= 300) {\n- let data = await response.text()\n- if (!data) {\n- 
const headerError = headers.get('x-influxdb-error')\n- if (headerError) {\n- data = headerError\n- }\n- }\n- throw new HttpError(\n- status,\n- response.statusText,\n- data,\n- response.headers.get('retry-after'),\n- response.headers.get('content-type')\n- )\n- }\n+ await this.throwOnErrorResponse(response)\nconst responseType = options.headers?.accept ?? responseContentType\nif (responseType.includes('json')) {\nreturn await response.json()\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/browser/FetchTransport.test.ts",
"new_path": "packages/core/test/unit/impl/browser/FetchTransport.test.ts",
"diff": "@@ -627,6 +627,98 @@ describe('FetchTransport', () => {\n).equals(responseBody)\n})\n})\n+ describe('iterate', () => {\n+ const transport = new FetchTransport({url: 'http://test:8086'})\n+ ;[\n+ {\n+ body: 'a',\n+ status: 201,\n+ url: 'string body',\n+ },\n+ {\n+ body: 'a',\n+ url: 'error',\n+ },\n+ {\n+ body: [Buffer.from('a'), Buffer.from('b')],\n+ url: 'use response reader',\n+ },\n+ {\n+ body: Buffer.from('a'),\n+ url: 'use array buffer',\n+ },\n+ {\n+ body: 'error',\n+ status: 501,\n+ url: '501 error',\n+ },\n+ {\n+ body: '',\n+ status: 500,\n+ headers: {'x-influxdb-error': 'header error'},\n+ errorBody: 'header error',\n+ url: 'x-influxdb-error header',\n+ },\n+ {\n+ body: '',\n+ status: 500,\n+ errorBody: '',\n+ url: 'empty err body',\n+ },\n+ {\n+ body: 'this is error message',\n+ status: 500,\n+ errorBody: 'this is error message',\n+ url: 'check error body message',\n+ },\n+ {\n+ body: [Buffer.from('signal breaks reading response')],\n+ status: 200,\n+ signal: new AbortController(true).getSignal(),\n+ url: 'breaked by a signal',\n+ },\n+ ].forEach(\n+ ({body, url, status = 200, headers = {}, errorBody, signal}, i) => {\n+ it(`iterates chunks ${i} (${url})`, async () => {\n+ emulateFetchApi({\n+ headers: {\n+ 'content-type': 'text/plain',\n+ duplicate: 'ok',\n+ ...headers,\n+ },\n+ status,\n+ body,\n+ })\n+ let error: any = undefined\n+ const vals: Uint8Array[] = []\n+ try {\n+ for await (const chunk of transport.iterate(url, '', {\n+ method: 'POST',\n+ signal,\n+ })) {\n+ vals.push(chunk)\n+ }\n+ } catch (e) {\n+ error = e\n+ }\n+ const isError = url === 'error' || status >= 300 || signal?.aborted\n+ if (isError) {\n+ expect(error).is.not.undefined\n+ expect(vals).is.empty\n+ if (errorBody) {\n+ expect(error).property('body').equals(errorBody)\n+ }\n+ } else {\n+ expect(\n+ Array.isArray(body)\n+ ? body\n+ : [Buffer.isBuffer(body) ? body : Buffer.from(body)]\n+ ).is.deep.equal(vals)\n+ }\n+ })\n+ }\n+ )\n+ })\ndescribe('chunkCombiner', () => {\nconst options = {url: 'http://test:8086'}\nconst chunkCombiner = new FetchTransport(options).chunkCombiner\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/browser/emulateBrowser.ts",
"new_path": "packages/core/test/unit/impl/browser/emulateBrowser.ts",
"diff": "@@ -97,6 +97,10 @@ export class AbortController {\nthis.signal.aborted = true\nthis.listeners.forEach((x) => x())\n}\n+\n+ getSignal(): AbortSignal {\n+ return this.signal as unknown as AbortSignal\n+ }\n}\nexport function emulateFetchApi(\n@@ -105,7 +109,7 @@ export function emulateFetchApi(\n): void {\nfunction fetch(url: string, options: any): Promise<any> {\nif (onRequest) onRequest(options)\n- return url.indexOf('error') !== -1\n+ return url.endsWith('error')\n? Promise.reject(new Error(url))\n: Promise.resolve(createResponse(spec))\n}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): add async iterator to browser transport |
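The heart of the browser implementation, extracted as a standalone sketch over the standard Fetch/Streams APIs (error handling simplified; not the library's exact code):

```typescript
// Standalone sketch of the reader loop behind FetchTransport.iterate above;
// it uses only standard Fetch/Streams APIs and a simplified abort error.
async function* responseChunks(
  response: Response,
  signal?: AbortSignal
): AsyncIterableIterator<Uint8Array> {
  if (!response.body) {
    // fall back to a single buffered chunk when streaming is unavailable
    yield new Uint8Array(await response.arrayBuffer())
    return
  }
  const reader = response.body.getReader()
  for (;;) {
    const {value, done} = await reader.read()
    if (done || value === undefined) break
    if (signal?.aborted) {
      await reader.cancel() // release the stream before bailing out
      throw new Error('aborted')
    }
    yield value
  }
}
```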
305,159 | 30.09.2022 15:34:34 | -7,200 | c6442f44dc35ea45ff7c75ad809fd9f53962e3fe | chore: remove misleading comments | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"new_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"diff": "@@ -265,8 +265,6 @@ export class NodeHttpTransport implements Transport {\n* Creates configuration for a specific request.\n*\n* @param path - API path starting with '/' and containing also query parameters\n- * @param headers - HTTP headers to use\n- * @param method - HTTP method\n* @param body - request body, will be utf-8 encoded\n* @returns a configuration object that is suitable for making the request\n*/\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore: remove misleading comments |
305,159 | 30.09.2022 15:39:49 | -7,200 | 55e0b832232ed155ffb1786611b51ae4db0a6a13 | feat(core): add iterate function to transport | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/transport.ts",
"new_path": "packages/core/src/transport.ts",
"diff": "@@ -40,14 +40,30 @@ export interface Transport {\n* @param path - HTTP request path\n* @param requestBody - request body\n* @param options - send options\n+ * @returns response data\n*/\nrequest(\npath: string,\n- body: any,\n+ requestBody: any,\noptions: SendOptions,\nresponseStarted?: ResponseStartedFn\n): Promise<any>\n+ /**\n+ * Sends requestBody and returns response chunks in an async iterable\n+ * that can be easily consumed in an `for-await` loop.\n+ *\n+ * @param path - HTTP request path\n+ * @param requestBody - request body\n+ * @param options - send options\n+ * @returns async iterable\n+ */\n+ iterate(\n+ path: string,\n+ requestBody: any,\n+ options: SendOptions\n+ ): AsyncIterableIterator<Uint8Array>\n+\n/**\n* Combines response chunks to create a single response object.\n*/\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): add iterate function to transport |
305,159 | 30.09.2022 16:16:58 | -7,200 | 5db6f3f90c27e355448ba2d8c051a504bc01d02c | feat(core): cancel body consumption when aborted | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/browser/FetchTransport.ts",
"new_path": "packages/core/src/impl/browser/FetchTransport.ts",
"diff": "@@ -186,6 +186,7 @@ export default class FetchTransport implements Transport {\nbreak\n}\nif (options.signal?.aborted) {\n+ await response.body.cancel()\nthrow new AbortError()\n}\nyield value\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/browser/FetchTransport.test.ts",
"new_path": "packages/core/test/unit/impl/browser/FetchTransport.test.ts",
"diff": "@@ -6,7 +6,13 @@ import {\nAbortController,\n} from './emulateBrowser'\nimport sinon from 'sinon'\n-import {SendOptions, Cancellable, CommunicationObserver} from '../../../../src'\n+import {\n+ SendOptions,\n+ Cancellable,\n+ CommunicationObserver,\n+ AbortError,\n+ HttpError,\n+} from '../../../../src'\nimport {CollectedLogs, collectLogging} from '../../../util'\nimport {waitForCondition} from '../../util/waitForCondition'\n@@ -675,7 +681,7 @@ describe('FetchTransport', () => {\nbody: [Buffer.from('signal breaks reading response')],\nstatus: 200,\nsignal: new AbortController(true).getSignal(),\n- url: 'breaked by a signal',\n+ url: 'aborted by a signal',\n},\n].forEach(\n({body, url, status = 200, headers = {}, errorBody, signal}, i) => {\n@@ -708,6 +714,11 @@ describe('FetchTransport', () => {\nif (errorBody) {\nexpect(error).property('body').equals(errorBody)\n}\n+ if (signal?.aborted) {\n+ expect(error).is.instanceOf(AbortError)\n+ } else {\n+ expect(error).is.instanceOf(HttpError)\n+ }\n} else {\nexpect(\nArray.isArray(body)\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/impl/browser/emulateBrowser.ts",
"new_path": "packages/core/test/unit/impl/browser/emulateBrowser.ts",
"diff": "+import {HttpError} from '../../../../src'\n+\ninterface ResponseSpec {\nheaders?: {[key: string]: string}\nstatus?: number\n@@ -52,6 +54,7 @@ function createResponse({\n}\nif (Array.isArray(body)) {\nretVal.body = {\n+ cancel() {},\ngetReader(): any {\nlet position = 0\nreturn {\n@@ -110,7 +113,9 @@ export function emulateFetchApi(\nfunction fetch(url: string, options: any): Promise<any> {\nif (onRequest) onRequest(options)\nreturn url.endsWith('error')\n- ? Promise.reject(new Error(url))\n+ ? Promise.reject(\n+ new HttpError(500, undefined, undefined, undefined, undefined, url)\n+ )\n: Promise.resolve(createResponse(spec))\n}\nclass TextEncoder {\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): cancel body consumption when aborted |
305,159 | 03.10.2022 11:41:46 | -7,200 | b3a50fa3be0453778f596e4ff492867388248cfb | feat(core): add chunksToLinesIterable transformation | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "packages/core/src/results/chunksToLinesIterable.ts",
"diff": "+import {ChunkCombiner, createTextDecoderCombiner} from './chunkCombiner'\n+\n+/**\n+ * ChunksToLinesIterable is a transformation that accepts an iterable of Uint8Array instances\n+ * and returns iterable of lines.\n+ * @param source - iterable of transport buffers\n+ * @param chunkCombiner - chunk combiner\n+ * @returns iterable of lines\n+ */\n+export async function* chunksToLinesIterable(\n+ source: AsyncIterable<Uint8Array>,\n+ chunkCombiner?: ChunkCombiner\n+): AsyncIterableIterator<string> {\n+ const chunks = chunkCombiner ?? createTextDecoderCombiner()\n+ let previous: Uint8Array | undefined\n+ let quoted = false\n+\n+ for await (let chunk of source) {\n+ let index: number\n+ let start = 0\n+ if (previous) {\n+ index = previous.length\n+ chunk = chunks.concat(previous, chunk)\n+ } else {\n+ index = 0\n+ }\n+ while (index < chunk.length) {\n+ const c = chunk[index]\n+ if (c === 10) {\n+ if (!quoted) {\n+ /* do not emit CR+LR or LF line ending */\n+ const end = index > 0 && chunk[index - 1] === 13 ? index - 1 : index\n+ yield chunks.toUtf8String(chunk, start, end)\n+ start = index + 1\n+ }\n+ } else if (c === 34 /* \" */) {\n+ quoted = !quoted\n+ }\n+ index++\n+ }\n+ if (start < chunk.length) {\n+ previous = chunks.copy(chunk, start, chunk.length)\n+ } else {\n+ previous = undefined\n+ }\n+ }\n+ if (previous) {\n+ yield chunks.toUtf8String(previous, 0, previous.length)\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/index.ts",
"new_path": "packages/core/src/results/index.ts",
"diff": "export * from './chunkCombiner'\nexport * from './chunksToLines'\n+export * from './chunksToLinesIterable'\nexport * from './Cancellable'\nexport * from './CommunicationObserver'\nexport * from './linesToTables'\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "packages/core/test/unit/results/chunksToLinesIterable.test.ts",
"diff": "+import {expect} from 'chai'\n+import {ChunkCombiner, chunksToLinesIterable} from '../../../src/results'\n+import chunksToLinesTables from '../../fixture/chunksToLinesTables.json'\n+import nodeChunkCombiner from '../../../src/impl/node/nodeChunkCombiner'\n+import {Buffer} from 'buffer'\n+\n+interface ChunkTest {\n+ name: string\n+ chunks: string[]\n+ lines: string[]\n+ withCancellable?: boolean // use observer with cancellable implementation\n+}\n+\n+describe('chunksToLines', () => {\n+ const combiners: Array<{name: string; value?: ChunkCombiner}> = [\n+ {name: 'default', value: undefined as undefined},\n+ {name: 'nodeChunkCombiner', value: nodeChunkCombiner},\n+ ]\n+ combiners.forEach(({name, value: combiner}) => {\n+ describe(`with ${name} chunk combiner`, () => {\n+ ;(chunksToLinesTables as Array<ChunkTest>).forEach((test: ChunkTest) => {\n+ it(`iterates correct lines from test set '${test.name}'`, async () => {\n+ let error = false\n+ const source = async function* (): AsyncIterableIterator<Uint8Array> {\n+ for (let i = 0; i < test.chunks.length; i++) {\n+ const chunk = test.chunks[i]\n+ if (chunk === 'error') {\n+ error = true\n+ throw new Error()\n+ } else {\n+ yield Buffer.from(chunk, 'utf8')\n+ }\n+ }\n+ }\n+ const lines = []\n+ let failed = false\n+ try {\n+ for await (const line of chunksToLinesIterable(\n+ source(),\n+ combiner\n+ )) {\n+ lines.push(line)\n+ }\n+ } catch (e) {\n+ failed = true\n+ }\n+ expect(test.lines).deep.equal(lines)\n+ expect(error).equal(failed)\n+ })\n+ })\n+ })\n+ })\n+})\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): add chunksToLinesIterable transformation |
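Usage sketch for the new transformation (the package-root import is an assumption; the function is exported from the results module). Note how a quoted field may span chunks and contain a line feed without being split:

```typescript
// Illustrative use of chunksToLinesIterable over an in-memory chunk source.
import {chunksToLinesIterable} from '@influxdata/influxdb-client'

async function* byteChunks(): AsyncIterableIterator<Uint8Array> {
  yield Buffer.from('a,b\r\n1,"multi\n')
  yield Buffer.from('line"\n')
}

async function printLines(): Promise<void> {
  for await (const line of chunksToLinesIterable(byteChunks())) {
    console.log(line) // 'a,b' first, then '1,"multi\nline"'
  }
}
```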
305,159 | 03.10.2022 11:52:15 | -7,200 | bc75d68c3b0ce0c61fd918ef3a99173a5d4f2eb3 | chore: improve tsdoc | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/linesToTables.ts",
"new_path": "packages/core/src/results/linesToTables.ts",
"diff": "@@ -9,7 +9,7 @@ import {\nimport {FluxTableMetaData, createFluxTableMetaData} from './FluxTableMetaData'\n/**\n- * linesToTables creates a transformationthat accepts (flux) annotated CSV lines\n+ * LinesToTables creates a transformation that accepts (flux) annotated CSV lines\n* and emits rows together with table metadata.\n*/\nexport function linesToTables(\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/results/chunksToLinesIterable.test.ts",
"new_path": "packages/core/test/unit/results/chunksToLinesIterable.test.ts",
"diff": "@@ -11,7 +11,7 @@ interface ChunkTest {\nwithCancellable?: boolean // use observer with cancellable implementation\n}\n-describe('chunksToLines', () => {\n+describe('chunksToLinesIterable', () => {\nconst combiners: Array<{name: string; value?: ChunkCombiner}> = [\n{name: 'default', value: undefined as undefined},\n{name: 'nodeChunkCombiner', value: nodeChunkCombiner},\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore: improve tsdoc |
305,159 | 03.10.2022 12:30:29 | -7,200 | 04d2899a2c7ac6a98e4c46f6ca2a9d29f0d36bae | feat(core): reuse returned row to optimize code | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/index.ts",
"new_path": "packages/core/src/results/index.ts",
"diff": "@@ -4,6 +4,7 @@ export * from './chunksToLinesIterable'\nexport * from './Cancellable'\nexport * from './CommunicationObserver'\nexport * from './linesToTables'\n+export * from './linesToRowsIterable'\nexport * from './LineSplitter'\nexport * from './FluxTableMetaData'\nexport * from './FluxResultObserver'\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "packages/core/src/results/linesToRowsIterable.ts",
"diff": "+import {LineSplitter} from './LineSplitter'\n+import {\n+ FluxTableColumn,\n+ ColumnType,\n+ newFluxTableColumn,\n+} from './FluxTableColumn'\n+import {\n+ FluxTableMetaData,\n+ createFluxTableMetaData,\n+ Row,\n+} from './FluxTableMetaData'\n+\n+/**\n+ * LinesToRowsIterable is a transformation that accepts and iterable of flux annotated CSV lines\n+ * and returns iterable of rows (row data and row metadata).\n+ */\n+export async function* linesToRowsIterable(\n+ source: AsyncIterable<string>\n+): AsyncIterableIterator<Row> {\n+ const splitter = new LineSplitter().withReuse()\n+ let columns: FluxTableColumn[] | undefined\n+ let expectMeta = true\n+ let firstColumnIndex = 0\n+ let lastMeta: FluxTableMetaData | undefined = undefined\n+ for await (const line of source) {\n+ if (line === '') {\n+ expectMeta = true\n+ columns = undefined\n+ } else {\n+ const values = splitter.splitLine(line)\n+ const size = splitter.lastSplitLength\n+ if (expectMeta) {\n+ // create columns\n+ if (!columns) {\n+ columns = new Array(size)\n+ for (let i = 0; i < size; i++) {\n+ columns[i] = newFluxTableColumn()\n+ }\n+ }\n+ if (!values[0].startsWith('#')) {\n+ // fill in column names\n+ if (values[0] === '') {\n+ firstColumnIndex = 1\n+ columns = columns.slice(1)\n+ } else {\n+ firstColumnIndex = 0\n+ }\n+ for (let i = firstColumnIndex; i < size; i++) {\n+ columns[i - firstColumnIndex].label = values[i]\n+ }\n+ lastMeta = createFluxTableMetaData(columns)\n+ expectMeta = false\n+ } else if (values[0] === '#datatype') {\n+ for (let i = 1; i < size; i++) {\n+ columns[i].dataType = values[i] as ColumnType\n+ }\n+ } else if (values[0] === '#default') {\n+ for (let i = 1; i < size; i++) {\n+ columns[i].defaultValue = values[i]\n+ }\n+ } else if (values[0] === '#group') {\n+ for (let i = 1; i < size; i++) {\n+ columns[i].group = values[i][0] === 't'\n+ }\n+ }\n+ } else {\n+ yield {\n+ values: values.slice(firstColumnIndex, size),\n+ tableMeta:\n+ lastMeta as unknown as FluxTableMetaData /* never undefined */,\n+ }\n+ }\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "packages/core/test/unit/results/linesToRowsIterable.test.ts",
"diff": "+import {expect} from 'chai'\n+import fs from 'fs'\n+import {\n+ chunksToLinesIterable,\n+ FluxTableMetaData,\n+ linesToRowsIterable,\n+} from '../../../src/results'\n+\n+describe('linesToRowsIterable', () => {\n+ it('iterates rows', async () => {\n+ const data = fs.readFileSync('test/fixture/query/response2.txt')\n+ const chunkSource = async function* () {\n+ yield data\n+ }\n+ const response = JSON.parse(\n+ fs.readFileSync('test/fixture/query/response2.parsed.json', 'utf8')\n+ )\n+ let index = 0\n+ let lastMeta: FluxTableMetaData | undefined = undefined\n+ const tables: Array<{index: number; meta: FluxTableMetaData}> = []\n+ const rows: Array<{index: number; row: string[]}> = []\n+ for await (const {values, tableMeta} of linesToRowsIterable(\n+ chunksToLinesIterable(chunkSource())\n+ )) {\n+ if (lastMeta !== tableMeta) {\n+ tables.push({index: index++, meta: tableMeta})\n+ lastMeta = tableMeta\n+ }\n+ rows.push({index: index++, row: values})\n+ }\n+ expect(tables).deep.equal(response.tables)\n+ expect(rows).deep.equal(response.rows)\n+ })\n+ it('iterates rows without table annotations', async () => {\n+ const chunkSource = async function* () {\n+ yield Buffer.from('a,b\\n1,2\\n3,4')\n+ }\n+ let lastMeta: FluxTableMetaData | undefined = undefined\n+ const rows: Array<string[]> = []\n+ for await (const {values, tableMeta} of linesToRowsIterable(\n+ chunksToLinesIterable(chunkSource())\n+ )) {\n+ if (lastMeta !== undefined && lastMeta !== tableMeta) {\n+ expect.fail('only one metadata expected')\n+ }\n+ lastMeta = tableMeta\n+ rows.push(values)\n+ }\n+ expect(rows).deep.equal([\n+ ['1', '2'],\n+ ['3', '4'],\n+ ])\n+ expect(lastMeta).deep.equal({\n+ columns: [\n+ {\n+ index: 0,\n+ label: 'a',\n+ },\n+ {\n+ index: 1,\n+ label: 'b',\n+ },\n+ ],\n+ })\n+ })\n+})\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): reuse returned row to optimize code |
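The two iterable transformations compose naturally; a sketch over a tiny annotated CSV (the package-root import is an assumption):

```typescript
// Illustrative composition of chunksToLinesIterable and linesToRowsIterable.
import {
  chunksToLinesIterable,
  linesToRowsIterable,
} from '@influxdata/influxdb-client'

async function* csvChunks(): AsyncIterableIterator<Uint8Array> {
  yield Buffer.from('#datatype,string,long\n#default,,\n,name,value\n,a,1\n,b,2')
}

async function logRows(): Promise<void> {
  for await (const {values, tableMeta} of linesToRowsIterable(
    chunksToLinesIterable(csvChunks())
  )) {
    // tableMeta parses the annotations, so 'value' comes back as a number
    console.log(tableMeta.toObject(values)) // {name: 'a', value: 1}, then {name: 'b', value: 2}
  }
}
```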
305,159 | 03.10.2022 16:13:38 | -7,200 | cfdfc40f1c088004322f97d84ae0e25f7218a26e | feat(core): allow to process annotated CSV response as iterable of lines or rows | [
{
"change_type": "MODIFY",
"old_path": "packages/apis/src/custom/FluxScriptInvocationAPI.ts",
"new_path": "packages/apis/src/custom/FluxScriptInvocationAPI.ts",
"diff": "@@ -2,8 +2,6 @@ import {\nInfluxDB,\nTransport,\nAnnotatedCSVResponse,\n- APIExecutor,\n- CommunicationObserver,\n} from '@influxdata/influxdb-client'\n/** ExecutionOptions contains execution options for a flux script. */\n@@ -25,7 +23,7 @@ export interface ExecutionOptions {\nexport class FluxScriptInvocationAPI {\n// internal\nprivate transport: Transport\n- private processCSVResponse: (supplier: APIExecutor) => AnnotatedCSVResponse\n+ private processCSVResponse: InfluxDB['processCSVResponse']\nprivate options: ExecutionOptions\n/**\n@@ -48,31 +46,22 @@ export class FluxScriptInvocationAPI {\n* CSV response data stream\n*/\ninvoke(scriptID: string, params?: Record<string, any>): AnnotatedCSVResponse {\n- return this.processCSVResponse(this.createExecutor(scriptID, params))\n- }\n-\n- private createExecutor(\n- scriptID: string,\n- params: Record<string, any> | undefined\n- ): APIExecutor {\nconst {gzip, headers} = this.options\n-\n- return (consumer: CommunicationObserver<Uint8Array>): void => {\n- this.transport.send(\n- `/api/v2/scripts/${scriptID}/invoke`,\n- JSON.stringify({\n+ const path = `/api/v2/scripts/${scriptID}/invoke`\n+ const body = JSON.stringify({\nparams: {...params},\n- }),\n- {\n+ })\n+ const options = {\nmethod: 'POST',\nheaders: {\n'content-type': 'application/json; encoding=utf-8',\n'accept-encoding': gzip ? 'gzip' : 'identity',\n...headers,\n},\n- },\n- consumer\n- )\n}\n+ return this.processCSVResponse(\n+ (consumer) => this.transport.send(path, body, options, consumer),\n+ () => this.transport.iterate(path, body, options)\n+ )\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/apis/test/unit/custom/FluxScriptInvocationAPI.test.ts",
"new_path": "packages/apis/test/unit/custom/FluxScriptInvocationAPI.test.ts",
"diff": "@@ -8,6 +8,7 @@ import zlib from 'zlib'\nconst fakeUrl = 'http://fake:8086'\nconst fakeToken = 'a'\nconst fakeResponseLines = [\n+ '#datatype,string,long,dateTime:RFC3339,double',\n',result,table,_time,_value',\n',_result,0,2021-01-01T00:00:00Z,2',\n',_result,0,2021-01-01T02:24:00Z,6',\n@@ -40,6 +41,45 @@ describe('FluxScriptInvocationAPI', () => {\nexpect(body).to.deep.equal({params: {hi: 'Bob'}})\nexpect(authorization).equals(`Token ${fakeToken}`)\n})\n+ it('iterates lines', async () => {\n+ const subject = new FluxScriptInvocationAPI(influxDB)\n+ nock(fakeUrl)\n+ .post(`/api/v2/scripts/${fakeScriptID}/invoke`)\n+ .reply(200, fakeResponse)\n+ .persist()\n+ const lines: string[] = []\n+ const response = subject.invoke(fakeScriptID, {hi: 'Bob'})\n+ for await (const line of response.iterateLines()) {\n+ lines.push(line)\n+ }\n+ expect(lines).to.deep.equal(fakeResponseLines)\n+ })\n+ it('iterates rows', async () => {\n+ const subject = new FluxScriptInvocationAPI(influxDB)\n+ nock(fakeUrl)\n+ .post(`/api/v2/scripts/${fakeScriptID}/invoke`)\n+ .reply(200, fakeResponse)\n+ .persist()\n+ const rows: any[] = []\n+ const response = subject.invoke(fakeScriptID, {hi: 'Bob'})\n+ for await (const {values, tableMeta} of response.iterateRows()) {\n+ rows.push(tableMeta.toObject(values))\n+ }\n+ expect(rows).to.deep.equal([\n+ {\n+ result: '_result',\n+ table: 0,\n+ _time: '2021-01-01T00:00:00Z',\n+ _value: 2,\n+ },\n+ {\n+ result: '_result',\n+ table: 0,\n+ _time: '2021-01-01T02:24:00Z',\n+ _value: 6,\n+ },\n+ ])\n+ })\nit('can provide custom headers', async () => {\nconst subject = new FluxScriptInvocationAPI(influxDB, {\nheaders: {whatever: 'it is'},\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/InfluxDB.ts",
"new_path": "packages/core/src/InfluxDB.ts",
"diff": "@@ -7,7 +7,11 @@ import {Transport} from './transport'\nimport TransportImpl from './impl/node/NodeHttpTransport'\nimport QueryApi, {QueryOptions} from './QueryApi'\nimport QueryApiImpl from './impl/QueryApiImpl'\n-import {AnnotatedCSVResponse, APIExecutor} from './results'\n+import {\n+ AnnotatedCSVResponse,\n+ APIExecutor,\n+ IterableResultExecutor,\n+} from './results'\nimport {AnnotatedCSVResponseImpl} from './results/AnnotatedCSVResponseImpl'\n/**\n@@ -17,7 +21,10 @@ import {AnnotatedCSVResponseImpl} from './results/AnnotatedCSVResponseImpl'\nexport default class InfluxDB {\nprivate _options: ClientOptions\nreadonly transport: Transport\n- readonly processCSVResponse: (executor: APIExecutor) => AnnotatedCSVResponse\n+ readonly processCSVResponse: (\n+ executor: APIExecutor,\n+ iterableResultExecutor: IterableResultExecutor\n+ ) => AnnotatedCSVResponse\n/**\n* Creates influxdb client options from an options object or url.\n@@ -36,8 +43,15 @@ export default class InfluxDB {\nthrow new IllegalArgumentError('No url specified!')\nif (url.endsWith('/')) this._options.url = url.substring(0, url.length - 1)\nthis.transport = this._options.transport ?? new TransportImpl(this._options)\n- this.processCSVResponse = (executor: APIExecutor): AnnotatedCSVResponse =>\n- new AnnotatedCSVResponseImpl(executor, this.transport.chunkCombiner)\n+ this.processCSVResponse = (\n+ executor: APIExecutor,\n+ iterableResultExecutor: IterableResultExecutor\n+ ): AnnotatedCSVResponse =>\n+ new AnnotatedCSVResponseImpl(\n+ executor,\n+ iterableResultExecutor,\n+ this.transport.chunkCombiner\n+ )\n}\n/**\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/QueryApiImpl.ts",
"new_path": "packages/core/src/impl/QueryApiImpl.ts",
"diff": "@@ -7,6 +7,7 @@ import {\nFluxTableMetaData,\nRow,\nAnnotatedCSVResponse,\n+ IterableResultExecutor,\n} from '../results'\nimport {ParameterizedQuery} from '../query/flux'\nimport {APIExecutor} from '../results/ObservableQuery'\n@@ -23,7 +24,10 @@ export class QueryApiImpl implements QueryApi {\nprivate options: QueryOptions\nconstructor(\nprivate transport: Transport,\n- private createCSVResponse: (executor: APIExecutor) => AnnotatedCSVResponse,\n+ private createCSVResponse: (\n+ executor: APIExecutor,\n+ iterableResultExecutor: IterableResultExecutor\n+ ) => AnnotatedCSVResponse,\norg: string | QueryOptions\n) {\nthis.options = typeof org === 'string' ? {org} : org\n@@ -37,7 +41,27 @@ export class QueryApiImpl implements QueryApi {\n}\nresponse(query: string | ParameterizedQuery): AnnotatedCSVResponse {\n- return this.createCSVResponse(this.createExecutor(query))\n+ const {org, type, gzip, headers} = this.options\n+ const path = `/api/v2/query?org=${encodeURIComponent(org)}`\n+ const body = JSON.stringify(\n+ this.decorateRequest({\n+ query: query.toString(),\n+ dialect: DEFAULT_dialect,\n+ type,\n+ })\n+ )\n+ const options = {\n+ method: 'POST',\n+ headers: {\n+ 'content-type': 'application/json; encoding=utf-8',\n+ 'accept-encoding': gzip ? 'gzip' : 'identity',\n+ ...headers,\n+ },\n+ }\n+ return this.createCSVResponse(\n+ (consumer) => this.transport.send(path, body, options, consumer),\n+ () => this.transport.iterate(path, body, options)\n+ )\n}\nlines(query: string | ParameterizedQuery): Observable<string> {\n@@ -99,31 +123,6 @@ export class QueryApiImpl implements QueryApi {\n)\n}\n- private createExecutor(query: string | ParameterizedQuery): APIExecutor {\n- const {org, type, gzip, headers} = this.options\n-\n- return (consumer): void => {\n- this.transport.send(\n- `/api/v2/query?org=${encodeURIComponent(org)}`,\n- JSON.stringify(\n- this.decorateRequest({\n- query: query.toString(),\n- dialect: DEFAULT_dialect,\n- type,\n- })\n- ),\n- {\n- method: 'POST',\n- headers: {\n- 'content-type': 'application/json; encoding=utf-8',\n- 'accept-encoding': gzip ? 'gzip' : 'identity',\n- ...headers,\n- },\n- },\n- consumer\n- )\n- }\n- }\nprivate decorateRequest(request: any): any {\nif (typeof this.options.now === 'function') {\nrequest.now = this.options.now()\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/AnnotatedCSVResponse.ts",
"new_path": "packages/core/src/results/AnnotatedCSVResponse.ts",
"diff": "@@ -3,12 +3,27 @@ import {FluxTableMetaData, Row} from './FluxTableMetaData'\nimport {CommunicationObserver} from './CommunicationObserver'\nimport {FluxResultObserver} from './FluxResultObserver'\n+/**\n+ * A factory that returns async iterables.\n+ */\n+export type IterableResultExecutor = () => AsyncIterable<Uint8Array>\n+\n/**\n* AnnotatedCSVResponse provides various ways of how to\n* process data from an annotated CSV response stream,\n* which is returned as a result of a flux script execution.\n*/\nexport interface AnnotatedCSVResponse {\n+ /**\n+ * IterateLines returns iterable of CSV response lines suitable for `for-await` loop consumption.\n+ * @returns iterable of lines\n+ */\n+ iterateLines(): AsyncIterable<string>\n+ /**\n+ * IterateRows returns iterable of response table rows suitable for `for-await` loop consumption.\n+ * @returns iterable of rows\n+ */\n+ iterateRows(): AsyncIterable<Row>\n/**\n* Lines creates a cold observable of the CSV response lines.\n* @returns observable of CSV result lines\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/results/AnnotatedCSVResponseImpl.ts",
"new_path": "packages/core/src/results/AnnotatedCSVResponseImpl.ts",
"diff": "@@ -6,9 +6,14 @@ import {\nlinesToTables,\nChunkCombiner,\nchunksToLines,\n+ chunksToLinesIterable,\n+ linesToRowsIterable,\n} from '../results'\nimport {Observable} from '../observable'\n-import {AnnotatedCSVResponse} from './AnnotatedCSVResponse'\n+import {\n+ AnnotatedCSVResponse,\n+ IterableResultExecutor,\n+} from './AnnotatedCSVResponse'\nimport ObservableQuery, {APIExecutor} from './ObservableQuery'\nexport function defaultRowMapping(\n@@ -25,8 +30,17 @@ export function defaultRowMapping(\nexport class AnnotatedCSVResponseImpl implements AnnotatedCSVResponse {\nconstructor(\nprivate executor: APIExecutor,\n+ private iterableResultExecutor: IterableResultExecutor,\nprivate chunkCombiner: ChunkCombiner\n) {}\n+ iterateLines(): AsyncIterable<string> {\n+ return chunksToLinesIterable(this.iterableResultExecutor())\n+ }\n+ iterateRows(): AsyncIterable<Row> {\n+ return linesToRowsIterable(\n+ chunksToLinesIterable(this.iterableResultExecutor())\n+ )\n+ }\nlines(): Observable<string> {\nreturn new ObservableQuery(this.executor, (observer) =>\nchunksToLines(observer, this.chunkCombiner)\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): allow to process annotated CSV response as iterable of lines or rows |
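The commit above exposes `iterateLines()` and `iterateRows()` on `AnnotatedCSVResponse`, so an annotated-CSV stream can be consumed with a `for-await` loop instead of an observer. A minimal sketch of that pattern, assuming a reachable InfluxDB instance; the `url`, `token`, and `org` values are placeholders:

```typescript
import {InfluxDB} from '@influxdata/influxdb-client'

// Placeholder connection values for illustration only.
const queryApi = new InfluxDB({
  url: 'http://localhost:8086',
  token: 'my-token',
}).getQueryApi('my-org')

async function printBucketNames(): Promise<void> {
  // response() returns an AnnotatedCSVResponse; iterateRows() yields
  // {values, tableMeta} pairs parsed from the annotated CSV stream.
  const response = queryApi.response('buckets()')
  for await (const {values, tableMeta} of response.iterateRows()) {
    console.log(tableMeta.get(values, 'name'))
  }
}

printBucketNames().catch(console.error)
```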
305,159 | 04.10.2022 06:02:07 | -7,200 | af7cf3b6c1003ff0400e91bcb6a0b860668d6458 | feat(core): add iterateLines and iterateRows to QueryApi | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/QueryApi.ts",
"new_path": "packages/core/src/QueryApi.ts",
"diff": "@@ -55,6 +55,26 @@ export default interface QueryApi {\n*/\nresponse(query: string | ParameterizedQuery): AnnotatedCSVResponse\n+ /**\n+ * IterateLines executes the supplied query and returns results in\n+ * an async iterable of annotated CSV lines.\n+ * Async iterables are best consumed by `for-await` loop.\n+ *\n+ * @param query - query\n+ * @returns async iterable of CSV result lines\n+ */\n+ iterateLines(query: string | ParameterizedQuery): AsyncIterable<string>\n+\n+ /**\n+ * IterateRows executes the supplied query and returns results in\n+ * an async iterable of row data and table metadata pairs.\n+ * Async iterables are best consumed by `for-await` loop.\n+ *\n+ * @param query - query\n+ * @returns async iterable of CSV result lines\n+ */\n+ iterateRows(query: string | ParameterizedQuery): AsyncIterable<Row>\n+\n/**\n* Creates a cold observable of the lines returned by the given query.\n*\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/QueryApiImpl.ts",
"new_path": "packages/core/src/impl/QueryApiImpl.ts",
"diff": "@@ -64,6 +64,12 @@ export class QueryApiImpl implements QueryApi {\n)\n}\n+ iterateLines(query: string | ParameterizedQuery): AsyncIterable<string> {\n+ return this.response(query).iterateLines()\n+ }\n+ iterateRows(query: string | ParameterizedQuery): AsyncIterable<Row> {\n+ return this.response(query).iterateRows()\n+ }\nlines(query: string | ParameterizedQuery): Observable<string> {\nreturn this.response(query).lines()\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/test/unit/QueryApi.test.ts",
"new_path": "packages/core/test/unit/QueryApi.test.ts",
"diff": "@@ -50,6 +50,24 @@ describe('QueryApi', () => {\nexpect(target.completed).to.equals(1)\nexpect(target.lines).to.deep.equal(simpleResponseLines)\n})\n+ it('iterates lines', async () => {\n+ const subject = new InfluxDB(clientOptions).getQueryApi(ORG)\n+ nock(clientOptions.url)\n+ .post(QUERY_PATH)\n+ .reply((_uri, _requestBody) => {\n+ return [\n+ 200,\n+ fs.createReadStream('test/fixture/query/simpleResponse.txt'),\n+ {'retry-after': '1'},\n+ ]\n+ })\n+ .persist()\n+ const lines: any[] = []\n+ for await (const line of subject.iterateLines('buckets()')) {\n+ lines.push(line)\n+ }\n+ expect(lines).to.deep.equal(simpleResponseLines)\n+ })\n;[\n['response2', undefined],\n['response2', true],\n@@ -85,6 +103,46 @@ describe('QueryApi', () => {\nexpect(target.rows).to.deep.equal(response.rows)\n})\n})\n+ ;[\n+ ['response2', undefined],\n+ ['response2', true],\n+ ['response3', false],\n+ ].forEach(([name, gzip]) => {\n+ it(`iterate rows from ${name} with gzip=${gzip}`, async () => {\n+ const subject = new InfluxDB(clientOptions)\n+ .getQueryApi(ORG)\n+ .with({gzip: gzip as boolean | undefined})\n+ nock(clientOptions.url)\n+ .post(QUERY_PATH)\n+ .reply((_uri, _requestBody) => {\n+ let stream: any = fs.createReadStream(\n+ `test/fixture/query/${name}.txt`\n+ )\n+ if (gzip) stream = stream.pipe(zlib.createGzip())\n+ return [200, stream, {'content-encoding': gzip ? 'gzip' : 'identity'}]\n+ })\n+ .persist()\n+ let index = 0\n+ let lastMeta: FluxTableMetaData | undefined = undefined\n+ const tables: Array<{index: number; meta: FluxTableMetaData}> = []\n+ const rows: Array<{index: number; row: string[]}> = []\n+ for await (const {values, tableMeta} of subject.iterateRows(\n+ 'buckets()'\n+ )) {\n+ if (lastMeta !== tableMeta) {\n+ tables.push({index: index++, meta: tableMeta})\n+ lastMeta = tableMeta\n+ }\n+ rows.push({index: index++, row: values})\n+ }\n+\n+ const response = JSON.parse(\n+ fs.readFileSync(`test/fixture/query/${name}.parsed.json`, 'utf8')\n+ )\n+ expect(tables).to.deep.equal(response.tables)\n+ expect(rows).to.deep.equal(response.rows)\n+ })\n+ })\nit('receives properly indexed table data', async () => {\nconst subject = new InfluxDB(clientOptions).getQueryApi(ORG).with({})\nnock(clientOptions.url)\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(core): add iterateLines and iterateRows to QueryApi |
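With `iterateLines`/`iterateRows` promoted onto `QueryApi` itself, the explicit `response()` call can be skipped. A sketch with error handling — as in the examples elsewhere in this changeset, transport errors surface as a rejection of the `for-await` iteration, so a `try`/`catch` around the loop is the natural guard (connection values are again placeholders):

```typescript
import {InfluxDB} from '@influxdata/influxdb-client'

const queryApi = new InfluxDB({
  url: 'http://localhost:8086',
  token: 'my-token',
}).getQueryApi('my-org')

async function printCsvLines(): Promise<void> {
  try {
    // iterateLines(query) is shorthand for response(query).iterateLines()
    for await (const line of queryApi.iterateLines('buckets()')) {
      console.log(line) // one annotated-CSV line per iteration
    }
    console.log('query finished')
  } catch (error) {
    // transport/HTTP failures reject the async iteration
    console.error('query failed', error)
  }
}

printCsvLines()
```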
305,159 | 04.10.2022 06:37:45 | -7,200 | bbb4763c6d1e5c74fcb318695bffbc3356c5926a | feat(examples): change deno example to use for-await loop | [
{
"change_type": "MODIFY",
"old_path": "examples/query.deno.ts",
"new_path": "examples/query.deno.ts",
"diff": "// A modified query.ts example that works with deno //\n//////////////////////////////////////////////////////\n-import {\n- FluxTableMetaData,\n- InfluxDB,\n-} from 'https://cdn.skypack.dev/@influxdata/influxdb-client-browser?dts'\n+import {InfluxDB} from 'https://cdn.skypack.dev/@influxdata/influxdb-client-browser?dts'\nconst url = 'http://localhost:8086'\nconst token = 'my-token'\n@@ -17,19 +14,16 @@ const fluxQuery =\n'from(bucket:\"my-bucket\" ) |> range(start: 0) |> filter(fn: (r) => r._measurement == \"temperature\")'\nconsole.log('** QUERY ROWS ***')\n-queryApi.queryRows(fluxQuery, {\n- next(row: string[], tableMeta: FluxTableMetaData) {\n- const o = tableMeta.toObject(row)\n+try {\n+ for await (const {values, tableMeta} of queryApi.iterateRows(fluxQuery)) {\n+ const o = tableMeta.toObject(values)\n// console.log(JSON.stringify(o, null, 2))\nconsole.log(\n`${o._time} ${o._measurement} in '${o.location}' (${o.example}): ${o._field}=${o._value}`\n)\n- },\n- error(error: Error) {\n+ }\n+ console.log('\\nFinished SUCCESS')\n+} catch (error) {\nconsole.error(error)\nconsole.log('\\nFinished ERROR')\n- },\n- complete() {\n- console.log('\\nFinished SUCCESS')\n- },\n-})\n+}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(examples): change deno example to use for-await loop |
305,159 | 04.10.2022 08:13:47 | -7,200 | fde1c8f6746664fabae1a9c8db1760b46ff26140 | feat(examples): use for-await to get query results in influxdb-1.8 example | [
{
"change_type": "MODIFY",
"old_path": "examples/influxdb-1.8.ts",
"new_path": "examples/influxdb-1.8.ts",
"diff": "@@ -24,33 +24,27 @@ const clientOptions: ClientOptions = {\nconst influxDB = new InfluxDB(clientOptions)\n+async function writePoints() {\nconsole.log('*** WRITE POINTS ***')\n-\nconst writeAPI = influxDB.getWriteApi('', bucket)\nconst point = new Point('mem')\n.tag('host', 'host1')\n.floatField('used_percent', 23.43234543)\nwriteAPI.writePoint(point)\n-writeAPI\n- .close()\n- .then(() => console.log('Write FINISHED'))\n- .catch((error) => {\n- console.error(error)\n- })\n+ await writeAPI.close()\n+}\n+async function queryRows() {\nconsole.log('*** QUERY ROWS ***')\n-\nconst queryAPI = influxDB.getQueryApi('')\nconst query = `from(bucket: \"${bucket}\") |> range(start: -1h)`\n-queryAPI.queryRows(query, {\n- next: (row, tableMeta) => {\n- const o = tableMeta.toObject(row)\n+ for await (const {values, tableMeta} of queryAPI.iterateRows(query)) {\n+ const o = tableMeta.toObject(values)\nconsole.log(`${o._time} ${o._measurement} : ${o._field}=${o._value}`)\n- },\n- error: (error: Error) => {\n- console.error(error)\n- },\n- complete: () => {\n+ }\nconsole.log('\\nQuery FINISHED')\n- },\n-})\n+}\n+\n+writePoints()\n+ .then(() => queryRows())\n+ .catch((e) => console.error(e))\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(examples): use for-await to get query results in influxdb-1.8 example |
305,159 | 04.10.2022 08:28:07 | -7,200 | 8e657df7b11742fd0181d352932e407bfe70c5c7 | chore(examples): indent browser example | [
{
"change_type": "MODIFY",
"old_path": "examples/index.html",
"new_path": "examples/index.html",
"diff": "import {\nInfluxDB,\nPoint,\n- flux\n+ flux,\n} from 'https://unpkg.com/@influxdata/influxdb-client/dist/index.browser.mjs'\nimport {\nPingAPI,\nSetupAPI,\n} from 'https://unpkg.com/@influxdata/influxdb-client-apis/dist/index.mjs'\n// or use the following imports to use local builds\n- // import {InfluxDB, Point} from '../packages/core/dist/index.browser.mjs'\n+ // import {InfluxDB, Point, flux} from '../packages/core/dist/index.browser.mjs'\n// import {PingAPI, SetupAPI} from '../packages/apis/dist/index.browser.mjs'\n/**\n* Import InfluxDB configuration rather than inlining it.\n*/\n- import {url, token, org, bucket, username, password} from './env_browser.mjs'\n+ import {\n+ url,\n+ token,\n+ org,\n+ bucket,\n+ username,\n+ password,\n+ } from './env_browser.mjs'\nconst influxDB = new InfluxDB({url, token})\n// log results also to HTML page\nconst logField = document.getElementById('log')\n- function log(message, ...rest) {\n- console.log(arguments[0], rest)\n+ function log(...args) {\n+ console.log.apply(console, args)\nconst previousValue = logField.value\nlogField.value +=\n(previousValue ? '\\n' : '') +\npingExample()\n})\ndocument.addEventListener('DOMContentLoaded', () => {\n- const fluxQueryParam = new URLSearchParams(window.location.search).get('fluxQuery')\n+ const fluxQueryParam = new URLSearchParams(window.location.search).get(\n+ 'fluxQuery'\n+ )\nif (fluxQueryParam) {\nqueryInput.value = fluxQueryParam\n} else {\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(examples): indent browser example |
305,159 | 04.10.2022 08:39:13 | -7,200 | ef170175b78546d3e62d507b0a2b91efdd5b88e7 | feat(examples): use for-await to print results in queryWithParams example | [
{
"change_type": "MODIFY",
"old_path": "examples/README.md",
"new_path": "examples/README.md",
"diff": "@@ -4,7 +4,7 @@ This directory contains javascript and typescript examples for node.js, browser,\n- Node.js examples\n- Prerequisites\n- - [node](https://nodejs.org/en/) installed\n+ - [node](https://nodejs.org/en/) installed, at least version 16 is recommended\n- Run `npm install` in this directory\n- Change variables in [./env.mjs](env.mjs) to configure connection to your InfluxDB instance. The file can be used as-is against a new [docker InfluxDB v2.3 OSS GA installation](https://docs.influxdata.com/influxdb/v2.3/get-started/)\n- Examples are executable. If it does not work for you, run `npm run esr EXAMPLE.ts`.\n@@ -12,7 +12,7 @@ This directory contains javascript and typescript examples for node.js, browser,\nWrite data points to InfluxDB.\n- [query.ts](./query.ts)\nQuery InfluxDB with [Flux](https://docs.influxdata.com/influxdb/latest/get-started/).\n- - [queryWithParams.ts](./queryWithParams.ts)\n+ - [queryWithParams.mjs](./queryWithParams.mjs)\nSupply parameters to a [Flux](https://docs.influxdata.com/influxdb/latest/get-started/) query.\n- [ping.mjs](./ping.mjs)\nCheck status of InfluxDB server.\n"
},
{
"change_type": "RENAME",
"old_path": "examples/queryWithParams.ts",
"new_path": "examples/queryWithParams.mjs",
"diff": "-#!./node_modules/.bin/esr\n+#!/usr/bin/env node\n//////////////////////////////////////////\n// Shows how to use InfluxDB query API. //\n//////////////////////////////////////////\n-import {\n- InfluxDB,\n- FluxTableMetaData,\n- flux,\n- fluxDuration,\n-} from '@influxdata/influxdb-client'\n+import {InfluxDB, flux, fluxDuration} from '@influxdata/influxdb-client'\nimport {url, token, org} from './env.mjs'\nconst queryApi = new InfluxDB({url, token}).getQueryApi(org)\n@@ -20,21 +15,15 @@ const fluxQuery = flux`from(bucket:\"my-bucket\")\nconsole.log('query:', fluxQuery.toString())\nconsole.log('*** QUERY ROWS ***')\n-// performs query and receive line table metadata and rows\n-// https://v2.docs.influxdata.com/v2.0/reference/syntax/annotated-csv/\n-queryApi.queryRows(fluxQuery, {\n- next: (row: string[], tableMeta: FluxTableMetaData) => {\n- const o = tableMeta.toObject(row)\n+try {\n+ for await (const {values, tableMeta} of queryApi.iterateRows(fluxQuery)) {\n+ const o = tableMeta.toObject(values)\n// console.log(JSON.stringify(o, null, 2))\nconsole.log(\n`${o._time} ${o._measurement} in '${o.location}' (${o.example}): ${o._field}=${o._value}`\n)\n- },\n- error: (error: Error) => {\n- console.error(error)\n- console.log('\\nFinished ERROR')\n- },\n- complete: () => {\n+ }\nconsole.log('\\nFinished SUCCESS')\n- },\n-})\n+} catch (e) {\n+ console.log('\\nFinished ERROR')\n+}\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(examples): use for-await to print results in queryWithParams example |
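The rewritten example keeps its query built from the `flux` tagged template with `fluxDuration`, which embeds interpolated values as typed Flux literals instead of raw string concatenation. A small sketch of that parameterization; the bucket and measurement names are made up:

```typescript
import {flux, fluxDuration} from '@influxdata/influxdb-client'

// Hypothetical inputs; in a real app these might come from user input.
const measurement = 'temperature'
const start = fluxDuration('-1d')

// Interpolated values are rendered as proper Flux literals (strings are
// quoted and escaped), which helps guard dynamically built queries.
const fluxQuery = flux`from(bucket:"my-bucket")
  |> range(start: ${start})
  |> filter(fn: (r) => r._measurement == ${measurement})`

console.log('query:', fluxQuery.toString())
```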
305,159 | 04.10.2022 09:58:06 | -7,200 | e3882060759a9b83dbab7fc9412a2e510436a3c7 | chore(examples): improve influxdb-1.8 example | [
{
"change_type": "MODIFY",
"old_path": "examples/influxdb-1.8.ts",
"new_path": "examples/influxdb-1.8.ts",
"diff": "@@ -24,7 +24,7 @@ const clientOptions: ClientOptions = {\nconst influxDB = new InfluxDB(clientOptions)\n-async function writePoints() {\n+async function writePoints(): Promise<void> {\nconsole.log('*** WRITE POINTS ***')\nconst writeAPI = influxDB.getWriteApi('', bucket)\nconst point = new Point('mem')\n@@ -34,7 +34,7 @@ async function writePoints() {\nawait writeAPI.close()\n}\n-async function queryRows() {\n+async function queryRows(): Promise<void> {\nconsole.log('*** QUERY ROWS ***')\nconst queryAPI = influxDB.getQueryApi('')\nconst query = `from(bucket: \"${bucket}\") |> range(start: -1h)`\n@@ -46,5 +46,5 @@ async function queryRows() {\n}\nwritePoints()\n- .then(() => queryRows())\n+ .then(queryRows)\n.catch((e) => console.error(e))\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(examples): improve influxdb-1.8 example |
305,159 | 04.10.2022 09:59:35 | -7,200 | b3d17a10d30cd1ee6a261049231dd89dbd5d159f | feat(examples): receive results using async iterables in query.ts example | [
{
"change_type": "MODIFY",
"old_path": "examples/query.ts",
"new_path": "examples/query.ts",
"diff": "#!./node_modules/.bin/esr\n+/* eslint-disable @typescript-eslint/no-unused-vars */\n//////////////////////////////////////////\n// Shows how to use InfluxDB query API. //\n//////////////////////////////////////////\n@@ -10,12 +11,34 @@ const queryApi = new InfluxDB({url, token}).getQueryApi(org)\nconst fluxQuery =\n'from(bucket:\"my-bucket\") |> range(start: -1d) |> filter(fn: (r) => r._measurement == \"temperature\")'\n-console.log('*** QUERY ROWS ***')\n// There are more ways of how to receive results,\n-// the essential ones are shown/commented below. See also rxjs-query.ts .\n-//\n-// Execute query and receive table metadata and rows as they arrive from the server.\n-// https://docs.influxdata.com/influxdb/latest/reference/syntax/annotated-csv/\n+// the essential ones are shown in functions below.\n+// Execution of a particular function follow its declaration,\n+// comment/uncomment it at will.\n+// See also rxjs-query.ts and queryWithParamas.mjs .\n+\n+// Execute query and receive table metadata and table row values using async iterator.\n+async function iterateRows() {\n+ console.log('*** IterateRows ***')\n+ for await (const {values, tableMeta} of queryApi.iterateRows(fluxQuery)) {\n+ // the following line creates an object for each row\n+ const o = tableMeta.toObject(values)\n+ // console.log(JSON.stringify(o, null, 2))\n+ console.log(\n+ `${o._time} ${o._measurement} in '${o.location}' (${o.example}): ${o._field}=${o._value}`\n+ )\n+\n+ // alternatively, you can get only a specific column value without\n+ // the need to create an object for every row\n+ // console.log(tableMeta.get(row, '_time'))\n+ }\n+ console.log('\\nIterateRows SUCCESS')\n+}\n+iterateRows().catch((error) => console.error('IterateRows ERROR', error))\n+\n+// Execute query and receive table metadata and rows in a result observer.\n+function queryRows() {\n+ console.log('*** QueryRows ***')\nqueryApi.queryRows(fluxQuery, {\nnext: (row: string[], tableMeta: FluxTableMetaData) => {\n// the following line creates an object for each row\n@@ -28,59 +51,61 @@ queryApi.queryRows(fluxQuery, {\n// alternatively, you can get only a specific column value without\n// the need to create an object for every row\n// console.log(tableMeta.get(row, '_time'))\n-\n- // or you can create a proxy to get column values on demand\n- // const p = new Proxy<Record<string, any>>(row, tableMeta)\n- // console.log(\n- // `${p._time} ${p._measurement} in '${p.location}' (${p.example}): ${p._field}=${p._value}`\n- // )\n},\nerror: (error: Error) => {\nconsole.error(error)\n- console.log('\\nFinished ERROR')\n+ console.log('\\nQueryRows ERROR')\n},\ncomplete: () => {\n- console.log('\\nFinished SUCCESS')\n+ console.log('\\nQueryRows SUCCESS')\n},\n})\n+}\n+queryRows()\n+\n+// Execute query and collect result rows in a Promise.\n+// Use with caution, it copies the whole stream of results into memory.\n+async function collectRows() {\n+ console.log('\\n*** CollectRows ***')\n+ const data = await queryApi.collectRows(\n+ fluxQuery //, you can also specify a row mapper as a second argument\n+ )\n+ data.forEach((x) => console.log(JSON.stringify(x)))\n+ console.log('\\nCollect ROWS SUCCESS')\n+}\n+// collectRows().catch((error) => console.error('CollectRows ERROR', error))\n-// // Execute query and collect result rows in a Promise.\n-// // Use with caution, it copies the whole stream of results into memory.\n-// try {\n-// const data = await queryApi.collectRows(\n-// fluxQuery /*, you can specify a row mapper as a second arg */\n-// )\n-// 
data.forEach((x) => console.log(JSON.stringify(x)))\n-// console.log('\\nCollect ROWS SUCCESS')\n-// } catch (e) {\n-// console.error(e)\n-// console.log('\\nCollect ROWS ERROR')\n-// }\n+// Execute query and return the whole result as a string.\n+// Use with caution, it copies the whole stream of results into memory.\n+async function queryRaw() {\n+ const result = await queryApi.queryRaw(fluxQuery)\n+ console.log(result)\n+ console.log('\\nQueryRaw SUCCESS')\n+}\n+// queryRaw().catch((error) => console.error('QueryRaw ERROR', error))\n-// // Execute query and return the whole result as a string.\n-// // Use with caution, it copies the whole stream of results into memory.\n-// try {\n-// const result = await queryApi.queryRaw(fluxQuery)\n-// console.log(result)\n-// console.log('\\nQueryRaw SUCCESS')\n-// } catch (e) {\n-// console.error(e)\n-// console.log('\\nQueryRaw ERROR')\n-// }\n+// Execute query and receive result CSV lines in an observer\n+function queryLines() {\n+ queryApi.queryLines(fluxQuery, {\n+ next: (line: string) => {\n+ console.log(line)\n+ },\n+ error: (error: Error) => {\n+ console.error(error)\n+ console.log('\\nQueryLines ERROR')\n+ },\n+ complete: () => {\n+ console.log('\\nQueryLines SUCCESS')\n+ },\n+ })\n+}\n+// queryLines()\n-// Execute query and receive result lines in annotated csv format\n-// queryApi.queryLines(\n-// fluxQuery,\n-// {\n-// next: (line: string) => {\n-// console.log(line)\n-// },\n-// error: (error: Error) => {\n-// console.error(error)\n-// console.log('\\nFinished ERROR')\n-// },\n-// complete: () => {\n-// console.log('\\nFinished SUCCESS')\n-// },\n-// }\n-// )\n+// Execute query and receive result csv lines using async iterable\n+async function iterateLines() {\n+ for await (const line of queryApi.iterateLines(fluxQuery)) {\n+ console.log(line)\n+ }\n+ console.log('\\nIterateLines SUCCESS')\n+}\n+// iterateLines().catch((error) => console.error('\\nIterateLines ERROR', error))\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(examples): receive results using async iterables in query.ts example |
305,159 | 04.10.2022 10:02:32 | -7,200 | 200ddc84041f733e364572d4e761c602ab003bda | feat(examples): demonstrate collectRows row mapper fn | [
{
"change_type": "MODIFY",
"old_path": "examples/follow-redirects.mjs",
"new_path": "examples/follow-redirects.mjs",
"diff": "@@ -38,9 +38,11 @@ server.listen(0, 'localhost', async () => {\n},\n}).getQueryApi(org)\ntry {\n- const data = await queryApi.collectRows('buckets()')\n+ const data = await queryApi.collectRows('buckets()', (values, tableMeta) =>\n+ tableMeta.get(values, 'name')\n+ )\nconsole.info('Available buckets:')\n- data.forEach((x) => console.info('', x.name))\n+ data.forEach((name) => console.info('', name))\nconsole.log('\\nQuery SUCCESS')\n} catch (e) {\nconsole.error(e)\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(examples): demonstrate collectRows row mapper fn |
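The row-mapper argument demonstrated above can also shape rows into typed objects. A sketch under the assumption that `collectRows` accepts a type parameter for the collected row type; note that the whole result set is buffered in memory:

```typescript
import {InfluxDB, FluxTableMetaData} from '@influxdata/influxdb-client'

const queryApi = new InfluxDB({
  url: 'http://localhost:8086',
  token: 'my-token',
}).getQueryApi('my-org')

// Hypothetical row shape for illustration.
interface BucketRow {
  name: string
}

async function listBucketNames(): Promise<void> {
  // The mapper runs once per CSV row before the value is collected.
  const rows = await queryApi.collectRows<BucketRow>(
    'buckets()',
    (values: string[], tableMeta: FluxTableMetaData) => ({
      name: String(tableMeta.get(values, 'name')),
    })
  )
  rows.forEach((row) => console.log(row.name))
}

listBucketNames().catch(console.error)
```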
305,159 | 04.10.2022 11:09:22 | -7,200 | d9a8f69985a3d485c3aa2251634863d9a985304c | chore: update platform requirements | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -91,13 +91,13 @@ JavaScript client API Reference Documentation is available online at https://inf\nTo contribute code, fork the repository, apply changes and submit a pull request to the `master` branch.\n-Build Requirements:\n+Requirements:\n-- Node.js v14 LTS\n+- Node.js v16 LTS\n```bash\nnode --version\n```\n-- yarn 1.9.4. or higher\n+- yarn 1.22.19 or higher\n```bash\nyarn -v\n```\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore: update platform requirements |
305,159 | 04.10.2022 11:22:48 | -7,200 | 8291db41c34a08a77477ad2dc9945210583e4e6d | feat(examples): use async iterable in invokableScripts example | [
{
"change_type": "MODIFY",
"old_path": "examples/invokableScripts.mjs",
"new_path": "examples/invokableScripts.mjs",
"diff": "@@ -69,26 +69,15 @@ async function invokeScript(scriptID) {\n// Use FluxScriptInvocationAPI to execute a particular\n// script with specified parametes and process parsed results\nconst invocationAPI = new FluxScriptInvocationAPI(influxDB)\n- await new Promise((accept, reject) => {\n- let count = 0\n- invocationAPI.invoke(scriptID, params).consumeRows({\n- complete: accept,\n- error: reject,\n- next(row, tableMetaData) {\n- count++\n+ const results = invocationAPI.invoke(scriptID, params)\n+ let cnt = 0\n+ for await (const {values, tableMeta} of results.iterateRows()) {\n+ cnt++\n// console.log(tableMetaData.toObject(row))\n- console.log(\n- count,\n- '*',\n- count + 1,\n- '=',\n- row[tableMetaData.column('_value').index]\n- )\n- },\n- })\n- })\n- // You can also receive the whole response body. Use with caution,\n- // a possibly huge stream of results is copied to memory.\n+ console.log(cnt, '*', cnt + 1, '=', tableMeta.get(values, '_value'))\n+ }\n+ // // You can also receive the whole response body. Use with caution,\n+ // // a possibly huge stream of results is copied to memory.\n// const response = await scriptsAPI.postScriptsIDInvoke({\n// scriptID,\n// body: {params},\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | feat(examples): use async iterable in invokableScripts example |
305,159 | 04.10.2022 14:14:21 | -7,200 | d36abbcb3ce446ba24ac4d9f98d34f841dcc096e | chore(ci): switch CI jobs to run with node 16 | [
{
"change_type": "MODIFY",
"old_path": ".circleci/config.yml",
"new_path": ".circleci/config.yml",
"diff": "@@ -21,7 +21,7 @@ jobs:\nparameters:\nimage:\ntype: string\n- default: &default-image 'cimg/node:14.19'\n+ default: &default-image 'cimg/node:16.17'\ndocker:\n- image: << parameters.image >>\nsteps:\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(ci): switch CI jobs to run with node 16 |
305,159 | 04.10.2022 14:23:32 | -7,200 | b3f55a912abfa7dcc65ea3516c6bdd18fd496cc4 | chore(examples): improve docs in query.ts example | [
{
"change_type": "MODIFY",
"old_path": "examples/query.ts",
"new_path": "examples/query.ts",
"diff": "@@ -13,8 +13,8 @@ const fluxQuery =\n// There are more ways of how to receive results,\n// the essential ones are shown in functions below.\n-// Execution of a particular function follow its declaration,\n-// comment/uncomment it at will.\n+// Execution of a particular function follows\n+// its defintion, comment/uncomment it at will.\n// See also rxjs-query.ts and queryWithParamas.mjs .\n// Execute query and receive table metadata and table row values using async iterator.\n@@ -34,7 +34,7 @@ async function iterateRows() {\n}\nconsole.log('\\nIterateRows SUCCESS')\n}\n-iterateRows().catch((error) => console.error('IterateRows ERROR', error))\n+// iterateRows().catch((error) => console.error('IterateRows ERROR', error))\n// Execute query and receive table metadata and rows in a result observer.\nfunction queryRows() {\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(examples): improve docs in query.ts example |
305,159 | 04.10.2022 14:47:31 | -7,200 | 79526e5e2073a00cc50bbd730f7385707b7e58b2 | chore(core): avoid code branching | [
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"new_path": "packages/core/src/impl/node/NodeHttpTransport.ts",
"diff": "@@ -308,12 +308,12 @@ export class NodeHttpTransport implements Transport {\noptions.body = res\nresolve(options)\n})\n- } else {\n+ return\n+ }\noptions.body = bodyBuffer\noptions.headers['content-length'] = options.body.length\nresolve(options)\n}\n- }\nprivate _prepareResponse(\nres: http.IncomingMessage,\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(core): avoid code branching |
305,159 | 04.10.2022 18:44:25 | -7,200 | 2075d834f5acb689adbef0f22d9bafc894abfe02 | chore(examples): repair comment | [
{
"change_type": "MODIFY",
"old_path": "examples/query.ts",
"new_path": "examples/query.ts",
"diff": "@@ -14,8 +14,8 @@ const fluxQuery =\n// There are more ways of how to receive results,\n// the essential ones are shown in functions below.\n// Execution of a particular function follows\n-// its defintion, comment/uncomment it at will.\n-// See also rxjs-query.ts and queryWithParamas.mjs .\n+// its definition, comment/uncomment it at will.\n+// See also rxjs-query.ts and queryWithParams.mjs .\n// Execute query and receive table metadata and table row values using async iterator.\nasync function iterateRows() {\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(examples): repair comment |
305,159 | 05.10.2022 07:10:04 | -7,200 | 218189ae224f6b4eb934bd97ccc2d6cd7aac210b | chore: repair typos in tsdoc | [
{
"change_type": "MODIFY",
"old_path": "examples/invokableScripts.mjs",
"new_path": "examples/invokableScripts.mjs",
"diff": "@@ -67,7 +67,7 @@ async function invokeScript(scriptID) {\nconst params = {count: count}\nconsole.log('Script parameters: ', params)\n// Use FluxScriptInvocationAPI to execute a particular\n- // script with specified parametes and process parsed results\n+ // script with specified parameters and process parsed results\nconst invocationAPI = new FluxScriptInvocationAPI(influxDB)\nconst results = invocationAPI.invoke(scriptID, params)\nlet cnt = 0\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore: repair typos in tsdoc |
305,159 | 05.10.2022 09:53:37 | -7,200 | cbe6076e6bea11a885a086232a654afe69922d4b | chore: improve description of a breaking change | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG.md",
"new_path": "CHANGELOG.md",
"diff": "## 1.32.0 [unreleased]\n+### Features\n+\n+1. [#592](https://github.com/influxdata/influxdb-client-js/pull/592): Allow to receive query results using for-await loop.\n+\n+### Breaking Changes\n+\n+1. [#592](https://github.com/influxdata/influxdb-client-js/pull/592): The client packages newly require ES2018 runtime (was ES2015). The javascript code now needs [async generators](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/AsyncGenerator#browser_compatibility) and [for-await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of#browser_compatibility) loop. At least the latest node 14 is required because of mature support for iterable http response. Deno and all current modern browsers support ES2018 for years back. This change shoudn't cause any harm in existing installations. In case of troubles, configure your project with babel ES2018 preset to produce ES2015 code.\n+\n## 1.31.0 [2022-10-03]\n### Features\n1. [#604](https://github.com/influxdata/influxdb-client-js/pull/604): Fix unhandled promise rejection in write retry.\n-1. [#592](https://github.com/influxdata/influxdb-client-js/pull/592): Allow to receive query results using for-await loop.\n-\n-### Breaking Changes\n-\n-1. [#592](https://github.com/influxdata/influxdb-client-js/pull/592): The client packages newly require ES2018 runtime (was ES2015). The javascript code now needs [async generators](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/AsyncGenerator#browser_compatibility) and [for-await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of#browser_compatibility) loop. At least the latest node 14 is required because of mature support for iterable http response. Deno and all current modern browsers support ES2018 for years back.\n-\n## 1.30.0 [2022-09-29]\n### Features\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore: improve description of a breaking change |
305,160 | 28.11.2022 22:53:11 | -3,600 | 3cf4d6572544c2ef470a208de1d961a1b958b4a0 | fix(docs): enable keep alive | [
{
"change_type": "MODIFY",
"old_path": "examples/writeAdvanced.mjs",
"new_path": "examples/writeAdvanced.mjs",
"diff": "@@ -53,7 +53,7 @@ const writeOptions = {\n// can be used to reuse them and thus reduce the count of newly established networking sockets\nimport {Agent} from 'http'\nconst keepAliveAgent = new Agent({\n- keepAlive: false, // reuse existing connections\n+ keepAlive: true, // reuse existing connections\nkeepAliveMsecs: 20 * 1000, // 20 seconds keep alive\n})\nprocess.on('exit', () => keepAliveAgent.destroy())\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | fix(docs): enable keep alive (#647) |
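The fix above re-enables socket reuse in the write example. A sketch of wiring such an agent into the client, assuming the node transport forwards `transportOptions` into node's `http.request` options, as the example file relies on (url/token values are placeholders):

```typescript
import {Agent} from 'http'
import {InfluxDB} from '@influxdata/influxdb-client'

// Reuse sockets across write requests instead of opening a new
// connection per flush; the 20s keep-alive matches the example above.
const keepAliveAgent = new Agent({
  keepAlive: true,
  keepAliveMsecs: 20 * 1000,
})
process.on('exit', () => keepAliveAgent.destroy())

// transportOptions are merged into the underlying request options.
const influxDB = new InfluxDB({
  url: 'http://localhost:8086',
  token: 'my-token',
  transportOptions: {agent: keepAliveAgent},
})
```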
305,160 | 29.11.2022 16:01:21 | -3,600 | 734f014f9a080dcd4e1013f30e9e3e155ee61e01 | chore(ci): upgrade recommended node version to 18 | [
{
"change_type": "MODIFY",
"old_path": ".circleci/config.yml",
"new_path": ".circleci/config.yml",
"diff": "@@ -21,7 +21,7 @@ jobs:\nparameters:\nimage:\ntype: string\n- default: &default-image 'cimg/node:16.17'\n+ default: &default-image 'cimg/node:18.11'\ndocker:\n- image: << parameters.image >>\nsteps:\n@@ -87,6 +87,19 @@ workflows:\nbuild:\njobs:\n- tests:\n+ name: 'tests-node-18'\n+ filters:\n+ branches:\n+ ignore: gh-pages\n+ - tests:\n+ name: 'tests-node-14'\n+ image: 'cimg/node:14.21'\n+ filters:\n+ branches:\n+ ignore: gh-pages\n+ - tests:\n+ name: 'tests-node-16'\n+ image: 'cimg/node:16.18'\nfilters:\nbranches:\nignore: gh-pages\n@@ -96,7 +109,7 @@ workflows:\nignore: gh-pages\n- deploy-preview:\nrequires:\n- - tests\n+ - tests-node-18\n- coverage\nfilters:\nbranches:\n"
},
{
"change_type": "MODIFY",
"old_path": "CHANGELOG.md",
"new_path": "CHANGELOG.md",
"diff": "1. [#592](https://github.com/influxdata/influxdb-client-js/pull/592): Allow to receive query results using for-await loop.\n+### Other\n+\n+1. [#624](https://github.com/influxdata/influxdb-client-js/pull/624): Upgrade to the latest node v18 LTS.\n+\n### Breaking Changes\n1. [#592](https://github.com/influxdata/influxdb-client-js/pull/592): The client packages newly require ES2018 runtime (was ES2015). The javascript code now needs [async generators](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/AsyncGenerator#browser_compatibility) and [for-await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of#browser_compatibility) loop. At least the latest node 14 is required because of mature support for iterable http response. Deno and all current modern browsers support ES2018 for years back. This change shoudn't cause any harm in existing installations. In case of troubles, configure your project with babel ES2018 preset to produce ES2015 code.\n"
},
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -93,7 +93,7 @@ To contribute code, fork the repository, apply changes and submit a pull request\nRequirements:\n-- Node.js v16 LTS\n+- Node.js LTS version, v18 recommended\n```bash\nnode --version\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/README.md",
"new_path": "examples/README.md",
"diff": "@@ -4,7 +4,7 @@ This directory contains javascript and typescript examples for node.js, browser,\n- Node.js examples\n- Prerequisites\n- - [node](https://nodejs.org/en/) installed, at least version 16 is recommended\n+ - [node](https://nodejs.org/en/) installed, at least version 18 is recommended\n- Run `npm install` in this directory\n- Change variables in [./env.mjs](env.mjs) to configure connection to your InfluxDB instance. The file can be used as-is against a new [docker InfluxDB v2.3 OSS GA installation](https://docs.influxdata.com/influxdb/v2.3/get-started/)\n- Examples are executable. If it does not work for you, run `npm run esr EXAMPLE.ts`.\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(ci): upgrade recommended node version to 18 (#624) |
305,180 | 02.12.2022 07:45:40 | 25,200 | 033959c3c25e7a252db920a79b99420a50dbd020 | chore(release): prepare to release influxdb-client-js-1.33.0 | [
{
"change_type": "MODIFY",
"old_path": "CHANGELOG.md",
"new_path": "CHANGELOG.md",
"diff": "-## 1.33.0 [unreleased]\n+## 1.33.0 [2022-12-02]\n+\n+### Bug Fixes\n+\n+1. Updates to projects dependencies\n## 1.32.0 [2022-11-01]\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/src/impl/version.ts",
"new_path": "packages/core/src/impl/version.ts",
"diff": "-export const CLIENT_LIB_VERSION = '1.32.0'\n+export const CLIENT_LIB_VERSION = '1.33.0'\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(release): prepare to release influxdb-client-js-1.33.0 |
305,180 | 02.12.2022 08:01:07 | 25,200 | 20c9b33a0a7653000dc23c45d1c2dac6a4101c87 | chore(release): publish v1.33.0 [skip CI] | [
{
"change_type": "MODIFY",
"old_path": "lerna.json",
"new_path": "lerna.json",
"diff": "{\n- \"version\": \"1.32.0\",\n+ \"version\": \"1.33.0\",\n\"npmClient\": \"yarn\",\n\"packages\": [\"packages/*\"],\n\"command\": {\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/apis/package.json",
"new_path": "packages/apis/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client-apis\",\n- \"version\": \"1.32.0\",\n+ \"version\": \"1.33.0\",\n\"description\": \"InfluxDB 2.x generated APIs\",\n\"scripts\": {\n\"apidoc:extract\": \"api-extractor run\",\n\"@influxdata/influxdb-client\": \"*\"\n},\n\"devDependencies\": {\n- \"@influxdata/influxdb-client\": \"^1.32.0\",\n+ \"@influxdata/influxdb-client\": \"^1.33.0\",\n\"@influxdata/oats\": \"^0.7.0\",\n\"@microsoft/api-extractor\": \"^7.31.0\",\n\"@types/mocha\": \"^10.0.0\",\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core-browser/package.json",
"new_path": "packages/core-browser/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client-browser\",\n- \"version\": \"1.32.0\",\n+ \"version\": \"1.33.0\",\n\"description\": \"InfluxDB 2.x client for browser\",\n\"scripts\": {\n\"apidoc:extract\": \"echo \\\"Nothing to do\\\"\",\n},\n\"license\": \"MIT\",\n\"devDependencies\": {\n- \"@influxdata/influxdb-client\": \"^1.32.0\",\n+ \"@influxdata/influxdb-client\": \"^1.33.0\",\n\"cpr\": \"^3.0.1\",\n\"rimraf\": \"^3.0.0\"\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/package.json",
"new_path": "packages/core/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client\",\n- \"version\": \"1.32.0\",\n+ \"version\": \"1.33.0\",\n\"description\": \"InfluxDB 2.x client\",\n\"scripts\": {\n\"apidoc:extract\": \"api-extractor run\",\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/giraffe/package.json",
"new_path": "packages/giraffe/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client-giraffe\",\n- \"version\": \"1.32.0\",\n+ \"version\": \"1.33.0\",\n\"description\": \"InfluxDB 2.x client - giraffe integration\",\n\"scripts\": {\n\"apidoc:extract\": \"api-extractor run\",\n\"license\": \"MIT\",\n\"devDependencies\": {\n\"@influxdata/giraffe\": \"*\",\n- \"@influxdata/influxdb-client\": \"^1.32.0\",\n+ \"@influxdata/influxdb-client\": \"^1.33.0\",\n\"@microsoft/api-extractor\": \"^7.31.0\",\n\"@types/chai\": \"^4.2.5\",\n\"@types/mocha\": \"^10.0.0\",\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(release): publish v1.33.0 [skip CI] |
305,180 | 26.01.2023 11:12:43 | 25,200 | 4ffd54959e569b1696a60295eccf7985af81dcd1 | chore(release): publish v1.33.1 [skip CI] | [
{
"change_type": "MODIFY",
"old_path": "lerna.json",
"new_path": "lerna.json",
"diff": "{\n- \"version\": \"1.33.0\",\n+ \"version\": \"1.33.1\",\n\"npmClient\": \"yarn\",\n\"packages\": [\"packages/*\"],\n\"command\": {\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/apis/package.json",
"new_path": "packages/apis/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client-apis\",\n- \"version\": \"1.33.0\",\n+ \"version\": \"1.33.1\",\n\"description\": \"InfluxDB 2.x generated APIs\",\n\"scripts\": {\n\"apidoc:extract\": \"api-extractor run\",\n\"@influxdata/influxdb-client\": \"*\"\n},\n\"devDependencies\": {\n- \"@influxdata/influxdb-client\": \"^1.33.0\",\n+ \"@influxdata/influxdb-client\": \"^1.33.1\",\n\"@influxdata/oats\": \"^0.7.0\",\n\"@microsoft/api-extractor\": \"^7.31.0\",\n\"@types/mocha\": \"^10.0.0\",\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core-browser/package.json",
"new_path": "packages/core-browser/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client-browser\",\n- \"version\": \"1.33.0\",\n+ \"version\": \"1.33.1\",\n\"description\": \"InfluxDB 2.x client for browser\",\n\"scripts\": {\n\"apidoc:extract\": \"echo \\\"Nothing to do\\\"\",\n},\n\"license\": \"MIT\",\n\"devDependencies\": {\n- \"@influxdata/influxdb-client\": \"^1.33.0\",\n+ \"@influxdata/influxdb-client\": \"^1.33.1\",\n\"cpr\": \"^3.0.1\",\n\"rimraf\": \"^4.1.1\"\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/core/package.json",
"new_path": "packages/core/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client\",\n- \"version\": \"1.33.0\",\n+ \"version\": \"1.33.1\",\n\"description\": \"InfluxDB 2.x client\",\n\"scripts\": {\n\"apidoc:extract\": \"api-extractor run\",\n"
},
{
"change_type": "MODIFY",
"old_path": "packages/giraffe/package.json",
"new_path": "packages/giraffe/package.json",
"diff": "{\n\"name\": \"@influxdata/influxdb-client-giraffe\",\n- \"version\": \"1.33.0\",\n+ \"version\": \"1.33.1\",\n\"description\": \"InfluxDB 2.x client - giraffe integration\",\n\"scripts\": {\n\"apidoc:extract\": \"api-extractor run\",\n\"license\": \"MIT\",\n\"devDependencies\": {\n\"@influxdata/giraffe\": \"*\",\n- \"@influxdata/influxdb-client\": \"^1.33.0\",\n+ \"@influxdata/influxdb-client\": \"^1.33.1\",\n\"@microsoft/api-extractor\": \"^7.31.0\",\n\"@types/chai\": \"^4.2.5\",\n\"@types/mocha\": \"^10.0.0\",\n"
}
] | TypeScript | MIT License | influxdata/influxdb-client-js | chore(release): publish v1.33.1 [skip CI] |
350,419 | 04.03.2020 15:11:00 | 18,000 | 4c5bd59796546021f0e91ba9e6b552e2e61b02d7 | wascc-host 0.5.0 working for a request round trip | [
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/lib/libwascc_httpsrv.so",
"new_path": "crates/wascc-provider/lib/libwascc_httpsrv.so",
"diff": "Binary files a/crates/wascc-provider/lib/libwascc_httpsrv.so and b/crates/wascc-provider/lib/libwascc_httpsrv.so differ\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/src/lib.rs",
"new_path": "crates/wascc-provider/src/lib.rs",
"diff": "@@ -66,7 +66,7 @@ impl Provider for WasccProvider {\n.namespace\n.unwrap_or_else(|| \"default\".into());\n// TODO: Replace with actual image store lookup when it is merged\n- let data = std::fs::read(\"./testdata/greet_actor_signed.wasm\")?;\n+ let data = std::fs::read(\"./testdata/echo.wasm\")?;\n// TODO: Implement this for real.\n// Okay, so here is where things are REALLY unfinished. Right now, we are\n@@ -108,7 +108,7 @@ impl Provider for WasccProvider {\n.get(ACTOR_PUBLIC_KEY)\n.map(|a| a.to_string())\n.unwrap_or_else(|| \"\".into());\n-\n+ debug!(\"{:?}\", pubkey);\n// TODO: Launch this in a thread. (not necessary with waSCC)\nlet env = self.env_vars(client.clone(), &first_container, &pod);\n//let args = first_container.args.unwrap_or_else(|| vec![]);\n@@ -249,18 +249,18 @@ mod test {\n#[test]\nfn test_wascc_run() {\n// Open file\n- let data = std::fs::read(\"./testdata/greet_actor_signed.wasm\").expect(\"read the wasm file\");\n+ let data = std::fs::read(\"./testdata/echo.wasm\").expect(\"read the wasm file\");\n// Send into wascc_run\nwascc_run_http(\ndata,\nEnvVars::new(),\n- \"MADK3R3H47FGXN5F4HWPSJH4WCKDWKXQBBIOVI7YEPEYEMGJ2GDFIFE5\",\n+ \"MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2\",\n)\n.expect(\"successfully executed a WASM\");\n// Give the webserver a chance to start up.\nstd::thread::sleep(std::time::Duration::from_secs(3));\n- wascc_stop(\"MADK3R3H47FGXN5F4HWPSJH4WCKDWKXQBBIOVI7YEPEYEMGJ2GDFIFE5\")\n+ wascc_stop(\"MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2\")\n.expect(\"Removed the actor\");\n}\n"
},
{
"change_type": "ADD",
"old_path": "crates/wascc-provider/testdata/echo.wasm",
"new_path": "crates/wascc-provider/testdata/echo.wasm",
"diff": "Binary files /dev/null and b/crates/wascc-provider/testdata/echo.wasm differ\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/greet.yaml",
"new_path": "examples/greet.yaml",
"diff": "@@ -3,11 +3,11 @@ kind: Pod\nmetadata:\nname: greet\nannotations:\n- deislabs.io/wascc-action-key: MADK3R3H47FGXN5F4HWPSJH4WCKDWKXQBBIOVI7YEPEYEMGJ2GDFIFE5\n+ deislabs.io/wascc-action-key: MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2\nspec:\ncontainers:\n- - image: greet_actor_signed.wasm\n+ - image: echo.wasm\nimagePullPolicy: Always\nname: greet\nports:\n@@ -16,3 +16,7 @@ spec:\nkubernetes.io/role: agent\nbeta.kubernetes.io/os: linux\nbeta.kubernetes.io/arch: wasm32-wascc\n+ tolerations:\n+ - key: \"node.kubernetes.io/network-unavailable\"\n+ operator: \"Exists\"\n+ effect: \"NoSchedule\"\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "justfile",
"new_path": "justfile",
"diff": "-export RUST_LOG := \"wascc_provider=debug,wasi_provider=debug,main=debug\"\n+export RUST_LOG := \"wascc_host=debug,wascc_provider=debug,wasi_provider=debug,main=debug\"\nbuild:\ncargo build\n@@ -25,7 +25,7 @@ push:\nitest:\nkubectl create -f examples/greet.yaml\nsleep 5\n- for i in 1 2 3 4 5; do sleep 3 && kubectl get po greet2; done\n+ for i in 1 2 3 4 5; do sleep 3 && kubectl get po greet; done\n_cleanup_kube:\nkubectl delete no krustlet || true\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | wascc-host 0.5.0 working for a request round trip |
350,425 | 05.03.2020 14:28:42 | 28,800 | ea5bc2a3fc18fd8f93069dba584d1f9d40133a66 | Implement workaround for LeaseSpec serialization issue.
See relating to the issue this attempts to workaround.
Rather than serialize a `LeaseSpec` which has a variable sized timestamp
formatting, this forces the timestamps to the six digits of subsecond precision
currently required by the k8s API. | [
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/node.rs",
"new_path": "crates/kubelet/src/node.rs",
"diff": "use chrono::prelude::*;\n-use k8s_openapi::api::coordination::v1::{Lease, LeaseSpec};\n+use k8s_openapi::api::coordination::v1::Lease;\nuse k8s_openapi::api::core::v1::Node;\n-use k8s_openapi::apimachinery::pkg::apis::meta::v1::{MicroTime, Time};\n+use k8s_openapi::apimachinery::pkg::apis::meta::v1::Time;\nuse kube::{\napi::{Api, PatchParams, PostParams},\nclient::APIClient,\n@@ -246,12 +246,17 @@ fn lease_definition(node_uid: &str) -> serde_json::Value {\n/// Defines a new coordiation lease for Kubernetes\n///\n/// We set the lease times, the lease duration, and the node name.\n-fn lease_spec_definition() -> LeaseSpec {\n- LeaseSpec {\n- holder_identity: Some(NODE_NAME.to_string()),\n- acquire_time: Some(MicroTime(Utc::now())),\n- renew_time: Some(MicroTime(Utc::now())),\n- lease_duration_seconds: Some(300),\n- ..Default::default()\n+fn lease_spec_definition() -> serde_json::Value {\n+ // Workaround for https://github.com/deislabs/krustlet/issues/5\n+ // In the future, use LeaseSpec rather than a JSON value\n+ let now = Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Micros, true);\n+\n+ json!(\n+ {\n+ \"holderIdentity\": NODE_NAME,\n+ \"acquireTime\": now,\n+ \"renewTime\": now,\n+ \"leaseDurationSeconds\": 300\n}\n+ )\n}\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Implement workaround for LeaseSpec serialization issue.
See #5 relating to the issue this attempts to workaround.
Rather than serialize a `LeaseSpec` which has a variable sized timestamp
formatting, this forces the timestamps to the six digits of subsecond precision
currently required by the k8s API. |
350,419 | 10.03.2020 12:58:44 | 14,400 | b4f891f15a27a814666f34787f741a689a5706d3 | update LINUX only echo provider | [
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/src/lib.rs",
"new_path": "crates/wascc-provider/src/lib.rs",
"diff": "@@ -260,6 +260,11 @@ mod test {\nuse super::*;\nuse k8s_openapi::api::core::v1::PodSpec;\n+ #[cfg(target_os = \"linux\")]\n+ const ECHO_LIB: &str = \"./testdata/libecho_provider.so\";\n+ #[cfg(target_os = \"macos\")]\n+ const ECHO_LIB: &str = \"./testdata/libecho_provider.dylib\";\n+\n#[tokio::test]\nasync fn test_init() {\nlet provider = WasccProvider {};\n@@ -287,6 +292,26 @@ mod test {\n.expect(\"Removed the actor\");\n}\n+ #[test]\n+ fn test_wascc_echo() {\n+ let data = NativeCapability::from_file(ECHO_LIB).expect(\"loaded echo library\");\n+ host::add_native_capability(data).expect(\"added echo capability\");\n+\n+ let key = \"MDAYLDTOZEHQFPB3CL5PAFY5UTNCW32P54XGWYX3FOM2UBRYNCP3I3BF\";\n+\n+ let wasm = std::fs::read(\"./testdata/echo_actor_s.wasm\").expect(\"load echo WASM\");\n+ // TODO: use wascc_run to execute echo_actor\n+ wascc_run(\n+ wasm,\n+ key,\n+ vec![Capability {\n+ name: \"wok:echoProvider\",\n+ env: EnvVars::new(),\n+ }],\n+ )\n+ .expect(\"completed echo run\")\n+ }\n+\n#[test]\nfn test_can_schedule() {\nlet wr = WasccProvider {};\n"
},
{
"change_type": "ADD",
"old_path": "crates/wascc-provider/testdata/echo_actor_s.wasm",
"new_path": "crates/wascc-provider/testdata/echo_actor_s.wasm",
"diff": "Binary files /dev/null and b/crates/wascc-provider/testdata/echo_actor_s.wasm differ\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/testdata/libecho_provider.so",
"new_path": "crates/wascc-provider/testdata/libecho_provider.so",
"diff": "Binary files a/crates/wascc-provider/testdata/libecho_provider.so and b/crates/wascc-provider/testdata/libecho_provider.so differ\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | update LINUX only echo provider |
350,437 | 10.03.2020 10:06:06 | 25,200 | a5d6c7e36afa324e3d80c42e64c7b7842b4f55a3 | Try cross-platorm build action | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/build.yml",
"new_path": ".github/workflows/build.yml",
"diff": "@@ -4,13 +4,21 @@ on: [push, pull_request]\njobs:\nbuild:\n- runs-on: ubuntu-latest\n-\n+ runs-on: ${{ matrix.config.os }}\n+ strategy:\n+ fail-fast: false\n+ matrix:\n+ config:\n+ - {os: \"ubuntu-latest\", url: \"https://github.com/casey/just/releases/download/v0.5.8/just-v0.5.8-x86_64-unknown-linux-musl.tar.gz\", name: \"just\", pathInArchive: \"just\" }\n+ - {os: \"macos-latest\", url: \"https://github.com/casey/just/releases/download/v0.5.8/just-v0.5.8-x86_64-apple-darwin.tar.gz\", name: \"just\", pathInArchive: \"just\" }\nsteps:\n- uses: actions/checkout@v1\n+ - uses: engineerd/[email protected]\n+ with:\n+ name: ${{ matrix.config.name }}\n+ url: ${{ matrix.config.url }}\n+ pathInArchive: ${{ matrix.config.pathInArchive }}\n- name: Build\nrun: |\n- wget https://github.com/casey/just/releases/download/v0.5.8/just-v0.5.8-x86_64-unknown-linux-musl.tar.gz\n- tar -xzf just-v0.5.8-x86_64-unknown-linux-musl.tar.gz -C /tmp\n- /tmp/just build\n- /tmp/just test\n+ just build\n+ just test\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Try cross-platorm build action
Signed-off-by: Radu M <[email protected]> |
350,425 | 15.03.2020 22:11:51 | 25,200 | f4c00a3cb8f815db73525c76bd94a501ec96b823 | Fix environment variables from secrets.
The `string_data` field of `Secret` is a write-only convenience and will always
be `None` when read.
This commit fixes it so that the `data` field is used instead. | [
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/kubelet.rs",
"new_path": "crates/kubelet/src/kubelet.rs",
"diff": "@@ -438,18 +438,17 @@ async fn on_missing_value(\nmatch Api::<Secret>::namespaced(client, ns).get(name).await {\nOk(secret) => {\n// I am not totally clear on what the outcome should\n- // be of a cfgmap key miss. So for now just return an\n+ // be of a secret key miss. So for now just return an\n// empty default.\n-\nreturn secret\n- .string_data\n+ .data\n.unwrap_or_default()\n- .get(&seckey.key)\n- .cloned()\n+ .remove(&seckey.key)\n+ .map(|s| String::from_utf8(s.0).unwrap_or_default())\n.unwrap_or_default();\n}\nErr(e) => {\n- error!(\"Error fetching config map {}: {}\", name, e);\n+ error!(\"Error fetching secret {}: {}\", name, e);\nreturn \"\".to_string();\n}\n}\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Fix environment variables from secrets.
The `string_data` field of `Secret` is a write-only convenience and will always
be `None` when read.
This commit fixes it so that the `data` field is used instead. |
350,409 | 17.03.2020 17:06:48 | -3,600 | 41dc3838ffabc3194209eed381f6d4c38cdca674 | Panic if name on pod is not set | [
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/kubelet.rs",
"new_path": "crates/kubelet/src/kubelet.rs",
"diff": "@@ -328,10 +328,7 @@ pub trait Provider {\n// Step 1: Is this legit?\n// Step 2: Can the provider handle this?\nif !self.can_schedule(&pod) {\n- debug!(\n- \"Provider cannot schedule {}\",\n- pod.name().unwrap_or_default()\n- );\n+ debug!(\"Provider cannot schedule {}\", pod.name());\nreturn Ok(());\n};\n// Step 3: DO IT!\n@@ -342,10 +339,7 @@ pub trait Provider {\n// Step 1: Can the provider handle this? (This should be the faster function,\n// so we can weed out negatives quickly.)\nif !self.can_schedule(&pod) {\n- debug!(\n- \"Provider cannot schedule {}\",\n- pod.name().unwrap_or_default()\n- );\n+ debug!(\"Provider cannot schedule {}\", pod.name());\nreturn Ok(());\n};\n// Step 2: Is this a real modification, or just status?\n@@ -356,10 +350,7 @@ pub trait Provider {\nlet pod = pod.into();\n// Step 1: Can the provider handle this?\nif !self.can_schedule(&pod) {\n- debug!(\n- \"Provider cannot schedule {}\",\n- pod.name().unwrap_or_default()\n- );\n+ debug!(\"Provider cannot schedule {}\", pod.name());\nreturn Ok(());\n};\n// Step 2: DO IT!\n@@ -477,10 +468,7 @@ async fn on_missing_value(\n/// provides those fields.\nfn field_map(pod: &Pod) -> HashMap<String, String> {\nlet mut map: HashMap<String, String> = HashMap::new();\n- map.insert(\n- \"metadata.name\".into(),\n- pod.name().unwrap_or_default().to_owned(),\n- );\n+ map.insert(\"metadata.name\".into(), pod.name().to_owned());\nmap.insert(\"metadata.namespace\".into(), pod.namespace().to_owned());\nmap.insert(\n\"spec.serviceAccountName\".into(),\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/pod.rs",
"new_path": "crates/kubelet/src/pod.rs",
"diff": "@@ -21,8 +21,12 @@ impl Pod {\n}\n/// Get the name of the pod\n- pub fn name(&self) -> Option<&str> {\n- self.0.metadata.as_ref()?.name.as_deref()\n+ pub fn name(&self) -> &str {\n+ self.0\n+ .metadata\n+ .as_ref()\n+ .and_then(|m| m.name.as_deref())\n+ .expect(\"Pod name should always be set but was not\")\n}\n/// Get the pod's namespace\n@@ -95,7 +99,7 @@ impl Pod {\n);\nlet data = serde_json::to_vec(&status).expect(\"Should always serialize\");\n- let name = self.name().unwrap_or_default();\n+ let name = self.name();\nlet api: Api<KubePod> = Api::namespaced(client, self.namespace());\nmatch api.patch_status(&name, &PatchParams::default(), data).await {\nOk(o) => {\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wasi-provider/src/lib.rs",
"new_path": "crates/wasi-provider/src/lib.rs",
"diff": "@@ -122,7 +122,7 @@ impl Provider for WasiProvider {\n}\nasync fn status(&self, pod: Pod, _client: APIClient) -> anyhow::Result<Status> {\n- let pod_name = pod.name().unwrap_or_default();\n+ let pod_name = pod.name();\nlet mut handles = self.handles.write().await;\nlet container_handles =\nhandles\n@@ -167,7 +167,7 @@ impl Provider for WasiProvider {\n/// Generates a unique human readable key for storing a handle to a pod\nfn key_from_pod(pod: &Pod) -> String {\n- pod_key(&pod.namespace(), pod.name().unwrap_or_default())\n+ pod_key(pod.namespace(), pod.name())\n}\nfn pod_key<N: AsRef<str>, T: AsRef<str>>(namespace: N, pod_name: T) -> String {\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Panic if name on pod is not set (#78) |
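Editor's note: the accessor change above is small but worth seeing end to end. A minimal sketch, assuming hypothetical stand-ins for the `k8s-openapi` metadata types; the `Option` chain ending in `expect` is the pattern from the diff.

```rust
// Hypothetical stand-ins for the Kubernetes metadata structures.
struct ObjectMeta {
    name: Option<String>,
}
struct KubePod {
    metadata: Option<ObjectMeta>,
}
struct Pod(KubePod);

impl Pod {
    // The API server always sets a name on a persisted pod, so a missing
    // one indicates a bug worth failing loudly on rather than papering
    // over with a default.
    fn name(&self) -> &str {
        self.0
            .metadata
            .as_ref()
            .and_then(|m| m.name.as_deref())
            .expect("Pod name should always be set but was not")
    }
}

fn main() {
    let pod = Pod(KubePod {
        metadata: Some(ObjectMeta { name: Some("greet".to_string()) }),
    });
    println!("scheduling {}", pod.name());
}
```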
350,409 | 20.03.2020 17:38:12 | -3,600 | 49a1a1325f347bc03ac990612f48fe0bc4f48c3e | Configure protocol of oci client | [
{
"change_type": "MODIFY",
"old_path": "crates/oci-distribution/src/lib.rs",
"new_path": "crates/oci-distribution/src/lib.rs",
"diff": "@@ -36,6 +36,7 @@ type OciResult<T> = anyhow::Result<T>;\n/// For true anonymous access, you can skip `auth()`. This is not recommended\n/// unless you are sure that the remote registry does not require Oauth2.\npub struct Client {\n+ config: ClientConfig,\ntoken: Option<RegistryToken>,\nclient: reqwest::Client,\n}\n@@ -44,6 +45,16 @@ impl Client {\n// Create a new client initialized to share HTTP connections across multiple requests.\npub fn new() -> Self {\nClient {\n+ config: Default::default(),\n+ token: None,\n+ client: reqwest::Client::new(),\n+ }\n+ }\n+\n+ // Create a new client with the supplied config\n+ pub fn new_with_config(config: ClientConfig) -> Self {\n+ Client {\n+ config,\ntoken: None,\nclient: reqwest::Client::new(),\n}\n@@ -88,7 +99,7 @@ impl Client {\n/// `reqwest` error, the request itself failed. All other error messages mean that\n/// v2 is not supported.\npub async fn version(&self, host: &str) -> OciResult<String> {\n- let url = format!(\"https://{}/v2/\", host);\n+ let url = format!(\"{}://{}/v2/\", self.config.protocol.as_str(), host);\nlet res = self.client.get(&url).send().await?;\nlet dist_hdr = res.headers().get(OCI_VERSION_KEY);\nlet version = dist_hdr\n@@ -104,7 +115,11 @@ impl Client {\n/// on other requests.\npub async fn auth(&mut self, image: &Reference, _secret: Option<&str>) -> OciResult<()> {\n// The version request will tell us where to go.\n- let url = format!(\"https://{}/v2/\", image.registry());\n+ let url = format!(\n+ \"{}://{}/v2/\",\n+ self.config.protocol.as_str(),\n+ image.registry()\n+ );\nlet res = self.client.get(&url).send().await?;\nlet dist_hdr = match res.headers().get(reqwest::header::WWW_AUTHENTICATE) {\nSome(h) => h,\n@@ -153,7 +168,7 @@ impl Client {\n/// If the connection has already gone through authentication, this will\n/// use the bearer token. Otherwise, this will attempt an anonymous pull.\npub async fn pull_manifest(&self, image: &Reference) -> OciResult<OciManifest> {\n- let url = image.to_v2_manifest_url();\n+ let url = image.to_v2_manifest_url(self.config.protocol.as_str());\nlet request = self.client.get(&url);\nlet res = request.headers(self.auth_headers()).send().await?;\n@@ -191,7 +206,7 @@ impl Client {\ndigest: &str,\nmut out: T,\n) -> OciResult<()> {\n- let url = image.to_v2_blob_url(&digest);\n+ let url = image.to_v2_blob_url(self.config.protocol.as_str(), digest);\nlet mut stream = self\n.client\n.get(&url)\n@@ -261,6 +276,34 @@ impl ModuleStore for FileModuleStore {\n}\n}\n+#[derive(Debug, Clone)]\n+pub struct ClientConfig {\n+ pub protocol: ClientProtocol,\n+}\n+\n+impl Default for ClientConfig {\n+ fn default() -> Self {\n+ Self {\n+ protocol: ClientProtocol::Https,\n+ }\n+ }\n+}\n+\n+#[derive(Debug, Clone)]\n+pub enum ClientProtocol {\n+ Http,\n+ Https,\n+}\n+\n+impl ClientProtocol {\n+ fn as_str(&self) -> &str {\n+ match self {\n+ ClientProtocol::Https => \"https\",\n+ ClientProtocol::Http => \"http\",\n+ }\n+ }\n+}\n+\n/// A token granted during the OAuth2-like workflow for OCI registries.\n#[derive(serde::Deserialize, Default)]\nstruct RegistryToken {\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/oci-distribution/src/reference.rs",
"new_path": "crates/oci-distribution/src/reference.rs",
"diff": "@@ -32,9 +32,10 @@ impl Reference {\n}\n/// Convert a Reference to a v2 manifest URL.\n- pub fn to_v2_manifest_url(&self) -> String {\n+ pub fn to_v2_manifest_url(&self, protocol: &str) -> String {\nformat!(\n- \"https://{}/v2/{}/manifests/{}\",\n+ \"{}://{}/v2/{}/manifests/{}\",\n+ protocol,\nself.registry(),\nself.repository(),\nself.tag()\n@@ -42,9 +43,10 @@ impl Reference {\n}\n/// Convert a Reference to a v2 blob (layer) URL.\n- pub fn to_v2_blob_url(&self, digest: &str) -> String {\n+ pub fn to_v2_blob_url(&self, protocol: &str, digest: &str) -> String {\nformat!(\n- \"https://{}/v2/{}/blobs/{}\",\n+ \"{}://{}/v2/{}/blobs/{}\",\n+ protocol,\nself.registry(),\nself.repository(),\ndigest\n@@ -117,7 +119,7 @@ mod tests {\n.expect(\"Could not parse reference\");\nassert_eq!(\n\"https://webassembly.azurecr.io/v2/hello/manifests/v1\",\n- reference.to_v2_manifest_url()\n+ reference.to_v2_manifest_url(\"https\")\n);\n}\n}\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Configure protocol of oci client (#92) |
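Editor's note: the scheme selection added by this commit is easy to exercise standalone. The sketch below copies the `ClientProtocol` enum from the diff and pairs it with a free-standing URL builder; in the real crate the equivalent logic lives in `Reference::to_v2_manifest_url`, so the function here is illustrative only.

```rust
// Protocol selection mirroring the enum added in the commit above.
#[derive(Debug, Clone)]
enum ClientProtocol {
    Http,
    Https,
}

impl ClientProtocol {
    fn as_str(&self) -> &str {
        match self {
            ClientProtocol::Https => "https",
            ClientProtocol::Http => "http",
        }
    }
}

// Build a v2 manifest URL with the scheme supplied by configuration
// instead of a hard-coded "https".
fn manifest_url(protocol: &ClientProtocol, registry: &str, repository: &str, tag: &str) -> String {
    format!(
        "{}://{}/v2/{}/manifests/{}",
        protocol.as_str(),
        registry,
        repository,
        tag
    )
}

fn main() {
    let url = manifest_url(&ClientProtocol::Https, "webassembly.azurecr.io", "hello", "v1");
    assert_eq!(url, "https://webassembly.azurecr.io/v2/hello/manifests/v1");
}
```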
350,409 | 23.03.2020 19:19:59 | -3,600 | 0a75fe04351984645f0f84712c9dbac7369fe09a | Change kube cleanup in justfile to delete the proper nodes and pods | [
{
"change_type": "MODIFY",
"old_path": "justfile",
"new_path": "justfile",
"diff": "@@ -41,5 +41,5 @@ bootstrap-ssl:\n@chmod 400 $(eval echo $KEY_DIR)/*\n_cleanup_kube:\n- kubectl delete no $(hostname | tr '[:upper:]' '[:lower:]') || true\n- kubectl delete po greet || true\n+ kubectl delete node krustlet-wasi krustlet-wascc || true\n+ kubectl delete --all pods --namespace=default || true\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Change kube cleanup in justfile to delete the proper nodes and pods (#98) |
350,423 | 26.03.2020 15:24:52 | 14,400 | a170484f13e03dd696cd2ddc96c9d4307549b462 | Simplfy imports
fixes | [
{
"change_type": "MODIFY",
"old_path": "demos/wasi/hello-world-assemblyscript/assembly/index.ts",
"new_path": "demos/wasi/hello-world-assemblyscript/assembly/index.ts",
"diff": "-import { Console, Environ, CommandLine } from \"../node_modules/as-wasi/assembly\";\n+import { Console, Environ, CommandLine } from \"as-wasi\";\nexport function _start(): void {\nConsole.log(\"hello from stdout!\");\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Simplfy imports
fixes #116 |
350,419 | 27.03.2020 15:59:08 | 14,400 | d0db68e38d7c662f9f8b92b29eeaec1103fd46d2 | plumbing for logging provider | [
{
"change_type": "MODIFY",
"old_path": "Cargo.lock",
"new_path": "Cargo.lock",
"diff": "@@ -1218,7 +1218,6 @@ dependencies = [\n\"serde_derive\",\n\"serde_json\",\n\"tokio\",\n- \"wascc-logging\",\n\"wascc-provider\",\n\"wasi-provider\",\n]\n@@ -2876,6 +2875,7 @@ version = \"0.1.0\"\ndependencies = [\n\"env_logger 0.7.1\",\n\"log\",\n+ \"tempfile\",\n\"wascc-codec\",\n]\n@@ -2895,6 +2895,7 @@ dependencies = [\n\"serde_derive\",\n\"tokio\",\n\"wascc-host\",\n+ \"wascc-logging\",\n]\n[[package]]\n"
},
{
"change_type": "MODIFY",
"old_path": "Cargo.toml",
"new_path": "Cargo.toml",
"diff": "@@ -21,7 +21,6 @@ env_logger = \"0.7.1\"\nkubelet = { path = \"./crates/kubelet\", version = \"0.1.0\", features = [\"cli\"] }\nwascc-provider = { path = \"./crates/wascc-provider\", version = \"0.1.0\" }\nwasi-provider = { path = \"./crates/wasi-provider\", version = \"0.1.0\" }\n-wascc-logging = { path = \"./crates/wascc-logging\", version = \"0.1.0\" }\noci-distribution = { path = \"./crates/oci-distribution\", version = \"0.1.0\" }\n[dev-dependencies]\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/lib.rs",
"new_path": "crates/kubelet/src/lib.rs",
"diff": "@@ -67,67 +67,3 @@ pub use pod::Pod;\npub use handle::{RuntimeHandle, PodHandle};\n#[doc(inline)]\npub use provider::Provider;\n\\ No newline at end of file\n- \"thiserror\",\n- \"wast 9.0.0\",\n-]\n-\n-[[package]]\n-name = \"ws2_32-sys\"\n-version = \"0.2.1\"\n-source = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e\"\n-dependencies = [\n- \"winapi 0.2.8\",\n- \"winapi-build\",\n-]\n-\n-[[package]]\n-name = \"www-authenticate\"\n-version = \"0.3.0\"\n-source = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"8c62efb8259cda4e4c732287397701237b78daa4c43edcf3e613c8503a6c07dd\"\n-dependencies = [\n- \"hyperx\",\n- \"unicase 1.4.2\",\n- \"url 1.7.2\",\n-]\n-\n-[[package]]\n-name = \"yaml-rust\"\n-version = \"0.4.3\"\n-source = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"65923dd1784f44da1d2c3dbbc5e822045628c590ba72123e1c73d3c230c4434d\"\n-dependencies = [\n- \"linked-hash-map\",\n-]\n-\n-[[package]]\n-name = \"yanix\"\n-version = \"0.12.0\"\n-source = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"d9a936d5291b6269cf230b50fb995c5d5ff6d775f8efaa75234b26b88e5e3e78\"\n-dependencies = [\n- \"bitflags\",\n- \"cfg-if\",\n- \"libc\",\n- \"log\",\n- \"thiserror\",\n-]\n-\n-[[package]]\n-name = \"zeroize\"\n-version = \"1.1.0\"\n-source = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8\"\n-\n-[[package]]\n-name = \"zstd\"\n-version = \"0.5.1+zstd.1.4.4\"\n-source = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"5c5d978b793ae64375b80baf652919b148f6a496ac8802922d9999f5a553194f\"\n-dependencies = [\n- \"zstd-safe\",\n-]\n-\n-[[package]]\n-name = \"zstd-safe\"\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "crates/wascc-logging/Cargo.toml",
"diff": "+[package]\n+name = \"wascc-logging\"\n+version = \"0.1.0\"\n+authors = [\n+ \"Matt Butcher <[email protected]>\",\n+ \"Matthew Fisher <[email protected]>\",\n+ \"Radu Matei <[email protected]>\",\n+ \"Taylor Thomas <[email protected]>\",\n+ \"Brian Ketelsen <[email protected]>\",\n+ \"Brian Hardock <[email protected]>\",\n+ \"Ryan Levick <[email protected]>\",\n+]\n+edition = \"2018\"\n+\n+\n+[lib]\n+crate-type = [\"cdylib\", \"rlib\"]\n+\n+[dependencies]\n+wascc-codec = \"0.5.2\"\n+log = \"0.4.8\"\n+env_logger = \"0.7.1\"\n+tempfile = \"3.1\"\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "crates/wascc-logging/src/lib.rs",
"diff": "+// Copyright 2015-2019 Capital One Services, LLC\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#[macro_use]\n+extern crate wascc_codec as codec;\n+\n+use codec::capabilities::{CapabilityProvider, Dispatcher, NullDispatcher};\n+use codec::core::{CapabilityConfiguration, OP_CONFIGURE, OP_REMOVE_ACTOR};\n+use codec::{\n+ deserialize,\n+ logging::{WriteLogRequest, OP_LOG},\n+};\n+\n+#[macro_use]\n+extern crate log;\n+\n+use std::error::Error;\n+use std::sync::RwLock;\n+\n+capability_provider!(LoggingProvider, LoggingProvider::new);\n+\n+pub const LOG_PATH_KEY: &str = \"LOG_PATH\";\n+const CAPABILITY_ID: &str = \"wascc:logging\";\n+\n+const ERROR: usize = 1;\n+const WARN: usize = 2;\n+const INFO: usize = 3;\n+const DEBUG: usize = 4;\n+const TRACE: usize = 5;\n+/// LoggingProvider provides an implementation of the wascc:logging capability\n+/// that keeps separate log output for each actor.\n+pub struct LoggingProvider {\n+ dispatcher: RwLock<Box<dyn Dispatcher>>,\n+}\n+\n+impl Default for LoggingProvider {\n+ fn default() -> Self {\n+ env_logger::init();\n+\n+ LoggingProvider {\n+ dispatcher: RwLock::new(Box::new(NullDispatcher::new())),\n+ }\n+ }\n+}\n+\n+impl LoggingProvider {\n+ pub fn new() -> Self {\n+ Self::default()\n+ }\n+\n+ fn configure(\n+ &self,\n+ config: CapabilityConfiguration,\n+ ) -> Result<Vec<u8>, Box<dyn Error>> {\n+ // let conf: CapabilityConfiguration = config.into();\n+ trace!(\"configuring {} for {:?}\", CAPABILITY_ID, config.module);\n+ Ok(vec![])\n+ }\n+}\n+\n+impl CapabilityProvider for LoggingProvider {\n+ fn capability_id(&self) -> &'static str {\n+ CAPABILITY_ID\n+ }\n+\n+ // Invoked by the runtime host to give this provider plugin the ability to communicate\n+ // with actors\n+ fn configure_dispatch(&self, dispatcher: Box<dyn Dispatcher>) -> Result<(), Box<dyn Error>> {\n+ let mut lock = self.dispatcher.write().unwrap();\n+ *lock = dispatcher;\n+\n+ Ok(())\n+ }\n+\n+ fn name(&self) -> &'static str {\n+ \"waSCC Logging Provider\"\n+ }\n+\n+ // Invoked by host runtime to allow an actor to make use of the capability\n+ // All providers MUST handle the \"configure\" message, even if no work will be done\n+ fn handle_call(&self, actor: &str, op: &str, msg: &[u8]) -> Result<Vec<u8>, Box<dyn Error>> {\n+ // TIP: do not allow individual modules to attempt to send configuration,\n+ // only accept it from the host runtime\n+ if op == OP_CONFIGURE && actor == \"system\" {\n+ let cfg_vals = deserialize::<CapabilityConfiguration>(msg)?;\n+ self.configure(cfg_vals)\n+ } else if op == OP_REMOVE_ACTOR && actor == \"system\" {\n+ let cfg_vals = deserialize::<CapabilityConfiguration>(msg)?;\n+ info!(\"Removing actor configuration for {}\", cfg_vals.module);\n+ // tear down stuff here\n+ Ok(vec![])\n+ } else if op == OP_LOG {\n+ let log_msg = deserialize::<WriteLogRequest>(msg)?;\n+ match log_msg.level {\n+ ERROR => error!(\"[{}] {}\", actor, log_msg.body),\n+ WARN => warn!(\"[{}] {}\", actor, log_msg.body),\n+ INFO => info!(\"[{}] {}\", 
actor, log_msg.body),\n+ DEBUG => debug!(\"[{}] {}\", actor, log_msg.body),\n+ TRACE => trace!(\"[{}] {}\", actor, log_msg.body),\n+ _ => error!(\"Unknown log level: {}\", log_msg.level),\n+ }\n+ Ok(vec![])\n+ } else {\n+ Err(format!(\"Unknown operation: {}\", op).into())\n+ }\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/Cargo.toml",
"new_path": "crates/wascc-provider/Cargo.toml",
"diff": "@@ -23,6 +23,7 @@ kube = \"0.29.0\"\nkubelet = { path = \"../kubelet\", version = \"0.1.0\" }\ntokio = { version = \"0.2.11\", features = [\"fs\", \"macros\"] }\nchrono = { version = \"0.4\", features = [\"serde\"] }\n+wascc-logging = { path = \"../wascc-logging\", version = \"0.1.0\" }\n[dev-dependencies]\nk8s-openapi = { version = \"0.7.1\", features = [\"v1_17\"] }\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/src/lib.rs",
"new_path": "crates/wascc-provider/src/lib.rs",
"diff": "@@ -39,14 +39,22 @@ use kubelet::status::{ContainerStatus, Status};\nuse kubelet::{Pod, Provider};\nuse log::{debug, info};\nuse wascc_host::{host, Actor, NativeCapability};\n+use tokio::sync::RwLock;\n+\n+use wascc_logging::{LOG_PATH_KEY};\nuse std::collections::HashMap;\n+use std::path::{PathBuf, Path};\n+use std::sync::Arc;\nconst ACTOR_PUBLIC_KEY: &str = \"deislabs.io/wascc-action-key\";\nconst TARGET_WASM32_WASCC: &str = \"wasm32-wascc\";\n/// The name of the HTTP capability.\nconst HTTP_CAPABILITY: &str = \"wascc:http_server\";\n+const LOG_CAPABILITY: &str = \"wascc:logging\";\n+\n+const LOG_DIR_NAME: &str = \"wascc-logs\";\n#[cfg(target_os = \"linux\")]\nconst HTTP_LIB: &str = \"./lib/libwascc_httpsrv.so\";\n@@ -63,13 +71,18 @@ type EnvVars = std::collections::HashMap<String, String>;\n/// from Kubernetes.\n#[derive(Clone)]\npub struct WasccProvider<S> {\n+ handles: Arc<RwLock<HashMap<String, PodHandle<File>>>>,\nstore: S,\n+ log_path: PathBuf,\n}\nimpl<S: ModuleStore + Send + Sync> WasccProvider<S> {\n/// Returns a new wasCC provider configured to use the proper data directory\n/// (including creating it if necessary)\n- pub async fn new(store: S, _config: &kubelet::config::Config) -> anyhow::Result<Self> {\n+ pub async fn new(store: S, config: &kubelet::config::Config) -> anyhow::Result<Self> {\n+ let log_path = config.data_dir.to_path_buf().join(LOG_DIR_NAME);\n+ tokio::fs::create_dir_all(&log_path).await?;\n+\ntokio::task::spawn_blocking(|| {\nlet data = NativeCapability::from_file(HTTP_LIB).map_err(|e| {\nanyhow::anyhow!(\"Failed to read HTTP capability {}: {}\", HTTP_LIB, e)\n@@ -78,7 +91,11 @@ impl<S: ModuleStore + Send + Sync> WasccProvider<S> {\n.map_err(|e| anyhow::anyhow!(\"Failed to load HTTP capability: {}\", e))\n})\n.await??;\n- Ok(Self { store })\n+ Ok(Self {\n+ handles: Default::default(),\n+ store,\n+ log_path,\n+ })\n}\n}\n@@ -227,15 +244,7 @@ impl<S: ModuleStore + Send + Sync> Provider for WasccProvider<S> {\n/// Run a WasCC module inside of the host, configuring it to handle HTTP requests.\n///\n/// This bootstraps an HTTP host, using the value of the env's `PORT` key to expose a port.\n-fn wascc_run_http(data: Vec<u8>, env: EnvVars, key: &str) -> anyhow::Result<()> {\n- let mut httpenv: HashMap<String, String> = HashMap::new();\n- httpenv.insert(\n- \"PORT\".into(),\n- env.get(\"PORT\")\n- .map(|a| a.to_string())\n- .unwrap_or_else(|| \"80\".to_string()),\n- );\n-\n+fn wascc_run_http(data: Vec<u8>, env: EnvVars, key: &str, log_path: &Path) -> anyhow::Result<()> {\nwascc_run(\ndata,\nkey,\n@@ -265,8 +274,14 @@ struct Capability {\n///\n/// The provided capabilities will be configured for this actor, but the capabilities\n/// must first be loaded into the host by some other process, such as register_native_capabilities().\n-fn wascc_run(data: Vec<u8>, key: &str, capabilities: Vec<Capability>) -> anyhow::Result<()> {\n+fn wascc_run(data: Vec<u8>, key: &str, capabilities: &mut Vec<Capability>, log_path: &Path) -> anyhow::Result<()> {\ninfo!(\"wascc run\");\n+ let mut logenv: HashMap<String, String> = HashMap::new();\n+ logenv.insert(LOG_PATH_KEY, log_path.to_str().unwrap().to_owned());\n+ capabilities.push(Capability {\n+ name: LOG_CAPABILITY,\n+ env: logenv,\n+ });\nlet load = Actor::from_bytes(data).map_err(|e| anyhow::anyhow!(\"Error loading WASM: {}\", e))?;\nhost::add_actor(load).map_err(|e| anyhow::anyhow!(\"Error adding actor: {}\", e))?;\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | plumbing for logging provider |
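Editor's note: the heart of the provider added above is a `handle_call` that dispatches on the operation name and only trusts configuration sent by the host runtime (the "system" actor). Below is a dependency-free sketch of that shape; the operation-name constants and the payload handling are simplified stand-ins for the real `wascc-codec` types, not the actual wire protocol.

```rust
use std::error::Error;

// Simplified operation names; the real provider matches on constants
// from wascc-codec and deserializes typed payloads.
const OP_CONFIGURE: &str = "Configure";
const OP_REMOVE_ACTOR: &str = "RemoveActor";
const OP_LOG: &str = "Log";

fn handle_call(actor: &str, op: &str, _msg: &[u8]) -> Result<Vec<u8>, Box<dyn Error>> {
    match op {
        // Configuration is only accepted from the host runtime, which
        // identifies itself as the "system" actor.
        OP_CONFIGURE if actor == "system" => {
            println!("configuring logging for an actor");
            Ok(vec![])
        }
        OP_REMOVE_ACTOR if actor == "system" => {
            println!("tearing down logging for an actor");
            Ok(vec![])
        }
        OP_LOG => {
            println!("writing a log line on behalf of {}", actor);
            Ok(vec![])
        }
        _ => Err(format!("Unknown operation: {}", op).into()),
    }
}

fn main() {
    handle_call("system", OP_CONFIGURE, &[]).unwrap();
    assert!(handle_call("some-actor", "Bogus", &[]).is_err());
}
```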
350,419 | 30.03.2020 15:34:40 | 14,400 | 4abbcdf56fa4b555e2f71cf4eca3f874a3054e8c | writing to a logger per actor | [
{
"change_type": "MODIFY",
"old_path": "Cargo.lock",
"new_path": "Cargo.lock",
"diff": "@@ -2321,6 +2321,17 @@ dependencies = [\n\"digest\",\n]\n+[[package]]\n+name = \"simplelog\"\n+version = \"0.7.5\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"bcacac97349a890d437921dfb23cbec52ab5b4752551cb637df2721371acd467\"\n+dependencies = [\n+ \"chrono\",\n+ \"log\",\n+ \"term\",\n+]\n+\n[[package]]\nname = \"slab\"\nversion = \"0.4.2\"\n@@ -2472,6 +2483,16 @@ dependencies = [\n\"winapi 0.3.8\",\n]\n+[[package]]\n+name = \"term\"\n+version = \"0.6.1\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"c0863a3345e70f61d613eab32ee046ccd1bcc5f9105fe402c61fcd0c13eeb8b5\"\n+dependencies = [\n+ \"dirs\",\n+ \"winapi 0.3.8\",\n+]\n+\n[[package]]\nname = \"term_size\"\nversion = \"0.3.1\"\n@@ -2873,8 +2894,8 @@ dependencies = [\nname = \"wascc-logging\"\nversion = \"0.1.0\"\ndependencies = [\n- \"env_logger 0.7.1\",\n\"log\",\n+ \"simplelog\",\n\"tempfile\",\n\"wascc-codec\",\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-logging/Cargo.toml",
"new_path": "crates/wascc-logging/Cargo.toml",
"diff": "@@ -19,5 +19,5 @@ crate-type = [\"cdylib\", \"rlib\"]\n[dependencies]\nwascc-codec = \"0.5.2\"\nlog = \"0.4.8\"\n-env_logger = \"0.7.1\"\n+simplelog = \"0.7\"\ntempfile = \"3.1\"\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-logging/src/lib.rs",
"new_path": "crates/wascc-logging/src/lib.rs",
"diff": "@@ -24,32 +24,40 @@ use codec::{\n#[macro_use]\nextern crate log;\n+use log::Log;\n+use std::collections::HashMap;\nuse std::error::Error;\n+use std::fs::File;\nuse std::sync::RwLock;\n+use simplelog::{Config, LevelFilter, WriteLogger};\n+\ncapability_provider!(LoggingProvider, LoggingProvider::new);\npub const LOG_PATH_KEY: &str = \"LOG_PATH\";\nconst CAPABILITY_ID: &str = \"wascc:logging\";\n-const ERROR: usize = 1;\n-const WARN: usize = 2;\n-const INFO: usize = 3;\n-const DEBUG: usize = 4;\n-const TRACE: usize = 5;\n+enum LogLevel {\n+ ERROR = 1,\n+ WARN,\n+ INFO,\n+ DEBUG,\n+ TRACE,\n+}\n+\n/// LoggingProvider provides an implementation of the wascc:logging capability\n/// that keeps separate log output for each actor.\npub struct LoggingProvider {\ndispatcher: RwLock<Box<dyn Dispatcher>>,\n+ output_map: RwLock<HashMap<String, Box<WriteLogger<File>>>>,\n}\nimpl Default for LoggingProvider {\nfn default() -> Self {\n- env_logger::init();\n-\nLoggingProvider {\ndispatcher: RwLock::new(Box::new(NullDispatcher::new())),\n+ output_map: RwLock::new(HashMap::new()),\n}\n}\n}\n@@ -59,12 +67,17 @@ impl LoggingProvider {\nSelf::default()\n}\n- fn configure(\n- &self,\n- config: CapabilityConfiguration,\n- ) -> Result<Vec<u8>, Box<dyn Error>> {\n- // let conf: CapabilityConfiguration = config.into();\n+ fn configure(&self, config: CapabilityConfiguration) -> Result<Vec<u8>, Box<dyn Error>> {\ntrace!(\"configuring {} for {:?}\", CAPABILITY_ID, config.module);\n+ let file = File::open(\n+ config\n+ .values\n+ .get(LOG_PATH_KEY)\n+ .ok_or(\"log file path was unspecified\")?,\n+ )?;\n+ let logger = WriteLogger::new(LevelFilter::Info, Config::default(), file);\n+ let mut output_map = self.output_map.write().unwrap();\n+ output_map.insert(config.module, logger);\nOk(vec![])\n}\n}\n@@ -102,14 +115,23 @@ impl CapabilityProvider for LoggingProvider {\nOk(vec![])\n} else if op == OP_LOG {\nlet log_msg = deserialize::<WriteLogRequest>(msg)?;\n- match log_msg.level {\n- ERROR => error!(\"[{}] {}\", actor, log_msg.body),\n- WARN => warn!(\"[{}] {}\", actor, log_msg.body),\n- INFO => info!(\"[{}] {}\", actor, log_msg.body),\n- DEBUG => debug!(\"[{}] {}\", actor, log_msg.body),\n- TRACE => trace!(\"[{}] {}\", actor, log_msg.body),\n- _ => error!(\"Unknown log level: {}\", log_msg.level),\n- }\n+ let output_map = self.output_map.read().unwrap();\n+ let logger = output_map\n+ .get(actor)\n+ .ok_or(format!(\"unable to find logger for actor {}\", actor))?;\n+ logger.log(\n+ &log::Record::builder()\n+ .args(format_args!(\"[{}] {}\", actor, log_msg.body))\n+ .level(match log_msg.level {\n+ x if x == LogLevel::ERROR as usize => log::Level::Error,\n+ x if x == LogLevel::WARN as usize => log::Level::Warn,\n+ x if x == LogLevel::INFO as usize => log::Level::Info,\n+ x if x == LogLevel::DEBUG as usize => log::Level::Debug,\n+ x if x == LogLevel::TRACE as usize => log::Level::Trace,\n+ _ => return Err(format!(\"Unknown log level {}\", log_msg.level).into()),\n+ })\n+ .build(),\n+ );\nOk(vec![])\n} else {\nErr(format!(\"Unknown operation: {}\", op).into())\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | writing to a logger per actor |
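Editor's note: the `x if x == LogLevel::ERROR as usize` guards in the diff exist because the levels arrive as plain numbers over the wire. The mapping can be isolated as a direct match on the number; this sketch assumes only the `log` crate and uses the numbering from the `LogLevel` enum above.

```rust
// Map the numeric levels used by wascc:logging onto the `log` crate's
// levels; the numbers follow the LogLevel enum in the commit above.
fn to_level(level: usize) -> Result<log::Level, String> {
    match level {
        1 => Ok(log::Level::Error),
        2 => Ok(log::Level::Warn),
        3 => Ok(log::Level::Info),
        4 => Ok(log::Level::Debug),
        5 => Ok(log::Level::Trace),
        _ => Err(format!("Unknown log level {}", level)),
    }
}

fn main() {
    assert_eq!(to_level(3).unwrap(), log::Level::Info);
    assert!(to_level(9).is_err());
}
```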
350,419 | 31.03.2020 13:21:19 | 14,400 | c82ed3370be9b8eb3eb29256de41ab069d00265f | add logging native capability | [
{
"change_type": "ADD",
"old_path": "crates/wascc-provider/lib/libwascc_logging.so",
"new_path": "crates/wascc-provider/lib/libwascc_logging.so",
"diff": "Binary files /dev/null and b/crates/wascc-provider/lib/libwascc_logging.so differ\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/src/lib.rs",
"new_path": "crates/wascc-provider/src/lib.rs",
"diff": "@@ -37,9 +37,11 @@ use kubelet::module_store::ModuleStore;\nuse kubelet::provider::NotImplementedError;\nuse kubelet::status::{ContainerStatus, Status};\nuse kubelet::{Pod, Provider};\n-use log::{debug, info};\n+use kubelet::PodHandle;\n+use log::{debug, info, warn};\nuse wascc_host::{host, Actor, NativeCapability};\nuse tokio::sync::RwLock;\n+use tokio::fs::File;\nuse wascc_logging::{LOG_PATH_KEY};\n@@ -58,9 +60,16 @@ const LOG_DIR_NAME: &str = \"wascc-logs\";\n#[cfg(target_os = \"linux\")]\nconst HTTP_LIB: &str = \"./lib/libwascc_httpsrv.so\";\n+\n+#[cfg(target_os = \"linux\")]\n+const LOG_LIB: &str = \"./lib/libwascc_logging.so\";\n+\n#[cfg(target_os = \"macos\")]\nconst HTTP_LIB: &str = \"./lib/libwascc_httpsrv.dylib\";\n+#[cfg(target_os = \"macos\")]\n+const LOG_LIB: &str = \"./lib/libwascc_logging.dylib\";\n+\n/// Kubernetes' view of environment variables is an unordered map of string to string.\ntype EnvVars = std::collections::HashMap<String, String>;\n@@ -84,11 +93,21 @@ impl<S: ModuleStore + Send + Sync> WasccProvider<S> {\ntokio::fs::create_dir_all(&log_path).await?;\ntokio::task::spawn_blocking(|| {\n+ warn!(\"Loading HTTP Capability\");\nlet data = NativeCapability::from_file(HTTP_LIB).map_err(|e| {\nanyhow::anyhow!(\"Failed to read HTTP capability {}: {}\", HTTP_LIB, e)\n})?;\nhost::add_native_capability(data)\n- .map_err(|e| anyhow::anyhow!(\"Failed to load HTTP capability: {}\", e))\n+ .map_err(|e| {\n+ anyhow::anyhow!(\"Failed to load HTTP capability: {}\", e)\n+ })?;\n+\n+ warn!(\"Loading LOG Capability\");\n+ let logdata = NativeCapability::from_file(LOG_LIB).map_err(|e| {\n+ anyhow::anyhow!(\"Failed to read LOG capability {}: {}\", LOG_LIB, e)\n+ })?;\n+ host::add_native_capability(logdata)\n+ .map_err(|e| anyhow::anyhow!(\"Failed to load LOG capability: {}\", e))\n})\n.await??;\nOk(Self {\n@@ -165,8 +184,9 @@ impl<S: ModuleStore + Send + Sync> Provider for WasccProvider<S> {\nlet module_data = modules\n.remove(&container.name)\n.expect(\"FATAL ERROR: module map not properly populated\");\n+ let lp = self.log_path.clone();\nlet http_result =\n- tokio::task::spawn_blocking(move || wascc_run_http(module_data, env, &pub_key))\n+ tokio::task::spawn_blocking(move || wascc_run_http(module_data, env, &pub_key, &lp))\n.await?;\nmatch http_result {\nOk(_) => {\n@@ -245,13 +265,17 @@ impl<S: ModuleStore + Send + Sync> Provider for WasccProvider<S> {\n///\n/// This bootstraps an HTTP host, using the value of the env's `PORT` key to expose a port.\nfn wascc_run_http(data: Vec<u8>, env: EnvVars, key: &str, log_path: &Path) -> anyhow::Result<()> {\n+ let mut caps: Vec<Capability> = Vec::new();\n+\n+ caps.push(Capability {\n+ name: HTTP_CAPABILITY,\n+ env: env,\n+ });\nwascc_run(\ndata,\nkey,\n- vec![Capability {\n- name: HTTP_CAPABILITY,\n- env,\n- }],\n+ &mut caps,\n+ log_path,\n)\n}\n@@ -277,7 +301,7 @@ struct Capability {\nfn wascc_run(data: Vec<u8>, key: &str, capabilities: &mut Vec<Capability>, log_path: &Path) -> anyhow::Result<()> {\ninfo!(\"wascc run\");\nlet mut logenv: HashMap<String, String> = HashMap::new();\n- logenv.insert(LOG_PATH_KEY, log_path.to_str().unwrap().to_owned());\n+ logenv.insert(LOG_PATH_KEY.to_string(), log_path.to_str().unwrap().to_owned());\ncapabilities.push(Capability {\nname: LOG_CAPABILITY,\nenv: logenv,\n@@ -328,13 +352,19 @@ mod test {\n#[test]\nfn test_wascc_run() {\n+\n+ use std::path::PathBuf;\n// Open file\nlet data = std::fs::read(\"./testdata/echo.wasm\").expect(\"read the wasm file\");\n+\n+ let log_path = 
PathBuf::from(r\"~/.krustlet\");\n+\n// Send into wascc_run\nwascc_run_http(\ndata,\nEnvVars::new(),\n\"MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2\",\n+ &log_path,\n)\n.expect(\"successfully executed a WASM\");\n@@ -351,15 +381,17 @@ mod test {\nlet key = \"MDAYLDTOZEHQFPB3CL5PAFY5UTNCW32P54XGWYX3FOM2UBRYNCP3I3BF\";\n+ let log_path = PathBuf::from(r\"~/.krustlet\");\nlet wasm = std::fs::read(\"./testdata/echo_actor_s.wasm\").expect(\"load echo WASM\");\n// TODO: use wascc_run to execute echo_actor\nwascc_run(\nwasm,\nkey,\n- vec![Capability {\n+ &mut vec![Capability {\nname: \"wok:echoProvider\",\nenv: EnvVars::new(),\n}],\n+ &log_path,\n)\n.expect(\"completed echo run\")\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "justfile",
"new_path": "justfile",
"diff": "@@ -19,7 +19,7 @@ test:\ntest-e2e:\ncargo test --test integration_tests\n-run-wascc: _cleanup_kube bootstrap-ssl\n+run-wascc: _copy_log_lib _cleanup_kube bootstrap-ssl\n@# Change directories so we have access to the ./lib dir\ncd ./crates/wascc-provider && cargo run --bin krustlet-wascc --manifest-path ../../Cargo.toml -- --node-name krustlet-wascc --port 3000\n@@ -39,6 +39,10 @@ bootstrap-ssl:\n@test -f $(eval echo $KEY_DIR)/certificate.pfx || openssl pkcs12 -export -out $(eval echo $KEY_DIR)/certificate.pfx -inkey $(eval echo $KEY_DIR)/host.key -in $(eval echo $KEY_DIR)/host.cert -password \"pass:${PFX_PASSWORD}\"\n@chmod 400 $(eval echo $KEY_DIR)/*\n+_copy_log_lib:\n+ cp target/debug/libwascc_logging.so crates/wascc-provider/lib/\n+ #cp target/debug/libwascc_logging.dylib crates/wascc-provider/lib/\n+\n_cleanup_kube:\nkubectl delete node krustlet-wasi krustlet-wascc || true\nkubectl delete --all pods --namespace=default || true\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | add logging native capability |
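Editor's note: the per-OS library paths above are selected at compile time with `cfg` attributes. A minimal sketch of the loading step, compiling on Linux and macOS only as in the commit; `load_native_capability` is a hypothetical stand-in for `wascc_host::NativeCapability::from_file` plus `host::add_native_capability`, and the error handling assumes the `anyhow` crate already used by the provider.

```rust
// cfg picks the shared-library extension for the current OS.
#[cfg(target_os = "linux")]
const LOG_LIB: &str = "./lib/libwascc_logging.so";

#[cfg(target_os = "macos")]
const LOG_LIB: &str = "./lib/libwascc_logging.dylib";

// Hypothetical loader: the real code hands the file to the wasCC host;
// here we only check that the library exists on disk.
fn load_native_capability(path: &str) -> anyhow::Result<()> {
    if std::path::Path::new(path).exists() {
        Ok(())
    } else {
        Err(anyhow::anyhow!("Failed to read capability {}", path))
    }
}

fn main() {
    if let Err(e) = load_native_capability(LOG_LIB) {
        eprintln!("{}", e);
    }
}
```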
350,419 | 31.03.2020 14:13:54 | 14,400 | f06c12e2124c708ec6777ac6ff7e05077ab059d2 | loading wascc module and running it, but apparently no logs yet | [
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/src/lib.rs",
"new_path": "crates/wascc-provider/src/lib.rs",
"diff": "@@ -243,6 +243,7 @@ impl<S: ModuleStore + Send + Sync> Provider for WasccProvider<S> {\n}\nasync fn delete(&self, pod: Pod, _client: APIClient) -> anyhow::Result<()> {\n+ // TODO: this isn't the correct public key\nlet pub_key = pod\n.annotations()\n.get(ACTOR_PUBLIC_KEY)\n@@ -307,11 +308,12 @@ fn wascc_run(data: Vec<u8>, key: &str, capabilities: &mut Vec<Capability>, log_p\nenv: logenv,\n});\nlet load = Actor::from_bytes(data).map_err(|e| anyhow::anyhow!(\"Error loading WASM: {}\", e))?;\n+ let pk = load.public_key();\nhost::add_actor(load).map_err(|e| anyhow::anyhow!(\"Error adding actor: {}\", e))?;\ncapabilities.iter().try_for_each(|cap| {\ninfo!(\"configuring capability {}\", cap.name);\n- host::configure(key, cap.name, cap.env.clone())\n+ host::configure(&pk, cap.name, cap.env.clone())\n.map_err(|e| anyhow::anyhow!(\"Error configuring capabilities for module: {}\", e))\n})?;\ninfo!(\"Instance executing\");\n"
},
{
"change_type": "ADD",
"old_path": "crates/wascc-provider/testdata/testlogging_signed.wasm",
"new_path": "crates/wascc-provider/testdata/testlogging_signed.wasm",
"diff": "Binary files /dev/null and b/crates/wascc-provider/testdata/testlogging_signed.wasm differ\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/greet-wascc.yaml",
"new_path": "examples/greet-wascc.yaml",
"diff": "@@ -6,7 +6,7 @@ metadata:\ndeislabs.io/wascc-action-key: MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2\nspec:\ncontainers:\n- - image: webassembly.azurecr.io/hello-wasm:v1\n+ - image: webassembly.azurecr.io/greet-wascc:v0.2\nimagePullPolicy: Always\nname: greet\nports:\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | loading wascc module and running it, but apparently no logs yet |
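Editor's note: the substantive fix here is ordering — derive the actor's public key from the signed module itself before handing the actor to the host, rather than trusting a pod annotation. A sketch with a hypothetical `Actor` stand-in (the real type is `wascc_host::Actor`, which extracts the key from the module's signature):

```rust
// Hypothetical stand-in for wascc_host::Actor.
struct Actor {
    public_key: String,
}

impl Actor {
    fn from_bytes(_data: Vec<u8>) -> Result<Actor, String> {
        // The real implementation parses the signed WASM and extracts the
        // embedded key; this sketch returns a fixed placeholder value.
        Ok(Actor {
            public_key: "MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2".into(),
        })
    }

    fn public_key(&self) -> String {
        self.public_key.clone()
    }
}

fn configure_capability(pk: &str, capability: &str) {
    println!("configuring {} for actor {}", capability, pk);
}

fn main() {
    let actor = Actor::from_bytes(vec![]).unwrap();
    // Grab the key before the actor is moved into the host, then
    // configure every capability against that key.
    let pk = actor.public_key();
    configure_capability(&pk, "wascc:http_server");
}
```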
350,419 | 31.03.2020 16:12:41 | 14,400 | 116fbfbb266f60afc79f880605bd133e02b146b9 | logging works on linux, still missing much | [
{
"change_type": "MODIFY",
"old_path": "crates/wascc-logging/src/lib.rs",
"new_path": "crates/wascc-logging/src/lib.rs",
"diff": "@@ -28,7 +28,7 @@ use log::Log;\nuse std::collections::HashMap;\nuse std::error::Error;\n-use std::fs::File;\n+use std::fs::{OpenOptions, File};\nuse std::sync::RwLock;\nuse simplelog::{Config, LevelFilter, WriteLogger};\n@@ -68,14 +68,20 @@ impl LoggingProvider {\n}\nfn configure(&self, config: CapabilityConfiguration) -> Result<Vec<u8>, Box<dyn Error>> {\n- trace!(\"configuring {} for {:?}\", CAPABILITY_ID, config.module);\n- let file = File::open(\n- config\n+ println!(\"CONFIGURE\");\n+ println!(\"{}\",config.module);\n+ println!(\"configuring {} for {:?}\", CAPABILITY_ID, config.module);\n+ let fp = config\n.values\n.get(LOG_PATH_KEY)\n- .ok_or(\"log file path was unspecified\")?,\n- )?;\n- let logger = WriteLogger::new(LevelFilter::Info, Config::default(), file);\n+ .ok_or(\"log file path was unspecified\")?;\n+\n+ println!(\"file path{}\", fp);\n+ let file = OpenOptions::new()\n+ .write(true)\n+ .open(fp)?;\n+ println!(\"Opened log file {}\", fp);\n+ let logger = WriteLogger::new(LevelFilter::Trace, Config::default(), file);\nlet mut output_map = self.output_map.write().unwrap();\noutput_map.insert(config.module, logger);\nOk(vec![])\n@@ -97,7 +103,7 @@ impl CapabilityProvider for LoggingProvider {\n}\nfn name(&self) -> &'static str {\n- \"waSCC Logging Provider\"\n+ \"krustlet Logging Provider\"\n}\n// Invoked by host runtime to allow an actor to make use of the capability\n@@ -106,15 +112,19 @@ impl CapabilityProvider for LoggingProvider {\n// TIP: do not allow individual modules to attempt to send configuration,\n// only accept it from the host runtime\nif op == OP_CONFIGURE && actor == \"system\" {\n+ println!(\"Received configure call {}\", actor);\nlet cfg_vals = deserialize::<CapabilityConfiguration>(msg)?;\nself.configure(cfg_vals)\n} else if op == OP_REMOVE_ACTOR && actor == \"system\" {\nlet cfg_vals = deserialize::<CapabilityConfiguration>(msg)?;\n- info!(\"Removing actor configuration for {}\", cfg_vals.module);\n+ println!(\"Removing actor configuration for {}\", cfg_vals.module);\n// tear down stuff here\nOk(vec![])\n} else if op == OP_LOG {\n+ println!(\"Received log call {}\", actor);\nlet log_msg = deserialize::<WriteLogRequest>(msg)?;\n+\n+ println!(\"[Level:{}] {}\", log_msg.level, log_msg.body);\nlet output_map = self.output_map.read().unwrap();\nlet logger = output_map\n.get(actor)\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/Cargo.toml",
"new_path": "crates/wascc-provider/Cargo.toml",
"diff": "@@ -15,7 +15,7 @@ edition = \"2018\"\n[dependencies]\nanyhow = \"1.0\"\nasync-trait = \"0.1.24\"\n-wascc-host = \"0.5.2\"\n+wascc-host = \"0.5.3\"\nlog = \"0.4\"\nserde = \"1.0\"\nserde_derive = \"1.0\"\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/lib/libwascc_logging.so",
"new_path": "crates/wascc-provider/lib/libwascc_logging.so",
"diff": "Binary files a/crates/wascc-provider/lib/libwascc_logging.so and b/crates/wascc-provider/lib/libwascc_logging.so differ\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/src/lib.rs",
"new_path": "crates/wascc-provider/src/lib.rs",
"diff": "@@ -154,6 +154,8 @@ impl<S: ModuleStore + Send + Sync> Provider for WasccProvider<S> {\n// vars and suck it out of there. But that violates the intention\n// of env vars, which is to communicate _into_ the runtime, not to\n// configure the runtime.\n+ // TODO: This isn't the pub-key you're looking for, remove annotations from the pod spec\n+ // pull it from the actor\nlet pub_key = pod.get_annotation(ACTOR_PUBLIC_KEY).unwrap_or_default();\ndebug!(\"{:?}\", pub_key);\n@@ -301,14 +303,19 @@ struct Capability {\n/// must first be loaded into the host by some other process, such as register_native_capabilities().\nfn wascc_run(data: Vec<u8>, key: &str, capabilities: &mut Vec<Capability>, log_path: &Path) -> anyhow::Result<()> {\ninfo!(\"wascc run\");\n+ let load = Actor::from_bytes(data).map_err(|e| anyhow::anyhow!(\"Error loading WASM: {}\", e))?;\n+ let pk = load.public_key();\n+\nlet mut logenv: HashMap<String, String> = HashMap::new();\n- logenv.insert(LOG_PATH_KEY.to_string(), log_path.to_str().unwrap().to_owned());\n+ let actor_path = log_path.join(pk.clone());\n+ std::fs::create_dir_all(&actor_path).map_err(|e| anyhow::anyhow!(\"error creating directory: {}\", e))?;\n+ let actor_log_path = log_path.join(pk.clone()).join(\"log.txt\");\n+ let _ = std::fs::File::create(&actor_log_path).map_err(|e| anyhow::anyhow!(\"error creating directory: {}\", e))?;\n+ logenv.insert(LOG_PATH_KEY.to_string(), actor_log_path.to_str().unwrap().to_owned());\ncapabilities.push(Capability {\nname: LOG_CAPABILITY,\nenv: logenv,\n});\n- let load = Actor::from_bytes(data).map_err(|e| anyhow::anyhow!(\"Error loading WASM: {}\", e))?;\n- let pk = load.public_key();\nhost::add_actor(load).map_err(|e| anyhow::anyhow!(\"Error adding actor: {}\", e))?;\ncapabilities.iter().try_for_each(|cap| {\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/testdata/testlogging_signed.wasm",
"new_path": "crates/wascc-provider/testdata/testlogging_signed.wasm",
"diff": "Binary files a/crates/wascc-provider/testdata/testlogging_signed.wasm and b/crates/wascc-provider/testdata/testlogging_signed.wasm differ\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/greet-wascc.yaml",
"new_path": "examples/greet-wascc.yaml",
"diff": "@@ -6,7 +6,7 @@ metadata:\ndeislabs.io/wascc-action-key: MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2\nspec:\ncontainers:\n- - image: webassembly.azurecr.io/greet-wascc:v0.2\n+ - image: webassembly.azurecr.io/greet-wascc:v0.3\nimagePullPolicy: Always\nname: greet\nports:\n"
},
{
"change_type": "MODIFY",
"old_path": "justfile",
"new_path": "justfile",
"diff": "@@ -7,6 +7,9 @@ run: run-wascc\nbuild:\ncargo build\n+build-logging:\n+ cd crates/wascc-logging && cargo build\n+\nprefetch:\ncargo fetch --manifest-path ./Cargo.toml\n@@ -19,7 +22,7 @@ test:\ntest-e2e:\ncargo test --test integration_tests\n-run-wascc: _copy_log_lib _cleanup_kube bootstrap-ssl\n+run-wascc: build build-logging _copy_log_lib _cleanup_kube bootstrap-ssl\n@# Change directories so we have access to the ./lib dir\ncd ./crates/wascc-provider && cargo run --bin krustlet-wascc --manifest-path ../../Cargo.toml -- --node-name krustlet-wascc --port 3000\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | logging works on linux, still missing much |
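Editor's note: the per-actor log location is built from the data directory and the actor's public key before the logging capability is configured. A minimal sketch of that setup, assuming the `anyhow` crate and using blocking `std::fs` for brevity (the provider itself mixes `std::fs` and `tokio::fs`):

```rust
use std::path::{Path, PathBuf};

// Create <log_path>/<public key>/log.txt, mirroring the commit above.
fn prepare_actor_log(log_path: &Path, pk: &str) -> anyhow::Result<PathBuf> {
    let actor_dir = log_path.join(pk);
    std::fs::create_dir_all(&actor_dir)
        .map_err(|e| anyhow::anyhow!("error creating directory: {}", e))?;
    let log_file = actor_dir.join("log.txt");
    std::fs::File::create(&log_file)
        .map_err(|e| anyhow::anyhow!("error creating log file: {}", e))?;
    Ok(log_file)
}

fn main() -> anyhow::Result<()> {
    let log_file = prepare_actor_log(Path::new("/tmp/wascc-logs"), "MB4OLDIC")?;
    println!("actor will log to {}", log_file.display());
    Ok(())
}
```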
350,419 | 02.04.2020 14:01:12 | 14,400 | aa363eadd08a2600d161bad765f9f5985b2c7710 | it works, get off my back | [
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/handle.rs",
"new_path": "crates/kubelet/src/handle.rs",
"diff": "@@ -190,3 +190,15 @@ impl<R: AsyncRead + AsyncSeek + Unpin, S: Stop> PodHandle<R, S> {\nOk(())\n}\n}\n+\n+/// Generates a unique human readable key for storing a handle to a pod in a\n+/// hash. This is a convenience wrapper around [crate::pod_key].\n+pub fn key_from_pod(pod: &Pod) -> String {\n+ pod_key(pod.namespace(), pod.name())\n+}\n+\n+// Generates a unique human readable key for storing a handle to a pod if you\n+// already have the namespace and pod name.\n+pub fn pod_key<N: AsRef<str>, T: AsRef<str>>(namespace: N, pod_name: T) -> String {\n+ format!(\"{}:{}\", namespace.as_ref(), pod_name.as_ref())\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/lib/libwascc_logging.so",
"new_path": "crates/wascc-provider/lib/libwascc_logging.so",
"diff": "Binary files a/crates/wascc-provider/lib/libwascc_logging.so and b/crates/wascc-provider/lib/libwascc_logging.so differ\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/src/lib.rs",
"new_path": "crates/wascc-provider/src/lib.rs",
"diff": "//! use kubelet::module_store::FileModuleStore;\n//! use wascc_provider::WasccProvider;\n//!\n-//! async {\n+//! #[tokio::main]\n+//! async fn main() {\n//! // Get a configuration for the Kubelet\n//! let kubelet_config = Config::default();\n//! let client = oci_distribution::Client::default();\n//! let store = FileModuleStore::new(client, &std::path::PathBuf::from(\"\"));\n//!\n+//! // Instantiate the provider type\n+//! let provider = WasccProvider::new(store, &kubelet_config).await.unwrap();\n+//!\n//! // Load a kubernetes configuration\n//! let kubeconfig = kube::config::load_kube_config().await.unwrap();\n//!\n-//! // Instantiate the provider type\n-//! let provider = WasccProvider::new(store, &kubelet_config, kubeconfig.clone()).await.unwrap();\n-//!\n//! // Instantiate the Kubelet\n//! let kubelet = Kubelet::new(provider, kubeconfig, kubelet_config);\n//! // Start the Kubelet and block on it\n//! kubelet.start().await.unwrap();\n-//! };\n+//! }\n//! ```\n#![warn(missing_docs)]\n-mod wascc_runtime;\nuse async_trait::async_trait;\n-use kubelet::handle::{RuntimeHandle, Stop};\n+use kube::client::Client;\nuse kubelet::module_store::ModuleStore;\n-use kubelet::provider::NotImplementedError;\n+use kubelet::provider::ProviderError;\nuse kubelet::status::{ContainerStatus, Status};\n-use kubelet::PodHandle;\nuse kubelet::{Pod, Provider};\n-\n+use kubelet::handle::{PodHandle, RuntimeHandle, Stop, key_from_pod, pod_key};\nuse log::{error,debug, info, warn};\n-use tokio::fs::File;\n+use wascc_host::{host, Actor, NativeCapability};\nuse tokio::sync::RwLock;\n-use tokio::task::JoinHandle;\n-\n-use wascc_host::{host, Actor};\n+use tokio::fs::File;\n+use tokio::sync::watch::{self, Receiver};\n+use tempfile::NamedTempFile;\n-use wascc_runtime::{HandleStopper, WasccRuntime};\n+use wascc_logging::{LOG_PATH_KEY};\nuse std::collections::HashMap;\n-use std::path::{Path, PathBuf};\n+use std::path::{PathBuf, Path};\nuse std::sync::Arc;\nconst ACTOR_PUBLIC_KEY: &str = \"deislabs.io/wascc-action-key\";\nconst TARGET_WASM32_WASCC: &str = \"wasm32-wascc\";\n+/// The name of the HTTP capability.\n+const HTTP_CAPABILITY: &str = \"wascc:http_server\";\n+const LOG_CAPABILITY: &str = \"wascc:logging\";\n+\nconst LOG_DIR_NAME: &str = \"wascc-logs\";\n-/// WasccProvider provides a Kubelet runtime implementation that executes WASM\n-/// binaries conforming to the waSCC spec\n+#[cfg(target_os = \"linux\")]\n+const HTTP_LIB: &str = \"./lib/libwascc_httpsrv.so\";\n+\n+#[cfg(target_os = \"linux\")]\n+const LOG_LIB: &str = \"./lib/libwascc_logging.so\";\n+\n+#[cfg(target_os = \"macos\")]\n+const HTTP_LIB: &str = \"./lib/libwascc_httpsrv.dylib\";\n+\n+#[cfg(target_os = \"macos\")]\n+const LOG_LIB: &str = \"./lib/libwascc_logging.dylib\";\n+\n+/// Kubernetes' view of environment variables is an unordered map of string to string.\n+type EnvVars = std::collections::HashMap<String, String>;\n+\n+/// A [kubelet::handle::Stop] implementation for a wascc actor\n+pub struct ActorStopper {\n+ pub key: String,\n+}\n+\n+#[async_trait::async_trait]\n+impl Stop for ActorStopper {\n+ async fn stop(&mut self) -> anyhow::Result<()> {\n+ debug!(\"stopping wascc instance {}\", self.key);\n+ host::remove_actor(&self.key).map_err(|e| anyhow::anyhow!(\"unable to remove actor: {:?}\", e))\n+ }\n+\n+ async fn wait(&mut self) -> anyhow::Result<()> {\n+ // TODO: Figure out if there is a way to wait for an actor to be removed\n+ Ok(())\n+ }\n+}\n+\n+/// WasccProvider provides a Kubelet runtime implementation that executes WASM 
binaries.\n+///\n+/// Currently, this runtime uses WASCC as a host, loading the primary container as an actor.\n+/// TODO: In the future, we will look at loading capabilities using the \"sidecar\" metaphor\n+/// from Kubernetes.\n#[derive(Clone)]\npub struct WasccProvider<S> {\n- handles: Arc<RwLock<HashMap<String, PodHandle<File, HandleStopper>>>>,\n+ handles: Arc<RwLock<HashMap<String, PodHandle<File, ActorStopper>>>>,\nstore: S,\nlog_path: PathBuf,\nkubeconfig: kube::config::Configuration,\n}\nimpl<S: ModuleStore + Send + Sync> WasccProvider<S> {\n- /// Create a new wasi provider from a module store and a kubelet config\n- pub async fn new(\n- store: S,\n- config: &kubelet::config::Config,\n- kubeconfig: kube::config::Configuration,\n- ) -> anyhow::Result<Self> {\n+ /// Returns a new wasCC provider configured to use the proper data directory\n+ /// (including creating it if necessary)\n+ pub async fn new(store: S, config: &kubelet::config::Config, kubeconfig: kube::config::Configuration) -> anyhow::Result<Self> {\nlet log_path = config.data_dir.to_path_buf().join(LOG_DIR_NAME);\ntokio::fs::create_dir_all(&log_path).await?;\n+\n+ tokio::task::spawn_blocking(|| {\n+ warn!(\"Loading HTTP Capability\");\n+ let data = NativeCapability::from_file(HTTP_LIB).map_err(|e| {\n+ anyhow::anyhow!(\"Failed to read HTTP capability {}: {}\", HTTP_LIB, e)\n+ })?;\n+ host::add_native_capability(data)\n+ .map_err(|e| {\n+ anyhow::anyhow!(\"Failed to load HTTP capability: {}\", e)\n+ })?;\n+\n+ warn!(\"Loading LOG Capability\");\n+ let logdata = NativeCapability::from_file(LOG_LIB).map_err(|e| {\n+ anyhow::anyhow!(\"Failed to read LOG capability {}: {}\", LOG_LIB, e)\n+ })?;\n+ host::add_native_capability(logdata)\n+ .map_err(|e| anyhow::anyhow!(\"Failed to load LOG capability: {}\", e))\n+ })\n+ .await??;\nOk(Self {\nhandles: Default::default(),\nstore,\n@@ -88,7 +143,6 @@ impl<S: ModuleStore + Send + Sync> WasccProvider<S> {\n#[async_trait]\nimpl<S: ModuleStore + Send + Sync> Provider for WasccProvider<S> {\nconst ARCH: &'static str = TARGET_WASM32_WASCC;\n-\nfn can_schedule(&self, pod: &Pod) -> bool {\n// If there is a node selector and it has arch set to wasm32-wascc, we can\n// schedule it.\n@@ -101,35 +155,84 @@ impl<S: ModuleStore + Send + Sync> Provider for WasccProvider<S> {\n}\nasync fn add(&self, pod: Pod) -> anyhow::Result<()> {\n- let pod_name = pod.name();\n- let mut container_handles = HashMap::new();\n-\n+ // To run an Add event, we load the WASM, update the pod status to Running,\n+ // and then execute the WASM, passing in the relevant data.\n+ // When the pod finishes, we update the status to Succeeded unless it\n+ // produces an error, in which case we mark it Failed.\n+ debug!(\"Pod added {:?}\", pod.name());\n+ // This would lock us into one wascc actor per pod. I don't know if\n+ // that is a good thing. Other containers would then be limited\n+ // to acting as components... which largely follows the sidecar\n+ // pattern.\n+ //\n+ // Another possibility is to embed the key in the image reference\n+ // (image/foo.wasm@ed25519:PUBKEY). That might work best, but it is\n+ // not terribly useable.\n+ //\n+ // A really icky one would be to just require the pubkey in the env\n+ // vars and suck it out of there. 
But that violates the intention\n+ // of env vars, which is to communicate _into_ the runtime, not to\n+ // configure the runtime.\n+\n+ // TODO: Implement this for real.\n+ //\n+ // What it should do:\n+ // - for each volume\n+ // - set up the volume map\n+ // - for each init container:\n+ // - set up the runtime\n+ // - mount any volumes (popen)\n+ // - run it to completion\n+ // - bail with an error if it fails\n+ // - for each container and ephemeral_container\n+ // - set up the runtime\n+ // - mount any volumes (popen)\n+ // - run it to completion\n+ // - bail if it errors\n+\n+ info!(\"Starting containers for pod {:?}\", pod.name());\nlet mut modules = self.store.fetch_pod_modules(&pod).await?;\n+ let mut container_handles = HashMap::new();\nlet client = kube::Client::from(self.kubeconfig.clone());\n- info!(\"Starting containers for pod {:?}\", pod_name);\n-\nfor container in pod.containers() {\nlet env = Self::env_vars(&container, &pod, &client).await;\n+ debug!(\"Starting container {} on thread\", container.name);\n+\nlet module_data = modules\n.remove(&container.name)\n.expect(\"FATAL ERROR: module map not properly populated\");\n-\n- let runtime =\n- WasccRuntime::new(module_data, env, self.log_path.clone(),self.log_path.clone()).await?;\n-\n- debug!(\"Starting container {} on thread\", container.name);\n- let handle = runtime.start().await?;\n+ let lp = self.log_path.clone();\n+ let (status_sender, status_recv) = watch::channel(ContainerStatus::Waiting {\n+ timestamp: chrono::Utc::now(),\n+ message: \"No status has been received from the process\".into(),\n+ });\n+ let http_result =\n+ tokio::task::spawn_blocking(move || wascc_run_http(module_data, env, &lp, status_recv))\n+ .await?;\n+ match http_result {\n+ Ok(handle) => {\ncontainer_handles.insert(container.name.clone(), handle);\n+ status_sender.broadcast(ContainerStatus::Running {\n+ timestamp: chrono::Utc::now(),\n+ }).expect(\"status should be able to send\");\n+ }\n+ Err(e) => {\n+ status_sender.broadcast(ContainerStatus::Terminated {\n+ timestamp: chrono::Utc::now(),\n+ failed: true,\n+ message: format!(\"Error while starting container: {:?}\", e),\n+ }).expect(\"status should be able to send\");\n+ return Err(anyhow::anyhow!(\"Failed to run pod: {}\", e));\n+ }\n+ }\n}\ninfo!(\n\"All containers started for pod {:?}. 
Updating status\",\n- pod_name\n+ pod.name()\n);\n-\n// Wrap this in a block so the write lock goes out of scope when we are done\n{\n- // Grab the entry while we are creating things\nlet mut handles = self.handles.write().await;\nhandles.insert(\nkey_from_pod(&pod),\n@@ -154,7 +257,6 @@ impl<S: ModuleStore + Send + Sync> Provider for WasccProvider<S> {\n}\nasync fn delete(&self, pod: Pod) -> anyhow::Result<()> {\n- // TODO: this isn't the correct public key\nlet mut handles = self.handles.write().await;\nif let Some(mut h) = handles.remove(&key_from_pod(&pod)) {\nh.stop().await.unwrap_or_else(|e| {\n@@ -179,24 +281,77 @@ impl<S: ModuleStore + Send + Sync> Provider for WasccProvider<S> {\nasync fn logs(\n&self,\n- _namespace: String,\n- _pod_name: String,\n- _container_name: String,\n+ namespace: String,\n+ pod_name: String,\n+ container_name: String,\n) -> anyhow::Result<Vec<u8>> {\n- Err(NotImplementedError.into())\n+ let mut handles = self.handles.write().await;\n+ let handle = handles\n+ .get_mut(&pod_key(&namespace, &pod_name))\n+ .ok_or_else(|| ProviderError::PodNotFound {\n+ pod_name: pod_name.clone(),\n+ })?;\n+ let mut output = Vec::new();\n+ handle.output(&container_name, &mut output).await?;\n+ Ok(output)\n}\n}\n-/// Generates a unique human readable key for storing a handle to a pod\n-fn key_from_pod(pod: &Pod) -> String {\n- pod_key(pod.namespace(), pod.name())\n+/// Run a WasCC module inside of the host, configuring it to handle HTTP requests.\n+///\n+/// This bootstraps an HTTP host, using the value of the env's `PORT` key to expose a port.\n+fn wascc_run_http(data: Vec<u8>, env: EnvVars, log_path: &Path, status_recv: Receiver<ContainerStatus>) -> anyhow::Result<RuntimeHandle<File, ActorStopper>> {\n+ let mut caps: Vec<Capability> = Vec::new();\n+\n+ caps.push(Capability {\n+ name: HTTP_CAPABILITY,\n+ env: env,\n+ });\n+ wascc_run(\n+ data,\n+ &mut caps,\n+ log_path,\n+ status_recv,\n+ )\n}\n-fn pod_key<N: AsRef<str>, T: AsRef<str>>(namespace: N, pod_name: T) -> String {\n- format!(\"{}:{}\", namespace.as_ref(), pod_name.as_ref())\n+/// Capability describes a waSCC capability.\n+///\n+/// Capabilities are made available to actors through a two-part processthread:\n+/// - They must be registered\n+/// - For each actor, the capability must be configured\n+struct Capability {\n+ name: &'static str,\n+ env: EnvVars,\n}\n+/// Run the given WASM data as a waSCC actor with the given public key.\n+///\n+/// The provided capabilities will be configured for this actor, but the capabilities\n+/// must first be loaded into the host by some other process, such as register_native_capabilities().\n+fn wascc_run(data: Vec<u8>, capabilities: &mut Vec<Capability>, log_path: &Path, status_recv: Receiver<ContainerStatus>) -> anyhow::Result<RuntimeHandle<File, ActorStopper>> {\n+ info!(\"wascc run\");\n+\n+ let log_output = NamedTempFile::new_in(log_path)?;\n+ let mut logenv: HashMap<String, String> = HashMap::new();\n+ logenv.insert(LOG_PATH_KEY.to_string(), log_output.path().to_str().unwrap().to_owned());\n+ capabilities.push(Capability {\n+ name: LOG_CAPABILITY,\n+ env: logenv,\n+ });\n+ let load = Actor::from_bytes(data).map_err(|e| anyhow::anyhow!(\"Error loading WASM: {}\", e))?;\n+ let pk = load.public_key();\n+\n+ host::add_actor(load).map_err(|e| anyhow::anyhow!(\"Error adding actor: {}\", e))?;\n+ capabilities.iter().try_for_each(|cap| {\n+ info!(\"configuring capability {}\", cap.name);\n+ host::configure(&pk, cap.name, cap.env.clone())\n+ .map_err(|e| anyhow::anyhow!(\"Error 
configuring capabilities for module: {}\", e))\n+ })?;\n+ info!(\"Instance executing\");\n+ Ok(RuntimeHandle::new(tokio::fs::File::from_std(log_output.reopen()?), ActorStopper{key: pk}, status_recv))\n+}\n#[cfg(test)]\nmod test {\n@@ -232,6 +387,7 @@ mod test {\n#[test]\nfn test_wascc_run() {\n+\nuse std::path::PathBuf;\n// Open file\nlet data = std::fs::read(\"./testdata/echo.wasm\").expect(\"read the wasm file\");\n@@ -279,15 +435,7 @@ mod test {\nasync fn test_can_schedule() {\nlet store = TestStore::new(Default::default());\n- let wr = WasccProvider::new(\n- store,\n- &Default::default(),\n- kube::config::Configuration {\n- base_path: String::new(),\n- client: Default::default(),\n- default_ns: String::new(),\n- },\n- )\n+ let wr = WasccProvider::new(store, &Default::default())\n.await\n.unwrap();\nlet mock = Default::default();\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/src/wascc_runtime.rs",
"new_path": "crates/wascc-provider/src/wascc_runtime.rs",
"diff": "@@ -31,21 +31,19 @@ const HTTP_LIB: &str = \"./lib/libwascc_httpsrv.dylib\";\n#[cfg(target_os = \"macos\")]\nconst LOG_LIB: &str = \"./lib/libwascc_logging.dylib\";\n-pub struct HandleStopper {\n- pub handle: JoinHandle<anyhow::Result<()>>,\n+pub struct ActorStopper {\n+ pub key: String,\n}\n#[async_trait::async_trait]\n-impl Stop for HandleStopper {\n+impl Stop for ActorStopper {\nasync fn stop(&mut self) -> anyhow::Result<()> {\n- // TODO: Send an actual stop signal once there is support in wasmtime\n- warn!(\"There is currently no way to stop a running wasmtime instance. The pod will be deleted, but any long running processes will keep running\");\n- Ok(())\n+ debug!(\"stopping wascc instance {}\", self.key);\n+ host::remove_actor(self.key)\n}\nasync fn wait(&mut self) -> anyhow::Result<()> {\n- // Uncomment this and actually wait for the process to finish once we have a way to stop\n- // (&mut self.handle).await.unwrap()\n+ // TODO: Figure out if there is a way to wait for an actor to be removed\nOk(())\n}\n}\n@@ -129,7 +127,7 @@ impl WasccRuntime {\ntimestamp: chrono::Utc::now(),\nmessage: \"No status has been received from the process\".into(),\n});\n- let handle = spawn_wascc(self.module_data.to_vec(),self.env.clone(),status_sender).await;\n+ let handle = self.spawn_wascc(self.module_data.to_vec(),self.env.clone(),status_sender).await;\nOk(RuntimeHandle::new(\ntokio::fs::File::from_std(output_read),\n@@ -148,10 +146,11 @@ impl WasccRuntime {\n// channel. Due to the Instance type not being Send safe, all of the logic\n// needs to be done within the spawned task\nasync fn spawn_wascc(\n+ &self,\ndata: Vec<u8>,\nenv: HashMap<String,String>,\nstatus_sender: Sender<ContainerStatus>,\n- ) -> JoinHandle<anyhow::Result<()>> {\n+ ) -> anyhow::Result<()> {\n// Clone the module data Arc so it can be moved\nlet mut caps: Vec<Capability> = Vec::new();\n@@ -167,18 +166,13 @@ impl WasccRuntime {\nlet pk = load.public_key();\n//.unwrap() self.wascc_run(module_data.to_vec(), &pk, &mut caps).map_err(|e| anyhow::anyhow!(\"Error loading WASM: {}\", e)).unwrap();\n-\n- tokio::task::spawn_blocking(move || -> anyhow::Result<_> {\n- info!(\"wascc run\");\nlet load =\n- Actor::from_bytes(data).map_err(|e| anyhow::anyhow!(\"Error loading WASM: {}\", e))?;\n+ Actor::from_bytes(data).map_err(|e| anyhow::anyhow!(\"Error loading WASM: {}\", e)).unwrap();\nlet pk = load.public_key();\nlet mut logenv: HashMap<String, String> = HashMap::new();\n- let actor_path = self.log_path.join(pk.clone());\n-\n- let actor_log_path = actor_path.join(\"log.txt\");\n- std::fs::create_dir_all(&actor_path)\n+ let actor_log_path = self.log_path.join(pk.clone()).join(\"log.txt\");\n+ tokio::fs::create_dir_all(&actor_log_path).await\n.map_err(|e| anyhow::anyhow!(\"error creating directory: {}\", e))?;\nlogenv.insert(\nLOG_PATH_KEY.to_string(),\n@@ -189,6 +183,9 @@ impl WasccRuntime {\nname: LOG_CAPABILITY,\nenv: logenv,\n});\n+\n+ info!(\"beginning wascc run for: {}\", pk);\n+\nhost::add_actor(load).map_err(|e| anyhow::anyhow!(\"Error adding actor: {}\", e))?;\ncaps.iter().try_for_each(|cap| {\n@@ -206,7 +203,6 @@ impl WasccRuntime {\n})\n.expect(\"status should be able to send\");\nOk(())\n- })\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wasi-provider/src/lib.rs",
"new_path": "crates/wasi-provider/src/lib.rs",
"diff": "@@ -43,7 +43,7 @@ use log::{debug, error, info};\nuse tokio::fs::File;\nuse tokio::sync::RwLock;\n-use kubelet::handle::PodHandle;\n+use kubelet::handle::{PodHandle, key_from_pod, pod_key};\nuse wasi_runtime::{HandleStopper, WasiRuntime};\nconst TARGET_WASM32_WASI: &str = \"wasm32-wasi\";\n@@ -215,15 +215,6 @@ impl<S: ModuleStore + Send + Sync> Provider for WasiProvider<S> {\n}\n}\n-/// Generates a unique human readable key for storing a handle to a pod\n-fn key_from_pod(pod: &Pod) -> String {\n- pod_key(pod.namespace(), pod.name())\n-}\n-\n-fn pod_key<N: AsRef<str>, T: AsRef<str>>(namespace: N, pod_name: T) -> String {\n- format!(\"{}:{}\", namespace.as_ref(), pod_name.as_ref())\n-}\n-\n#[cfg(test)]\nmod test {\nuse super::*;\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | it works, get off my back |
350,419 | 02.04.2020 14:16:06 | 14,400 | 58c587d7f432da11fdb4e32f7c4a80922b4e4cd7 | clean up spurious writes | [
{
"change_type": "MODIFY",
"old_path": "crates/wascc-logging/src/lib.rs",
"new_path": "crates/wascc-logging/src/lib.rs",
"diff": "@@ -26,7 +26,6 @@ use codec::{\nlogging::{WriteLogRequest, OP_LOG},\n};\n-#[macro_use]\nextern crate log;\nuse log::Log;\n@@ -72,17 +71,12 @@ impl LoggingProvider {\n}\nfn configure(&self, config: CapabilityConfiguration) -> Result<Vec<u8>, Box<dyn Error>> {\n- println!(\"CONFIGURE\");\n- println!(\"{}\", config.module);\n- println!(\"configuring {} for {:?}\", CAPABILITY_ID, config.module);\nlet fp = config\n.values\n.get(LOG_PATH_KEY)\n.ok_or(\"log file path was unspecified\")?;\n- println!(\"file path{}\", fp);\nlet file = OpenOptions::new().write(true).open(fp)?;\n- println!(\"Opened log file {}\", fp);\nlet logger = WriteLogger::new(LevelFilter::Trace, Config::default(), file);\nlet mut output_map = self.output_map.write().unwrap();\noutput_map.insert(config.module, logger);\n@@ -114,19 +108,14 @@ impl CapabilityProvider for LoggingProvider {\n// TIP: do not allow individual modules to attempt to send configuration,\n// only accept it from the host runtime\nif op == OP_CONFIGURE && actor == \"system\" {\n- println!(\"Received configure call {}\", actor);\nlet cfg_vals = deserialize::<CapabilityConfiguration>(msg)?;\nself.configure(cfg_vals)\n} else if op == OP_REMOVE_ACTOR && actor == \"system\" {\n- let cfg_vals = deserialize::<CapabilityConfiguration>(msg)?;\n- println!(\"Removing actor configuration for {}\", cfg_vals.module);\n// tear down stuff here\nOk(vec![])\n} else if op == OP_LOG {\n- println!(\"Received log call {}\", actor);\nlet log_msg = deserialize::<WriteLogRequest>(msg)?;\n- println!(\"[Level:{}] {}\", log_msg.level, log_msg.body);\nlet output_map = self.output_map.read().unwrap();\nlet logger = output_map\n.get(actor)\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | clean up spurious writes |
350,419 | 03.04.2020 12:16:21 | 14,400 | 9dec53061a6f3fea39e9ccb79b6787ca0b8bbfc1 | fix integration test for wascc | [
{
"change_type": "MODIFY",
"old_path": "tests/integration_tests.rs",
"new_path": "tests/integration_tests.rs",
"diff": "@@ -54,7 +54,7 @@ async fn test_wascc_provider() -> Result<(), Box<dyn std::error::Error>> {\n\"containers\": [\n{\n\"name\": \"hello-wascc\",\n- \"image\": \"webassembly.azurecr.io/hello-wasm:v1\",\n+ \"image\": \"webassembly.azurecr.io/hello-wascc:v0.3\",\n},\n],\n\"nodeSelector\": {\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | fix integration test for wascc |
350,419 | 03.04.2020 12:16:50 | 14,400 | f201cfc43e63dcc2f4f8cbb3ad28d9b43b56d854 | update podspec for labels, remove annotations | [
{
"change_type": "MODIFY",
"old_path": "examples/greet-wascc.yaml",
"new_path": "examples/greet-wascc.yaml",
"diff": "@@ -2,15 +2,18 @@ apiVersion: v1\nkind: Pod\nmetadata:\nname: greet\n- annotations:\n- deislabs.io/wascc-action-key: MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2\n+ labels:\n+ app: greet\nspec:\ncontainers:\n- image: webassembly.azurecr.io/greet-wascc:v0.3\nimagePullPolicy: Always\nname: greet\n+ env:\n+ - name: PORT\n+ value: \"8080\"\nports:\n- - containerPort: 80\n+ - containerPort: 8080\nnodeSelector:\nkubernetes.io/role: agent\nbeta.kubernetes.io/os: linux\n"
},
{
"change_type": "MODIFY",
"old_path": "examples/greet-wasi.yaml",
"new_path": "examples/greet-wasi.yaml",
"diff": "@@ -2,6 +2,8 @@ apiVersion: v1\nkind: Pod\nmetadata:\nname: greet\n+ labels:\n+ app: greet\nspec:\ncontainers:\n- image: webassembly.azurecr.io/hello-wasm:v1\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | update podspec for labels, remove annotations |
350,419 | 03.04.2020 14:19:29 | 14,400 | 1c2cbd1a0b5585d67d449cd96cddb830ff684f82 | update logging for review | [
{
"change_type": "MODIFY",
"old_path": "crates/wascc-logging/src/lib.rs",
"new_path": "crates/wascc-logging/src/lib.rs",
"diff": "@@ -44,9 +44,9 @@ pub const LOG_PATH_KEY: &str = \"LOG_PATH\";\nconst SYSTEM_ACTOR: &str = \"system\";\nconst CAPABILITY_ID: &str = \"wascc:logging\";\n-\nenum LogLevel {\n- ERROR = 1,\n+ NONE = 0,\n+ ERROR,\nWARN,\nINFO,\nDEBUG,\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | update logging for review |
350,419 | 03.04.2020 14:55:10 | 14,400 | 98e5efbec6116bedd414b56c7a108e517923c2f0 | remove copylib | [
{
"change_type": "MODIFY",
"old_path": "justfile",
"new_path": "justfile",
"diff": "@@ -19,7 +19,7 @@ test:\ntest-e2e:\ncargo test --test integration_tests\n-run-wascc: build build-logging _copy_log_lib _cleanup_kube bootstrap-ssl\n+run-wascc: build build-logging _cleanup_kube bootstrap-ssl\n@# Change directories so we have access to the ./lib dir\ncd ./crates/wascc-provider && cargo run --bin krustlet-wascc --manifest-path ../../Cargo.toml -- --node-name krustlet-wascc --port 3000\n@@ -33,9 +33,6 @@ bootstrap-ssl:\n@test -f $(eval echo $KEY_DIR)/certificate.pfx || openssl pkcs12 -export -out $(eval echo $KEY_DIR)/certificate.pfx -inkey $(eval echo $KEY_DIR)/host.key -in $(eval echo $KEY_DIR)/host.cert -password \"pass:${PFX_PASSWORD}\"\n@chmod 400 $(eval echo $KEY_DIR)/*\n-_copy_log_lib:\n- cp target/debug/libwascc_logging.so crates/wascc-provider/lib/\n- #cp target/debug/libwascc_logging.dylib crates/wascc-provider/lib/\n_cleanup_kube:\nkubectl delete --all pods --namespace=default || true\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | remove copylib |
350,419 | 03.04.2020 16:42:39 | 14,400 | b2cb7be31c5ca09e6a976dc96734da3f98f0bddf | bad container name | [
{
"change_type": "MODIFY",
"old_path": "tests/integration_tests.rs",
"new_path": "tests/integration_tests.rs",
"diff": "@@ -66,13 +66,13 @@ async fn test_wascc_provider() -> Result<(), Box<dyn std::error::Error>> {\n\"apiVersion\": \"v1\",\n\"kind\": \"Pod\",\n\"metadata\": {\n- \"name\": \"hello-wascc\"\n+ \"name\": \"greet-wascc\"\n},\n\"spec\": {\n\"containers\": [\n{\n- \"name\": \"hello-wascc\",\n- \"image\": \"webassembly.azurecr.io/hello-wascc:v0.3\",\n+ \"name\": \"greet-wascc\",\n+ \"image\": \"webassembly.azurecr.io/greet-wascc:v0.3\",\n},\n],\n\"tolerations\": [\n@@ -93,7 +93,7 @@ async fn test_wascc_provider() -> Result<(), Box<dyn std::error::Error>> {\nlet inf: Informer<Pod> = Informer::new(\nclient,\nListParams::default()\n- .fields(\"metadata.name=hello-wascc\")\n+ .fields(\"metadata.name=greet-wascc\")\n.timeout(10),\nResource::namespaced::<Pod>(\"default\"),\n);\n@@ -116,7 +116,7 @@ async fn test_wascc_provider() -> Result<(), Box<dyn std::error::Error>> {\n}\nlet logs = pods\n- .logs(\"hello-wascc\", &LogParams::default())\n+ .logs(\"greet-wascc\", &LogParams::default())\n.await\n.expect(\"unable to get logs\");\nassert!(logs.contains(\"warn something\"));\n@@ -125,7 +125,7 @@ async fn test_wascc_provider() -> Result<(), Box<dyn std::error::Error>> {\nassert!(logs.contains(\"error body\"));\n// cleanup\n- pods.delete(\"hello-wascc\", &DeleteParams::default()).await?;\n+ pods.delete(\"greet-wascc\", &DeleteParams::default()).await?;\nOk(())\n}\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | bad container name |
350,419 | 09.04.2020 11:21:25 | 14,400 | 9914ad0def338d77c8bca9e6c19b2cac87cba396 | add wascc demo | [
{
"change_type": "MODIFY",
"old_path": "Cargo.lock",
"new_path": "Cargo.lock",
"diff": "@@ -97,8 +97,8 @@ version = \"0.1.1\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"21705adc76bbe4bc98434890e73a89cd00c6015e5704a60bb6eea6c3b72316b6\"\ndependencies = [\n- \"quote\",\n- \"syn\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -282,9 +282,9 @@ version = \"0.2.1\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"4f00371942083469785f7e28c540164af1913ee7c96a4534acb9cea92c39f057\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -354,9 +354,9 @@ version = \"0.1.26\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"21a03abb7c9b93ae229356151a083d26218c0358866a2a59d4280c856e9482e6\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -969,9 +969,9 @@ version = \"0.99.5\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -1072,9 +1072,9 @@ source = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"bc4bfcfacb61d231109d1d55202c1f33263319668b168843e02ad4652725ec9c\"\ndependencies = [\n\"heck\",\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -1171,9 +1171,9 @@ version = \"0.1.7\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n\"synstructure\",\n]\n@@ -1321,9 +1321,9 @@ source = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7\"\ndependencies = [\n\"proc-macro-hack\",\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -1625,6 +1625,15 @@ dependencies = [\n\"winreg\",\n]\n+[[package]]\n+name = \"itertools\"\n+version = \"0.8.2\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484\"\n+dependencies = [\n+ \"either\",\n+]\n+\n[[package]]\nname = \"itoa\"\nversion = \"0.4.5\"\n@@ -2205,9 +2214,9 @@ version = \"0.4.8\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -2247,9 +2256,9 @@ source = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"18f33027081eba0a6d8aba6d1b1c3a3be58cbb12106341c2d5759fcd9b5277e7\"\ndependencies = [\n\"proc-macro-error-attr\",\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n\"version_check 0.9.1\",\n]\n@@ -2259,9 +2268,9 @@ version = \"0.4.12\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = 
\"8a5b4b77fdb63c1eca72173d68d24501c54ab1269409f6b672c85deb18af69de\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n\"syn-mid\",\n\"version_check 0.9.1\",\n]\n@@ -2278,13 +2287,56 @@ version = \"0.1.4\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694\"\n+[[package]]\n+name = \"proc-macro2\"\n+version = \"0.4.30\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759\"\n+dependencies = [\n+ \"unicode-xid 0.1.0\",\n+]\n+\n[[package]]\nname = \"proc-macro2\"\nversion = \"1.0.9\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435\"\ndependencies = [\n- \"unicode-xid\",\n+ \"unicode-xid 0.2.0\",\n+]\n+\n+[[package]]\n+name = \"prost\"\n+version = \"0.5.0\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"96d14b1c185652833d24aaad41c5832b0be5616a590227c1fbff57c616754b23\"\n+dependencies = [\n+ \"byteorder\",\n+ \"bytes 0.4.12\",\n+ \"prost-derive\",\n+]\n+\n+[[package]]\n+name = \"prost-derive\"\n+version = \"0.5.0\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"5e7dc378b94ac374644181a2247cebf59a6ec1c88b49ac77f3a94b86b79d0e11\"\n+dependencies = [\n+ \"failure\",\n+ \"itertools\",\n+ \"proc-macro2 0.4.30\",\n+ \"quote 0.6.13\",\n+ \"syn 0.15.44\",\n+]\n+\n+[[package]]\n+name = \"prost-types\"\n+version = \"0.5.0\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"1de482a366941c8d56d19b650fac09ca08508f2a696119ee7513ad590c8bac6f\"\n+dependencies = [\n+ \"bytes 0.4.12\",\n+ \"prost\",\n]\n[[package]]\n@@ -2293,13 +2345,22 @@ version = \"1.2.3\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0\"\n+[[package]]\n+name = \"quote\"\n+version = \"0.6.13\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1\"\n+dependencies = [\n+ \"proc-macro2 0.4.30\",\n+]\n+\n[[package]]\nname = \"quote\"\nversion = \"1.0.3\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f\"\ndependencies = [\n- \"proc-macro2\",\n+ \"proc-macro2 1.0.9\",\n]\n[[package]]\n@@ -2665,9 +2726,9 @@ version = \"1.0.2\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -2707,9 +2768,9 @@ version = \"0.10.1\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -2783,9 +2844,9 @@ version = \"1.0.105\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- 
\"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -2958,11 +3019,11 @@ version = \"0.5.3\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n\"serde\",\n\"serde_derive\",\n- \"syn\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -2972,13 +3033,13 @@ source = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11\"\ndependencies = [\n\"base-x\",\n- \"proc-macro2\",\n- \"quote\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n\"serde\",\n\"serde_derive\",\n\"serde_json\",\n\"sha1\",\n- \"syn\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -3021,9 +3082,9 @@ checksum = \"3f88b8e18c69496aad6f9ddf4630dd7d585bcaf765786cb415b9aec2fe5a0430\"\ndependencies = [\n\"heck\",\n\"proc-macro-error\",\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -3041,15 +3102,26 @@ dependencies = [\n\"zeroize\",\n]\n+[[package]]\n+name = \"syn\"\n+version = \"0.15.44\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5\"\n+dependencies = [\n+ \"proc-macro2 0.4.30\",\n+ \"quote 0.6.13\",\n+ \"unicode-xid 0.1.0\",\n+]\n+\n[[package]]\nname = \"syn\"\nversion = \"1.0.17\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"unicode-xid\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"unicode-xid 0.2.0\",\n]\n[[package]]\n@@ -3058,9 +3130,9 @@ version = \"0.5.0\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -3069,10 +3141,10 @@ version = \"0.12.3\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n- \"unicode-xid\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n+ \"unicode-xid 0.2.0\",\n]\n[[package]]\n@@ -3150,9 +3222,9 @@ version = \"1.0.13\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"ae2b85ba4c9aa32dd3343bd80eb8d22e9b54b7688c17ea3907f236885353b233\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -3216,9 +3288,9 @@ source = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"e987cfe0537f575b5fc99909de6185f6c19c3ad8889e2275e686a873d0869ba1\"\ndependencies = [\n\"proc-macro-hack\",\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -3249,9 +3321,9 @@ version = \"0.2.5\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 
1.0.3\",\n+ \"syn 1.0.17\",\n]\n[[package]]\n@@ -3408,6 +3480,12 @@ version = \"0.1.7\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479\"\n+[[package]]\n+name = \"unicode-xid\"\n+version = \"0.1.0\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc\"\n+\n[[package]]\nname = \"unicode-xid\"\nversion = \"0.2.0\"\n@@ -3420,6 +3498,16 @@ version = \"0.7.0\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece\"\n+[[package]]\n+name = \"uppercase\"\n+version = \"0.0.1\"\n+dependencies = [\n+ \"log\",\n+ \"serde\",\n+ \"wascc-actor\",\n+ \"wascc-codec\",\n+]\n+\n[[package]]\nname = \"url\"\nversion = \"1.7.2\"\n@@ -3504,6 +3592,19 @@ dependencies = [\n\"wasmtime-wasi 0.12.0\",\n]\n+[[package]]\n+name = \"wapc-guest\"\n+version = \"0.3.0\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"6c04289198b321831168863e11e6727c84c4bab76e06e19f232bd611b40d6d2b\"\n+dependencies = [\n+ \"prost\",\n+ \"prost-types\",\n+ \"serde\",\n+ \"serde_derive\",\n+ \"serde_json\",\n+]\n+\n[[package]]\nname = \"wascap\"\nversion = \"0.4.5\"\n@@ -3526,6 +3627,21 @@ dependencies = [\n\"serde_json\",\n]\n+[[package]]\n+name = \"wascc-actor\"\n+version = \"0.6.0\"\n+source = \"registry+https://github.com/rust-lang/crates.io-index\"\n+checksum = \"9ebc6308461d7bdfc232d20acf4b046bc387ac2d517ca0c1a6cf5bc9041d4dcb\"\n+dependencies = [\n+ \"lazy_static\",\n+ \"log\",\n+ \"serde\",\n+ \"serde_derive\",\n+ \"serde_json\",\n+ \"wapc-guest\",\n+ \"wascc-codec\",\n+]\n+\n[[package]]\nname = \"wascc-codec\"\nversion = \"0.6.0\"\n@@ -3701,9 +3817,9 @@ dependencies = [\n\"bumpalo\",\n\"lazy_static\",\n\"log\",\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n\"wasm-bindgen-shared\",\n]\n@@ -3725,7 +3841,7 @@ version = \"0.2.60\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4\"\ndependencies = [\n- \"quote\",\n+ \"quote 1.0.3\",\n\"wasm-bindgen-macro-support\",\n]\n@@ -3735,9 +3851,9 @@ version = \"0.2.60\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931\"\ndependencies = [\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 1.0.17\",\n\"wasm-bindgen-backend\",\n\"wasm-bindgen-shared\",\n]\n@@ -4094,8 +4210,8 @@ source = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"a568ddecacabde7f8020cdd3ae78532f7c3d3f2735b2892036ff4ba6c43c89a7\"\ndependencies = [\n\"heck\",\n- \"proc-macro2\",\n- \"quote\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n\"witx\",\n]\n@@ -4106,8 +4222,8 @@ source = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"e303a54196f33cbcc969a2ec7dd4743b96a86fa1aafb145807cbdc6a12dfda4e\"\ndependencies = [\n\"heck\",\n- \"proc-macro2\",\n- \"quote\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n\"witx\",\n]\n@@ -4130,9 +4246,9 @@ checksum = \"01b366da52bffd4b3680aaf63d347c8a4abb8ea25521b0528c50646eb72cb1bc\"\ndependencies = [\n\"anyhow\",\n\"heck\",\n- \"proc-macro2\",\n- \"quote\",\n- \"syn\",\n+ \"proc-macro2 1.0.9\",\n+ \"quote 1.0.3\",\n+ \"syn 
1.0.17\",\n\"witx\",\n]\n@@ -4142,7 +4258,7 @@ version = \"0.15.0\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\nchecksum = \"f35e98cd16ad3762c033ebe0b1971268dc236c97490a8ac2ebdd1fe331748fb3\"\ndependencies = [\n- \"syn\",\n+ \"syn 1.0.17\",\n\"wiggle-generate\",\n\"witx\",\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "Cargo.toml",
"new_path": "Cargo.toml",
"diff": "@@ -32,7 +32,8 @@ reqwest = \"0.10\"\n[workspace]\nmembers = [\n- \"crates/*\"\n+ \"crates/*\",\n+ \"demos/wascc/uppercase\"\n]\n[[bin]]\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "demos/wascc/uppercase/.cargo/config",
"diff": "+[build]\n+target = \"wasm32-unknown-unknown\"\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "demos/wascc/uppercase/.gitignore",
"diff": "+.keys\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "demos/wascc/uppercase/Cargo.toml",
"diff": "+[package]\n+name = \"uppercase\"\n+version = \"0.0.1\"\n+authors = [\"Brian Ketelsen <[email protected]\"]\n+edition = \"2018\"\n+\n+[lib]\n+crate-type = [\"cdylib\"]\n+\n+[dependencies]\n+wascc-actor = \"0.6.0\"\n+log = '0.4.8'\n+serde = { version = \"1.0.104\", features = [\"derive\"]}\n+wascc-codec = \"0.6.0\"\n+\n+[profile.release]\n+opt-level = \"s\"\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "demos/wascc/uppercase/Makefile",
"diff": "+COLOR ?= always # Valid COLOR options: {always, auto, never}\n+CARGO = cargo --color $(COLOR)\n+TARGET = ../../../target/wasm32-unknown-unknown\n+DEBUG = $(TARGET)/debug\n+RELEASE = $(TARGET)/release\n+KEYDIR ?= .keys\n+VERSION = 0.2\n+\n+.PHONY: all bench build check clean doc test update keys keys-account keys-module\n+\n+all: build\n+\n+bench:\n+ @$(CARGO) bench\n+\n+build:\n+ @$(CARGO) build\n+ wascap sign $(DEBUG)/uppercase.wasm $(DEBUG)/uppercase_signed.wasm -i $(KEYDIR)/account.nk -u $(KEYDIR)/module.nk -s -l -n uppercase\n+\n+check:\n+ @$(CARGO) check\n+\n+clean:\n+ @$(CARGO) clean\n+\n+doc:\n+ @$(CARGO) doc\n+\n+test: build\n+ @$(CARGO) test\n+\n+update:\n+ @$(CARGO) update\n+\n+release:\n+ @$(CARGO) build --release\n+ wascap sign $(RELEASE)/uppercase.wasm $(RELEASE)/uppercase_signed.wasm -i $(KEYDIR)/account.nk -u $(KEYDIR)/module.nk -s -l -n uppercase\n+\n+push:\n+ wasm-to-oci push ../../../target/wasm32-unknown-unknown/release/uppercase_signed.wasm webassembly.azurecr.io/uppercase-wascc:v$(VERSION)\n+\n+\n+keys: keys-account\n+keys: keys-module\n+\n+keys-account:\n+ @mkdir -p $(KEYDIR)\n+ nk gen account > $(KEYDIR)/account.txt\n+ awk '/Seed/{ print $$2 }' $(KEYDIR)/account.txt > $(KEYDIR)/account.nk\n+\n+keys-module:\n+ @mkdir -p $(KEYDIR)\n+ nk gen module > $(KEYDIR)/module.txt\n+ awk '/Seed/{ print $$2 }' $(KEYDIR)/module.txt > $(KEYDIR)/module.nk\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "demos/wascc/uppercase/README.md",
"diff": "+# Uppercase\n+\n+An example that will respond with the uppercased version of the querystring sent in.\n+\n+It is meant to be a simple demo for the wascc-provider with Krustlet.\n+\n+## Video\n+\n+You can watch a video of the creation of this actor on [Youtube](https://www.youtube.com/watch?v=uy91W7OxHcQ).\n+\n+## Running the example\n+\n+This example has already been pre-built, so you only need to install it into your Kubernetes\n+cluster.\n+\n+Create the pod and configmap with `kubectl`:\n+\n+```shell\n+$ kubectl apply -f uppercase-wascc.yaml\n+```\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "demos/wascc/uppercase/src/lib.rs",
"diff": "+extern crate wascc_actor as actor;\n+\n+#[macro_use]\n+extern crate log;\n+extern crate serde;\n+extern crate wascc_codec;\n+\n+use actor::prelude::*;\n+use serde::Serialize;\n+use wascc_codec::serialize;\n+\n+actor_handlers! {\n+ codec::http::OP_HANDLE_REQUEST => uppercase,\n+ codec::core::OP_HEALTH_REQUEST => health\n+}\n+\n+fn uppercase(\n+ r: codec::http::Request\n+) -> CallResult{\n+ info!(\"Query String: {}\", r.query_string);\n+ let upper = UppercaseResponse {\n+ original: r.query_string.to_string(),\n+ uppercased: r.query_string.to_ascii_uppercase(),\n+ };\n+\n+ Ok(serialize(codec::http::Response::json(upper, 200, \"OK\"))?)\n+}\n+\n+fn health(\n+ _req: codec::core::HealthRequest\n+) -> ReceiveResult{\n+ Ok(vec![])\n+}\n+\n+#[derive(Serialize)]\n+struct UppercaseResponse {\n+ original: String,\n+ uppercased: String,\n+}\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "demos/wascc/uppercase/uppercase-wascc.yaml",
"diff": "+apiVersion: v1\n+kind: Pod\n+metadata:\n+ name: uppercase\n+ labels:\n+ app: uppercase\n+spec:\n+ containers:\n+ - image: webassembly.azurecr.io/uppercase-wascc:v0.2\n+ imagePullPolicy: Always\n+ name: uppercase\n+ env:\n+ - name: PORT\n+ value: \"8080\"\n+ ports:\n+ - containerPort: 8080\n+ nodeSelector:\n+ kubernetes.io/role: agent\n+ beta.kubernetes.io/os: linux\n+ beta.kubernetes.io/arch: wasm32-wascc\n+ tolerations:\n+ - key: \"node.kubernetes.io/network-unavailable\"\n+ operator: \"Exists\"\n+ effect: \"NoSchedule\"\n+ - key: \"krustlet/arch\"\n+ operator: \"Equal\"\n+ value: \"wasm32-wascc\"\n+ effect: \"NoExecute\"\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | add wascc demo |
350,428 | 09.04.2020 17:30:24 | -7,200 | 461ac002e56602e06d7b34ff9621190b93819183 | Added Krustlet on WSL2 howto
Instructions for adding Krustlet as a node to a Docker Desktop for Windows Kubernetes cluster | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/howto/krustlet-on-wsl2.md",
"diff": "+# Running Krustlet on WSL2 with Docker Desktop\n+\n+This how-to guide demonstrates how to boot a Krustlet node in Docker Desktop for Windows with WSL2 backend.\n+\n+## Information\n+This tutorial will work on current Windows 10 Insider Slow ring and Docker Desktop for Windows stable release.\n+\n+Concerning Windows, this tutorial should work on the Production ring once it will be available.\n+\n+Last but not least, this will work on Windows 10 Home edition.\n+\n+## Prerequisites\n+\n+You will require a WSL2 distro and Docker Desktop for Windows for this how-to. The WSL2 backend and Kubernetes features will need to be also enabled.\n+See the [Docker Desktop for Windows > Getting started > Kubernetes](https://docs.docker.com/docker-for-windows/#kubernetes) howto for more information.\n+\n+This specific tutorial will be running Krustlet on your WSL2 distro and will explain how to access it from Windows.\n+\n+## Step 1: Create Certificate\n+\n+Krustlet requires a certificate for securing communication with the Kubernetes API. Because\n+Kubernetes has its own certificates, we'll need to get a signed certificate from the Kubernetes API\n+that we can use. First things first, let's create a certificate signing request (CSR):\n+\n+```shell\n+$ mkdir -p ~/.krustlet/config\n+$ cd $_\n+$ openssl req -new -sha256 -newkey rsa:2048 -keyout krustlet.key -out krustlet.csr -nodes -subj \"/C=US/ST=./L=./O=./OU=./CN=krustlet\"\n+Generating a RSA private key\n+.................+++++\n+....................................................+++++\n+writing new private key to 'krustlet.key'\n+```\n+\n+This will create a CSR and a new key for the certificate, using `krustlet` as the hostname of the\n+server.\n+\n+Now that it is created, we'll need to send the request to Kubernetes:\n+\n+```shell\n+$ cat <<EOF | kubectl apply -f -\n+apiVersion: certificates.k8s.io/v1beta1\n+kind: CertificateSigningRequest\n+metadata:\n+ name: krustlet\n+spec:\n+ request: $(cat krustlet.csr | base64 | tr -d '\\n')\n+ usages:\n+ - digital signature\n+ - key encipherment\n+ - server auth\n+EOF\n+certificatesigningrequest.certificates.k8s.io/krustlet created\n+```\n+\n+Once that runs, you will need to approve the request:\n+\n+```shell\n+$ kubectl certificate approve krustlet\n+certificatesigningrequest.certificates.k8s.io/krustlet approved\n+```\n+\n+After approval, you can download the cert like so:\n+\n+```shell\n+$ kubectl get csr krustlet -o jsonpath='{.status.certificate}' | base64 --decode > krustlet.crt\n+```\n+\n+Lastly, combine the key and the cert into a PFX bundle, choosing your own password instead of\n+\"password\":\n+\n+```shell\n+$ openssl pkcs12 -export -out certificate.pfx -inkey krustlet.key -in krustlet.crt -password \"pass:password\"\n+```\n+\n+## Step 2: Determine the default gateway\n+\n+The default gateway for most Docker containers is generally `172.17.0.1`.\n+This IP is only reachable, by default, from the WSL2 distro.\n+However, the `eth0` host network is accessible from Windows, so we can use this IP address to connect to the WSL2 distro (where Krustlet is running).\n+\n+If this was changed, check `ifconfig eth0` from\n+the host OS to determine the default gateway:\n+\n+```console\n+$ ifconfig eth0\n+eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500\n+ inet 172.26.47.208 netmask 255.255.240.0 broadcast 172.26.47.255\n+ inet6 fe80::215:5dff:fe98:ce48 prefixlen 64 scopeid 0x20<link>\n+ ether 00:15:5d:98:ce:48 txqueuelen 1000 (Ethernet)\n+ RX packets 16818 bytes 11576089 (11.0 MiB)\n+ RX errors 0 
dropped 0 overruns 0 frame 0\n+ TX packets 1093 bytes 115724 (113.0 KiB)\n+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0\n+```\n+\n+In this example, I should use `172.26.47.208`.\n+\n+The hostname being \"applied\" from Windows, the default hostname will not resolve to this address, therefore you need to pass the `--node-ip` and `--node-name` flag to Krustlet.\n+\n+### Add a route to 172.17.0.1\n+\n+As stated above, the Docker default gateway, `172.17.0.1`, cannot be reached from Windows by default.\n+\n+However, a route can create to reach it, using the WSL2 own default gateway.\n+\n+We can use the following commands to create a temporary route:\n+\n+```shell\n+PS> $env:WSLIP = Get-NetIPConfiguration -InterfaceAlias *WSL* | % { $_.IPv4Address.IPAddress }\n+\n+PS> $env:WSLIP = Get-NetIPConfiguration -InterfaceAlias *WSL* | % { $_.IPv4Address.IPAddress }\n+ OK!\n+```\n+\n+**DO NOT** make this route permanent as the WSL2 default gateway is DHCP based and will change upon every reboot.\n+\n+## Step 3: Install and run Krustlet\n+\n+First, install the latest release of Krustlet following [the install guide](../intro/install.md).\n+\n+Once you have done that, run the following commands to run Krustlet's WASI provider:\n+\n+```shell\n+$ krustlet-wasi --node-ip 172.17.0.1 --node-name krustlet --pfx-password password\n+```\n+\n+In another terminal, run `kubectl get nodes -o wide` and you should see output that looks similar to\n+below:\n+\n+```\n+$ kubectl get nodes -o wide\n+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\n+docker-desktop Ready master 3d23h v1.15.5 192.168.65.3 <none> Docker Desktop 4.19.104-microsoft-standard docker://19.3.8\n+krustlet Ready agent 34s v1.17.0 172.26.47.208 <none> <unknown> <unknown> mvp\n+```\n+\n+## Optional: Delete the Krustlet node\n+Once you will no more need the Krustlet node, you can remove it from your cluster with the following `kubectl delete node` command:\n+\n+```shell\n+$ kubectl delete node krustlet\n+node \"krustlet\" deleted\n+\n+$ kubectl get nodes -o wide\n+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME\n+docker-desktop Ready master 4d v1.15.5 192.168.65.3 <none> Docker Desktop 4.19.104-microsoft-standard docker://19.3.8\n+```\n\\ No newline at end of file\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Added Krustlet on WSL2 howto
Instructions for adding Krustlet as a node to a Docker Desktop for Windows Kubernetes cluster |
350,419 | 09.04.2020 11:37:28 | 14,400 | aa2161317b54527636419f1c3d0bd0d913df897d | cargo fmt ftw | [
{
"change_type": "MODIFY",
"old_path": "demos/wascc/uppercase/src/lib.rs",
"new_path": "demos/wascc/uppercase/src/lib.rs",
"diff": "@@ -14,9 +14,7 @@ actor_handlers! {\ncodec::core::OP_HEALTH_REQUEST => health\n}\n-fn uppercase(\n- r: codec::http::Request\n-) -> CallResult{\n+fn uppercase(r: codec::http::Request) -> CallResult {\ninfo!(\"Query String: {}\", r.query_string);\nlet upper = UppercaseResponse {\noriginal: r.query_string.to_string(),\n@@ -26,9 +24,7 @@ fn uppercase(\nOk(serialize(codec::http::Response::json(upper, 200, \"OK\"))?)\n}\n-fn health(\n- _req: codec::core::HealthRequest\n-) -> ReceiveResult{\n+fn health(_req: codec::core::HealthRequest) -> ReceiveResult {\nOk(vec![])\n}\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | cargo fmt ftw |
350,428 | 09.04.2020 19:23:25 | -7,200 | de1b67d516024e8af6e4284d5b92d92419349349 | Updated the instructions based on Brian's comments
Removed entirely the `Step 1: Create certificate`
Removed the `Add a route to 172.17.0.1` subsection
Added a `TIP` to get the `eth0` IP
Added a check to the Kubernetes context
Updated the command to run `krustlet-wasi` and removed `--pfx-password` flag | [
{
"change_type": "MODIFY",
"old_path": "docs/howto/krustlet-on-wsl2.md",
"new_path": "docs/howto/krustlet-on-wsl2.md",
"diff": "@@ -16,64 +16,7 @@ See the [Docker Desktop for Windows > Getting started > Kubernetes](https://docs\nThis specific tutorial will be running Krustlet on your WSL2 distro and will explain how to access it from Windows.\n-## Step 1: Create Certificate\n-\n-Krustlet requires a certificate for securing communication with the Kubernetes API. Because\n-Kubernetes has its own certificates, we'll need to get a signed certificate from the Kubernetes API\n-that we can use. First things first, let's create a certificate signing request (CSR):\n-\n-```shell\n-$ mkdir -p ~/.krustlet/config\n-$ cd $_\n-$ openssl req -new -sha256 -newkey rsa:2048 -keyout krustlet.key -out krustlet.csr -nodes -subj \"/C=US/ST=./L=./O=./OU=./CN=krustlet\"\n-Generating a RSA private key\n-.................+++++\n-....................................................+++++\n-writing new private key to 'krustlet.key'\n-```\n-\n-This will create a CSR and a new key for the certificate, using `krustlet` as the hostname of the\n-server.\n-\n-Now that it is created, we'll need to send the request to Kubernetes:\n-\n-```shell\n-$ cat <<EOF | kubectl apply -f -\n-apiVersion: certificates.k8s.io/v1beta1\n-kind: CertificateSigningRequest\n-metadata:\n- name: krustlet\n-spec:\n- request: $(cat krustlet.csr | base64 | tr -d '\\n')\n- usages:\n- - digital signature\n- - key encipherment\n- - server auth\n-EOF\n-certificatesigningrequest.certificates.k8s.io/krustlet created\n-```\n-\n-Once that runs, you will need to approve the request:\n-\n-```shell\n-$ kubectl certificate approve krustlet\n-certificatesigningrequest.certificates.k8s.io/krustlet approved\n-```\n-\n-After approval, you can download the cert like so:\n-\n-```shell\n-$ kubectl get csr krustlet -o jsonpath='{.status.certificate}' | base64 --decode > krustlet.crt\n-```\n-\n-Lastly, combine the key and the cert into a PFX bundle, choosing your own password instead of\n-\"password\":\n-\n-```shell\n-$ openssl pkcs12 -export -out certificate.pfx -inkey krustlet.key -in krustlet.crt -password \"pass:password\"\n-```\n-\n-## Step 2: Determine the default gateway\n+## Step 1: Determine the default gateway\nThe default gateway for most Docker containers is generally `172.17.0.1`.\nThis IP is only reachable, by default, from the WSL2 distro.\n@@ -96,33 +39,33 @@ eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500\nIn this example, I should use `172.26.47.208`.\n-The hostname being \"applied\" from Windows, the default hostname will not resolve to this address, therefore you need to pass the `--node-ip` and `--node-name` flag to Krustlet.\n+> TIP: get the IP from `eth0`\n+\n+```shell\n+$ export mainIP=$(ifconfig eth0 | grep \"inet \" | awk '{ print $2 }')\n+```\n-### Add a route to 172.17.0.1\n+The hostname being \"applied\" from Windows, the default hostname will not resolve to this address, therefore you need to pass the `--node-ip` and `--node-name` flag to Krustlet.\n-As stated above, the Docker default gateway, `172.17.0.1`, cannot be reached from Windows by default.\n+## Step 2: Install and run Krustlet\n-However, a route can create to reach it, using the WSL2 own default gateway.\n+First, install the latest release of Krustlet following [the install guide](../intro/install.md).\n-We can use the following commands to create a temporary route:\n+Second, ensure the Kubernetes context is correctly set to `docker-desktop`:\n```shell\n-PS> $env:WSLIP = Get-NetIPConfiguration -InterfaceAlias *WSL* | % { $_.IPv4Address.IPAddress }\n+$ kubectl config get-contexts\n+CURRENT 
NAME CLUSTER AUTHINFO NAMESPACE\n+* docker-desktop docker-desktop docker-desktop\n-PS> $env:WSLIP = Get-NetIPConfiguration -InterfaceAlias *WSL* | % { $_.IPv4Address.IPAddress }\n- OK!\n+# Optional if the context is not set correctly\n+$ kubectl config set-context docker-desktop\n```\n-**DO NOT** make this route permanent as the WSL2 default gateway is DHCP based and will change upon every reboot.\n-\n-## Step 3: Install and run Krustlet\n-\n-First, install the latest release of Krustlet following [the install guide](../intro/install.md).\n-\nOnce you have done that, run the following commands to run Krustlet's WASI provider:\n```shell\n-$ krustlet-wasi --node-ip 172.17.0.1 --node-name krustlet --pfx-password password\n+$ krustlet-wasi --node-ip $mainIP --node-name krustlet\n```\nIn another terminal, run `kubectl get nodes -o wide` and you should see output that looks similar to\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Updated the instructions based on Brian's comments
Removed entirely the `Step 1: Create certificate`
Removed the `Add a route to 172.17.0.1` subsection
Added a `TIP` to get the `eth0` IP
Added a check to the Kubernetes context
Updated the command to run `krustlet-wasi` and removed `--pfx-password` flag |
350,428 | 09.04.2020 19:24:37 | -7,200 | e164735cd367efe105654e6e841d7ac9d65de017 | Added missing context output
Added the output of the `kubectl config set-context` command | [
{
"change_type": "MODIFY",
"old_path": "docs/howto/krustlet-on-wsl2.md",
"new_path": "docs/howto/krustlet-on-wsl2.md",
"diff": "@@ -60,6 +60,7 @@ CURRENT NAME CLUSTER AUTHINFO NAMESPACE\n# Optional if the context is not set correctly\n$ kubectl config set-context docker-desktop\n+Context \"docker-desktop\" modified.\n```\nOnce you have done that, run the following commands to run Krustlet's WASI provider:\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Added missing context output
Added the output of the `kubectl config set-context` command |
350,425 | 12.04.2020 12:47:56 | 25,200 | 87fba7e2095fbddbbac1f1805cc41106f1e86a8e | Add a HOWTO for running Krustlet on EKS.
This commit adds scripts for building a Krustlet-based AMI and defining an EKS
cluster using Krustlet.
It also provides a walkthrough of how to get Krustlet running on EKS.
Closes #130. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/howto/assets/eks/.gitignore",
"diff": "+manifest.json\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/howto/assets/eks/Makefile",
"diff": "+PACKER_BINARY ?= packer\n+PACKER_VARIABLES := aws_region ami_name krustlet_version source_ami_id source_ami_owners arch instance_type security_group_id\n+\n+aws_region ?= $(AWS_DEFAULT_REGION)\n+ami_name ?= amazon-eks-node-krustlet-$(KRUSTLET_VERSION)-v$(shell date +'%Y%m%d')\n+arch ?= x86_64\n+ifeq ($(arch), arm64)\n+instance_type ?= a1.large\n+else\n+instance_type ?= c5.2xlarge\n+endif\n+\n+ifeq ($(aws_region), cn-northwest-1)\n+source_ami_owners ?= 141808717104\n+endif\n+\n+T_RED := \\e[0;31m\n+T_GREEN := \\e[0;32m\n+T_YELLOW := \\e[0;33m\n+T_RESET := \\e[0m\n+\n+.PHONY: all\n+all: 0.1.0\n+\n+.PHONY: validate\n+validate:\n+ $(PACKER_BINARY) validate $(foreach packerVar,$(PACKER_VARIABLES), $(if $($(packerVar)),--var $(packerVar)='$($(packerVar))',)) eks-worker-al2.json\n+\n+.PHONY: krustlet\n+krustlet: validate\n+ @echo \"$(T_GREEN)Building AMI for Krustlet version $(T_YELLOW)$(KRUSTLET_VERSION)$(T_GREEN) on $(T_YELLOW)$(arch)$(T_RESET)\"\n+ $(PACKER_BINARY) build $(foreach packerVar,$(PACKER_VARIABLES), $(if $($(packerVar)),--var $(packerVar)='$($(packerVar))',)) eks-worker-al2.json\n+\n+.PHONY: 0.1.0\n+0.1.0:\n+ $(MAKE) krustlet KRUSTLET_VERSION=0.1.0\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/howto/assets/eks/eks-worker-al2.json",
"diff": "+{\n+ \"variables\": {\n+ \"aws_region\": \"us-west-2\",\n+ \"ami_name\": null,\n+ \"creator\": \"{{env `USER`}}\",\n+ \"encrypted\": \"false\",\n+ \"kms_key_id\": \"\",\n+\n+ \"aws_access_key_id\": \"{{env `AWS_ACCESS_KEY_ID`}}\",\n+ \"aws_secret_access_key\": \"{{env `AWS_SECRET_ACCESS_KEY`}}\",\n+ \"aws_session_token\": \"{{env `AWS_SESSION_TOKEN`}}\",\n+\n+ \"krustlet_version\": \"0.1.0\",\n+\n+ \"source_ami_id\": \"\",\n+ \"source_ami_owners\": \"137112412989\",\n+ \"source_ami_filter_name\": \"amzn2-ami-minimal-hvm-*\",\n+ \"arch\": null,\n+ \"instance_type\": null,\n+ \"ami_description\": \"EKS Kubernetes Worker AMI with AmazonLinux2 image\",\n+\n+ \"ssh_interface\": \"\",\n+ \"ssh_username\": \"ec2-user\",\n+ \"temporary_security_group_source_cidrs\": \"\",\n+ \"security_group_id\": \"\",\n+ \"associate_public_ip_address\": \"\",\n+ \"subnet_id\": \"\",\n+ \"remote_folder\": \"\",\n+ \"launch_block_device_mappings_volume_size\": \"4\",\n+ \"ami_users\": \"\"\n+ },\n+\n+ \"builders\": [\n+ {\n+ \"type\": \"amazon-ebs\",\n+ \"region\": \"{{user `aws_region`}}\",\n+ \"source_ami\": \"{{user `source_ami_id`}}\",\n+ \"ami_users\": \"{{user `ami_users`}}\",\n+ \"snapshot_users\": \"{{user `ami_users`}}\",\n+ \"source_ami_filter\": {\n+ \"filters\": {\n+ \"name\": \"{{user `source_ami_filter_name`}}\",\n+ \"architecture\": \"{{user `arch`}}\",\n+ \"root-device-type\": \"ebs\",\n+ \"state\": \"available\",\n+ \"virtualization-type\": \"hvm\"\n+ },\n+ \"owners\": [ \"{{user `source_ami_owners`}}\" ],\n+ \"most_recent\": true\n+ },\n+ \"instance_type\": \"{{user `instance_type`}}\",\n+ \"launch_block_device_mappings\": [\n+ {\n+ \"device_name\": \"/dev/xvda\",\n+ \"volume_type\": \"gp2\",\n+ \"volume_size\": \"{{user `launch_block_device_mappings_volume_size`}}\",\n+ \"delete_on_termination\": true\n+ }\n+ ],\n+ \"ami_block_device_mappings\": [\n+ {\n+ \"device_name\": \"/dev/xvda\",\n+ \"volume_type\": \"gp2\",\n+ \"volume_size\": 20,\n+ \"delete_on_termination\": true\n+ }\n+ ],\n+ \"ssh_username\": \"{{user `ssh_username`}}\",\n+ \"ssh_interface\": \"{{user `ssh_interface`}}\",\n+ \"temporary_security_group_source_cidrs\": \"{{user `temporary_security_group_source_cidrs`}}\",\n+ \"security_group_id\": \"{{user `security_group_id`}}\",\n+ \"associate_public_ip_address\": \"{{user `associate_public_ip_address`}}\",\n+ \"ssh_pty\": true,\n+ \"encrypt_boot\": \"{{user `encrypted`}}\",\n+ \"kms_key_id\": \"{{user `kms_key_id`}}\",\n+ \"run_tags\": {\n+ \"creator\": \"{{user `creator`}}\"\n+ },\n+ \"subnet_id\": \"{{user `subnet_id`}}\",\n+ \"tags\": {\n+ \"Name\": \"{{user `ami_name`}}\",\n+ \"created\": \"{{timestamp}}\",\n+ \"source_ami_id\": \"{{ user `source_ami_id`}}\",\n+ \"krustlet\": \"{{ user `krustlet_version`}}\"\n+ },\n+ \"ami_name\": \"{{user `ami_name`}}\",\n+ \"ami_description\": \"{{ user `ami_description` }}, (krustlet: {{ user `krustlet_version`}})\"\n+ }\n+ ],\n+\n+ \"provisioners\": [\n+ {\n+ \"type\": \"shell\",\n+ \"remote_folder\": \"{{ user `remote_folder`}}\",\n+ \"inline\": [\"mkdir -p /tmp/worker/\"]\n+ },\n+ {\n+ \"type\": \"file\",\n+ \"source\": \"{{template_dir}}/files/\",\n+ \"destination\": \"/tmp/worker/\"\n+ },\n+ {\n+ \"type\": \"shell\",\n+ \"remote_folder\": \"{{ user `remote_folder`}}\",\n+ \"script\": \"{{template_dir}}/scripts/install-worker.sh\",\n+ \"environment_vars\": [\n+ \"KRUSTLET_VERSION={{user `krustlet_version`}}\",\n+ \"AWS_ACCESS_KEY_ID={{user `aws_access_key_id`}}\",\n+ \"AWS_SECRET_ACCESS_KEY={{user `aws_secret_access_key`}}\",\n+ 
\"AWS_SESSION_TOKEN={{user `aws_session_token`}}\"\n+ ]\n+ },\n+ {\n+ \"type\": \"shell\",\n+ \"remote_folder\": \"{{ user `remote_folder`}}\",\n+ \"script\": \"{{template_dir}}/scripts/validate.sh\"\n+ }\n+ ],\n+ \"post-processors\": [\n+ {\n+ \"type\": \"manifest\",\n+ \"output\": \"manifest.json\",\n+ \"strip_path\": true\n+ }\n+ ]\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/howto/assets/eks/files/bootstrap.sh",
"diff": "+#!/usr/bin/env bash\n+# This file is based upon https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh\n+# The script is run when a node is started\n+# Krustlet doesn't yet support TLS bootstraping, so this will generate a server certificate\n+\n+set -o pipefail\n+set -o nounset\n+set -o errexit\n+\n+echo \"Generating certificate signing request...\"\n+openssl req -new -sha256 -newkey rsa:2048 -keyout /tmp/krustlet.key -out /tmp/krustlet.csr -nodes -config <(\n+cat <<-EOF\n+[req]\n+default_bits = 2048\n+prompt = no\n+default_md = sha256\n+req_extensions = req_ext\n+distinguished_name = dn\n+\n+[dn]\n+O=system:nodes\n+CN=system:node:$(hostname)\n+\n+[req_ext]\n+subjectAltName = @alt_names\n+\n+[alt_names]\n+IP.1 = $(ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1)\n+EOF\n+)\n+\n+cat <<EOF > /tmp/csr.yaml\n+apiVersion: certificates.k8s.io/v1beta1\n+kind: CertificateSigningRequest\n+metadata:\n+ name: $(hostname)\n+spec:\n+ request: $(cat /tmp/krustlet.csr | base64 | tr -d '\\n')\n+ usages:\n+ - digital signature\n+ - key encipherment\n+ - server auth\n+EOF\n+\n+RETRY_ATTEMPTS=3\n+\n+for attempt in `seq 0 $RETRY_ATTEMPTS`; do\n+ rc=0\n+\n+ if [[ $attempt -gt 0 ]]; then\n+ echo \"Retry $attempt of $RETRY_ATTEMPTS to request certificate signing...\"\n+ fi\n+\n+ echo \"Sending certificate signing request...\"\n+ /usr/local/bin/kubectl apply --kubeconfig /etc/eksctl/kubeconfig.yaml -f /tmp/csr.yaml || rc=$?\n+\n+ if [[ $rc -eq 0 ]]; then\n+ break\n+ fi\n+\n+ if [[ $attempt -eq $RETRY_ATTEMPTS ]]; then\n+ exit $rc\n+ fi\n+\n+ jitter=$((1 + RANDOM % 10))\n+ sleep_sec=\"$(( $(( 5 << $((1+$attempt)) )) + $jitter))\"\n+ sleep $sleep_sec\n+done\n+\n+for attempt in `seq 0 $RETRY_ATTEMPTS`; do\n+ rc=0\n+\n+ if [[ $attempt -gt 0 ]]; then\n+ echo \"Retry $attempt of $RETRY_ATTEMPTS to retrieve certificate...\"\n+ fi\n+\n+ echo \"Retrieving certificate from Kubernetes API server...\"\n+ /usr/local/bin/kubectl get --kubeconfig /etc/eksctl/kubeconfig.yaml csr $(hostname) -o jsonpath='{.status.certificate}' > /tmp/krustlet.cert.base64 || rc=$?\n+\n+ if [[ $rc -eq 0 ]] && [ -s /tmp/krustlet.cert.base64 ]; then\n+ base64 --decode /tmp/krustlet.cert.base64 > /tmp/krustlet.cert || rc=$?\n+\n+ if [[ $rc -eq 0 ]]; then\n+ break\n+ fi\n+ fi\n+\n+ if [[ $attempt -eq $RETRY_ATTEMPTS ]]; then\n+ exit $rc\n+ fi\n+\n+ jitter=$((1 + RANDOM % 10))\n+ sleep_sec=\"$(( $(( 5 << $((1+$attempt)) )) + $jitter))\"\n+ sleep $sleep_sec\n+done\n+\n+echo \"Creating server PFX file...\"\n+openssl pkcs12 -export -out /etc/krustlet/cert.pfx -inkey /tmp/krustlet.key -in /tmp/krustlet.cert -password \"pass:krustlet\"\n+chown root:root /etc/krustlet/cert.pfx\n+chmod 640 /etc/krustlet/cert.pfx\n+\n+rm /tmp/krustlet.key /tmp/krustlet.csr /tmp/krustlet.cert\n+\n+echo \"Starting krustlet service...\"\n+systemctl daemon-reload\n+systemctl enable krustlet\n+systemctl start krustlet\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/howto/assets/eks/files/krustlet.service",
"diff": "+[Unit]\n+Description=Krustlet - a kubelet implementation for running WebAssembly\n+\n+[Service]\n+Environment=KUBECONFIG=/etc/eksctl/kubeconfig.yaml\n+Environment=PFX_PATH=/etc/krustlet/cert.pfx\n+Environment=PFX_PASSWORD=krustlet\n+Environment=KRUSTLET_DATA_DIR=/var/cache/krustlet\n+Environment=RUST_LOG=wascc_provider=info,wasi_provider=info,main=info\n+ExecStart=/usr/local/bin/krustlet\n+Restart=on-failure\n+RestartForceExitStatus=SIGPIPE\n+RestartSec=5\n+KillMode=process\n+\n+[Install]\n+WantedBy=multi-user.target\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/howto/assets/eks/scripts/validate.sh",
"diff": "+#!/usr/bin/env bash\n+#\n+# Do basic validation of the generated AMI\n+\n+# Validates that a file or blob doesn't exist\n+#\n+# Arguments:\n+# a file name or blob\n+# Returns:\n+# 1 if a file exists, after printing an error\n+validate_file_nonexists() {\n+ local file_blob=$1\n+ for f in $file_blob; do\n+ if [ -e \"$f\" ]; then\n+ echo \"$f shouldn't exists\"\n+ exit 1\n+ fi\n+ done\n+}\n+\n+validate_file_nonexists '/etc/hostname'\n+validate_file_nonexists '/etc/resolv.conf'\n+validate_file_nonexists '/etc/ssh/ssh_host*'\n+validate_file_nonexists '/home/ec2-user/.ssh/authorized_keys'\n+validate_file_nonexists '/root/.ssh/authorized_keys'\n+validate_file_nonexists '/var/lib/cloud/data'\n+validate_file_nonexists '/var/lib/cloud/instance'\n+validate_file_nonexists '/var/lib/cloud/instances'\n+validate_file_nonexists '/var/lib/cloud/sem'\n+validate_file_nonexists '/var/lib/dhclient/*'\n+validate_file_nonexists '/var/lib/dhcp/dhclient.*'\n+validate_file_nonexists '/var/lib/yum/history'\n+validate_file_nonexists '/var/log/cloud-init-output.log'\n+validate_file_nonexists '/var/log/cloud-init.log'\n+validate_file_nonexists '/var/log/secure'\n+validate_file_nonexists '/var/log/wtmp'\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Add a HOWTO for running Krustlet on EKS.
This commit adds scripts for building a Krustlet-based AMI and defining an EKS
cluster using Krustlet.
It also provides a walkthrough of how to get Krustlet running on EKS.
Closes #130. |
350,425 | 13.04.2020 11:23:48 | 25,200 | 1085c1aad92072e705f4da8613329648cf44ee97 | Code review feedback.
Some minor formatting changes for the HOWTO. | [
{
"change_type": "MODIFY",
"old_path": "docs/howto/assets/eks/scripts/install-worker.sh",
"new_path": "docs/howto/assets/eks/scripts/install-worker.sh",
"diff": "@@ -103,6 +103,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y\nexport PATH=$PATH:$HOME/.cargo/bin\n# Build krustlet to link against the system libssl\n+# Amazon Linux has an older openssl version than the krustlet release binary\n# TODO: make the krustlet to build (wasi or wascc) configurable\nmkdir /tmp/krustlet\ngit clone https://github.com/deislabs/krustlet /tmp/krustlet\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/howto/krustlet-on-eks.md",
"new_path": "docs/howto/krustlet-on-eks.md",
"diff": "@@ -6,7 +6,7 @@ However, it does appear the feature might be coming soon.\nUntil that time, we can use [eksctl](https://eksctl.io/) to create and manage a node group with a custom Krustlet-based AMI.\n-# Prerequisites\n+## Prerequisites\nThe following tools are needed to complete this walkthrough:\n@@ -18,7 +18,7 @@ The following tools are needed to complete this walkthrough:\nWe will be using [Packer](https://packer.io/) to spin up an EC2 instance to build the AMI.\n-There is a Makefile in `docs/howto/assets/eks` that will run packer for you. It will use a `c5.2xlarge` EC2 instance to build the AMI with. Use the `instance_type` variable to `make` to change the type of the EC2 instance used.\n+There is a Makefile in `docs/howto/assets/eks` that will run `packer` for you. It will use a `c5.2xlarge` EC2 instance to build the AMI with. Use the `instance_type` variable to `make` to change the type of the EC2 instance used.\nRun `make` to build the AMI:\n@@ -43,7 +43,7 @@ us-west-2: ami-07adf9ce893885a3d\nMake note of the AMI identifier (in the example output above it would be `ami-07adf9ce893885a3d`) as it will be used to create the EKS cluster.\n-# Creating the EKS cluster\n+## Creating the EKS cluster\nWe will be using [eksctl](https://eksctl.io/) to deploy the EKS cluster.\n@@ -112,7 +112,7 @@ $ kubectl label nodes $NODE_NAME alpha.eksctl.io/cluster-name=krustlet-demo alph\nOnce the labels are applied to the nodes, the `eksctl` command should continue and complete successfully.\n-# Running a WebAssembly application\n+## Running a WebAssembly application\nLet's deploy a demo WebAssembly application to the cluster:\n@@ -144,7 +144,7 @@ Args are: []\nCongratulations! You've run a WebAssembly program on an EKS cluster!\n-# Deleting the cluster\n+## Deleting the cluster\nUse `eksctl` to delete the cluster and the nodes:\n@@ -152,7 +152,7 @@ Use `eksctl` to delete the cluster and the nodes:\n$ eksctl delete cluster --name krustlet-demo\n```\n-# Deleting the Krustlet AMI\n+## Deleting the Krustlet AMI\nDetermine the snapshot identifier of the AMI, where `$AMI_ID` is the identifier of your Krustlet AMI:\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Code review feedback.
Some minor formatting changes for the HOWTO. |
350,445 | 15.04.2020 09:35:01 | -28,800 | 1ba06e5d79da321e2857cb8d601a5722c50a9725 | fixed: now krustlet can pull wasm images from a standard registry | [
{
"change_type": "MODIFY",
"old_path": "crates/oci-distribution/src/client.rs",
"new_path": "crates/oci-distribution/src/client.rs",
"diff": "@@ -216,7 +216,7 @@ impl Client {\n/// be set on all OCI Registry request.\nfn auth_headers(&self) -> HeaderMap {\nlet mut headers = HeaderMap::new();\n- headers.insert(\"Accept\", \"application/vnd.docker.distribution.manifest.v2+json,application/vnd.docker.distribution.manifest.list.v2+json\".parse().unwrap());\n+ headers.insert(\"Accept\", \"application/vnd.docker.distribution.manifest.v2+json,application/vnd.docker.distribution.manifest.list.v2+json,application/vnd.oci.image.manifest.v1+json\".parse().unwrap());\nif let Some(bearer) = self.token.as_ref() {\nheaders.insert(\"Authorization\", bearer.bearer_token().parse().unwrap());\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | fixed: now krustlet can pull wasm images from a standard registry |
350,409 | 20.04.2020 19:45:48 | -7,200 | 641f0c39cb85c51e67f1bbbd7bbf79dd933bee0a | Update kube to 0.32
* Update kube to 0.32
* Hack to get e2e tests working
* Fix typo in e2e tests
* Update integration_tests.rs
Remove delay in integration tests | [
{
"change_type": "MODIFY",
"old_path": "Cargo.lock",
"new_path": "Cargo.lock",
"diff": "@@ -1697,12 +1697,12 @@ dependencies = [\n[[package]]\nname = \"kube\"\n-version = \"0.31.0\"\n+version = \"0.32.1\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"66be960d78ad587c7f89b24eb5f85a2b83da0e11d0fd7ffb000ce7bc01c52514\"\n+checksum = \"16e835f693d595f87a01cc95ded567487d14bcacd84a8d98c10bb86730e76037\"\ndependencies = [\n\"Inflector\",\n- \"base64 0.11.0\",\n+ \"base64 0.12.0\",\n\"bytes 0.5.4\",\n\"chrono\",\n\"dirs\",\n@@ -1715,7 +1715,6 @@ dependencies = [\n\"openssl\",\n\"reqwest\",\n\"serde\",\n- \"serde_derive\",\n\"serde_json\",\n\"serde_yaml\",\n\"thiserror\",\n@@ -2109,9 +2108,9 @@ checksum = \"2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c\"\n[[package]]\nname = \"openssl\"\n-version = \"0.10.28\"\n+version = \"0.10.29\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"973293749822d7dd6370d6da1e523b0d1db19f06c459134c658b2a4261378b52\"\n+checksum = \"cee6d85f4cb4c4f59a6a85d5b68a233d280c82e29e822913b9c8b129fbf20bdd\"\ndependencies = [\n\"bitflags\",\n\"cfg-if\",\n@@ -2129,9 +2128,9 @@ checksum = \"77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de\"\n[[package]]\nname = \"openssl-sys\"\n-version = \"0.9.54\"\n+version = \"0.9.55\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"1024c0a59774200a555087a6da3f253a9095a5f344e353b212ac4c8b8e450986\"\n+checksum = \"7717097d810a0f2e2323f9e5d11e71608355e24828410b55b9d4f18aa5f9a5d8\"\ndependencies = [\n\"autocfg 1.0.0\",\n\"cc\",\n@@ -2752,9 +2751,9 @@ checksum = \"388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3\"\n[[package]]\nname = \"serde\"\n-version = \"1.0.105\"\n+version = \"1.0.106\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"e707fbbf255b8fc8c3b99abb91e7257a622caeb20a9818cbadbeeede4e0932ff\"\n+checksum = \"36df6ac6412072f67cf767ebbde4133a5b2e88e76dc6187fa7104cd16f783399\"\ndependencies = [\n\"serde_derive\",\n]\n@@ -2780,9 +2779,9 @@ dependencies = [\n[[package]]\nname = \"serde_derive\"\n-version = \"1.0.105\"\n+version = \"1.0.106\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8\"\n+checksum = \"9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c\"\ndependencies = [\n\"proc-macro2\",\n\"quote\",\n@@ -3138,18 +3137,18 @@ dependencies = [\n[[package]]\nname = \"thiserror\"\n-version = \"1.0.13\"\n+version = \"1.0.15\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"e3711fd1c4e75b3eff12ba5c40dba762b6b65c5476e8174c1a664772060c49bf\"\n+checksum = \"54b3d3d2ff68104100ab257bb6bb0cb26c901abe4bd4ba15961f3bf867924012\"\ndependencies = [\n\"thiserror-impl\",\n]\n[[package]]\nname = \"thiserror-impl\"\n-version = \"1.0.13\"\n+version = \"1.0.15\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"ae2b85ba4c9aa32dd3343bd80eb8d22e9b54b7688c17ea3907f236885353b233\"\n+checksum = \"ca972988113b7715266f91250ddb98070d033c62a011fa0fcc57434a649310dd\"\ndependencies = [\n\"proc-macro2\",\n\"quote\",\n@@ -3224,9 +3223,9 @@ dependencies = [\n[[package]]\nname = \"tokio\"\n-version = \"0.2.14\"\n+version = \"0.2.18\"\nsource = \"registry+https://github.com/rust-lang/crates.io-index\"\n-checksum = \"2751672f9da075d045c67fdb0068be9850ab7b231fa77bb51d6fd35da9c0bb0d\"\n+checksum = \"34ef16d072d2b6dc8b4a56c70f5c5ced1a37752116f8e7c1e80c659aa7cb6713\"\ndependencies = [\n\"bytes 
0.5.4\",\n\"fnv\",\n"
},
{
"change_type": "MODIFY",
"old_path": "Cargo.toml",
"new_path": "Cargo.toml",
"diff": "@@ -16,7 +16,7 @@ default-run = \"krustlet-wascc\"\n[dependencies]\nanyhow = \"1.0\"\ntokio = { version = \"0.2\", features = [\"macros\"] }\n-kube = \"0.31\"\n+kube = \"0.32\"\nenv_logger = \"0.7\"\nkubelet = { path = \"./crates/kubelet\", version = \"0.1.0\", features = [\"cli\"] }\nwascc-provider = { path = \"./crates/wascc-provider\", version = \"0.1.0\" }\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/Cargo.toml",
"new_path": "crates/kubelet/Cargo.toml",
"diff": "@@ -23,7 +23,7 @@ hyper = { version = \"0.13\", default-features = false, features = [\"stream\"] }\nlog = \"0.4\"\nreqwest = \"0.10\"\ntokio = { version = \"0.2\", features = [\"fs\", \"stream\", \"macros\"] }\n-kube = \"0.31\"\n+kube = \"0.32\"\nk8s-openapi = { version = \"0.7\", default-features = false, features = [\"v1_17\"] }\nchrono = { version = \"0.4\", features = [\"serde\"] }\nstructopt = { version = \"0.3\", features = [\"wrap_help\"], optional = true }\n@@ -34,7 +34,6 @@ thiserror = \"1.0\"\nlazy_static = \"1.4\"\noci-distribution = { path = \"../oci-distribution\", version = \"0.1.0\" }\n-\n[features]\ncli = [\"structopt\"]\ndocs = [\"cli\"]\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/kubelet.rs",
"new_path": "crates/kubelet/src/kubelet.rs",
"diff": "@@ -7,7 +7,7 @@ use crate::Provider;\nuse futures::{StreamExt, TryStreamExt};\nuse k8s_openapi::api::core::v1::Pod as KubePod;\n-use kube::{api::ListParams, runtime::Informer, Resource};\n+use kube::{api::ListParams, runtime::Informer, Api};\nuse log::{debug, warn};\nuse tokio::sync::Mutex;\n@@ -27,14 +27,14 @@ use std::sync::Arc;\n/// thread to thread during the course of the Kubelet's lifetime.\npub struct Kubelet<P> {\nprovider: Arc<Mutex<P>>,\n- kube_config: kube::config::Configuration,\n+ kube_config: kube::Config,\nconfig: Config,\n}\nimpl<T: 'static + Provider + Sync + Send> Kubelet<T> {\n/// Create a new Kubelet with a provider, a kubernetes configuration,\n/// and a kubelet configuration\n- pub fn new(provider: T, kube_config: kube::config::Configuration, config: Config) -> Self {\n+ pub fn new(provider: T, kube_config: kube::Config, config: Config) -> Self {\nSelf {\nprovider: Arc::new(Mutex::new(provider)),\nkube_config,\n@@ -47,7 +47,7 @@ impl<T: 'static + Provider + Sync + Send> Kubelet<T> {\n/// This will listen on the given address, and will also begin watching for Pod\n/// events, which it will handle.\npub async fn start(&self) -> anyhow::Result<()> {\n- let client = kube::Client::from(self.kube_config.clone());\n+ let client = kube::Client::new(self.kube_config.clone());\n// Create the node. If it already exists, \"adopt\" the node definition\ncreate_node(&client, &self.config, T::ARCH).await;\n@@ -72,7 +72,8 @@ impl<T: 'static + Provider + Sync + Send> Kubelet<T> {\nfield_selector: Some(node_selector),\n..Default::default()\n};\n- let informer = Informer::new(client, params, Resource::all::<KubePod>());\n+ let api = Api::<KubePod>::all(client);\n+ let informer = Informer::new(api).params(params);\nloop {\nlet mut stream = informer.poll().await.expect(\"informer poll failed\").boxed();\nwhile let Some(event) = stream.try_next().await.unwrap() {\n@@ -125,12 +126,9 @@ mod test {\nuse std::collections::BTreeMap;\nfn mock_client() -> kube::Client {\n- kube::config::Configuration {\n- base_path: \".\".to_string(),\n- client: reqwest::Client::new(),\n- default_ns: \" \".to_string(),\n- }\n- .into()\n+ kube::Client::new(kube::Config::new(\n+ reqwest::Url::parse(\"http://127.0.0.1:8080\").unwrap(),\n+ ))\n}\nstruct MockProvider;\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/lib.rs",
"new_path": "crates/kubelet/src/lib.rs",
"diff": "//! let provider = MyProvider;\n//!\n//! // Load a kubernetes configuration\n-//! let kubeconfig = kube::config::load_kube_config().await.unwrap();\n+//! let kubeconfig = kube::Config::infer().await.unwrap();\n//! // Get a configuration for the Kubelet\n//! let kubelet_config = Config::default();\n//!\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/node.rs",
"new_path": "crates/kubelet/src/node.rs",
"diff": "@@ -4,6 +4,7 @@ use k8s_openapi::api::coordination::v1::Lease;\nuse k8s_openapi::api::core::v1::Node;\nuse k8s_openapi::apimachinery::pkg::apis::meta::v1::Time;\nuse kube::api::{Api, DeleteParams, PatchParams, PostParams};\n+use kube::error::ErrorResponse;\nuse kube::Error;\nuse log::{debug, error, info};\n@@ -62,7 +63,7 @@ pub async fn create_node(client: &kube::Client, config: &Config, arch: &str) {\nlet node =\nserde_json::from_value(node).expect(\"failed to deserialize node from node definition JSON\");\n- match retry!(node_client.create(&PostParams::default(), &node).await, times: 4, break_on: &Error::Api(kube::ErrorResponse { code: 409, .. }))\n+ match retry!(node_client.create(&PostParams::default(), &node).await, times: 4, break_on: &Error::Api(ErrorResponse { code: 409, .. }))\n{\nOk(node) => {\nlet node_uid = node.metadata.unwrap().uid.unwrap();\n@@ -71,7 +72,7 @@ pub async fn create_node(client: &kube::Client, config: &Config, arch: &str) {\nreturn;\n}\n}\n- Err(Error::Api(kube::ErrorResponse { code: 409, .. })) => {\n+ Err(Error::Api(ErrorResponse { code: 409, .. })) => {\ndebug!(\n\"Node '{}' exists already. Going to fetch existing node...\",\n&config.node_name\n@@ -149,14 +150,14 @@ async fn create_lease(node_uid: &str, node_name: &str, client: &kube::Client) ->\nleases.create(&PostParams::default(), &lease).await,\ntimes: 4,\nlog_error: |e| debug!(\"Lease could not be created: {}. Retrying...\", e),\n- break_on: &Error::Api(kube::ErrorResponse { code: 409, .. })\n+ break_on: &Error::Api(ErrorResponse { code: 409, .. })\n);\nmatch resp {\nOk(_) => {\ndebug!(\"Created lease for node '{}'\", node_name);\nOk(())\n}\n- Err(Error::Api(kube::ErrorResponse { code: 409, .. })) => {\n+ Err(Error::Api(ErrorResponse { code: 409, .. })) => {\ndebug!(\"Lease already existed for node '{}'\", node_name);\nOk(())\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/provider.rs",
"new_path": "crates/kubelet/src/provider.rs",
"diff": "@@ -102,6 +102,7 @@ pub trait Provider {\nerror!(\"Event error: {}\", e);\nErr(e.into())\n}\n+ WatchEvent::Bookmark(_) => Ok(()),\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/Cargo.toml",
"new_path": "crates/wascc-provider/Cargo.toml",
"diff": "@@ -19,7 +19,7 @@ wascc-host = \"0.6\"\nlog = \"0.4\"\nserde = \"1.0\"\nserde_derive = \"1.0\"\n-kube = \"0.31\"\n+kube = \"0.32\"\nkubelet = { path = \"../kubelet\", version = \"0.1.0\" }\ntokio = { version = \"0.2\", features = [\"fs\", \"macros\"] }\nchrono = { version = \"0.4\", features = [\"serde\"] }\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wascc-provider/src/lib.rs",
"new_path": "crates/wascc-provider/src/lib.rs",
"diff": "//! let store = FileModuleStore::new(client, &std::path::PathBuf::from(\"\"));\n//!\n//! // Load a kubernetes configuration\n-//! let kubeconfig = kube::config::load_kube_config().await.unwrap();\n+//! let kubeconfig = kube::Config::infer().await.unwrap();\n//!\n//! // Instantiate the provider type\n//! let provider = WasccProvider::new(store, &kubelet_config, kubeconfig.clone()).await.unwrap();\n@@ -102,7 +102,7 @@ pub struct WasccProvider<S> {\nhandles: Arc<RwLock<HashMap<String, PodHandle<ActorStopper, File>>>>,\nstore: S,\nlog_path: PathBuf,\n- kubeconfig: kube::config::Configuration,\n+ kubeconfig: kube::Config,\nhost: Arc<Mutex<WasccHost>>,\n}\n@@ -112,7 +112,7 @@ impl<S: ModuleStore + Send + Sync> WasccProvider<S> {\npub async fn new(\nstore: S,\nconfig: &kubelet::config::Config,\n- kubeconfig: kube::config::Configuration,\n+ kubeconfig: kube::Config,\n) -> anyhow::Result<Self> {\nlet host = Arc::new(Mutex::new(WasccHost::new()));\nlet log_path = config.data_dir.join(LOG_DIR_NAME);\n@@ -179,7 +179,7 @@ impl<S: ModuleStore + Send + Sync> Provider for WasccProvider<S> {\ninfo!(\"Starting containers for pod {:?}\", pod.name());\nlet mut modules = self.store.fetch_pod_modules(&pod).await?;\nlet mut container_handles = HashMap::new();\n- let client = kube::Client::from(self.kubeconfig.clone());\n+ let client = kube::Client::new(self.kubeconfig.clone());\nfor container in pod.containers() {\nlet env = Self::env_vars(&container, &pod, &client).await;\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wasi-provider/Cargo.toml",
"new_path": "crates/wasi-provider/Cargo.toml",
"diff": "@@ -15,7 +15,7 @@ edition = \"2018\"\n[dependencies]\nanyhow = \"1.0\"\nasync-trait = \"0.1\"\n-kube = \"0.31\"\n+kube = \"0.32\"\nlog = \"0.4\"\nwasmtime = \"0.15\"\nwasmtime-wasi = \"0.15\"\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/wasi-provider/src/lib.rs",
"new_path": "crates/wasi-provider/src/lib.rs",
"diff": "//! let store = FileModuleStore::new(client, &std::path::PathBuf::from(\"\"));\n//!\n//! // Load a kubernetes configuration\n-//! let kubeconfig = kube::config::load_kube_config().await.unwrap();\n+//! let kubeconfig = kube::Config::infer().await.unwrap();\n//!\n//! // Instantiate the provider type\n//! let provider = WasiProvider::new(store, &kubelet_config, kubeconfig.clone()).await.unwrap();\n@@ -56,7 +56,7 @@ pub struct WasiProvider<S> {\nhandles: Arc<RwLock<HashMap<String, PodHandle<HandleStopper, File>>>>,\nstore: S,\nlog_path: PathBuf,\n- kubeconfig: kube::config::Configuration,\n+ kubeconfig: kube::Config,\n}\nimpl<S: ModuleStore + Send + Sync> WasiProvider<S> {\n@@ -64,7 +64,7 @@ impl<S: ModuleStore + Send + Sync> WasiProvider<S> {\npub async fn new(\nstore: S,\nconfig: &kubelet::config::Config,\n- kubeconfig: kube::config::Configuration,\n+ kubeconfig: kube::Config,\n) -> anyhow::Result<Self> {\nlet log_path = config.data_dir.to_path_buf().join(LOG_DIR_NAME);\ntokio::fs::create_dir_all(&log_path).await?;\n@@ -106,7 +106,7 @@ impl<S: ModuleStore + Send + Sync> Provider for WasiProvider<S> {\nlet mut container_handles = HashMap::new();\nlet mut modules = self.store.fetch_pod_modules(&pod).await?;\n- let client = kube::Client::from(self.kubeconfig.clone());\n+ let client = kube::Client::new(self.kubeconfig.clone());\ninfo!(\"Starting containers for pod {:?}\", pod_name);\nfor container in pod.containers() {\nlet env = Self::env_vars(&container, &pod, &client).await;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/krustlet-wascc.rs",
"new_path": "src/krustlet-wascc.rs",
"diff": "-use kube::config;\nuse kubelet::config::Config;\nuse kubelet::module_store::FileModuleStore;\nuse kubelet::Kubelet;\n@@ -10,12 +9,7 @@ async fn main() -> anyhow::Result<()> {\n// a new Kubelet, all you need to implement is a provider.\nlet config = Config::new_from_flags(env!(\"CARGO_PKG_VERSION\"));\n- // Read the environment. Note that this tries a KubeConfig file first, then\n- // falls back on an in-cluster configuration.\n- let kubeconfig = config::load_kube_config()\n- .await\n- .or_else(|_| config::incluster_config())\n- .expect(\"kubeconfig failed to load\");\n+ let kubeconfig = kube::Config::infer().await?;\n// Initialize the logger\nenv_logger::init();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/krustlet-wasi.rs",
"new_path": "src/krustlet-wasi.rs",
"diff": "@@ -9,11 +9,7 @@ async fn main() -> anyhow::Result<()> {\n// a new Kubelet, all you need to implement is a provider.\nlet config = Config::new_from_flags(env!(\"CARGO_PKG_VERSION\"));\n- // Read the environment. Note that this tries a KubeConfig file first, then\n- // falls back on an in-cluster configuration.\n- let kubeconfig = kube::config::load_kube_config()\n- .await\n- .or_else(|_| kube::config::incluster_config())?;\n+ let kubeconfig = kube::Config::infer().await?;\n// Initialize the logger\nenv_logger::init();\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/integration_tests.rs",
"new_path": "tests/integration_tests.rs",
"diff": "use futures::{StreamExt, TryStreamExt};\nuse k8s_openapi::api::core::v1::{Node, Pod, Taint};\nuse kube::{\n- api::{Api, DeleteParams, ListParams, LogParams, PostParams, Resource, WatchEvent},\n- config,\n+ api::{Api, DeleteParams, ListParams, LogParams, PostParams, WatchEvent},\nruntime::Informer,\n};\nuse serde_json::json;\n#[tokio::test]\nasync fn test_wascc_provider() -> Result<(), Box<dyn std::error::Error>> {\n- // Read the environment. Note that this tries a KubeConfig file first, then\n- // falls back on an in-cluster configuration.\n- let kubeconfig = config::load_kube_config()\n- .await\n- .or_else(|_| config::incluster_config())?;\n-\n- let client = kube::Client::from(kubeconfig);\n+ let client = kube::Client::try_default().await?;\n- let nodes: Api<Node> = Api::all(client.clone());\n+ let nodes: Api<Node> = Api::all(client);\nlet node = nodes.get(\"krustlet-wascc\").await?;\nlet node_status = node.status.expect(\"node reported no status\");\n@@ -61,6 +54,7 @@ async fn test_wascc_provider() -> Result<(), Box<dyn std::error::Error>> {\n}\n);\n+ let client: kube::Client = nodes.into();\nlet pods: Api<Pod> = Api::namespaced(client.clone(), \"default\");\nlet p = serde_json::from_value(json!({\n\"apiVersion\": \"v1\",\n@@ -90,12 +84,11 @@ async fn test_wascc_provider() -> Result<(), Box<dyn std::error::Error>> {\nassert_eq!(pod.status.unwrap().phase.unwrap(), \"Pending\");\n- let inf: Informer<Pod> = Informer::new(\n- client,\n+ let api = Api::namespaced(client, \"default\");\n+ let inf: Informer<Pod> = Informer::new(api).params(\nListParams::default()\n.fields(\"metadata.name=greet-wascc\")\n.timeout(30),\n- Resource::namespaced::<Pod>(\"default\"),\n);\nlet mut watcher = inf.poll().await?.boxed();\n@@ -140,15 +133,9 @@ async fn test_wascc_provider() -> Result<(), Box<dyn std::error::Error>> {\n#[tokio::test]\nasync fn test_wasi_provider() -> Result<(), Box<dyn std::error::Error>> {\n- // Read the environment. Note that this tries a KubeConfig file first, then\n- // falls back on an in-cluster configuration.\n- let kubeconfig = config::load_kube_config()\n- .await\n- .or_else(|_| config::incluster_config())?;\n-\n- let client = kube::Client::from(kubeconfig);\n+ let client = kube::Client::try_default().await?;\n- let nodes: Api<Node> = Api::all(client.clone());\n+ let nodes: Api<Node> = Api::all(client);\nlet node = nodes.get(\"krustlet-wasi\").await?;\n@@ -193,6 +180,7 @@ async fn test_wasi_provider() -> Result<(), Box<dyn std::error::Error>> {\n}\n);\n+ let client: kube::Client = nodes.into();\nlet pods: Api<Pod> = Api::namespaced(client.clone(), \"default\");\nlet p = serde_json::from_value(json!({\n\"apiVersion\": \"v1\",\n@@ -222,12 +210,11 @@ async fn test_wasi_provider() -> Result<(), Box<dyn std::error::Error>> {\nassert_eq!(pod.status.unwrap().phase.unwrap(), \"Pending\");\n- let inf: Informer<Pod> = Informer::new(\n- client,\n+ let api = Api::namespaced(client.clone(), \"default\");\n+ let inf: Informer<Pod> = Informer::new(api).params(\nListParams::default()\n.fields(\"metadata.name=hello-wasi\")\n.timeout(30),\n- Resource::namespaced::<Pod>(\"default\"),\n);\nlet mut watcher = inf.poll().await?.boxed();\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Update kube to 0.32 (#189)
* Update kube to 0.32
* Hack to get e2e tests working
* Fix typo in e2e tests
* Update integration_tests.rs
Remove delay in integration tests |
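The diffs in this commit all follow the same kube 0.31 to 0.32 migration pattern: `Config::infer()` replaces the manual kubeconfig/in-cluster fallback, `Client::new(config)` replaces `Client::from(config)`, and an `Informer` is built from an `Api` handle rather than a `Resource`. A minimal sketch of that pattern, assuming the same dependency set as this repo (kube 0.32, tokio 0.2 with the macros feature, anyhow, and the k8s-openapi v1_17 feature):

```rust
use k8s_openapi::api::core::v1::Pod;
use kube::{
    api::{Api, ListParams},
    runtime::Informer,
};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // One call tries a kubeconfig file first, then in-cluster configuration.
    let config = kube::Config::infer().await?;
    let client = kube::Client::new(config);

    // kube 0.32 builds an Informer from an Api handle instead of a Resource.
    let pods = Api::<Pod>::all(client);
    let informer = Informer::new(pods).params(ListParams::default().timeout(30));

    // poll() yields a stream of watch events, as in the kubelet event loop above.
    let _stream = informer.poll().await?;
    Ok(())
}
```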
350,418 | 19.04.2020 17:18:51 | 25,200 | 1f08443071745499d1a203eade4c97eeb7610357 | Allow specifying labels
Add `--node-labels` to the command line option which appends extra
labels when registering a Krustlet node. | [
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/config.rs",
"new_path": "crates/kubelet/src/config.rs",
"diff": "@@ -11,6 +11,8 @@ use rpassword;\n#[cfg(feature = \"cli\")]\nuse structopt::StructOpt;\n+use std::collections::HashMap;\n+\nconst DEFAULT_PORT: u16 = 3000;\n/// The configuration needed for a kubelet to run properly.\n@@ -33,6 +35,8 @@ pub struct Config {\npub server_config: ServerConfig,\n/// The directory where the Kubelet will store data\npub data_dir: PathBuf,\n+ /// Labels to add when registering the node in the cluster\n+ pub node_labels: HashMap<String, String>,\n}\n/// The configuration for the Kubelet server.\n#[derive(Clone, Debug)]\n@@ -58,6 +62,7 @@ impl Config {\nOk(Config {\nnode_ip: default_node_ip(&mut hostname.clone(), preferred_ip_family)?,\nnode_name: sanitize_hostname(&hostname),\n+ node_labels: HashMap::new(),\nhostname,\ndata_dir: default_data_dir()?,\nserver_config: ServerConfig {\n@@ -97,6 +102,15 @@ impl Config {\nlet node_name = opts\n.node_name\n.unwrap_or_else(|| sanitize_hostname(&hostname));\n+\n+ let node_labels = opts\n+ .node_labels\n+ .unwrap_or_default()\n+ .split(',')\n+ .map(|i| split_one_label(i))\n+ .filter_map(Result::ok)\n+ .collect();\n+\nlet port = opts.port;\nlet pfx_path = opts.pfx_path.unwrap_or_else(default_pfx_path);\n@@ -110,6 +124,7 @@ impl Config {\nConfig {\nnode_ip,\nnode_name,\n+ node_labels,\nhostname,\ndata_dir,\nserver_config: ServerConfig {\n@@ -184,6 +199,13 @@ pub struct Opts {\n)]\nnode_ip: Option<IpAddr>,\n+ #[structopt(\n+ long = \"node-labels\",\n+ env = \"KRUSTLET_NODE_LABELS\",\n+ help = \"Labels to add when registering the node in the cluster. Labels must be key-value pairs separated by ','\"\n+ )]\n+ node_labels: Option<String>,\n+\n#[structopt(\nlong = \"hostname\",\nenv = \"KRUSTLET_HOSTNAME\",\n@@ -271,3 +293,12 @@ fn read_password_from_tty() -> String {\nlet password = rpassword::read_password_from_tty(Some(\"PFX file password: \")).unwrap();\nreturn password;\n}\n+\n+fn split_one_label(in_string: &str) -> Result<(String, String), String> {\n+ let mut splitter = in_string.splitn(2, '=');\n+ let key = splitter.next().unwrap();\n+ match splitter.next() {\n+ Some(val) => Ok((key.to_string(), val.to_string())),\n+ None => Err(\"empty label\".to_string()),\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/node.rs",
"new_path": "crates/kubelet/src/node.rs",
"diff": "@@ -244,7 +244,7 @@ async fn replace_node(client: &kube::Client, node_name: &str, node: &Node) -> Re\n/// use seems like a misstep. Ideally, we'll be able to support multiple runtimes.\nfn node_definition(config: &Config, arch: &str) -> serde_json::Value {\nlet ts = Time(Utc::now());\n- serde_json::json!({\n+ let mut json = serde_json::json!({\n\"apiVersion\": \"v1\",\n\"kind\": \"Node\",\n\"metadata\": {\n@@ -336,7 +336,14 @@ fn node_definition(config: &Config, arch: &str) -> serde_json::Value {\n}\n}\n}\n- })\n+ });\n+\n+ // extra labels from config\n+ for (key, val) in &config.node_labels {\n+ json[\"metadata\"][\"labels\"][key] = serde_json::json!(val);\n+ }\n+\n+ json\n}\n/// Define a new coordination.Lease object for Kubernetes\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Allow specifying labels
Add `--node-labels` to the command line option which appends extra
labels when registering a Krustlet node. |
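The `node.rs` change above relies on `serde_json::Value`'s indexing behavior to graft the parsed labels onto the node object. A self-contained sketch of just that injection step, assuming serde_json is on the dependency list (as it is in the kubelet crate); the node skeleton here is abbreviated for illustration, not Krustlet's full node definition:

```rust
use std::collections::HashMap;

fn main() {
    let mut node_labels: HashMap<String, String> = HashMap::new();
    node_labels.insert("mylabel".to_string(), "myvalue".to_string());

    let mut json = serde_json::json!({
        "apiVersion": "v1",
        "kind": "Node",
        "metadata": { "name": "krustlet", "labels": {} }
    });

    // Assigning through a missing key on a JSON object inserts that key.
    for (key, val) in &node_labels {
        json["metadata"]["labels"][key.as_str()] = serde_json::json!(val);
    }

    println!("{}", serde_json::to_string_pretty(&json).unwrap());
}
```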
350,418 | 19.04.2020 18:03:05 | 25,200 | 2c89eb55f08355a2d76704c510ce29b18e0d1c41 | Update docs with `--node-labels` | [
{
"change_type": "MODIFY",
"old_path": "docs/howto/assets/eks/files/bootstrap.sh",
"new_path": "docs/howto/assets/eks/files/bootstrap.sh",
"diff": "@@ -7,6 +7,35 @@ set -o pipefail\nset -o nounset\nset -o errexit\n+function print_help {\n+ echo \"usage: $0 [options] <cluster-name>\"\n+ echo \"Bootstraps a Krustlet instance into an EKS cluster\"\n+ echo \"\"\n+ echo \"-h,--help print this help\"\n+ echo \"--krustlet-node-labels Add extra labels to Krustlet.\"\n+}\n+\n+while [[ $# -gt 0 ]]; do\n+ key=\"$1\"\n+ case $key in\n+ -h|--help)\n+ print_help\n+ exit 1\n+ ;;\n+ --krustlet-node-labels)\n+ KRUSTLET_NODE_LABELS=$2\n+ shift\n+ shift\n+ ;;\n+ *) # unknown option\n+ print_help\n+ exit 1\n+ ;;\n+ esac\n+done\n+\n+KRUSTLET_NODE_LABELS=\"${KRUSTLET_NODE_LABELS:-}\"\n+\necho \"Generating certificate signing request...\"\nopenssl req -new -sha256 -newkey rsa:2048 -keyout /tmp/krustlet.key -out /tmp/krustlet.csr -nodes -config <(\ncat <<-EOF\n@@ -101,6 +130,13 @@ chmod 640 /etc/krustlet/cert.pfx\nrm /tmp/krustlet.key /tmp/krustlet.csr /tmp/krustlet.cert\n+if [[ -n \"$KRUSTLET_NODE_LABELS\" ]]; then\n+ cat <<EOF > /etc/eksctl/krustlet.local.env\n+KRUSTLET_NODE_LABELS=$KRUSTLET_NODE_LABELS\n+EOF\n+fi\n+chown root:root /etc/eksctl/krustlet.local.env\n+\necho \"Starting krustlet service...\"\nsystemctl daemon-reload\nsystemctl enable krustlet\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/howto/assets/eks/files/krustlet.service",
"new_path": "docs/howto/assets/eks/files/krustlet.service",
"diff": "Description=Krustlet - a kubelet implementation for running WebAssembly\n[Service]\n+# Global and static parameters: KRUSTLET_NODE_LABELS\n+EnvironmentFile=/etc/eksctl/krustlet.local.env\nEnvironment=KUBECONFIG=/etc/eksctl/kubeconfig.yaml\nEnvironment=PFX_PATH=/etc/krustlet/cert.pfx\nEnvironment=PFX_PASSWORD=krustlet\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/howto/krustlet-on-eks.md",
"new_path": "docs/howto/krustlet-on-eks.md",
"diff": "@@ -67,7 +67,7 @@ nodeGroups:\ndesiredCapacity: 2\nssh:\nallow: true\n- overrideBootstrapCommand: /etc/eks/bootstrap.sh\n+ overrideBootstrapCommand: /etc/eks/bootstrap.sh --krustlet-node-labels \"alpha.eksctl.io/cluster-name=krustlet-demo,alpha.eksctl.io/nodegroup-name=krustlet\"\n```\nThis will create a EKS cluster named `krustlet-demo` with a single unmanaged node group named `krustlet` with two `t3.small` nodes.\n@@ -100,18 +100,6 @@ ip-192-168-44-27.us-west-2.compute.internal Ready agent 17s v1.17.0\nYou should see two nodes with different names in the output.\n-`eksctl` expects the nodes to have been created with specific labels it uses to manage the nodes.\n-\n-Currently [Krustlet does not add these labels](https://github.com/deislabs/krustlet/issues/184) when it registers the node with the Kubernetes API server.\n-\n-To fix this, run `kubectl` to manually apply the labels to the nodes, where `$NODE_NAME` should be replaced with each of the two node names:\n-\n-```bash\n-$ kubectl label nodes $NODE_NAME alpha.eksctl.io/cluster-name=krustlet-demo alpha.eksctl.io/nodegroup-name=krustlet\n-```\n-\n-Once the labels are applied to the nodes, the `eksctl` command should continue and complete successfully.\n-\n## Running a WebAssembly application\nLet's deploy a demo WebAssembly application to the cluster:\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Update docs with `--node-labels` |
350,418 | 19.04.2020 21:46:51 | 25,200 | 37ae3f02311a09b3ffd131ca82fab7c469f6d91b | Rename KRUSTLET_NODE_LABELS to NODE_LABELS | [
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/config.rs",
"new_path": "crates/kubelet/src/config.rs",
"diff": "@@ -201,7 +201,7 @@ pub struct Opts {\n#[structopt(\nlong = \"node-labels\",\n- env = \"KRUSTLET_NODE_LABELS\",\n+ env = \"NODE_LABELS\",\nhelp = \"Labels to add when registering the node in the cluster. Labels must be key-value pairs separated by ','\"\n)]\nnode_labels: Option<String>,\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/howto/assets/eks/files/bootstrap.sh",
"new_path": "docs/howto/assets/eks/files/bootstrap.sh",
"diff": "@@ -23,7 +23,7 @@ while [[ $# -gt 0 ]]; do\nexit 1\n;;\n--krustlet-node-labels)\n- KRUSTLET_NODE_LABELS=$2\n+ NODE_LABELS=$2\nshift\nshift\n;;\n@@ -34,7 +34,7 @@ while [[ $# -gt 0 ]]; do\nesac\ndone\n-KRUSTLET_NODE_LABELS=\"${KRUSTLET_NODE_LABELS:-}\"\n+NODE_LABELS=\"${NODE_LABELS:-}\"\necho \"Generating certificate signing request...\"\nopenssl req -new -sha256 -newkey rsa:2048 -keyout /tmp/krustlet.key -out /tmp/krustlet.csr -nodes -config <(\n@@ -130,9 +130,9 @@ chmod 640 /etc/krustlet/cert.pfx\nrm /tmp/krustlet.key /tmp/krustlet.csr /tmp/krustlet.cert\n-if [[ -n \"$KRUSTLET_NODE_LABELS\" ]]; then\n+if [[ -n \"$NODE_LABELS\" ]]; then\ncat <<EOF > /etc/eksctl/krustlet.local.env\n-KRUSTLET_NODE_LABELS=$KRUSTLET_NODE_LABELS\n+NODE_LABELS=$NODE_LABELS\nEOF\nfi\nchown root:root /etc/eksctl/krustlet.local.env\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/howto/assets/eks/files/krustlet.service",
"new_path": "docs/howto/assets/eks/files/krustlet.service",
"diff": "Description=Krustlet - a kubelet implementation for running WebAssembly\n[Service]\n-# Global and static parameters: KRUSTLET_NODE_LABELS\n+# Global and static parameters: NODE_LABELS\nEnvironmentFile=/etc/eksctl/krustlet.local.env\nEnvironment=KUBECONFIG=/etc/eksctl/kubeconfig.yaml\nEnvironment=PFX_PATH=/etc/krustlet/cert.pfx\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Rename KRUSTLET_NODE_LABELS to NODE_LABELS |
350,418 | 19.04.2020 21:53:44 | 25,200 | c9addd12abbc1276dd7ff19cc86f9511104ecdf6 | Allow label with empty value | [
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/config.rs",
"new_path": "crates/kubelet/src/config.rs",
"diff": "@@ -297,8 +297,12 @@ fn read_password_from_tty() -> String {\nfn split_one_label(in_string: &str) -> Result<(String, String), String> {\nlet mut splitter = in_string.splitn(2, '=');\nlet key = splitter.next().unwrap();\n+ if key.is_empty() {\n+ Err(\"error splitting label\".to_string())\n+ } else {\nmatch splitter.next() {\nSome(val) => Ok((key.to_string(), val.to_string())),\n- None => Err(\"empty label\".to_string()),\n+ None => Ok((key.to_string(), \"\".to_string())),\n+ }\n}\n}\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Allow label with empty value |
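Taken together with the earlier `--node-labels` commit, `split_one_label` now accepts a bare key as a label with an empty value while still rejecting an empty key. A standalone copy of the function from the diff, with a hypothetical `main` exercising the edge cases:

```rust
fn split_one_label(in_string: &str) -> Result<(String, String), String> {
    let mut splitter = in_string.splitn(2, '=');
    let key = splitter.next().unwrap();
    if key.is_empty() {
        Err("error splitting label".to_string())
    } else {
        match splitter.next() {
            Some(val) => Ok((key.to_string(), val.to_string())),
            None => Ok((key.to_string(), "".to_string())),
        }
    }
}

fn main() {
    assert_eq!(split_one_label("foo=bar"), Ok(("foo".into(), "bar".into())));
    // A bare key now becomes a label with an empty value...
    assert_eq!(split_one_label("foo"), Ok(("foo".into(), "".into())));
    assert_eq!(split_one_label("foo="), Ok(("foo".into(), "".into())));
    // ...while an empty key is still rejected.
    assert!(split_one_label("").is_err());
    assert!(split_one_label("=bar").is_err());
}
```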
350,418 | 20.04.2020 14:40:50 | 25,200 | f71f6e1f039710716127d1c38216c8b352b4e0c7 | Parse comma-separated values from flags with `use_delimiter`
See | [
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/config.rs",
"new_path": "crates/kubelet/src/config.rs",
"diff": "@@ -105,8 +105,7 @@ impl Config {\nlet node_labels = opts\n.node_labels\n- .unwrap_or_default()\n- .split(',')\n+ .iter()\n.map(|i| split_one_label(i))\n.filter_map(Result::ok)\n.collect();\n@@ -202,9 +201,10 @@ pub struct Opts {\n#[structopt(\nlong = \"node-labels\",\nenv = \"NODE_LABELS\",\n+ use_delimiter = true,\nhelp = \"Labels to add when registering the node in the cluster. Labels must be key-value pairs separated by ','\"\n)]\n- node_labels: Option<String>,\n+ node_labels: Vec<String>,\n#[structopt(\nlong = \"hostname\",\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Parse comma-separated values from flags with `use_delimiter`
See https://github.com/TeXitoi/structopt/issues/367#issuecomment-606619645 |
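A minimal sketch of the structopt behavior this commit relies on, assuming structopt 0.3; the struct and flag names here are illustrative, not the full Krustlet `Opts`:

```rust
use structopt::StructOpt;

#[derive(Debug, StructOpt)]
struct Opts {
    /// Comma-separated key=value pairs, e.g. --node-labels foo=bar,baz=qux
    #[structopt(long = "node-labels", use_delimiter = true)]
    node_labels: Vec<String>,
}

fn main() {
    // use_delimiter = true makes structopt split a single argument on ','
    // into multiple Vec entries instead of requiring the flag to repeat.
    let opts = Opts::from_iter(vec!["prog", "--node-labels", "foo=bar,baz=qux"]);
    assert_eq!(opts.node_labels, vec!["foo=bar", "baz=qux"]);
}
```

This removes the hand-rolled `split(',')` from the config code, since the values arrive pre-split from the argument parser.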
350,418 | 19.04.2020 23:31:44 | 25,200 | e7f407aed8501027830054c5800fa5d976126df7 | Allow building Krustlet from a different repo for EKS
Allow passing in `KRUSTLET_VERSION` & `KRUSTLET_SRC` when building
Krustlet AMI with packer. | [
{
"change_type": "MODIFY",
"old_path": "docs/howto/assets/eks/Makefile",
"new_path": "docs/howto/assets/eks/Makefile",
"diff": "PACKER_BINARY ?= packer\n-PACKER_VARIABLES := aws_region ami_name krustlet_version source_ami_id source_ami_owners arch instance_type security_group_id\n+PACKER_VARIABLES := aws_region ami_name krustlet_version krustlet_src source_ami_id source_ami_owners arch instance_type security_group_id\naws_region ?= $(AWS_DEFAULT_REGION)\nami_name ?= amazon-eks-node-krustlet-$(KRUSTLET_VERSION)-v$(shell date +'%Y%m%d')\n@@ -14,6 +14,11 @@ ifeq ($(aws_region), cn-northwest-1)\nsource_ami_owners ?= 141808717104\nendif\n+KRUSTLET_VERSION ?= 0.1.0\n+krustlet_version ?= $(KRUSTLET_VERSION)\n+KRUSTLET_SRC ?= https://github.com/deislabs/krustlet/archive/v$(krustlet_version).tar.gz\n+krustlet_src ?= $(KRUSTLET_SRC)\n+\nT_RED := \\e[0;31m\nT_GREEN := \\e[0;32m\nT_YELLOW := \\e[0;33m\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/howto/assets/eks/eks-worker-al2.json",
"new_path": "docs/howto/assets/eks/eks-worker-al2.json",
"diff": "\"aws_session_token\": \"{{env `AWS_SESSION_TOKEN`}}\",\n\"krustlet_version\": \"0.1.0\",\n+ \"krustlet_src\": \"https://github.com/deislabs/krustlet/archive/v{{ user `krustlet_version`}}.tar.gz\",\n\"source_ami_id\": \"\",\n\"source_ami_owners\": \"137112412989\",\n\"script\": \"{{template_dir}}/scripts/install-worker.sh\",\n\"environment_vars\": [\n\"KRUSTLET_VERSION={{user `krustlet_version`}}\",\n+ \"KRUSTLET_SRC={{user `krustlet_src`}}\",\n\"AWS_ACCESS_KEY_ID={{user `aws_access_key_id`}}\",\n\"AWS_SECRET_ACCESS_KEY={{user `aws_secret_access_key`}}\",\n\"AWS_SESSION_TOKEN={{user `aws_session_token`}}\"\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/howto/assets/eks/scripts/install-worker.sh",
"new_path": "docs/howto/assets/eks/scripts/install-worker.sh",
"diff": "@@ -22,6 +22,7 @@ validate_env_set() {\n}\nvalidate_env_set KRUSTLET_VERSION\n+validate_env_set KRUSTLET_SRC\n################################################################################\n### Machine Architecture #######################################################\n@@ -105,11 +106,16 @@ export PATH=$PATH:$HOME/.cargo/bin\n# Build krustlet to link against the system libssl\n# Amazon Linux has an older openssl version than the krustlet release binary\n# TODO: make the krustlet to build (wasi or wascc) configurable\n+echo \"Downloading Krustlet source from $KRUSTLET_SRC\"\n+curl $KRUSTLET_SRC -L -o /tmp/krustlet.tar.gz\n+\n+echo \"Unzipping Krustlet source\"\nmkdir /tmp/krustlet\n-git clone https://github.com/deislabs/krustlet /tmp/krustlet\n+tar xvzf /tmp/krustlet.tar.gz --strip=1 -C /tmp/krustlet\n+\ncargo build --release --manifest-path /tmp/krustlet/Cargo.toml --bin krustlet-wasi\nsudo mv /tmp/krustlet/target/release/krustlet-wasi /usr/local/bin/krustlet\n-rm -rf /tmp/krustlet\n+rm -rf /tmp/krustlet /tmp/krustlet.tar.gz\nsudo chown root:root /usr/local/bin/krustlet\nsudo chmod 755 /usr/local/bin/krustlet\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/howto/krustlet-on-eks.md",
"new_path": "docs/howto/krustlet-on-eks.md",
"diff": "@@ -27,6 +27,13 @@ $ cd docs/howto/assets/eks\n$ make\n```\n+You can also build the AMI with a different version of Krustlet from a forked repo. For example:\n+\n+```bash\n+$ cd docs/howto/assets/eks\n+$ KRUSTLET_VERSION=$(git rev-parse --short HEAD) KRUSTLET_SRC=https://github.com/jingweno/krustlet/archive/$(git rev-parse --short HEAD).tar.gz make krustlet\n+```\n+\nThis command will take a while to build Krustlet from source on the EC2 instance.\nIn the future, a prebuilt binary for Amazon Linux 2 might be available that would speed up the AMI creation process.\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | Allow building Krustlet from a different repo for EKS
Allow passing in `KRUSTLET_VERSION` & `KRUSTLET_SRC` when building
Krustlet AMI with packer. |
350,415 | 20.04.2020 20:51:00 | 14,400 | 0536ddeeaef4094168cebf00e18cb1f565172d89 | add node label constraints per man page
change to to_owned on static strings
warn for namespace constraint violations
add unit tests
inline configuration
improve key casting
borrow rather than clone node_labels
convert vecs to static arrays | [
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/config.rs",
"new_path": "crates/kubelet/src/config.rs",
"diff": "@@ -200,7 +200,15 @@ pub struct Opts {\nlong = \"node-labels\",\nenv = \"NODE_LABELS\",\nuse_delimiter = true,\n- help = \"Labels to add when registering the node in the cluster. Labels must be key-value pairs separated by ','\"\n+ help = \"Labels to add when registering the node in the cluster.\n+ Labels must be key=value pairs separated by ','.\n+ Labels in the 'kubernetes.io' namespace must begin with an allowed prefix\n+ (kubelet.kubernetes.io, node.kubernetes.io) or be in the specifically allowed set\n+ (beta.kubernetes.io/arch, beta.kubernetes.io/instance-type, beta.kubernetes.io/os,\n+ failure-domain.beta.kubernetes.io/region, failure-domain.beta.kubernetes.io/zone,\n+ failure-domain.kubernetes.io/region, failure-domain.kubernetes.io/zone,\n+ kubernetes.io/arch, kubernetes.io/hostname, kubernetes.io/instance-type,\n+ kubernetes.io/os)\"\n)]\nnode_labels: Vec<String>,\n"
},
{
"change_type": "MODIFY",
"old_path": "crates/kubelet/src/node.rs",
"new_path": "crates/kubelet/src/node.rs",
"diff": "@@ -6,7 +6,8 @@ use k8s_openapi::apimachinery::pkg::apis::meta::v1::Time;\nuse kube::api::{Api, DeleteParams, PatchParams, PostParams};\nuse kube::error::ErrorResponse;\nuse kube::Error;\n-use log::{debug, error, info};\n+use log::{debug, error, info, warn};\n+use std::collections::HashMap;\nmacro_rules! retry {\n($action:expr, times: $num_times:expr, error: $on_err:expr) => {{\n@@ -249,15 +250,7 @@ fn node_definition(config: &Config, arch: &str) -> serde_json::Value {\n\"kind\": \"Node\",\n\"metadata\": {\n\"name\": config.node_name,\n- \"labels\": {\n- \"beta.kubernetes.io/arch\": arch,\n- \"beta.kubernetes.io/os\": \"linux\",\n- \"kubernetes.io/arch\": arch,\n- \"kubernetes.io/os\": \"linux\",\n- \"kubernetes.io/hostname\": config.hostname,\n- \"kubernetes.io/role\": \"agent\",\n- \"type\": \"krustlet\"\n- },\n+ \"labels\": {},\n\"annotations\": {\n\"node.alpha.kubernetes.io/ttl\": \"0\",\n\"volumes.kubernetes.io/controller-managed-attach-detach\": \"true\"\n@@ -338,8 +331,9 @@ fn node_definition(config: &Config, arch: &str) -> serde_json::Value {\n}\n});\n+ let node_labels = node_labels_definition(arch, &config);\n// extra labels from config\n- for (key, val) in &config.node_labels {\n+ for (key, val) in node_labels {\njson[\"metadata\"][\"labels\"][key] = serde_json::json!(val);\n}\n@@ -389,3 +383,120 @@ fn lease_spec_definition(node_name: &str) -> serde_json::Value {\n}\n)\n}\n+\n+/// Defines the labels that will be applied to this node\n+///\n+/// Default values and passed node-labels arguments are injected by config.\n+fn node_labels_definition(arch: &str, config: &Config) -> HashMap<String, String> {\n+ // Add mandatory static labels\n+ let mut labels = HashMap::new();\n+ labels.insert(\"beta.kubernetes.io/os\".to_owned(), \"linux\".to_owned());\n+ labels.insert(\"kubernetes.io/os\".to_owned(), \"linux\".to_owned());\n+ labels.insert(\"kubernetes.io/role\".to_owned(), \"agent\".to_owned());\n+ labels.insert(\"type\".to_owned(), \"krustlet\".to_owned());\n+ // add the mandatory labels that are dependent on injected values\n+ labels.insert(\"beta.kubernetes.io/arch\".to_owned(), arch.to_owned());\n+ labels.insert(\"kubernetes.io/arch\".to_owned(), arch.to_owned());\n+ labels.insert(\n+ \"kubernetes.io/hostname\".to_owned(),\n+ config.hostname.to_owned(),\n+ );\n+\n+ let k8s_namespace = \"kubernetes.io\";\n+ // namespaces managed by this method - do not allow user injection\n+ let managed_namespace_labels = [\n+ \"beta.kubernetes.io/arch\",\n+ \"beta.kubernetes.io/os\",\n+ \"kubernetes.io/arch\",\n+ \"kubernetes.io/hostname\",\n+ \"kubernetes.io/os\",\n+ \"kubernetes.io/role\",\n+ \"type\",\n+ ];\n+ let allowed_k8s_namespace_labels = [\n+ \"beta.kubernetes.io/instance-type\",\n+ \"failure-domain.beta.kubernetes.io/region\",\n+ \"failure-domain.beta.kubernetes.io/zone\",\n+ \"failure-domain.kubernetes.io/region\",\n+ \"failure-domain.kubernetes.io/zone\",\n+ \"kubernetes.io/instance-type\",\n+ ];\n+\n+ // Attempt to append node labels from passed arguments.\n+ // First, check for managed namespace and log exclusion\n+ // Next, check if label contains k8s namespace and ensure it's allowable\n+ // Else, if not k8s namspace, insert\n+ let user_labels = &config.node_labels;\n+\n+ for (key, value) in user_labels.iter() {\n+ if managed_namespace_labels.contains(&key.as_str()) {\n+ warn!(\n+ \"User provided node label {} omitted. 
Namespace label managed by runtime.\",\n+ key\n+ );\n+ } else if key.contains(k8s_namespace)\n+ && !key.starts_with(\"kubelet.kubernetes.io\")\n+ && !key.starts_with(\"node.kubernetes.io\")\n+ && !allowed_k8s_namespace_labels.contains(&key.as_str())\n+ {\n+ warn!(\n+ \"User provided node label {} omitted. Namespace violates constraints.\",\n+ key\n+ );\n+ } else {\n+ labels.insert(key.to_owned(), value.to_owned());\n+ }\n+ }\n+ labels\n+}\n+\n+#[cfg(test)]\n+mod test {\n+ use super::*;\n+ use crate::config::{Config, ServerConfig};\n+ use std::net::{IpAddr, Ipv4Addr};\n+ use std::path::PathBuf;\n+\n+ #[test]\n+ fn test_node_labels_definition() {\n+ let mut node_labels = HashMap::new();\n+ node_labels.insert(\"foo\".to_owned(), \"custom\".to_owned());\n+ node_labels.insert(\n+ \"kubelet.kubernetes.io/allowed-prefix\".to_owned(),\n+ \"prefix\".to_owned(),\n+ );\n+ node_labels.insert(\n+ \"not-allowed.kubernetes.io\".to_owned(),\n+ \"not-allowed\".to_owned(),\n+ );\n+ node_labels.insert(\n+ \"kubernetes.io/instance-type\".to_owned(),\n+ \"allowed\".to_owned(),\n+ );\n+ node_labels.insert(\"beta.kubernetes.io/os\".to_owned(), \"managed\".to_owned());\n+\n+ let config = Config {\n+ node_ip: IpAddr::from(Ipv4Addr::new(127, 0, 0, 1)),\n+ hostname: String::from(\"foo\"),\n+ node_name: String::from(\"bar\"),\n+ server_config: ServerConfig {\n+ addr: IpAddr::from(Ipv4Addr::new(127, 0, 0, 1)),\n+ port: 8080,\n+ pfx_password: String::new(),\n+ pfx_path: PathBuf::new(),\n+ },\n+ data_dir: PathBuf::new(),\n+ node_labels,\n+ };\n+\n+ let result = node_labels_definition(\"linux\", &config);\n+\n+ assert!(result.contains_key(\"kubernetes.io/role\"));\n+ assert!(result.contains_key(\"foo\"));\n+ assert!(result.contains_key(\"kubelet.kubernetes.io/allowed-prefix\"));\n+ assert!(!result.contains_key(\"not-allowed.kubernetes.io\"));\n+ assert!(result.contains_key(\"kubernetes.io/instance-type\"));\n+ assert!(!result.get(\"beta.kubernetes.io/os\").unwrap().eq(\"managed\"));\n+ assert!(result.get(\"beta.kubernetes.io/os\").unwrap().eq(\"linux\"));\n+ }\n+}\n"
}
] | Rust | Apache License 2.0 | krustlet/krustlet | add node label constraints per man page
change to to_owned on static strings
warn for namespace constraint violations
add unit tests
inline configuration
improve key casting
borrow rather than clone node_labels
convert vecs to static arrays |
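The kubernetes.io namespace rules in this commit reduce to a single predicate. A compact sketch extracting it as a free function for illustration (the managed-labels check from the diff is a separate, earlier branch and is omitted here):

```rust
/// True when a user-supplied label key is acceptable under the node-labels
/// rules: keys outside kubernetes.io, the two allowed prefixes, or the
/// explicit allow list from the node-labels man page.
fn is_allowed_user_label(key: &str) -> bool {
    let allowed_k8s_namespace_labels = [
        "beta.kubernetes.io/instance-type",
        "failure-domain.beta.kubernetes.io/region",
        "failure-domain.beta.kubernetes.io/zone",
        "failure-domain.kubernetes.io/region",
        "failure-domain.kubernetes.io/zone",
        "kubernetes.io/instance-type",
    ];
    !key.contains("kubernetes.io")
        || key.starts_with("kubelet.kubernetes.io")
        || key.starts_with("node.kubernetes.io")
        || allowed_k8s_namespace_labels.contains(&key)
}

fn main() {
    assert!(is_allowed_user_label("foo"));
    assert!(is_allowed_user_label("kubelet.kubernetes.io/anything"));
    assert!(is_allowed_user_label("kubernetes.io/instance-type"));
    assert!(!is_allowed_user_label("not-allowed.kubernetes.io"));
}
```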