diff --git a/.circleci/config.pkl b/.circleci/config.pkl new file mode 100644 index 0000000..1e77eb7 --- /dev/null +++ b/.circleci/config.pkl @@ -0,0 +1,104 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +amends ".../pkl-project-commons/packages/pkl.impl.circleci/PklCI.pkl" + +triggerPackageDocsBuild = "main" + +jobs { + ["build"] { + docker { + new { image = "cimg/openjdk:17.0" } + } + steps { + "checkout" + new RunStep { + name = "Build" + command = "./gradlew build" + environment {} + } + new StoreTestResults { path = "build/test-results" } + new PersistToWorkspaceStep { + root = "." + paths { + "build" + } + } + } + } + ["release"] { + docker { + new { image = "maniator/gh:v2.40.1" } + } + steps { + "checkout" // for `git tag` querying + new AttachWorkspaceStep { at = "." } + new RunStep { + name = "Publish release on GitHub" + // language=bash + command = #""" + if [[ -d build/releases && -n "$(ls -A build/releases)" ]] + then + REPO="${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}" + for dir in build/releases/* + do + if [[ -d "$dir" ]] + then + pkg=$(basename "$dir") + if gh release view "$pkg" ; then + echo "Package $pkg already published" + else + # TODO we can be kinder to GitHub by querying once for all releases. + echo -n "> Releasing $pkg at SHA1 ${CIRCLE_SHA1}..." + gh release create "$pkg" \ + --title "$pkg" \ + --target "${CIRCLE_SHA1}" \ + --repo "$REPO" \ + --notes "Release of $pkg" \ + "$dir"/* + echo "DONE" + fi + else + echo "> SKIPPING $dir; not a directory" + fi + done + else + echo "No new packages to release." + fi + """# + } + } + } +} + +prb { + jobs { + "build" + } +} + +main { + jobs { + "build" + new { + ["release"] { + requires { + "build" + } + context = "pkl-github-release" + } + } + } +} diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000..b8862d1 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,102 @@ +# Generated from CircleCI.pkl. DO NOT EDIT. +version: '2.1' +orbs: + pr-approval: apple/pr-approval@0.1.0 +jobs: + build: + steps: + - checkout + - run: + command: ./gradlew build + name: Build + environment: {} + - store_test_results: + path: build/test-results + - persist_to_workspace: + root: '.' + paths: + - build + docker: + - image: cimg/openjdk:17.0 + release: + steps: + - checkout + - attach_workspace: + at: '.' + - run: + command: |- + if [[ -d build/releases && -n "$(ls -A build/releases)" ]] + then + REPO="${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}" + for dir in build/releases/* + do + if [[ -d "$dir" ]] + then + pkg=$(basename "$dir") + if gh release view "$pkg" ; then + echo "Package $pkg already published" + else + # TODO we can be kinder to GitHub by querying once for all releases. 
+ echo -n "> Releasing $pkg at SHA1 ${CIRCLE_SHA1}..." + gh release create "$pkg" \ + --title "$pkg" \ + --target "${CIRCLE_SHA1}" \ + --repo "$REPO" \ + --notes "Release of $pkg" \ + "$dir"/* + echo "DONE" + fi + else + echo "> SKIPPING $dir; not a directory" + fi + done + else + echo "No new packages to release." + fi + name: Publish release on GitHub + docker: + - image: maniator/gh:v2.40.1 + trigger-package-docs-build: + steps: + - run: + command: |- + curl --location \ + --request POST \ + --header "Content-Type: application/json" \ + -u "${CIRCLE_TOKEN}:" \ + --data '{ "branch": "main" }' \ + "https://circleci.com/api/v2/project/github/apple/pkl-package-docs/pipeline" + name: Triggering docsite build + docker: + - image: cimg/base:current +workflows: + prb: + jobs: + - hold: + type: approval + - pr-approval/authenticate: + context: pkl-pr-approval + - build: + requires: + - hold + - pr-approval/authenticate + when: + matches: + value: << pipeline.git.branch >> + pattern: ^pull/\d+(/head)?$ + main: + jobs: + - build + - release: + requires: + - build + context: pkl-github-release + - trigger-package-docs-build: + requires: + - release + context: + - pkl-pr-approval + when: + equal: + - main + - << pipeline.git.branch >> diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..097f9f9 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,9 @@ +# +# https://help.github.com/articles/dealing-with-line-endings/ +# +# Linux start script should use lf +/gradlew text eol=lf + +# These are Windows script files and should use crlf +*.bat text eol=crlf + diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2efaf4b --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +.idea/ + +# Ignore Gradle project-specific cache directory +.gradle + +# Ignore Gradle build output directory +build + +.DS_Store + +.out/ diff --git a/CODE_OF_CONDUCT.adoc b/CODE_OF_CONDUCT.adoc new file mode 100644 index 0000000..3dee37a --- /dev/null +++ b/CODE_OF_CONDUCT.adoc @@ -0,0 +1,78 @@ +== Code of Conduct + +=== Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our +project and our community a harassment-free experience for everyone, +regardless of age, body size, disability, ethnicity, sex +characteristics, gender identity and expression, level of experience, +education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. 
+ +=== Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual +attention or advances +* Trolling, insulting/derogatory comments, and personal or political +attacks +* Public or private harassment +* Publishing others’ private information, such as a physical or +electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +=== Our Responsibilities + +Project maintainers are responsible for clarifying the standards of +acceptable behavior and are expected to take appropriate and fair +corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, +or reject comments, commits, code, wiki edits, issues, and other +contributions that are not aligned to this Code of Conduct, or to ban +temporarily or permanently any contributor for other behaviors that they +deem inappropriate, threatening, offensive, or harmful. + +=== Scope + +This Code of Conduct applies within all project spaces, and it also +applies when an individual is representing the project or its community +in public spaces. Examples of representing a project or community +include using an official project e-mail address, posting via an +official social media account, or acting as an appointed representative +at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +=== Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may +be reported by contacting the open source team at +opensource-conduct@group.apple.com. All complaints will be reviewed and +investigated and will result in a response that is deemed necessary and +appropriate to the circumstances. The project team is obligated to +maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted +separately. + +Project maintainers who do not follow or enforce the Code of Conduct in +good faith may face temporary or permanent repercussions as determined +by other members of the project’s leadership. + +=== Attribution + +This Code of Conduct is adapted from the +https://www.contributor-covenant.org[Contributor Covenant], version 1.4, +available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html diff --git a/CONTRIBUTING.adoc b/CONTRIBUTING.adoc new file mode 100644 index 0000000..902dd78 --- /dev/null +++ b/CONTRIBUTING.adoc @@ -0,0 +1,52 @@ +:uri-github-issue-pkl: https://github.com/apple/pkl-pantry/issues/new +:uri-seven-rules: https://cbea.ms/git-commit/#seven-rules + += Pkl Pantry Contributors Guide + +Welcome to the Pkl community, and thank you for contributing! +This guide explains how to get involved. + +* <> +* <> +* <> + +== Licensing + +Pkl Pantry is released under the Apache 2.0 license. 
+This is why we require that, by submitting a pull request, you acknowledge that you have the right to license your contribution to Apple and the community, and agree that your contribution is licensed under the Apache 2.0 license. + +== Issue Tracking + +To file a bug or feature request, use {uri-github-issue-pkl}[GitHub]. +Be sure to include the following information: + +* Context +** What are/were you trying to achieve? +** What's the impact of this bug/feature? + +For bug reports, additionally include the following information: + +* The output of `pkl --version`. +* The complete error message. +* The simplest possible steps to reproduce. +* Output produced from the template. +* Error messages from the target system. + +== Pull Requests + +When preparing a pull request, follow this checklist: + +* Imitate the conventions of surrounding code. +* Format the files in your PR with the JetBrains IDE formatter. +* Follow the {uri-seven-rules}[seven rules] of great Git commit messages: +** Separate subject from body with a blank line. +** Limit the subject line to 50 characters. +** Capitalize the subject line. +** Do not end the subject line with a period. +** Use the imperative mood in the subject line. +** Wrap the body at 72 characters. +** Use the body to explain what and why vs. how. + +== Maintainers + +The project’s maintainers (those with write access to the upstream repository) are listed in link:MAINTAINERS.adoc[]. diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MAINTAINERS.adoc b/MAINTAINERS.adoc new file mode 100644 index 0000000..b79b8d2 --- /dev/null +++ b/MAINTAINERS.adoc @@ -0,0 +1,11 @@ += MAINTAINERS + +This page lists all active Maintainers of this repository. + +See link:CONTRIBUTING.adoc[] for general contribution guidelines. + +== Maintainers (in alphabetical order) + +* https://github.com/bioball[Daniel Chao] +* https://github.com/stackoverflow[Islon Scherer] +* https://github.com/holzensp[Philip Hölzenspies] diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 0000000..eb66b11 --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,17 @@ +Copyright © 2024 Apple Inc. and the Pkl project authors + +Portions of this software includes code from "Gradle" by Gradle, Inc. + +Copyright 2015 the original author or authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/README.adoc b/README.adoc new file mode 100644 index 0000000..bfba5cf --- /dev/null +++ b/README.adoc @@ -0,0 +1,34 @@ += Pkl Pantry + +This is a monorepo of packages that are maintained and published by the Pkl team. + +== Using packages + +Packages within this repository are published as `package://pkg.pkl-lang.org/pkl-pantry/@`. + +=== Direct imports + +Modules from package can be imported directly. For example, the below line imports module `toml.pkl` from package link:packages/pkl.toml/[`pkl.toml`] at version `1.0.0`: + +[source,pkl] +---- +import "package://pkg.pkl-lang.org/pkl-pantry/pkl.toml@1.0.0#/toml.pkl" +---- + +=== In a project + +If using a link:https://pkl-lang.org/main/current/language-reference/index.html#projects[project], add a package as a dependency: + +[source,pkl] +---- +amends "pkl:Project" + +dependencies { + ["toml"] { uri = "package://pkg.pkl-lang.org/pkl-pantry/pkl.toml@1.0.0" } +} +---- + +== Documentation + +The documentation for all packages published here can be found in our +link:https://pkl-lang.org/package-docs[Package Docs] website. diff --git a/SECURITY.adoc b/SECURITY.adoc new file mode 100644 index 0000000..a002851 --- /dev/null +++ b/SECURITY.adoc @@ -0,0 +1,13 @@ += Security + +For the protection of our community, the Pkl team does not disclose, discuss, or confirm security issues until our investigation is complete and any necessary updates are generally available. + +== Reporting a security vulnerability + +If you have discovered a security vulnerability within the Pkl Pantry project, please report it to us. +We welcome reports from everyone, including security researchers, developers, and users. + +Security vulnerabilities may be reported on the link:https://security.apple.com/submit[Report a vulnerability] form. +When submitting a vulnerability, select "Apple Devices and Software" as the affected platform, and "Open Source" as the affected area. + +For more information, see https://pkl-lang.org/security.html. diff --git a/build.gradle.kts b/build.gradle.kts new file mode 100644 index 0000000..88900e3 --- /dev/null +++ b/build.gradle.kts @@ -0,0 +1,180 @@ +import org.pkl.core.Version +import java.io.OutputStream +import java.net.URI +import java.nio.file.Files +import java.nio.file.Path +import javax.net.ssl.HttpsURLConnection +import kotlin.io.path.isDirectory +import kotlin.math.ceil +import kotlin.math.log10 + +plugins { + kotlin("jvm").version(libs.versions.kotlin) + alias(libs.plugins.pkl) + alias(libs.plugins.spotless) +} + +spotless { + kotlin { + licenseHeader(""" + /** + * Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + """.trimIndent()) + } + format("pkl") { + licenseHeader(""" + //===----------------------------------------------------------------------===// + // Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. + // + // Licensed under the Apache License, Version 2.0 (the "License"); + // you may not use this file except in compliance with the License. + // You may obtain a copy of the License at + // + // https://www.apache.org/licenses/LICENSE-2.0 + // + // Unless required by applicable law or agreed to in writing, software + // distributed under the License is distributed on an "AS IS" BASIS, + // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + // See the License for the specific language governing permissions and + // limitations under the License. + //===----------------------------------------------------------------------===// + """.trimIndent(), "(/// |/\\*\\*|module |import |amends |(\\w+))") + target("**/*.pkl", "**/PklProject") + } +} + +kotlin { + jvmToolchain(17) +} + +repositories { + mavenCentral() +} + +dependencies { + testImplementation(libs.pklCore) + testImplementation(libs.junitEngine) + testImplementation(libs.junitParams) +} + +val repositoryUrl = "https://github.com/apple/pkl-pantry" + +val repositoryApiUrl = repositoryUrl.replace(Regex("github.com/"), "api.github.com/repos/") + +val projectDirs: List = + Files.list(Path.of("packages")) + .filter { it.isDirectory() } + .map { it.toFile() } + .toList() + +val outputDir = layout.buildDirectory + +pkl { + project { + resolvers { + register("resolveProjects") { + projectDirectories.from(projectDirs) + } + } + packagers { + register("createPackages") { + projectDirectories.from(projectDirs) + outputPath.set(outputDir.dir("generated/packages/%{name}/%{version}")) + junitReportsDir.set(outputDir.dir("test-results")) + } + } + } +} + +val resolveProjects = tasks.named("resolveProjects") { + group = "build" +} + +val createPackages = tasks.named("createPackages") { + group = "build" + dependsOn.add(resolveProjects) +} + +val isInCircleCi = System.getenv("CIRCLE_PROJECT_REPONAME") != null + +val prepareCiGit by tasks.registering { + enabled = isInCircleCi + exec { + commandLine("git", "config", "user.email", "pkl-oss@groups.apple.com") + } + exec { + commandLine("git", "config", "user.name", "The Pkl Team (automation)") + } +} + +repositories { + mavenCentral() +} + +val prepareReleases by tasks.registering { + group = "build" + dependsOn(createPackages, prepareCiGit) + inputs.files(projectDirs) + + doLast { + val releaseDir = file(outputDir.dir("releases")) + releaseDir.deleteRecursively() + val count = projectDirs.count() + val fmt = "%${ceil(log10(count.toDouble())).toInt()}d" + for (i in projectDirs.indices) { + val dir = projectDirs[i] + print(" [${fmt.format(i + 1)}/$count] $dir: ") + val allVersions = file(outputDir.dir("generated/packages/${dir.name}")).list() + if (allVersions == null) { + println("∅") + continue + } + val latestVersion = allVersions.map(Version::parse).sortedWith(Version.comparator()).last() + val pkg = "${dir.name}@$latestVersion" + print("$pkg: ") + val conn = URI("${repositoryUrl}/releases/tag/${dir.name}@$latestVersion") + .toURL() + .openConnection() as HttpsURLConnection + if (conn.responseCode == 200) { + println("⏩") + continue + } + val taskOutput = StringBuilder() + exec { + commandLine("git", "tag", "-l", pkg) + 
logging.addStandardOutputListener { taskOutput.append(it) } + standardOutput = OutputStream.nullOutputStream() + } + if (taskOutput.contains(pkg)) { + println("☑️") + continue + } + for (artifact in file(outputDir.dir("generated/packages/${dir.name}/$latestVersion")).listFiles()!!) { + artifact.copyTo(releaseDir.resolve("$pkg/${artifact.name}"), true) + } + println("✅") + } + } +} + +tasks.test { + useJUnitPlatform() + dependsOn(createPackages) +} + +tasks.build { + dependsOn(prepareReleases) +} diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml new file mode 100644 index 0000000..545eee4 --- /dev/null +++ b/gradle/libs.versions.toml @@ -0,0 +1,14 @@ +[versions] +pkl = "0.25.1" +kotlin = "1.9.0" +junit = "5.10.0" +spotless = "6.25.0" + +[libraries] +junitEngine = { group = "org.junit.jupiter", name = "junit-jupiter-engine", version.ref = "junit" } +junitParams = { group = "org.junit.jupiter", name = "junit-jupiter-params", version.ref = "junit" } +pklCore = { group = "org.pkl-lang", name = "pkl-core", version.ref = "pkl" } + +[plugins] +pkl = { id = "org.pkl-lang", version.ref = "pkl" } +spotless = { id = "com.diffplug.spotless", version.ref = "spotless" } diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000..c1962a7 Binary files /dev/null and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..3499ded --- /dev/null +++ b/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-bin.zip +networkTimeout=10000 +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew new file mode 100755 index 0000000..aeb74cb --- /dev/null +++ b/gradlew @@ -0,0 +1,245 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». 
+# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. 
+ # shellcheck disable=SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 0000000..93e3f59 --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,92 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. 
+@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/packages/basePklProject.pkl b/packages/basePklProject.pkl new file mode 100644 index 0000000..099081b --- /dev/null +++ b/packages/basePklProject.pkl @@ -0,0 +1,61 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module basePklProject + +amends "pkl:Project" + +import "pkl:reflect" + +local myModule = reflect.Module(module) + +local packageName: String = + findRootModule(reflect.Module(module)) + .relativePathTo(module) + .last + +local function findRootModule(mod: reflect.Module): Module = + let (supermodule = mod.supermodule) + if (supermodule == null || !supermodule.isAmend) mod.reflectee + else findRootModule(supermodule) + +local allTests = import*("**/tests/**.pkl").keys.filter((it) -> !it.contains("tests/fixtures/")) + +package { + name = packageName + apiTests = tests // api tests are shared with module tests + baseUri = "package://pkg.pkl-lang.org/pkl-pantry/\(name)" + packageZipUrl = "https://github.com/apple/pkl-pantry/releases/download/\(name)@\(version)/\(name)@\(version).zip" + license = "Apache-2.0" + authors { + "The Pkl Authors " + } + exclude { + "examples/**" + "tests/**" + } + description = myModule.docComment + issueTracker = "https://github.com/apple/pkl-pantry/issues" + sourceCode = "https://github.com/apple/pkl-pantry/tree/\(name)@\(version)/packages/\(name)" + sourceCodeUrlScheme = "https://github.com/apple/pkl-pantry/blob/\(name)@\(version)/packages/\(name)%{path}#L%{line}-%{endLine}" +} + +tests { + for (test in allTests) { + when (test.split("/").first == package.name) { + test.replaceFirst(package.name, ".") + } + } +} diff --git a/packages/com.circleci.v2/Config.pkl b/packages/com.circleci.v2/Config.pkl new file mode 100644 index 0000000..ff50b54 --- /dev/null +++ b/packages/com.circleci.v2/Config.pkl @@ -0,0 +1,726 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Schema for CircleCI's `config.yml`. +/// +/// This module is a work in progress, if you find some unsuported feature +/// feel free to add it. +module com.circleci.v2.Config + +/// Version of the `config.yml` schema to target. +/// +/// Currently, only version 2.1 is supported by this template. +version: "2.1" + +/// Designates the config.yaml for use of CircleCI’s +/// [dynamic configuration](https://circleci.com/docs/dynamic-config/) feature. +setup: Boolean? + +/// A map of user-selected names to either: orb references (strings) or orb definitions (maps). +/// +/// Orb definitions must be the orb-relevant subset of 2.1 config. +/// See the [Creating Orbs](https://circleci.com/docs/creating-orbs/) documentation for details. +orbs: Mapping? + +/// A command defines a sequence of steps as a map to be executed in a job, enabling you to reuse +/// a single command definition across multiple jobs. 
+/// +/// For more information see the +/// [Reusable Config Reference Guide](https://circleci.com/docs/reusing-config/). +commands: Mapping? + +/// A Workflow is comprised of one or more uniquely named jobs. +/// +/// Jobs are specified in the jobs map, see +/// [Sample config.yml](https://circleci.com/docs/sample-config/) for two examples of a job map. +/// The name of the job is the key in the map, and the value is a map describing the job. +jobs: Mapping? + +/// Used for orchestrating all jobs. +/// +/// Each workflow consists of the workflow name as a key and a map as a value. +/// A name should be unique within the current config.yml. +/// The top-level keys for the Workflows configuration are version and jobs. +/// For more information, see the +/// [Using Workflows to Orchestrate Jobs](https://circleci.com/docs/workflows/) page. +workflows: Mapping? + +/// Pipeline parameters declared for use in the configuration. +/// +/// See [Pipeline Values and Parameters](https://circleci.com/docs/pipeline-variables/#pipeline-parameters-in-configuration) +/// for usage details. +parameters: Mapping? + +class Orb { + /// A map of user-selected names to either: orb references (strings) or orb definitions (maps). + /// + /// Orb definitions must be the orb-relevant subset of 2.1 config. + /// See the [Creating Orbs](https://circleci.com/docs/creating-orbs/) documentation for details. + orbs: Mapping? + + /// A command defines a sequence of steps as a map to be executed in a job, enabling you to reuse + /// a single command definition across multiple jobs. + /// + /// For more information see the + /// [Reusable Config Reference Guide](https://circleci.com/docs/reusing-config/). + commands: Mapping? + + /// A Workflow is comprised of one or more uniquely named jobs. + /// + /// Jobs are specified in the jobs map, see + /// [Sample config.yml](https://circleci.com/docs/sample-config/) for two examples of a job map. + /// The name of the job is the key in the map, and the value is a map describing the job. + jobs: Mapping? +} + +class Job { + /// Shell to use for execution command in all steps. + /// + /// Can be overridden by shell in each step (default: See Default Shell Options) + shell: String? + + /// A list of [steps](https://circleci.com/docs/configuration-reference/#steps) to be performed + steps: Listing(!isEmpty) + + /// In which directory to run the steps. + /// + /// Will be interpreted as an absolute path. + /// + /// Default: `~/project` (where project is a literal string, not the name of your specific project). + /// + /// Processes run during the job can use the `$CIRCLE_WORKING_DIRECTORY` environment variable to + /// refer to this directory. + /// + /// Note: Paths written in your YAML configuration file will not be expanded; if your + /// store_test_results.path is $CIRCLE_WORKING_DIRECTORY/tests, then CircleCI will attempt to + /// store the test subdirectory of the directory literally named $CIRCLE_WORKING_DIRECTORY, + /// dollar sign $ and all. working_directory will be created automatically if it doesn’t exist. + working_directory: String? + + /// Number of parallel instances of this job to run (default: 1) + parallelism: Int? + + /// A map of environment variable names and values. + environment: Mapping? + + /// The [resource_class](https://circleci.com/docs/resource-class-overview/) feature allows you to configure CPU and RAM resources for each job. + /// + /// Resource classes are available for each execution environment, as described in the tables below. 
+ /// + /// We implement soft concurrency limits for each resource class to ensure our system remains + /// stable for all customers. + /// If you are on a Performance or custom plan and experience queuing for certain resource + /// classes, it is possible you are hitting these limits. + /// Contact [CircleCI support](https://support.circleci.com/hc/en-us/requests/new) to request a + /// raise on these limits for your account. + /// + /// If you do not specify a resource class, CircleCI will use a default value that is subject + /// to change. + /// It is best practice to specify a resource class as opposed to relying on a default. + resource_class: ResourceClass? + + /// Options for [docker executor](https://circleci.com/docs/configuration-reference/#docker) + docker: Listing(!isEmpty)?(onlyOneSet(List(this, macos, machine))) + + /// CircleCI supports running jobs on macOS, to allow you to build, test, and deploy apps for + /// macOS, iOS, tvOS and watchOS. + /// + /// To run a job in a macOS virtual machine, add the macos key to the top-level configuration + /// for your job and specify the version of Xcode you would like to use. + macos: MacOSExecutor? + + /// Options for [machine executor](https://circleci.com/docs/configuration-reference/#machine) + machine: Machine? +} + +typealias ResourceClass = + "small" + |"medium" + |"medium+" + |"large" + |"xlarge" + |"2xlarge" + |"2xlarge+" + |"arm.medium" + |"arm.large" + |"arm.xlarge" + |"arm.2xlarge" + |"macos.m1.medium.gen1" + |"macos.m1.large.gen1" + +class MacOSExecutor { + /// The version of Xcode that is installed on the virtual machine, see the + /// [Supported Xcode Versions section of the Testing iOS document](https://circleci.com/docs/using-macos/#supported-xcode-versions) + /// for the complete list. + xcode: String +} + +class Machine { + /// The virtual machine image to use. + /// + /// View [available images](https://circleci.com/developer/images?imageType=machine). + /// + /// Note: This key is *not* supported for Linux VMs on installations of CircleCI server. + /// For information about customizing `machine` executor images on CircleCI installed on your + /// servers, see our [VM Service documentation](https://circleci.com/docs/server/v4.1/operator/manage-virtual-machines-with-vm-service/) + image: String + + /// Set this to `true` to enable [Docker Layer Caching](https://circleci.com/docs/docker-layer-caching/). + docker_layer_caching: Boolean? +} + +class DockerImage { + /// The name of a custom docker image to use. + /// + /// The first `image` listed under a job defines the job’s own primary container image where all + /// steps will run. + image: String + + /// `name` defines the the hostname for the container (the default is `localhost`), which is used + /// for reaching secondary (service) containers. + /// + /// By default, all services are exposed directly on `localhost`. + /// This field is useful if you would rather have a different hostname instead of `localhost`, + /// for example, if you are starting multiple versions of the same service. + name: String? + + /// The command used as executable when launching the container. + /// + /// [entrypoint] overrides the image’s `ENTRYPOINT` + entrypoint: (*Listing|String)? + + /// The command used as pid 1 (or args for entrypoint) when launching the container. + /// + /// [command] overrides the image’s `COMMAND`. + /// It will be used as arguments to the image `ENTRYPOINT` if it has one, or as the executable + /// if the image has no `ENTRYPOINT`. 
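  ///
  /// For example, a secondary service container might be given an explicit command
  /// (illustrative sketch; the image name and arguments are placeholders):
  ///
  /// ```
  /// docker {
  ///   new { image = "cimg/openjdk:17.0" }
  ///   new {
  ///     image = "redis:7.2"
  ///     command = new Listing {
  ///       "redis-server"
  ///       "--port"
  ///       "6380"
  ///     }
  ///   }
  /// }
  /// ```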
+ command: (*Listing|String)? + + /// Which user to run commands as within the Docker container + user: String? + + /// environment variable names and values. + /// + /// The [environment] settings apply to the entrypoint/command run by the docker container, not + /// the job steps + environment: Mapping? + + /// Authentication for registries using standard docker login credentials + auth: Mapping? + + /// Authentication for AWS Elastic Container Registry (ECR) + aws_auth: Mapping? +} + +class ScheduleTrigger { + /// The cron key is defined using POSIX crontab syntax + @SourceCode { language = "cronexp" } + cron: String + + /// Trigger Filters can have the key branches. + filters: ScheduleTriggerFilter +} + +class ScheduleTriggerFilter { + /// The `branches` key controls whether the _current_ branch should have a schedule trigger + /// created for it, where _current_ branch is the branch containing the `config.yml` file with + /// the trigger stanza. + /// That is, a push on the `main` branch will only schedule a + /// [workflow](https://circleci.com/docs/workflows/#using-contexts-and-filtering-in-your-workflows) + /// for the `main` branch. + /// + /// Branches can have the keys only and ignore which each map to a single string naming a branch. + /// You may also use regular expressions to match against branches by enclosing them with `/`’s, + /// or map to a list of such strings. Regular expressions must match the **entire** string. + /// + /// * Any branches that match only will run the job. + /// * Any branches that match ignore will not run the job. + /// * If neither only nor ignore are specified then all branches will run the job. + /// If both only and ignore are specified, the only is used and ignore will have no effect. + branches: ScheduleTriggerFilterBranches +} + +class ScheduleTriggerFilterBranches { + /// Either a single branch specifier, or a list of branch specifiers + only: *Listing|String + + /// Either a single branch specifier, or a list of branch specifiers + ignore: (*Listing|String)? +} + +class Workflow { + /// A job can have the keys `requires`, `name`, `context`, `type`, and `filters`. + jobs: Listing<*Mapping(length == 1)|String>(!isEmpty) + + /// Specifies which triggers will cause this workflow to be executed. + /// + /// Default behavior is to trigger the workflow when pushing to a branch + triggers: Listing? + + `when`: (*LogicStatement|Boolean|String)? + + `unless`: (*LogicStatement|Boolean|String)? +} + +class WorkflowJob { + /// The name key can be used to invoke reusable jobs across any number of workflows. + /// + /// Using the name key ensures numbers are not appended to your job name (i.e. sayhello-1, + /// sayhello-2, etc.). + /// The name you assign to the name key needs to be unique, otherwise the numbers will still be + /// appended to the job name. + name: String? + + /// A list of jobs that must succeed for the job to start. + /// + /// Note: When jobs in the current workflow that are listed as dependencies are not executed + /// (due to a filter function for example), their requirement as a dependency for other jobs will + /// be ignored by the requires option. + /// However, if all dependencies of a job are filtered, then that job will not be executed either. + requires: Listing? + + /// The name of the context(s). + /// + /// Jobs may be configured to use global environment variables set for an organization, see the + /// [Contexts](https://circleci.com/docs/contexts/) document for adding a context in the + /// application settings. 
+ ///
+ /// The initial default name is org-global.
+ /// Each context name must be unique.
+ /// If using CircleCI Server, only a single Context per workflow is supported.
+ /// Note: A maximum of 100 unique contexts across all workflows is allowed
+ context: (*Listing<String>|String)?
+
+ /// A job may have a type of `approval` indicating it must be manually approved before downstream
+ /// jobs may proceed.
+ ///
+ /// For more information see the Using Workflows to Orchestrate Jobs page.
+ type: "approval"?
+
+ /// Job Filters can have the key branches or tags
+ filters: JobFilters?
+}
+
+class JobFilters {
+ /// Branches can have the keys only and ignore which each map to a single string naming a branch.
+ /// You may also use regular expressions to match against branches by enclosing them with slashes,
+ /// or map to a list of such strings.
+ /// Regular expressions must match the *entire* string.
+ ///
+ /// Any branches that match `only` will run the job.
+ /// Any branches that match `ignore` will not run the job.
+ /// If neither `only` nor `ignore` are specified then all branches will run the job.
+ /// If both `only` and `ignore` are specified the `only` is considered before `ignore`.
+ branches: FilterSpec?
+
+ /// CircleCI does not run workflows for tags unless you explicitly specify tag filters.
+ /// Additionally, if a job requires any other jobs (directly or indirectly), you must specify tag
+ /// filters for those jobs.
+ ///
+ /// Tags can have the keys only and ignore.
+ /// You may also use regular expressions to match against tags by enclosing them with slashes,
+ /// or map to a list of such strings.
+ /// Regular expressions must match the entire string.
+ /// Both lightweight and annotated tags are supported.
+ ///
+ /// Any tags that match only will run the job.
+ /// Any tags that match ignore will not run the job.
+ /// If neither only nor ignore are specified then the job is skipped for all tags.
+ /// If both only and ignore are specified the only is considered before ignore.
+ tags: FilterSpec?
+}
+
+class FilterSpec {
+ /// Either a single branch specifier, or a list of branch specifiers
+ only: (String|Listing<String>)?(this != null || ignore != null)
+
+ /// Either a single branch specifier, or a list of branch specifiers
+ ignore: (String|Listing<String>)?
+}
+
+typealias Step = AbstractStep|SimpleStepName
+
+typealias SimpleStepName = "checkout"|"setup_remote_docker"|"add_ssh_keys"|String
+
+local abstract class AbstractStep {
+ fixed hidden __name__: String
+}
+
+function run(_command: String): RunStep = new {
+ command = _command
+}
+
+/// Used for invoking all command-line programs.
+///
+/// Run commands are executed using non-login shells by default, so you must explicitly source any
+/// dotfiles as part of the command.
+class RunStep extends AbstractStep {
+ fixed hidden __name__ = "run"
+
+ /// Command to run via the shell
+ command: String(!isEmpty)
+
+ /// Title of the step to be shown in the CircleCI UI (default: full [command])
+ name: String?
+
+ /// Shell to use for execution command (default: See [Default Shell Options](https://circleci.com/docs/configuration-reference/#default-shell-options))
+ shell: String?
+
+ /// Additional environment variables, locally scoped to command
+ environment: Mapping<String, String>?
+
+ /// Whether or not this step should run in the background (default: [false])
+ background: Boolean?
+
+ /// In which directory to run this step.
+ ///
+ /// Will be interpreted relative to the working_directory
+ /// of the job. (default: `.`)
+ working_directory: String?
+
+ /// Elapsed time the command can run without output.
+ ///
+ /// The default is 10 minutes and the maximum is governed by the maximum time a job is allowed to run.
+ no_output_timeout: Duration?
+
+ /// Specify when to enable or disable the step.
+ ///
+ /// Takes the following values: `always`, `on_success`, `on_fail` (default: on_success)
+ `when`: ("always"|"on_success"|"on_fail")?
+}
+
+/// Generates and stores a cache of a file or directory of files such as dependencies or source
+/// code in our object storage.
+///
+/// Later jobs can [restore this cache](https://circleci.com/docs/configuration-reference/#restore_cache).
+/// Learn more on the [Caching Dependencies](https://circleci.com/docs/caching/) page.
+///
+/// Cache retention can be customized on the [CircleCI web app](https://app.circleci.com/) by
+/// navigating to Plan > Usage Controls.
+class SaveCacheStep extends AbstractStep {
+ fixed hidden __name__ = "save_cache"
+
+ /// List of directories which should be added to the cache
+ paths: Listing<String>
+
+ /// Unique identifier for this cache
+ key: String
+
+ /// Title of the step to be shown in the CircleCI UI (default: “Saving Cache”)
+ name: String?
+
+ /// [Specify when to enable or disable the step](https://circleci.com/docs/configuration-reference/#the-when-attribute).
+ ///
+ /// Takes the following values: always, on_success, on_fail (default: on_success)
+ `when`: ("always"|"on_success"|"on_fail")?
+}
+
+/// Restores a previously saved cache based on a key.
+///
+/// Cache needs to have been saved first for this key using the [save_cache] step.
+///
+/// Learn more in [the caching documentation](https://circleci.com/docs/caching/).
+class RestoreCacheStep extends AbstractStep {
+ fixed hidden __name__ = "restore_cache"
+
+ /// Single cache key to restore
+ key: String?(this != null || keys != null)
+
+ /// List of cache keys to lookup for a cache to restore.
+ ///
+ /// Only the first existing key will be restored.
+ keys: Listing<String>?
+
+ /// Title of the step to be shown in the CircleCI UI (default: “Restoring Cache”)
+ name: String?
+}
+
+/// Allows Docker commands to be run locally.
+///
+/// See [Running Docker Commands](https://circleci.com/docs/building-docker-images/) for details.
+class SetupRemoteDockerStep extends AbstractStep {
+ fixed hidden __name__ = "setup_remote_docker"
+
+ /// Version string of Docker you would like to use (default: 20.10.17).
+ ///
+ /// View the list of supported docker versions
+ /// [here](https://circleci.com/docs/building-docker-images/#docker-version).
+ version: String
+
+ /// Set this to true to enable [Docker Layer Caching](https://circleci.com/docs/docker-layer-caching/)
+ /// in the Remote Docker Environment (default: false)
+ docker_layer_caching: Boolean
+}
+
+/// Special step used to persist a temporary file to be used by another job in the workflow.
+/// For more information on using workspaces, see the
+/// [Using Workspaces to Share Data Between Jobs](https://circleci.com/docs/workspaces/) page.
+///
+/// `persist_to_workspace` adopts the storage settings from the storage customization controls on
+/// the CircleCI web app.
+/// If no custom setting is provided, `persist_to_workspace` defaults to 15 days.
+///
+/// Workspace storage retention can be customized on the CircleCI web app by navigating to Plan > Usage Controls.
+class PersistToWorkspaceStep extends AbstractStep { + fixed hidden __name__ = "persist_to_workspace" + + /// Either an absolute path or a path relative to `working_directory` + root: String + + /// Glob identifying file(s), or a non-glob path to a directory to add to the shared workspace. + /// + /// Interpreted as relative to the workspace root. + /// Must not be the workspace root itself + paths: Listing +} + +/// Special step used to attach the workflow’s workspace to the current container. +/// +/// The full contents of the workspace are downloaded and copied into the directory the workspace +/// is being attached at. +/// For more information on using workspaces, see the +/// [Using Workspaces to Share Data Between Jobs](https://circleci.com/docs/workspaces/) page. +class AttachWorkspaceStep extends AbstractStep { + fixed hidden __name__ = "attach_workspace" + + /// Directory to attach the workspace to. + at: String +} + +/// A conditional step consists of a step with the key `when`. +/// +/// Under the `when` key are the subkeys `condition` and `steps`. +/// The purpose of the `when` step is customizing commands and job configuration to run on custom +/// conditions (determined at config-compile time) that are checked before a workflow runs. +/// See the [Conditional Steps section of the Reusing Config](https://circleci.com/docs/reusing-config/#defining-conditional-steps) +/// document for more details. +class WhenStep { + fixed hidden __name__ = "when" + + /// The logic statement that determines whether to execute. + condition: (*LogicStatement|Boolean|String)? + + /// A list of steps to execute when the condition is true + steps: Listing? +} + +/// A conditional step consists of a step with the key `unless`. +/// +/// Under the `unless` key are the subkeys `condition` and `steps`. +/// The purpose of the `unless` step is customizing commands and job configuration to run on custom +/// conditions (determined at config-compile time) that are checked before a workflow runs. +/// See the [Conditional Steps section of the Reusing Config](https://circleci.com/docs/reusing-config/#defining-conditional-steps) +/// document for more details. +class UnlessStep { + fixed hidden __name__ = "unless" + + /// The logic statement that determines whether to execute. + condition: (*LogicStatement|Boolean|String)? + + /// A list of steps to execute when the condition is true + steps: Listing? +} + +class Command { + /// A sequence of steps run inside the calling job of the command. + steps: Listing(!isEmpty) + + /// A map of parameter keys. + /// + /// See the [Parameter Syntax](https://circleci.com/docs/reusing-config/#parameter-syntax) + /// section of the [Reusing Config](https://circleci.com/docs/reusing-config/) document for details. + parameters: Mapping? + + /// A string that describes the purpose of the command. + description: String? +} + +class Parameter { + /// Optional. Used to generate documentation for your orb. + description: String? + + /// The default value for the parameter. If not present, the parameter is implied to be required. + default: (String|Number|Boolean)? + + /// Required. See [Parameter Types](https://circleci.com/docs/reusing-config/#parameter-types) + /// for details. + type: "string"|"boolean"|"integer"|"enum"|"executor"|"steps"|"env_var_name" +} + +/// Special step used to upload and store test results for a build. +/// +/// Test results are visible on the CircleCI web application under each build’s *Test Summary* +/// section. 
+/// Storing test results is useful for timing analysis of your test suites. +/// For more information on storing test results, see the +/// [Collecting Test Data](https://circleci.com/docs/collect-test-data/) page. +/// +/// It is also possible to store test results as a build artifact; to do so, please refer to the +/// [store_artifacts] step. +class StoreTestResults extends AbstractStep { + fixed hidden __name__ = "store_test_results" + + /// Path (absolute, or relative to your `working_directory`) to directory containing JUnit XML + /// test metadata files, or to a single test file. + path: String +} + +/// Step to store artifacts (for example logs, binaries, etc) to be available in the web app +/// or through the API. +/// +/// See the [Uploading Artifacts](https://circleci.com/docs/artifacts/) document for more information. +class StoreArtifacts extends AbstractStep { + fixed hidden __name__ = "store_artifacts" + + /// Directory in the primary container to save as job artifacts + path: String + + /// Prefix added to the artifact paths in the artifacts API (default: the directory of the file + /// specified in path) + destination: String? +} + +/// Certain dynamic configuration features accept logic statements as arguments. +/// +/// Logic statements are evaluated to boolean values at configuration compilation time, that is, +/// before the workflow is run. +class LogicStatement { + /// True if all arguments are truthy. + and: Listing<*LogicStatement|Boolean|String>?(onlyOneSet(List(this, or, not, equal, matches))) + + /// True if any arguments are truthy. + or: Listing<*LogicStatement|Boolean|String>? + + /// True if the argument is not truthy. + not: (*LogicStatement|Boolean|String)? + + /// True if all arguments evaluate to equal values. + equal: Listing? + + /// True if [value][Match.value] matches the [pattern][Match.pattern]. + matches: Match? +} + +/// Tells if only one of the values is not null. +local const function onlyOneSet(values: List): Boolean = + values.filter((it) -> it != null).length == 1 + +class Match { + /// The value to match against + value: String + + /// A [Java regular expression](https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html) + /// used to test against the value. + /// + /// A full match pattern must be provided, prefix matching is not an option. + /// Though, it is recommended to enclose a pattern in `^` and `$` to avoid accidental partial matches. + @SourceCode { language = "RegExp" } + pattern: String +} + +typealias CalledCommand = Mapping(length == 1) + +typealias CommandCall = Mapping + +class PipelineValues { + /// A globally unique id representing for the pipeline. + /// + /// Type: string + `pipeline.id`: "<< pipeline.id >>" + + /// A project unique integer id for the pipeline. + /// + /// Type: integer + `pipeline.number`: "<< pipeline.number >>" + + /// The URL where the current project is hosted. + /// + /// For example, `https://github.com/circleci/circleci-docs`. 
+ `pipeline.project.git_url`: "<< pipeline.project.git_url >>" + `pipeline.project.type`: "<< pipeline.project.type >>" + `pipeline.git.tag`: "<< pipeline.git.tag >>" + `pipeline.git.branch`: "<< pipeline.git.branch >>" + `pipeline.git.revision`: "<< pipeline.git.revision >>" + `pipeline.git.base_revision`: "<< pipeline.git.base_revision >>" + `pipeline.in_setup`: "<< pipeline.in_setup >>" + `pipeline.trigger_source`: "<< pipeline.trigger_source >>" + `pipeline.schedule.name`: "<< pipeline.schedule.name >>" + `pipeline.schedule.id`: "<< pipeline.schedule.id >>" + `pipeline.trigger_parameters.circleci.trigger_type`: "<< pipeline.trigger_parameters.circleci.trigger_type >>" + `pipeline.trigger_parameters.circleci.event_time`: "<< pipeline.trigger_parameters.circleci.event_time >>" + `pipeline.trigger_parameters.circleci.event_type`: "<< pipeline.trigger_parameters.circleci.event_type >>" + `pipeline.trigger_parameters.circleci.project_id`: "<< pipeline.trigger_parameters.circleci.project_id >>" + `pipeline.trigger_parameters.circleci.actor_id`: "<< pipeline.trigger_parameters.circleci.actor_id >>" + `pipeline.trigger_parameters.gitlab.type`: "<< pipeline.trigger_parameters.gitlab.type >>" + `pipeline.trigger_parameters.github_app.type`: "<< pipeline.trigger_parameters.github_app.type >>" + `pipeline.trigger_parameters.gitlab.project_id`: "<< pipeline.trigger_parameters.gitlab.project_id >>" + `pipeline.trigger_parameters.github_app.project_id`: "<< pipeline.trigger_parameters.github_app.project_id >>" + `pipeline.trigger_parameters.gitlab.ref`: "<< pipeline.trigger_parameters.gitlab.ref >>" + `pipeline.trigger_parameters.github_app.ref`: "<< pipeline.trigger_parameters.github_app.ref >>" + `pipeline.trigger_parameters.gitlab.checkout_sha`: "<< pipeline.trigger_parameters.gitlab.checkout_sha >>" + `pipeline.trigger_parameters.github_app.checkout_sha`: "<< pipeline.trigger_parameters.github_app.checkout_sha >>" + `pipeline.trigger_parameters.gitlab.user_id`: "<< pipeline.trigger_parameters.gitlab.user_id >>" + `pipeline.trigger_parameters.github_app.user_id`: "<< pipeline.trigger_parameters.github_app.user_id >>" + `pipeline.trigger_parameters.gitlab.user_name`: "<< pipeline.trigger_parameters.gitlab.user_name >>" + `pipeline.trigger_parameters.github_app.user_name`: "<< pipeline.trigger_parameters.github_app.user_name >>" + `pipeline.trigger_parameters.gitlab.user_username`: "<< pipeline.trigger_parameters.gitlab.user_username >>" + `pipeline.trigger_parameters.github_app.user_username`: "<< pipeline.trigger_parameters.github_app.user_username >>" + `pipeline.trigger_parameters.gitlab.user_avatar`: "<< pipeline.trigger_parameters.gitlab.user_avatar >>" + `pipeline.trigger_parameters.github_app.user_avatar`: "<< pipeline.trigger_parameters.github_app.user_avatar >>" + `pipeline.trigger_parameters.gitlab.repo_name`: "<< pipeline.trigger_parameters.gitlab.repo_name >>" + `pipeline.trigger_parameters.github_app.repo_name`: "<< pipeline.trigger_parameters.github_app.repo_name >>" + `pipeline.trigger_parameters.gitlab.repo_url`: "<< pipeline.trigger_parameters.gitlab.repo_url >>" + `pipeline.trigger_parameters.github_app.repo_url`: "<< pipeline.trigger_parameters.github_app.repo_url >>" + `pipeline.trigger_parameters.gitlab.web_url`: "<< pipeline.trigger_parameters.gitlab.web_url >>" + `pipeline.trigger_parameters.github_app.web_url`: "<< pipeline.trigger_parameters.github_app.web_url >>" + `pipeline.trigger_parameters.gitlab.commit_sha`: "<< pipeline.trigger_parameters.gitlab.commit_sha >>" + 
`pipeline.trigger_parameters.github_app.commit_sha`: "<< pipeline.trigger_parameters.github_app.commit_sha >>" + `pipeline.trigger_parameters.gitlab.commit_title`: "<< pipeline.trigger_parameters.gitlab.commit_title >>" + `pipeline.trigger_parameters.github_app.commit_title`: "<< pipeline.trigger_parameters.github_app.commit_title >>" + `pipeline.trigger_parameters.gitlab.commit_message`: "<< pipeline.trigger_parameters.gitlab.commit_message >>" + `pipeline.trigger_parameters.github_app.commit_message`: "<< pipeline.trigger_parameters.github_app.commit_message >>" + `pipeline.trigger_parameters.gitlab.commit_timestamp`: "<< pipeline.trigger_parameters.gitlab.commit_timestamp >>" + `pipeline.trigger_parameters.github_app.commit_timestamp`: "<< pipeline.trigger_parameters.github_app.commit_timestamp >>" + `pipeline.trigger_parameters.gitlab.commit_author_name`: "<< pipeline.trigger_parameters.gitlab.commit_author_name >>" + `pipeline.trigger_parameters.github_app.commit_author_name`: "<< pipeline.trigger_parameters.github_app.commit_author_name >>" + `pipeline.trigger_parameters.gitlab.commit_author_email`: "<< pipeline.trigger_parameters.gitlab.commit_author_email >>" + `pipeline.trigger_parameters.github_app.commit_author_email`: "<< pipeline.trigger_parameters.github_app.commit_author_email >>" + `pipeline.trigger_parameters.gitlab.total_commits_count`: "<< pipeline.trigger_parameters.gitlab.total_commits_count >>" + `pipeline.trigger_parameters.github_app.total_commits_count`: "<< pipeline.trigger_parameters.github_app.total_commits_count >>" + `pipeline.trigger_parameters.gitlab.branch`: "<< pipeline.trigger_parameters.gitlab.branch >>" + `pipeline.trigger_parameters.github_app.branch`: "<< pipeline.trigger_parameters.github_app.branch >>" + `pipeline.trigger_parameters.gitlab.default_branch`: "<< pipeline.trigger_parameters.gitlab.default_branch >>" + `pipeline.trigger_parameters.github_app.default_branch`: "<< pipeline.trigger_parameters.github_app.default_branch >>" + `pipeline.trigger_parameters.gitlab.x_gitlab_event_id`: "<< pipeline.trigger_parameters.gitlab.x_gitlab_event_id >>" + `pipeline.trigger_parameters.gitlab.is_fork_merge_request`: "<< pipeline.trigger_parameters.gitlab.is_fork_merge_request >>" +} + +/// Pipeline values are available to all pipeline configurations and can be used without previous +/// declaration. +/// +/// For a full list of values and built-in environment variables, see the +/// [Project values and variables guide](https://circleci.com/docs/variables/). +/// +/// For more reference: +hidden pipelineValues: PipelineValues + +output { + text = "# Generated from CircleCI.pkl. DO NOT EDIT.\n" + super.text + renderer = new YamlRenderer { + converters { + [AbstractStep] = (it) -> Map(it.__name__, it.toMap()) + } + } +} diff --git a/packages/com.circleci.v2/PklProject b/packages/com.circleci.v2/PklProject new file mode 100644 index 0000000..1d91977 --- /dev/null +++ b/packages/com.circleci.v2/PklProject @@ -0,0 +1,21 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Templates for defining [CircleCI](https://circleci.com) configuration. +amends "../basePklProject.pkl" + +package { + version = "1.0.0" +} diff --git a/packages/com.circleci.v2/PklProject.deps.json b/packages/com.circleci.v2/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/com.circleci.v2/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/com.influxdata.telegraf/PklProject b/packages/com.influxdata.telegraf/PklProject new file mode 100644 index 0000000..e3b08c4 --- /dev/null +++ b/packages/com.influxdata.telegraf/PklProject @@ -0,0 +1,26 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Templates for configuring [Telegraf](https://docs.influxdata.com/telegraf), +/// a plugin-driven server agent for collecting and reporting metrics. +amends "../basePklProject.pkl" + +dependencies { + ["toml"] = import("../pkl.toml/PklProject") +} + +package { + version = "1.0.0" +} diff --git a/packages/com.influxdata.telegraf/PklProject.deps.json b/packages/com.influxdata.telegraf/PklProject.deps.json new file mode 100644 index 0000000..d15381a --- /dev/null +++ b/packages/com.influxdata.telegraf/PklProject.deps.json @@ -0,0 +1,10 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": { + "package://pkg.pkl-lang.org/pkl-pantry/pkl.toml@1": { + "type": "local", + "uri": "projectpackage://pkg.pkl-lang.org/pkl-pantry/pkl.toml@1.0.0", + "path": "../pkl.toml" + } + } +} \ No newline at end of file diff --git a/packages/com.influxdata.telegraf/plugins/Plugin.pkl b/packages/com.influxdata.telegraf/plugins/Plugin.pkl new file mode 100644 index 0000000..8ad52c2 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/Plugin.pkl @@ -0,0 +1,90 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//===----------------------------------------------------------------------===//
+/// Telegraf is a plugin-driven agent that collects, processes, aggregates, and writes metrics.
+///
+/// Telegraf supports four categories of plugins including input, output, aggregator, and processor.
+///
+/// Docs:
+///
+/// Filters can be configured per input, output, processor, or aggregator.
+///
+/// Docs:
+@ModuleInfo { minPklVersion = "0.25.0" }
+abstract module com.influxdata.telegraf.plugins.Plugin
+
+import "@toml/toml.pkl"
+
+/// An array of glob pattern strings.
+///
+/// Only metrics whose measurement name matches a pattern in this list are emitted.
+namepass: Listing<String>?
+
+/// The inverse of [namepass].
+///
+/// If a match is found the metric is discarded.
+/// This is tested on metrics after they have passed the [namepass] test.
+namedrop: Listing<String>?
+
+/// A table mapping tag keys to arrays of glob pattern strings.
+///
+/// Only metrics that contain a tag key in the table and a tag value matching one of its patterns are emitted.
+tagpass: Mapping<String, Listing<String>>?
+
+/// The inverse of [tagpass].
+///
+/// If a match is found the metric is discarded.
+/// This is tested on metrics after they have passed the [tagpass] test.
+tagdrop: Mapping<String, Listing<String>>?
+
+/// An array of glob pattern strings.
+///
+/// Only fields whose field key matches a pattern in this list are emitted.
+fieldpass: Listing<String>?
+
+/// The inverse of [fieldpass].
+///
+/// Fields with a field key matching one of the patterns will be discarded from the metric.
+/// This is tested on metrics after they have passed the [fieldpass] test.
+fielddrop: Listing<String>?
+
+/// An array of glob pattern strings.
+///
+/// Only tags with a tag key matching one of the patterns are emitted.
+/// In contrast to [tagpass], which will pass an entire metric based on its tag,
+/// [taginclude] removes all non-matching tags from the metric.
+/// Any tag can be filtered including global tags and the agent `host` tag.
+taginclude: Listing<String>?
+
+/// The inverse of [taginclude].
+///
+/// Tags with a tag key matching one of the patterns will be discarded from the metric.
+/// Any tag can be filtered including global tags and the agent `host` tag
+tagexclude: Listing<String>?
+
+output {
+ renderer = new toml.Renderer {
+ converters {
+ // The Pkl config defines data formats as a nested object.
+ // Telegraf defines this as a flat object,
+ // so we inline the data format into the top-level plugin.
+ ["*"] = (value) ->
+ if (value is module)
+ let (m = value.toMap())
+ m.remove("data_format") + ((m.getOrNull("data_format") as Typed?)?.toMap() ?? Map())
+ else value
+ }
+ }
+}
diff --git a/packages/com.influxdata.telegraf/plugins/inputs/CpuInput.pkl b/packages/com.influxdata.telegraf/plugins/inputs/CpuInput.pkl
new file mode 100644
index 0000000..8eacbde
--- /dev/null
+++ b/packages/com.influxdata.telegraf/plugins/inputs/CpuInput.pkl
@@ -0,0 +1,40 @@
+//===----------------------------------------------------------------------===//
+// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The `cpu` plugin gather metrics on the system CPUs. +@ModuleInfo { minPklVersion = "0.25.0" } +module com.influxdata.telegraf.plugins.inputs.CpuInput + +extends "Input.pkl" + +/// Whether to report per-CPU stats or not. +/// +/// Default: `true` +percpu: Boolean? + +/// Whether to report total system CPU stats or not. +/// +/// Default: `true` +totalcpu: Boolean? + +/// If true, collect raw CPU time metrics. +/// +/// Default: `false` +collect_cpu_time: Boolean? + +/// If true, compute and report the sum of all non-idle CPU states. +/// +/// Default: `false` +report_active: Boolean? diff --git a/packages/com.influxdata.telegraf/plugins/inputs/DiskInput.pkl b/packages/com.influxdata.telegraf/plugins/inputs/DiskInput.pkl new file mode 100644 index 0000000..f6e7cc0 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/inputs/DiskInput.pkl @@ -0,0 +1,35 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The [disk input plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/disk/README.md) +/// gathers metrics about disk usage. +/// +/// Note that `used_percent` is calculated by doing `used / (used + free)`, +/// _not_ `used / total`, which is how the unix `df` command does it. +/// See for more details. +@ModuleInfo { minPklVersion = "0.25.0" } +module com.influxdata.telegraf.plugins.inputs.DiskInput + +extends "Input.pkl" + +/// By default stats will be gathered for all mount points. +/// +/// Set mount_points will restrict the stats to only the specified mount points. +mount_points: Listing? + +typealias FsType = "tmpfs"|"devtmpfs"|"devfs"|"iso9660"|"overlay"|"aufs"|"squashfs" + +/// Ignore mount points by filesystem type. +ignore_fs: Listing? diff --git a/packages/com.influxdata.telegraf/plugins/inputs/ExecInput.pkl b/packages/com.influxdata.telegraf/plugins/inputs/ExecInput.pkl new file mode 100644 index 0000000..04b2e02 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/inputs/ExecInput.pkl @@ -0,0 +1,51 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// [Exec Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/exec/README.md) +/// +/// The exec plugin executes all the commands in parallel on every interval and parses metrics from their output in any one of the accepted Input Data Formats. +@ModuleInfo { minPklVersion = "0.25.0" } +module com.influxdata.telegraf.plugins.inputs.ExecInput + +extends "Input.pkl" + +import ".../parsers/InputDataFormat.pkl" + +/// Commands array. +commands: Listing + +/// Environment variables. +hidden env: Mapping + +/// The environment variables as exposed to the exec input plugin. +/// +/// This is an internal property that is derived from [env]. +fixed environment: Listing = env + .toMap() + .entries + .map((entry) -> "\(entry.key)=\(entry.value)") + .toListing() + +/// Timeout for each command to complete. +timeout: Duration? + +/// Measurement name suffix (for separating different commands). +name_suffix: String? + +/// The [input data format](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) +/// to consume. +/// +/// Each data format has its own unique set of configuration options. +data_format: InputDataFormat? diff --git a/packages/com.influxdata.telegraf/plugins/inputs/FileInput.pkl b/packages/com.influxdata.telegraf/plugins/inputs/FileInput.pkl new file mode 100644 index 0000000..6f9dc04 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/inputs/FileInput.pkl @@ -0,0 +1,41 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The `file` plugin parses the **complete** contents of a file **every interval** +/// using the selected input data format. +/// +/// **Note:** If you wish to parse only newly appended lines use the `tail` input plugin instead. +@ModuleInfo { minPklVersion = "0.25.0" } +open module com.influxdata.telegraf.plugins.inputs.FileInput + +extends "Input.pkl" + +import "../parsers/InputDataFormat.pkl" + +/// Files to parse each interval. +/// +/// Accept standard unix glob matching rules, as well as `**` to match recursive files and directories. 
+files: Listing + +/// Name a tag containing the name of the file the data was parsed from. +/// +/// Leave empty to disable. +file_tag: String? + +/// The [input data format](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md) +/// to consume. +/// +/// Each data format has its own unique set of configuration options. +data_format: InputDataFormat? diff --git a/packages/com.influxdata.telegraf/plugins/inputs/HttpInput.pkl b/packages/com.influxdata.telegraf/plugins/inputs/HttpInput.pkl new file mode 100644 index 0000000..404521c --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/inputs/HttpInput.pkl @@ -0,0 +1,73 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. +/// +/// The endpoint should have metrics formatted in one of the supported +/// [input data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). +/// Each data format has its own unique set of configuration options which can be added to the input configuration. +@ModuleInfo { minPklVersion = "0.25.0" } +open module com.influxdata.telegraf.plugins.inputs.HttpInput + +extends "./Input.pkl" + +import "../parsers/InputDataFormat.pkl" + +/// One or more URLs from which to read formatted metrics. +urls: List + +/// HTTP method. +method: "GET"|"POST"|"PUT"|"PATCH" + +/// Optional HTTP headers. +headers: Mapping? + +/// HTTP entity-body to send with POST/PUT requests. +body: String? + +/// HTTP Content-Encoding for write request body. +/// +/// Can be set to "gzip" to compress body or "identity" to apply no encoding. +content_encoding: ("gzip"|"identity")? + +/// Optional file with Bearer token file content is added as an Authorization header. +bearer_token: String? + +/// Optional HTTP Basic Auth Credentials. +username: String? +password: String? + +/// HTTP Proxy support. +http_proxy_url: String? + +/// Optional TLS Config. +tls_ca: String? +tls_cert: String? +tls_key: String? + +/// Use TLS but skip chain & host verification. +insecure_skip_verify: Boolean? + +/// Amount of time allowed to complete the HTTP request. +timeout: Duration? + +/// List of success status codes. +success_status_codes: List? + +/// Data format to consume. +/// +/// Each data format has its own unique set of configuration options, read more about them here: +/// +data_format: InputDataFormat? 
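As a quick orientation for reviewers, the sketch below shows one hypothetical way to exercise the HTTP input module above on its own by amending it. The file name, endpoint, and header value are invented; the file would need to be evaluated inside this package's project so that the `@toml` dependency of `Plugin.pkl` resolves, and in practice the plugin is more likely to be embedded in a full Telegraf agent configuration.

```
// node-metrics.pkl (hypothetical): configure the HTTP input plugin directly.
// When evaluated, the TOML renderer set up in Plugin.pkl's `output` block applies.
amends "HttpInput.pkl"

// Required fields of this module.
urls = List("http://localhost:9273/metrics") // `urls` is declared as a List, so use the List(...) constructor
method = "GET"

// Optional knobs defined above.
headers = new Mapping { ["Accept"] = "application/json" }
timeout = 5.s
insecure_skip_verify = false
```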
diff --git a/packages/com.influxdata.telegraf/plugins/inputs/Input.pkl b/packages/com.influxdata.telegraf/plugins/inputs/Input.pkl new file mode 100644 index 0000000..469737b --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/inputs/Input.pkl @@ -0,0 +1,44 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Telegraf input plugins are used with the InfluxData time series platform +/// to collect metrics from the system, services, or third party APIs. +/// +/// All metrics are gathered from the inputs you enable and configure in the configuration file. +@ModuleInfo { minPklVersion = "0.25.0" } +abstract module com.influxdata.telegraf.plugins.inputs.Input + +extends "../Plugin.pkl" + +/// How often to gather this metric. +/// +/// Normal plugins use a single global interval, +/// but if one particular input should be run less or more often, +/// you can configure that here. +interval: Duration? + +/// Override the base name of the measurement. +/// +/// Default: name of the input +name_override: String? + +/// Specifies a prefix to attach to the measurement name. +name_prefix: String? + +/// Specifies a suffix to attach to the measurement name. +name_suffix: String? + +/// A map of tags to apply to a specific input’s measurements. +tags: Mapping? diff --git a/packages/com.influxdata.telegraf/plugins/inputs/NetInput.pkl b/packages/com.influxdata.telegraf/plugins/inputs/NetInput.pkl new file mode 100644 index 0000000..05170ae --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/inputs/NetInput.pkl @@ -0,0 +1,39 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The [net input plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/net/NET_README.md) +/// gathers metrics about network interface and protocol usage (Linux only). +@ModuleInfo { minPklVersion = "0.25.0" } +module com.influxdata.telegraf.plugins.inputs.NetInput + +extends "Input.pkl" + +/// By default, telegraf gathers stats from any up interface (excluding loopback). 
+/// +/// Setting interfaces will tell it to gather these explicit interfaces, regardless of status. +/// When specifying an interface, glob-style patterns are also supported. +/// +/// Example: +/// ``` +/// interfaces { "eth*", "enp0s[0-1]", "lo" } +/// ``` +interfaces: Listing? + +/// On linux systems telegraf also collects protocol stats. +/// +/// Setting this property to `true` will skip reporting of protocol metrics. +/// +/// Default: `false` +ignore_protocol_stats: Boolean? diff --git a/packages/com.influxdata.telegraf/plugins/inputs/OpenTelemetry.pkl b/packages/com.influxdata.telegraf/plugins/inputs/OpenTelemetry.pkl new file mode 100644 index 0000000..fa8354a --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/inputs/OpenTelemetry.pkl @@ -0,0 +1,53 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// This [OpenTelemetry input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) +/// receives traces, metrics and logs from OpenTelemetry clients and agents via gRPC. +@ModuleInfo { minPklVersion = "0.25.0" } +module com.influxdata.telegraf.plugins.inputs.OpenTelemetry + +extends "Input.pkl" + +/// Address and port to listen on. +/// +/// Default if unset: `0.0.0.0:4317` +service_address: String? + +/// New connection timeout +/// +/// Default if unset: 5s +timeout: Duration? + +/// Supports: "prometheus-v1", "prometheus-v2" +/// +/// Default: "prometheus-v1" +metrics_schema: (*"prometheus-v1"|"prometheus-v2")? + +/// Optional TLS Config. +/// +/// Set one or more allowed client CA certificate file names to +/// enable mutually authenticated TLS connections +tls_allowed_cacerts: Listing? + +/// Add service certificate +tls_cert: String? + +/// Add service key +tls_key: String? + +/// Maximum allowed body size of gRPC _receive_ request. +/// +/// This is only available in `mosaic-agent`. Default if unset is `4.mb`. +max_recv_msg_size: DataSize? diff --git a/packages/com.influxdata.telegraf/plugins/inputs/PrometheusInput.pkl b/packages/com.influxdata.telegraf/plugins/inputs/PrometheusInput.pkl new file mode 100644 index 0000000..f41f47c --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/inputs/PrometheusInput.pkl @@ -0,0 +1,110 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The [prometheus input plugin](https://github.com/influxdata/telegraf/blob/release-1.17/plugins/inputs/prometheus/README.md) +/// gathers metrics from HTTP servers exposing metrics in Prometheus format. +@ModuleInfo { minPklVersion = "0.25.0" } +module com.influxdata.telegraf.plugins.inputs.PrometheusInput + +extends "Input.pkl" + +/// An array of urls to scrape metrics from. +urls: Listing + +/// Tag name for the scrapped url. +/// +/// Optional, default is "url". +url_tag: String? + +/// Metric version controls the mapping from Prometheus metrics into Telegraf metrics. +/// +/// When using the prometheus_client output, +/// use the same value in both plugins to ensure metrics are round-tripped without modification. +/// +/// Examples: +/// ``` +/// metric_version = 1 // deprecated in 1.13 +/// metric_version = 2 // recommended version +/// ``` +metric_version: Int(isBetween(1, 2))? + +/// An array of Kubernetes services to scrape metrics from. +kubernetes_services: Listing? + +/// Kubernetes config file to create client from. +kube_config: String? + +/// Scrape Kubernetes pods for the following prometheus annotations: +/// +/// - prometheus.io/scrape: Enable scraping for this pod. +/// - prometheus.io/scheme: If the metrics endpoint is secured +/// then you will need to set this to `https` and most likely set the tls config. +/// - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +/// - prometheus.io/port: If port is not 9102 use this annotation. +monitor_kubernetes_pods: Boolean? + +/// Get the list of pods to scrape given the following scope. +/// +/// Possible values: +/// - `cluster`: the kubernetes watch api (default, no need to specify) +/// - `node`: the local cadvisor api; for scalability. Note that the config [node_ip] or the +/// environment variable `NODE_IP` must be set to the host IP. +pod_scrape_scope: ("cluster"|"node")? + +/// Only for node scrape scope: node IP of the node that telegraf is running on. +/// +/// Either this config or the environment variable `NODE_IP` must be set. +node_ip: String(pod_scrape_scope == "node")? + +/// Restricts Kubernetes monitoring to a single namespace +/// +/// Example: `monitor_kubernetes_pods_namespace = "default"` +monitor_kubernetes_pods_namespace: String? + +/// Label selector to target pods which have the label. +/// +/// Field selector to target pods, e.g., to scrape pods on a specific node: +/// `kubernetes_field_selector = "env=dev,app=nginx"` +kubernetes_field_selector: String? + +/// Use bearer token for authorization ([bearer_token] takes priority). +/// +/// Path to the bearer token file. +bearer_token: String? + +/// Use bearer token for authorization ([bearer_token] takes priority). +bearer_token_string: String? + +/// HTTP Basic Authentication username and password ([bearer_token] and +/// [bearer_token_string] take priority). +username: String? +password: String? + +/// Specify timeout duration for slower prometheus clients. +/// +/// Default: `3.s` +response_timeout: Duration? 
+ +/// Optional TLS CA. +tls_ca: String? + +/// Optional TLS certificate. +tls_cert: String? + +/// Optional TLS private key. +tls_key: String? + +/// Use TLS but skip chain & host verification. +insecure_skip_verify: Boolean? diff --git a/packages/com.influxdata.telegraf/plugins/inputs/SocketListenerInput.pkl b/packages/com.influxdata.telegraf/plugins/inputs/SocketListenerInput.pkl new file mode 100644 index 0000000..eebe129 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/inputs/SocketListenerInput.pkl @@ -0,0 +1,111 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The Socket Listener is a service input plugin that listens for messages +/// from streaming (tcp, unix) or datagram (udp, unixgram) protocols. +/// +/// The plugin expects messages in the +/// [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). +@ModuleInfo { minPklVersion = "0.25.0" } +open module com.influxdata.telegraf.plugins.inputs.SocketListenerInput + +extends "Input.pkl" + +import "../parsers/InputDataFormat.pkl" + +/// URL to listen on. +/// +/// Examples: +/// ``` +/// service_address = "tcp://:8094" +/// service_address = "tcp://127.0.0.1:http" +/// service_address = "tcp4://:8094" +/// service_address = "tcp6://:8094" +/// service_address = "tcp6://[2001:db8::1]:8094" +/// service_address = "udp://:8094" +/// service_address = "udp4://:8094" +/// service_address = "udp6://:8094" +/// service_address = "unix:///tmp/telegraf.sock" +/// service_address = "unixgram:///tmp/telegraf.sock" +/// ``` +service_address: Uri + +/// Change the file mode bits on unix sockets. +/// +/// These permissions may not be respected by some platforms. +/// To safely restrict write permissions, it is best to place the socket +/// into a directory that has previously been created with the desired permissions. +/// +/// Example: +/// ``` +/// socket_mode = "777" +/// ``` +socket_mode: String? + +/// Maximum number of concurrent connections. +/// +/// Only applies to stream sockets (e.g. TCP). +/// 0 (default) is unlimited. +/// +/// Default: `1024` +max_connections: UInt16? + +/// Read timeout. +/// Only applies to stream sockets (e.g. TCP). +/// 0 (default) is unlimited. +/// +/// Default: `30.s` +read_timeout: Duration? + +/// Optional TLS configuration. +/// +/// Only applies to stream sockets (e.g. TCP). +tls_cert: String? +tls_key: String? + +/// Enables client authentication if set. +/// +/// Example: +/// ``` +/// tls_allowed_cacerts { "/etc/telegraf/clientca.pem" } +/// ``` +tls_allowed_cacerts: Listing? + +/// Maximum socket buffer size (in bytes when no unit specified). +/// +/// For stream sockets, once the buffer fills up, the sender will start backing up. 
+/// For datagram sockets, once the buffer fills up, metrics will start dropping. +/// +/// Default: OS default +read_buffer_size: DataSize? + +/// Period between keep alive probes. +/// +/// Only applies to TCP sockets. +/// 0 disables keep alive probes. +/// +/// Default: OS configuration +keep_alive_period: Duration? + +/// Data format to consume. +/// +/// Each data format has its own unique set of configuration options, read more about them here: +/// +data_format: InputDataFormat? + +/// Content encoding for message payloads. +/// +/// Can be set to "gzip" to or "identity" to apply no encoding. +content_encoding: ("gzip"|"identity")? diff --git a/packages/com.influxdata.telegraf/plugins/inputs/TailInput.pkl b/packages/com.influxdata.telegraf/plugins/inputs/TailInput.pkl new file mode 100644 index 0000000..688ffc8 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/inputs/TailInput.pkl @@ -0,0 +1,91 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The tail plugin "tails" a logfile and parses each log message. +/// +/// By default, the tail plugin acts like the following unix tail command: +/// +/// ``` +/// tail -F --lines=0 myfile.log +/// ``` +/// +/// - `-F` means that it will follow the _name_ of the given file, so +/// that it will be compatible with log-rotated files, and that it will retry on +/// inaccessible files. +/// - `--lines=0` means that it will start at the end of the file (unless +/// the `from_beginning` option is set). +/// +/// see http://man7.org/linux/man-pages/man1/tail.1.html for more details. +/// +/// The plugin expects messages in one of the +/// [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). +@ModuleInfo { minPklVersion = "0.25.0" } +open module com.influxdata.telegraf.plugins.inputs.TailInput + +extends "Input.pkl" + +import "../parsers/InputDataFormat.pkl" + +/// File names or a pattern to tail. +/// +/// These accept standard unix glob matching rules, but with the addition of `**` as a "super asterisk": +/// +/// - `"/var/log/**.log"`: recursively find all .log files in /var/log +/// - `"/var/log/*/*.log"`: find all .log files with a parent dir in /var/log +/// - `"/var/log/apache.log"`: just tail the apache log file +/// +/// See https://github.com/gobwas/glob for more examples. +files: Listing + +/// Read file from beginning. +/// +/// Default: `false` +from_beginning: Boolean? + +/// Whether file is a named pipe. +/// +/// Default: `false` +pipe: Boolean? + +/// Method used to watch for file updates. +/// +/// Default: `"inotify"` +watch_method: (*"inotify"|"poll")? + +/// Maximum lines of the file to process that have not yet be written by the output. 
+/// +/// For best throughput set based on the number of metrics on each line +/// and the size of the output's metric_batch_size. +max_undelivered_lines: Int(isPositive)? + +/// Character encoding to use when interpreting the file contents. +/// +/// Invalid characters are replaced using the unicode replacement character. +/// When set to the empty string the data is not decoded to text. +/// +/// Examples: +/// ``` +/// character_encoding = "utf-8" +/// character_encoding = "utf-16le" +/// character_encoding = "utf-16be" +/// character_encoding = "" +/// ``` +character_encoding: String? + +/// Data format to consume. +/// +/// Each data format has its own unique set of configuration options, read more about them here: +/// https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +data_format: InputDataFormat? diff --git a/packages/com.influxdata.telegraf/plugins/outputs/DiscardOutput.pkl b/packages/com.influxdata.telegraf/plugins/outputs/DiscardOutput.pkl new file mode 100644 index 0000000..68e50ed --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/outputs/DiscardOutput.pkl @@ -0,0 +1,22 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The Discard output plugin simply drops all metrics that are sent to it. +/// +/// It is only meant to be used for testing purposes. +@ModuleInfo { minPklVersion = "0.25.0" } +module com.influxdata.telegraf.plugins.outputs.DiscardOutput + +extends "Output.pkl" diff --git a/packages/com.influxdata.telegraf/plugins/outputs/FileOutput.pkl b/packages/com.influxdata.telegraf/plugins/outputs/FileOutput.pkl new file mode 100644 index 0000000..5621cdc --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/outputs/FileOutput.pkl @@ -0,0 +1,55 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// This plugin writes telegraf metrics to files. 
+@ModuleInfo { minPklVersion = "0.25.0" }
+open module com.influxdata.telegraf.plugins.outputs.FileOutput
+
+extends "Output.pkl"
+
+import ".../serializers/OutputDataFormat.pkl"
+
+/// The files to write to.
+///
+/// `"stdout"` is a specially handled file.
+files: Listing<String>?
+
+/// Use batch serialization format instead of line-based delimiting.
+///
+/// The batch format allows for the production of non-line-based output formats
+/// and may more efficiently encode and write metrics.
+///
+/// Defaults to `false`.
+use_batch_format: Boolean?
+
+/// The file will be rotated after the specified time interval.
+///
+/// When set to 0, no time-based rotation is performed.
+///
+/// Defaults to no rotation.
+rotation_interval: Duration?
+
+/// The logfile will be rotated when it becomes larger than the specified size.
+///
+/// When set to 0, no size-based rotation is performed.
+rotation_max_size: DataSize?
+
+/// Maximum number of rotated archives to keep; any older logs are deleted.
+///
+/// If set to -1, no archives are removed.
+rotation_max_archives: Int?
+
+/// The [data format](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
+/// to output.
+///
+/// Each data format has its own unique set of configuration options.
+data_format: OutputDataFormat
diff --git a/packages/com.influxdata.telegraf/plugins/outputs/Output.pkl b/packages/com.influxdata.telegraf/plugins/outputs/Output.pkl new file mode 100644 index 0000000..74c004e --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/outputs/Output.pkl
+//===----------------------------------------------------------------------===//
+// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//===----------------------------------------------------------------------===//
+/// Telegraf output plugins write metrics to various destinations.
+@ModuleInfo { minPklVersion = "0.25.0" }
+abstract module com.influxdata.telegraf.plugins.outputs.Output
+
+extends "../Plugin.pkl"
+
+/// Names an instance of a plugin.
+alias: String?
+
+/// The maximum time between flushes.
+///
+/// Use this setting to override the agent `flush_interval` on a per plugin basis.
+flush_interval: Duration?
+
+/// The amount of time to jitter the flush interval.
+///
+/// Use this setting to override the agent `flush_jitter` on a per plugin basis.
+flush_jitter: Duration?
+
+/// The maximum number of metrics to send at once.
+///
+/// Use this setting to override the agent `metric_batch_size` on a per plugin basis.
+metric_batch_size: Number?
+
+/// The maximum number of unsent metrics to buffer.
+///
+/// Use this setting to override the agent `metric_buffer_limit` on a per plugin basis.
+metric_buffer_limit: Number?
+
+/// Override the original name of the measurement.
+name_override: String?
+
+/// Specifies a prefix to attach to the measurement name.
+name_prefix: String?
+
+/// Specifies a suffix to attach to the measurement name.
+name_suffix: String?
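For orientation, here is a minimal usage sketch of the `FileOutput` and `Output` definitions above. It is illustrative only and not part of this change set; the import paths, the concrete values, and the use of the `JsonOutputDataFormat` serializer (added later in this diff) are assumptions.

```
import "packages/com.influxdata.telegraf/plugins/outputs/FileOutput.pkl"
import "packages/com.influxdata.telegraf/plugins/serializers/JsonOutputDataFormat.pkl"

// A file output that rotates hourly, keeps five archives, and serializes
// metrics as JSON. `flush_interval` is inherited from the shared Output module.
output: FileOutput = new {
  files { "stdout"; "/var/log/telegraf/metrics.out" }
  rotation_interval = 1.h
  rotation_max_archives = 5
  flush_interval = 10.s
  data_format = new JsonOutputDataFormat {
    files { "stdout" }
    json_timestamp_units = 1.s
  }
}
```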
diff --git a/packages/com.influxdata.telegraf/plugins/outputs/PrometheusClientOutput.pkl b/packages/com.influxdata.telegraf/plugins/outputs/PrometheusClientOutput.pkl new file mode 100644 index 0000000..f8f31df --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/outputs/PrometheusClientOutput.pkl @@ -0,0 +1,75 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// This plugin starts a Prometheus Client, it exposes all metrics on /metrics (default) to be polled by a Prometheus server. +@ModuleInfo { minPklVersion = "0.25.0" } +open module com.influxdata.telegraf.plugins.outputs.PrometheusClientOutput + +extends "Output.pkl" + +/// Address to listen on. +listen: String? + +/// Metric version controls the mapping from Prometheus metrics into Telegraf metrics. +/// +/// See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. +/// Defaults to `1`. +metric_version: Int(isBetween(1,2))? + +/// Username for optional basic authentication. +basic_username: String? + +/// Password for optional basic authentication. +basic_password: String? + +/// If set, the IP Ranges which are allowed to access metrics. +/// +/// Defaults to no restrictions. +ip_range: Listing? + +/// Path to publish the metrics on. +/// +/// Defaults to `/metrics`. +path: String? + +/// Expiration interval for each metric. +/// +/// Set to `0` for no expiration. +/// Defaults to 60 seconds. +expiration_interval: Duration? + +/// Collectors to disable, valid entries are "gocollector" and "process". +/// +/// Defaults to `gocollector' and `process' being enabled. +collectors_exclude: Listing<"gocollector"|"process">(isDistinct)? + +/// Send string metrics as Prometheus labels. +/// +/// Defaults to `true`. +string_as_label: Boolean? + +/// Path to certificate for optional TLS. +tls_cert: String? + +/// Path to key for optional TLS. +tls_key: String? + +/// Path to CA Certificate files to enable optional mTLS. +tls_allowed_cacerts: Listing? + +/// Export metric collection time. +/// +/// Defaults to `false`. +export_timestamp: Boolean? diff --git a/packages/com.influxdata.telegraf/plugins/parsers/CsvInputDataFormat.pkl b/packages/com.influxdata.telegraf/plugins/parsers/CsvInputDataFormat.pkl new file mode 100644 index 0000000..f4ec450 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/parsers/CsvInputDataFormat.pkl @@ -0,0 +1,99 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The `csv` parser creates metrics from a document containing comma separated values. +@ModuleInfo { minPklVersion = "0.25.0" } +open module com.influxdata.telegraf.plugins.parsers.CsvInputDataFormat + +extends "InputDataFormat.pkl" + +files: Listing + +data_format: "csv" + +/// Indicates how many rows to treat as a header. +/// +/// By default, the parser assumes there is no header and will parse the first row as data. +/// If set to anything more than 1, column names will be concatenated with the name listed in the next header row. +/// If [csv_column_names] is specified, the column names in header will be overridden. +csv_header_row_count: Int(isPositive)? + +/// For assigning custom names to columns. +/// +/// If this is specified, all columns should have a name. +/// Unnamed columns will be ignored by the parser. +/// If [csv_header_row_count] is set to 0, this config must be used. +csv_column_names: Listing? + +/// For assigning explicit data types to columns. +/// +/// Specify types in order by column (e.g., `csv_column_types { "string"; "int"; "float" }`). +/// If this is not specified, type conversion will be done on the types above. +csv_column_types: Listing<"int"|"float"|"bool"|"string">? + +/// Indicates the number of rows to skip before looking for header information. +csv_skip_rows: Int(isPositive)? + +/// Indicates the number of columns to skip before looking for data to parse. +/// +/// These columns will be skipped in the header as well. +csv_skip_columns: Int(isPositive)? + +/// The separator between csv fields. +/// +/// Default: `","` +csv_delimiter: String? + +/// The character reserved for marking a row as a comment row. +/// +/// Commented rows are skipped and not parsed. +csv_comment: String? + +/// If set to true, the parser will remove leading whitespace from fields. +/// +/// Default: `false` +csv_trim_space: Boolean? + +/// Columns listed here will be added as tags. +/// +/// Any other columns will be added as fields. +csv_tag_columns: Listing? + +/// The column to extract the name of the metric from. +/// +/// Will not be included as field in metric. +csv_measurement_column: String? + +/// The column to extract time information for the metric. +/// +/// [csv_timestamp_format] must be specified if this is used. +/// Will not be included as field in metric. +csv_timestamp_column: String? + +/// The format of time data extracted from [csv_timestamp_column]. +/// +/// This must be specified if [csv_timestamp_column] is specified. +csv_timestamp_format: String? + +/// The timezone of time data extracted from [csv_timestamp_column] +/// in case there is no timezone information. +/// +/// It follows the IANA Time Zone database. +csv_timezone: String? + +/// Indicates values to skip, such as an empty string value `""`. +/// +/// The field will be skipped entirely where it matches any values inserted here. +csv_skip_values: Listing? 
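As an illustrative aside (not part of this change set), the `CsvInputDataFormat` schema above might be instantiated roughly as follows for a headerless CSV file; the import path, file locations, and column layout are assumptions.

```
import "packages/com.influxdata.telegraf/plugins/parsers/CsvInputDataFormat.pkl"

// Parses lines such as: 2024-01-01T00:00:00Z,host-1,42,0.37
// The file has no header row (the default), so column names are given explicitly.
parser: CsvInputDataFormat = new {
  files { "/var/log/app/stats.csv" }
  csv_column_names { "time"; "host"; "requests"; "error_rate" }
  csv_column_types { "string"; "string"; "int"; "float" }
  csv_tag_columns { "host" }            // stored as a tag rather than a field
  csv_timestamp_column = "time"
  csv_timestamp_format = "2006-01-02T15:04:05Z07:00"
}
```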
diff --git a/packages/com.influxdata.telegraf/plugins/parsers/InputDataFormat.pkl b/packages/com.influxdata.telegraf/plugins/parsers/InputDataFormat.pkl new file mode 100644 index 0000000..a54febb --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/parsers/InputDataFormat.pkl @@ -0,0 +1,20 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Base class for [input data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). +@ModuleInfo { minPklVersion = "0.25.0" } +abstract module com.influxdata.telegraf.plugins.parsers.InputDataFormat + +data_format: String diff --git a/packages/com.influxdata.telegraf/plugins/parsers/JsonInputDataFormat.pkl b/packages/com.influxdata.telegraf/plugins/parsers/JsonInputDataFormat.pkl new file mode 100644 index 0000000..71b7b52 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/parsers/JsonInputDataFormat.pkl @@ -0,0 +1,82 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The JSON data format parses a JSON object or an array of objects into metric fields. +/// +/// **NOTE:** All JSON numbers are converted to float fields. +/// JSON strings and booleans are ignored unless specified in the [tag_keys] or [json_string_fields] options. +@ModuleInfo { minPklVersion = "0.25.0" } +module com.influxdata.telegraf.plugins.parsers.JsonInputDataFormat + +extends "InputDataFormat.pkl" + +data_format: "json" + +/// When strict is true and a JSON array is being parsed, +/// all objects within the array must be valid. +/// +/// Default: `true` +json_strict: Boolean? + +/// A GJSON path that specifies a specific chunk of JSON to be parsed. +/// +/// If not specified, the whole document will be parsed. +/// +/// GJSON query paths are described here: +/// +json_query: String? + +/// An array of keys that should be added as tags. +/// +/// Matching keys are no longer saved as fields. +/// Supports wildcard glob matching. +tag_keys: Listing? 
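+
+// Illustrative usage sketch (not part of this change set): given a payload such
+// as `{"service": "checkout", "latency_ms": 12.5}`, the following keeps
+// `service` as a tag instead of it being ignored as a non-numeric field:
+//
+//   data_format = new JsonInputDataFormat {
+//     tag_keys { "service" }
+//   }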
+ +/// Array of glob pattern strings or booleans keys that should be added as string fields. +json_string_fields: Listing? + +/// The key to use as the measurement name. +json_name_key: String? + +/// The key containing the time that should be used to create the metric. +json_time_key: String? + +/// The time layout that should be used to interpret [json_time_key]. +/// +/// The time must be `"unix"`, `"unix_ms"`, `"unix_us"`, `"unix_ns"`, or a time in the "reference time". +/// To define a different format, arrange the values from the "reference time" +/// in the example to match the format you will be using. +/// For more information on the "reference time", visit . +/// +/// Examples: +/// ``` +/// json_time_format = "Mon Jan 2 15:04:05 -0700 MST 2006" +/// json_time_format = "2006-01-02T15:04:05Z07:00" +/// json_time_format = "01/02/2006 15:04:05" +/// json_time_format = "unix" +/// json_time_format = "unix_ms" +/// ``` +json_time_format: ("unix"|"unix_ms"|"unix_us"|"unix_ns"|String)? + +/// Allows you to provide an override for timestamps +/// that don't already include an offset, e.g., `"04/06/2016 12:41:45"`. +/// +/// Default: `""` (renders UTC) +/// +/// Options are as follows: +/// - `"Local"`: interpret based on machine localtime +/// - `"America/New_York"`: Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones +/// - `"UTC"` (or blank/unspecified): will return timestamp in UTC +json_timezone: String? diff --git a/packages/com.influxdata.telegraf/plugins/processors/Processor.pkl b/packages/com.influxdata.telegraf/plugins/processors/Processor.pkl new file mode 100644 index 0000000..2807d67 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/processors/Processor.pkl @@ -0,0 +1,26 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Processor plugins transform, decorate, and/or filter metrics collected by input plugins, +/// passing the transformed metrics to the output plugins +@ModuleInfo { minPklVersion = "0.25.0" } +abstract module com.influxdata.telegraf.plugins.processors.Processor + +extends "../Plugin.pkl" + +/// This is the order in which processors are executed. +/// +/// If this is not specified, then processor execution order will be random. +order: Number? diff --git a/packages/com.influxdata.telegraf/plugins/processors/StarlarkProcessor.pkl b/packages/com.influxdata.telegraf/plugins/processors/StarlarkProcessor.pkl new file mode 100644 index 0000000..d9d3a39 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/processors/StarlarkProcessor.pkl @@ -0,0 +1,30 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The starlark processor calls a Starlark function for each matched metric, allowing for custom programmatic metric processing. +@ModuleInfo { minPklVersion = "0.25.0" } +open module com.influxdata.telegraf.plugins.processors.StarlarkProcessor + +extends "Processor.pkl" + +/// Source of the Starlark script. +@SourceCode { language = "Starlark" } +source: String? + +/// File containing a Starlark script. +script: String? + +/// The constants of the Starlark script. +constants: Mapping? diff --git a/packages/com.influxdata.telegraf/plugins/serializers/JsonOutputDataFormat.pkl b/packages/com.influxdata.telegraf/plugins/serializers/JsonOutputDataFormat.pkl new file mode 100644 index 0000000..8ae4593 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/serializers/JsonOutputDataFormat.pkl @@ -0,0 +1,33 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The [JSON output data format](https://docs.influxdata.com/telegraf/v1.17/data_formats/output/json/) +/// serializes Telegraf metrics into JSON documents. +@ModuleInfo { minPklVersion = "0.25.0" } +module com.influxdata.telegraf.plugins.serializers.JsonOutputDataFormat + +extends "OutputDataFormat.pkl" + +data_format: "json" + +/// Files to write to. +/// +/// `"stdout"` is a specially handled file. +files: Listing + +/// The resolution to use for the metric timestamp. +/// +/// Durations are truncated to the power of 10 less than the specified units. +json_timestamp_units: Duration diff --git a/packages/com.influxdata.telegraf/plugins/serializers/OutputDataFormat.pkl b/packages/com.influxdata.telegraf/plugins/serializers/OutputDataFormat.pkl new file mode 100644 index 0000000..5a63506 --- /dev/null +++ b/packages/com.influxdata.telegraf/plugins/serializers/OutputDataFormat.pkl @@ -0,0 +1,20 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Base class for [output data formats](https://docs.influxdata.com/telegraf/v1.17/data_formats/output/). +@ModuleInfo { minPklVersion = "0.25.0" } +abstract module com.influxdata.telegraf.plugins.serializers.OutputDataFormat + +data_format: String diff --git a/packages/io.prometheus/Configuration.pkl b/packages/io.prometheus/Configuration.pkl new file mode 100644 index 0000000..1a4d1c6 --- /dev/null +++ b/packages/io.prometheus/Configuration.pkl @@ -0,0 +1,648 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Prometheus is configured via command-line flags and a configuration file. +/// +/// While the command-line flags configure immutable system parameters +/// (such as storage locations, amount of data to keep on disk and in memory, etc.), +/// the configuration file defines everything related to scraping +/// [jobs and their instances](https://prometheus.io/docs/concepts/jobs_instances/), +/// as well as which +/// [rule files to load](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#configuring-rules). +/// +/// To view all available command-line flags, run `./prometheus -h`. +/// +/// Prometheus can reload its configuration at runtime. +/// If the new configuration is not well-formed, the changes will not be applied. +/// A configuration reload is triggered by sending a `SIGHUP` to the Prometheus process +/// or sending a HTTP POST request to the `/-/reload` endpoint +/// (when the `--web.enable-lifecycle` flag is enabled). +/// This will also reload any configured rule files. +/// +/// More details: +@ModuleInfo { minPklVersion = "0.25.0" } +open module io.prometheus.Configuration + +extends "PrometheusObject.pkl" + +/// The global configuration specifies parameters that are valid in all other configuration contexts. +/// +/// They also serve as defaults for other configuration sections. +global: GlobalConfig? + +/// Alerting specifies settings related to the Alertmanager. +alerting: AlertingConfig? + +/// A list of scrape configurations. +scrape_configs: Listing? + +/// Settings related to the remote write feature. +remote_write: Listing? + +/// Settings related to the remote read feature. +remote_read: Listing? + +/// A list of globs. Rules and alerts are read from all matching files. 
+rule_files: Listing<String>?
+
+class GlobalConfig {
+  /// How frequently to evaluate rules.
+  ///
+  /// Default: `1.min`
+  evaluation_interval: Duration?
+
+  /// The labels to add to any time series or alerts when communicating
+  /// with external systems (federation, remote storage, Alertmanager).
+  external_labels: Labels?
+
+  /// File to which PromQL queries are logged.
+  ///
+  /// Reloading the configuration will reopen the file.
+  query_log_file: String?
+
+  /// How long until a scrape request times out.
+  ///
+  /// Default if omitted: `10.s`
+  scrape_timeout: Duration?
+
+  /// How frequently to scrape targets by default.
+  ///
+  /// Default if omitted: `1.min`
+  scrape_interval: Duration?
+}
+
+/// A set of targets and parameters describing how to scrape them.
+///
+/// In the general case, one scrape configuration specifies a single job.
+/// In advanced configurations, this may change.
+///
+/// Targets may be statically configured via [static_configs]
+/// or dynamically discovered using one of the supported service-discovery mechanisms.
+///
+/// Additionally, [relabel_configs] allow advanced modifications to any target and its labels before scraping.
+///
+/// More details: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
+open class ScrapeConfig {
+  /// The job name assigned to scraped metrics by default.
+  job_name: String
+
+  /// How frequently to scrape targets from this job.
+  ///
+  /// Default if omitted: [GlobalConfig.scrape_interval]
+  scrape_interval: Duration?
+
+  /// Per-scrape timeout when scraping this job.
+  ///
+  /// Default if omitted: [GlobalConfig.scrape_timeout]
+  scrape_timeout: Duration?
+
+  /// The HTTP resource path on which to fetch metrics from targets.
+  ///
+  /// Default if omitted: `"/metrics"`
+  metrics_path: String?
+
+  /// Controls how Prometheus handles conflicts between labels that are
+  /// already present in scraped data and labels that Prometheus would attach
+  /// server-side ("job" and "instance" labels, manually configured target
+  /// labels, and labels generated by service discovery implementations).
+  ///
+  /// If [true], label conflicts are resolved by keeping label
+  /// values from the scraped data and ignoring the conflicting server-side labels.
+  ///
+  /// If [false], label conflicts are resolved by renaming conflicting labels
+  /// in the scraped data to "exported_<original-label>" (for example "exported_instance", "exported_job")
+  /// and then attaching server-side labels.
+  ///
+  /// Setting [honor_labels] to [true] is useful for use cases such as federation
+  /// and scraping the Pushgateway, where all labels specified in the target should be preserved.
+  ///
+  /// Note that any globally configured [GlobalConfig.external_labels] are unaffected by this setting.
+  /// In communication with external systems, they are always applied only
+  /// when a time series does not have a given label yet and are ignored otherwise.
+  ///
+  /// Default if omitted: [false]
+  honor_labels: Boolean?
+
+  /// Controls whether Prometheus respects the timestamps present in scraped data.
+  ///
+  /// If [true], the timestamps of the metrics exposed by the target will be used.
+  ///
+  /// If [false], the timestamps of the metrics exposed by the target will be ignored.
+  ///
+  /// Default if omitted: [true]
+  honor_timestamps: Boolean?
+
+  /// Configures the protocol scheme used for requests.
+  ///
+  /// Default if omitted: `"http"`
+  scheme: Scheme?
+
+  /// Optional HTTP URL parameters.
+  params: Mapping<String, Listing<String>>?
+
+  /// Sets the `Authorization` header on every scrape request
+  /// with the configured username and password.
+ basic_auth: BasicAuth? + + /// Sets the `Authorization` header on every scrape request with the configured bearer token. + /// + /// Mutually exclusive with [bearer_token_file]. + bearer_token: String(bearer_token_file == null)? + + /// Sets the `Authorization` header on every scrape request + /// with the bearer token read from the configured file. + /// + /// Mutually exclusive with [bearer_token]. + bearer_token_file: String? + + /// Configures the scrape request's TLS settings. + tls_config: TLSConfig? + + /// Optional proxy URL. + proxy_url: String? + + /// List of Kubernetes service discovery configurations. + kubernetes_sd_configs: Listing? + + /// List of file service discovery configurations. + file_sd_configs: Listing? + + /// List of labeled statically configured targets for this job. + static_configs: Listing? + + /// List of target relabel configurations. + relabel_configs: Listing? + + /// List of metric relabel configurations. + metric_relabel_configs: Listing? + + /// Per-scrape limit on number of scraped samples that will be accepted. + /// + /// If more than this number of samples are present after metric relabeling, + /// the entire scrape will be treated as failed. + /// `0` means no limit. + /// + /// Default if omitted: `0` + sample_limit: UInt? + + /// Per-scrape config limit on number of unique targets that will be accepted. + /// + /// If more than this number of targets are present after target relabeling, + /// Prometheus will mark the targets as failed without scraping them. + /// `0` means no limit. + /// + /// This is an experimental feature, this behaviour could change in the future. + /// + /// Default if omitted: `0` + target_limit: UInt? +} + +/// Allow retrieving scrape targets from Kubernetes' REST API +/// and always staying synchronized with the cluster state. +/// +/// One of the following role types can be configured to discover targets: +/// +/// More details: +class KubernetesSdConfig { + /// The Kubernetes role of entities that should be discovered. + role: *"pod"|"service"|"node"|"endpoints"|"ingress" + + /// Optional namespace discovery. + /// + /// Default if omitted: all namespaces are used + namespaces: NamespaceSpec? + + /// The API server addresses. + /// + /// If left empty, Prometheus is assumed to run inside of the cluster + /// and will discover API servers automatically and use the pod's CA certificate + /// and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. + api_server: String? + + /// Optional label and field selectors to limit the discovery process to a subset of available resources. + /// + /// See + /// and + /// to learn more about the possible filters that can be used. + /// Endpoints role supports pod, service and endpoints selectors, + /// other roles only support selectors matching the role itself + /// (e.g. node role can only contain node selectors). + /// + /// Note: When making decision about using field/label selector, + /// make sure that this is the best approach - + /// it will prevent Prometheus from reusing single list/watch for all scrape configs. + /// This might result in a bigger load on the Kubernetes API, + /// because per each selector combination there will be additional LIST/WATCH. + /// On the other hand, if you just want to monitor small subset of pods in large cluster, + /// it's recommended to use selectors. + /// If selectors should be used or not depends on the particular situation. + selectors: Listing? 
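+
+  // Illustrative sketch (not part of this change set): when only a small subset
+  // of pods in a large cluster should be scraped, a label selector keeps the
+  // discovery load down, e.g.:
+  //
+  //   selectors {
+  //     new { label = "app=my-service" }
+  //   }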
+}
+
+class KubernetesSdConfigSelector {
+  role: *"pod"|"service"|"node"|"endpoints"|"ingress"
+
+  /// A `key=value` pair describing a Kubernetes resource label.
+  ///
+  /// See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/.
+  label: String?
+
+  /// A `key=value` pair describing a Kubernetes field selector.
+  ///
+  /// See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/.
+  field: String?
+}
+
+/// File-based service discovery provides a more generic way to configure static targets and
+/// serves as an interface to plug in custom service discovery mechanisms.
+///
+/// It reads a set of files containing a list of zero or more `<static_config>`s. Changes to all
+/// defined files are detected via disk watches and applied immediately. Files may be provided in
+/// YAML or JSON format. Only changes resulting in well-formed target groups are applied.
+class FileSdConfig {
+  /// Patterns for files from which target groups are extracted.
+  ///
+  /// Files may end with `.json`, `.yaml` or `.yml`. The last path segment may contain a single `*`
+  /// that matches any character sequence, e.g. `my/path/tg_*.json`.
+  files: Listing<String>
+
+  /// Refresh interval to re-read the files.
+  ///
+  /// Defaults to `5.min`.
+  refresh_interval: Duration?
+}
+
+class NamespaceSpec {
+  names: Listing<String>
+}
+
+/// Relabeling is a powerful tool to dynamically rewrite the label set of a target before it gets scraped.
+///
+/// Multiple relabeling steps can be configured per scrape configuration.
+/// They are applied to the label set of each target in order of their appearance in the configuration file.
+///
+/// Initially, aside from the configured per-target labels,
+/// a target's `job` label is set to the `job_name` value of the respective scrape configuration.
+/// The `__address__` label is set to the `<host>:<port>` address of the target.
+/// After relabeling, the `instance` label is set to the value of `__address__`
+/// by default if it was not set during relabeling.
+/// The `__scheme__` and `__metrics_path__` labels
+/// are set to the scheme and metrics path of the target respectively.
+/// The `__param_<name>` label is set to the value of the first passed URL parameter called `<name>`.
+///
+/// Additional labels prefixed with `__meta_` may be available during the relabeling phase.
+/// They are set by the service discovery mechanism that provided the target and vary between mechanisms.
+///
+/// Labels starting with `__` will be removed from the label set after target relabeling is completed.
+///
+/// If a relabeling step needs to store a label value only temporarily
+/// (as the input to a subsequent relabeling step), use the `__tmp` label name prefix.
+/// This prefix is guaranteed to never be used by Prometheus itself.
+///
+/// More details: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+class RelabelConfig {
+  /// The source labels select values from existing labels.
+  ///
+  /// Their content is concatenated using the configured separator
+  /// and matched against the configured regular expression for the replace, keep, and drop actions.
+  source_labels: Listing<String>?
+
+  /// Separator placed between concatenated source label values.
+  separator: String?
+
+  /// Action to perform based on regex matching.
+  action: RelabelAction?
+
+  /// Regular expression against which the extracted value is matched.
+  @SourceCode { language = "RegExp" }
+  regex: String(isRegex)?
+
+  /// Label to which the resulting value is written in a replace action.
+  ///
+  /// Mandatory for replace actions.
+  /// Regex capture groups are available.
+  target_label: String?
+
+  /// Modulus to take of the hash of the source label values.
+  modulus: Int?
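+
+  // Illustrative sketch (not part of this change set): `modulus` only takes
+  // effect together with the "hashmod" action, e.g. to shard targets across
+  // four scrapers:
+  //
+  //   relabel_configs {
+  //     new {
+  //       source_labels { "__address__" }
+  //       action = "hashmod"
+  //       modulus = 4
+  //       target_label = "__tmp_hash"
+  //     }
+  //   }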
+ + /// Replacement value against which a regex replace is performed if the regular expression matches. + /// + /// Regex capture groups are available. + replacement: String? +} + +/// Which relabel action to perform. +/// +/// * `"replace"`: Match `regex` against the concatenated `source_labels`. +/// Then, set `target_label` to `replacement`, with match group references +/// (`${1}`, `${2}`, ...) in `replacement` substituted by their value. +/// If `regex` does not match, no replacement takes place. +/// * `"keep"`: Drop targets for which `regex` does not match the concatenated `source_labels`. +/// * `"drop"`: Drop targets for which `regex` matches the concatenated `source_labels`. +/// * `"hashmod"`: Set `target_label` to the `modulus` of a hash of the concatenated `source_labels`. +/// * `"labelmap"`: Match `regex` against all label names. +/// Then copy the values of the matching labels to label names given by `replacement` +/// with match group references (`${1}`, `${2}`, ...) in `replacement` substituted by their value. +/// * `"labeldrop"`: Match `regex` against all label names. +/// Any label that matches will be removed from the set of labels. +/// * `"labelkeep"`: Match `regex` against all label names. +typealias RelabelAction = *"replace"|"keep"|"drop"|"hashmod"|"labelmap"|"labeldrop"|"labelkeep" + +/// Allows specifying a list of targets and a common label set for them. +/// +/// It is the canonical way to specify static targets in a scrape configuration. +/// +/// More details: +class StaticConfig { + /// The targets specified by the static config. + targets: Listing? + + /// Labels assigned to all metrics scraped from the targets. + labels: Labels? +} + +/// [write_relabel_configs] is relabeling applied to samples +/// before sending them to the remote endpoint. +/// +/// Write relabeling is applied after external labels. +/// This could be used to limit which samples are sent. +/// +/// There is a small demo (/documentation/examples/remote_storage) of how to use this functionality. +/// +/// More detail: +class RemoteWriteConfig { + /// The URL of the endpoint to send samples to. + url: String + + /// Timeout for requests to the remote write endpoint. + /// + /// Default if omitted: `30.s` + remote_timeout: Duration? + + /// Custom HTTP headers to be sent along with each remote write request. + /// + /// Be aware that headers that are set by Prometheus itself can't be overwritten. + headers: Labels? + + /// List of remote write relabel configurations. + write_relabel_configs: Listing? + + /// Name of the remote write config, which if specified must be unique among remote write configs. + /// + /// The name will be used in metrics and logging in place of a generated value + /// to help users distinguish between remote write configs. + name: String? + + /// Sets the `Authorization` header on every remote write request + /// with the configured username and password. + basic_auth: BasicAuth? + + /// Sets the `Authorization` header on every remote write request with the configured bearer token. + /// + /// Mutually exclusive with [bearer_token_file]. + bearer_token: String(bearer_token_file == null)? + + /// Sets the `Authorization` header on every remote write request + /// with the bearer token read from the configured file. + /// + /// Mutually exclusive with [bearer_token]. + bearer_token_file: String? + + /// Configures the remote write request's TLS settings. + tls_config: TLSConfig? + + /// Optional proxy URL. + proxy_url: String? 
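+
+  // Illustrative sketch (not part of this change set): `write_relabel_configs`
+  // can limit which samples are sent, e.g. dropping Go runtime metrics before
+  // they reach the remote endpoint:
+  //
+  //   write_relabel_configs {
+  //     new {
+  //       source_labels { "__name__" }
+  //       regex = "go_.*"
+  //       action = "drop"
+  //     }
+  //   }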
+ + /// Configures the queue used to write to remote storage. + queue_config: QueueConfig? + + /// Configures the sending of series metadata to remote storage. + /// + /// Metadata configuration is subject to change at any point or be removed in future releases. + metadata_config: MetadataConfig? + + /// Configures AWS's Signature Verification 4 signing process. + /// + /// Signature Verification signs requests. + /// To use the default credentials from the AWS SDK, use `sigv4 {}`. + sigv4: Sigv4Config((basic_auth ?? bearer_token) == null)? +} + +class MetadataConfig { + /// Whether metric metadata is sent to remote storage or not. + /// + /// Default if omitted: [true] + send: Boolean? + + /// How frequently metric metadata is sent to remote storage. + /// + /// Default if omitted: `1.min` + send_interval: Duration? +} + +class QueueConfig { + /// Number of samples to buffer per shard before we block reading of more samples from the WAL. + /// + /// It is recommended to have enough capacity in each shard to buffer several requests + /// to keep throughput up while processing occasional slow remote requests. + /// + /// Default if omitted: `2500` + capacity: Int? + + /// Maximum number of shards, i.e. amount of concurrency. + /// + /// Default if omitted: `200` + max_shards: Int? + + /// Minimum number of shards, i.e. amount of concurrency. + /// + /// Default if omitted: `1` + min_shards: Int? + + /// Maximum number of samples per send. + /// + /// Default if omitted: `500` + max_samples_per_send: Int? + + /// Maximum time a sample will wait in buffer. + /// + /// Default if omitted: `5.s` + batch_send_deadline: Duration? + + /// Initial retry delay. Gets doubled for every retry. + /// + /// Default if omitted: `30.ms` + min_backoff: Duration? + + /// Maximum retry delay. + /// + /// Default if omitted: `100.ms` + max_backoff: Duration? +} + +class AlertingConfig { + alertmanagers: Listing? +} + +/// Specifies Alertmanager instances the Prometheus server sends alerts to. +/// +/// Also provides parameters to configure how to communicate with these Alertmanagers. +/// +/// Alertmanagers may be statically configured via the [static_configs] parameter +/// or dynamically discovered using one of the supported service-discovery mechanisms. +/// +/// Additionally, [relabel_configs] allow selecting Alertmanagers +/// from discovered entities and provide advanced modifications to the used API path, +/// which is exposed through the `__alerts_path__` label. +/// +/// More details: +class AlertManagerConfig { + /// The api version of Alertmanager. + /// + /// Default if omitted: `"v1"` + api_version: String? + + /// Prefix for the HTTP path alerts are pushed to. + path_prefix: String? + + /// Configures the protocol scheme used for requests. + scheme: Scheme? + + /// Per-target Alertmanager timeout when pushing alerts. + /// + /// Default if omitted: `10.s` + timeout: Duration? + + /// Sets the `Authorization` header on every request with the configured username and password. + basic_auth: BasicAuth? + + /// Sets the `Authorization` header on every request with the configured bearer token. + /// + /// Mutually exclusive with [bearer_token_file]. + bearer_token: String(bearer_token_file == null)? + + /// Sets the `Authorization` header on every request with the bearer token read from the configured file. + /// + /// Mutually exclusive with [bearer_token]. + bearer_token_file: String? + + /// Configures the scrape request's TLS settings. + tls_config: TLSConfig? + + /// Optional proxy URL. 
+ proxy_url: String? + + /// List of Kubernetes service discovery configurations. + kubernetes_sd_configs: Listing? + + /// List of labeled statically configured Alertmanagers. + static_configs: Listing? + + /// List of Alertmanager relabel configurations. + relabel_configs: Listing? +} + +/// Allows configuring TLS connections. +class TLSConfig { + /// CA certificate to validate API server certificate with. + ca_file: String? + + /// Certificate file for client cert authentication to the server. + cert_file: String? + + /// Key file for client cert authentication to the server. + key_file: String? + + /// ServerName extension to indicate the name of the server. + /// + /// + server_name: String? + + /// Disable validation of the server certificate. + insecure_skip_verify: Boolean? +} + +class RemoteReadConfig { + /// The URL of the endpoint to query from. + url: String + + /// Name of the remote read config, which if specified must be unique among remote read configs. + /// + /// The name will be used in metrics and logging in place of a generated value + /// to help users distinguish between remote read configs. + name: String? + + /// An optional list of equality matchers which have to be present + /// in a selector to query the remote read endpoint. + required_matchers: Labels? + + /// Timeout for requests to the remote read endpoint. + /// + /// Default if omitted: `1.min` + remote_timeout: Duration? + + /// Whether reads should be made for queries for time ranges + /// that the local storage should have complete data for. + read_recent: Boolean? + + /// Sets the `Authorization` header on every remote read request + /// with the configured username and password. + basic_auth: BasicAuth? + + /// Sets the `Authorization` header on every remote read request + /// witb the configured bearer token. + /// + /// Mutually exclusive with [bearer_token_file]. + bearer_token: String(bearer_token_file == null)? + + /// Sets the `Authorization` header on every remote read request + /// with the bearer token read from the configured file. + /// + /// Mutually exclusive with [bearer_token]. + bearer_token_file: String? + + /// Configures the remote read request's TLS settings. + tls_config: TLSConfig? + + /// Optional proxy URL. + proxy_url: String? +} + +class BasicAuth { + username: String? + + /// Mutually exclusive with [password_file]. + password: String(password_file == null)? + + /// Mutually exclusive with [password]. + password_file: String? +} + +class Sigv4Config { + /// The AWS region (if blank, the region from the default credentials chain is used). + region: String? + + /// The AWS API keys. + access_key: String? + secret_key: String? + + /// AWS profile used to authenticate. + profile: String? + + /// AWS Role ARN, an alternative to using AWS API Keys. + role_arn: String? +} diff --git a/packages/io.prometheus/PklProject b/packages/io.prometheus/PklProject new file mode 100644 index 0000000..dc0fa4d --- /dev/null +++ b/packages/io.prometheus/PklProject @@ -0,0 +1,22 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Templates for configuring [Prometheus](https://prometheus.io), +/// an open-source systems monitoring and alerting toolkit. +amends "../basePklProject.pkl" + +package { + version = "1.0.0" +} diff --git a/packages/io.prometheus/PklProject.deps.json b/packages/io.prometheus/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/io.prometheus/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/io.prometheus/PrometheusObject.pkl b/packages/io.prometheus/PrometheusObject.pkl new file mode 100644 index 0000000..09b89cf --- /dev/null +++ b/packages/io.prometheus/PrometheusObject.pkl @@ -0,0 +1,40 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Base module for Prometheus configuration objects +@ModuleInfo { minPklVersion = "0.25.0" } +abstract module io.prometheus.PrometheusObject + +typealias MetricNameFormat = String(matches(Regex(#"[a-zA-Z_:][a-zA-Z0-9_:]*"#))) + +typealias LabelNameFormat = String(matches(Regex(#"[a-zA-Z_][a-zA-Z0-9_]*"#))) + +typealias Labels = Mapping + +typealias Scheme = *"http"|"https" + +local function convertDuration(dur: Duration) = + if (dur.unit == "min") dur.value.toString() + "m" + else if (dur.unit == "ns") convertDuration(dur.toUnit("ms")) + else if (dur.unit == "us") convertDuration(dur.toUnit("ms")) + else dur.value.toString() + dur.unit + +output { + renderer = new YamlRenderer { + converters { + [Duration] = (dur: Duration) -> convertDuration(dur) + } + } +} diff --git a/packages/io.prometheus/Rule.pkl b/packages/io.prometheus/Rule.pkl new file mode 100644 index 0000000..08b5dfd --- /dev/null +++ b/packages/io.prometheus/Rule.pkl @@ -0,0 +1,118 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Prometheus supports two types of rules which may be configured +/// and then evaluated at regular intervals: recording rules and alerting rules. +/// +/// To include rules in Prometheus, create a file containing the necessary rule statements +/// and have Prometheus load the file via [Configuration.rule_files]. +/// +/// The rule files can be reloaded at runtime by sending `SIGHUP` to the Prometheus process. +/// The changes are only applied if all rule files are well-formatted. +/// +/// More details: +@ModuleInfo { minPklVersion = "0.25.0" } +open module io.prometheus.Rule + +extends "PrometheusObject.pkl" + +import "Configuration.pkl" + +groups: Listing? + +typealias RuleGroup = *AlertingRuleGroup|RecordingRuleGroup + +class RecordingRuleGroup { + /// The name of the group. Must be unique within a file. + name: MetricNameFormat + + /// How often rules in the group are evaluated. + interval: Duration? + + rules: Listing +} + +class AlertingRuleGroup { + /// The name of the group. Must be unique within a file. + name: LabelNameFormat + + /// How often rules in the group are evaluated. + interval: Duration? + + rules: Listing +} + +/// Alerting rules allow you to define alert conditions +/// based on Prometheus expression language expressions +/// and to send notifications about firing alerts to an external service. +/// +/// Whenever the alert expression results in one or more vector elements at a given point in time, +/// the alert counts as active for these elements' label sets. +/// +/// More details: +class AlertingRule { + /// The name of the alert. Must be a valid label value. + alert: String + + /// The PromQL expression to evaluate. + /// + /// Every evaluation cycle this is evaluated at the current time, + /// and all resultant time series become pending/firing alerts. + expr: String + + /// Alerts are considered firing once they have been returned for this long. + /// + /// Alerts which have not yet fired for long enough are considered pending. + /// + /// Default if omitted: `0.s` + `for`: Duration? + + /// Labels to add or overwrite for each alert. + labels: Labels? + + /// Annotations to add to each alert. + annotations: Mapping? +} + +/// Recording rules allow you to precompute frequently needed or computationally +/// expensive expressions and save their result as a new set of time series. +/// +/// Querying the precomputed result will then often be much faster +/// than executing the original expression every time it is needed. +/// This is especially useful for dashboards, +/// which need to query the same expression repeatedly every time they refresh. +/// +/// Recording and alerting rules exist in a rule group. +/// Rules within a group are run sequentially at a regular interval, +/// with the same evaluation time. +/// The names of recording rules must be +/// [valid metric names](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). 
+/// The names of alerting rules must be +/// [valid label values](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). +/// +/// More details: +class RecordingRule { + /// The name of the time series to output to. Must be a valid metric name. + `record`: String + + /// The PromQL expression to evaluate. + /// + /// Every evaluation cycle this is evaluated at the current time, + /// and the result recorded as a new set of time series with the metric name as given by 'record'. + expr: String + + /// Labels to add or overwrite before storing the result. + labels: Labels? +} diff --git a/packages/io.prometheus/examples/configuration.pkl b/packages/io.prometheus/examples/configuration.pkl new file mode 100644 index 0000000..e9cf107 --- /dev/null +++ b/packages/io.prometheus/examples/configuration.pkl @@ -0,0 +1,47 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module io.prometheus.examples.configuration + +amends "../Configuration.pkl" + +global { + scrape_timeout = 10.s +} + +scrape_configs { + new { + job_name = "my-job" + kubernetes_sd_configs { + new { + role = "pod" + } + } + relabel_configs { + new { + regex = ".*?" + } + } + } +} + +remote_write { + new { + url = "https://example.com/remote_write" + sigv4 { + region = "us-west-2" + } + } +} diff --git a/packages/io.prometheus/examples/rule.pkl b/packages/io.prometheus/examples/rule.pkl new file mode 100644 index 0000000..8ebab92 --- /dev/null +++ b/packages/io.prometheus/examples/rule.pkl @@ -0,0 +1,50 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module io.prometheus.examples.rule + +amends "../Rule.pkl" + +groups { + new AlertingRuleGroup { + name = "alerting_rules" + interval = 5.min + rules { + new { + alert = "HighRequestLatency" + expr = #"job:request_latency_seconds:mean5m{job="myjob"} > 0.5"# + `for` = 10.min + labels { + ["prod"] = true + ["priority"] = 1 + ["severity"] = "page" + } + annotations { + ["summary"] = "High request latency" + } + } + } + } + new RecordingRuleGroup { + name = "recording_rules" + interval = 10.h + rules { + new { + `record` = "job:http_inprogress_requests:sum" + expr = "sum by (job) (http_inprogress_requests)" + } + } + } +} diff --git a/packages/io.prometheus/tests/configuration.pkl b/packages/io.prometheus/tests/configuration.pkl new file mode 100644 index 0000000..d9c16de --- /dev/null +++ b/packages/io.prometheus/tests/configuration.pkl @@ -0,0 +1,24 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module io.prometheus.tests.configuration + +amends "pkl:test" + +examples { + ["basic configuration"] { + import("../examples/configuration.pkl").output.text + } +} diff --git a/packages/io.prometheus/tests/configuration.pkl-expected.pcf b/packages/io.prometheus/tests/configuration.pkl-expected.pcf new file mode 100644 index 0000000..6d3cbbb --- /dev/null +++ b/packages/io.prometheus/tests/configuration.pkl-expected.pcf @@ -0,0 +1,19 @@ +examples { + ["basic configuration"] { + """ + global: + scrape_timeout: 10s + scrape_configs: + - job_name: my-job + kubernetes_sd_configs: + - role: pod + relabel_configs: + - regex: .*? + remote_write: + - url: https://example.com/remote_write + sigv4: + region: us-west-2 + + """ + } +} diff --git a/packages/io.prometheus/tests/rule.pkl b/packages/io.prometheus/tests/rule.pkl new file mode 100644 index 0000000..7561a29 --- /dev/null +++ b/packages/io.prometheus/tests/rule.pkl @@ -0,0 +1,24 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module io.prometheus.tests.rule + +amends "pkl:test" + +examples { + ["basic rule"] { + import("../examples/rule.pkl").output.text + } +} diff --git a/packages/io.prometheus/tests/rule.pkl-expected.pcf b/packages/io.prometheus/tests/rule.pkl-expected.pcf new file mode 100644 index 0000000..3b246cb --- /dev/null +++ b/packages/io.prometheus/tests/rule.pkl-expected.pcf @@ -0,0 +1,25 @@ +examples { + ["basic rule"] { + """ + groups: + - name: alerting_rules + interval: 5m + rules: + - alert: HighRequestLatency + expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 + for: 10m + labels: + prod: true + priority: 1 + severity: page + annotations: + summary: High request latency + - name: recording_rules + interval: 10h + rules: + - record: job:http_inprogress_requests:sum + expr: sum by (job) (http_inprogress_requests) + + """ + } +} diff --git a/packages/org.apache.spark/PklProject b/packages/org.apache.spark/PklProject new file mode 100644 index 0000000..05f464d --- /dev/null +++ b/packages/org.apache.spark/PklProject @@ -0,0 +1,32 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Definitions for [Apache Spark](https://spark.apache.org). +/// +/// Apache Spark is a distributed computing framework used for processing and analyzing large datasets. +/// It can be applied to data that is structured (like database tables) or unstructured (like log files). +/// Data can be processed one-off or on a schedule, or continuously as a stream of items from a queue. +amends "../basePklProject.pkl" + +package { + version = "1.0.1" + description = """ + Definitions for [Apache Spark](https://spark.apache.org). + + Apache Spark is a distributed computing framework used for processing and analyzing large datasets. + It can be applied to data that is structured (like database tables) or unstructured (like log files). + Data can be processed one-off or on a schedule, or continuously as a stream of items from a queue. + """ +} diff --git a/packages/org.apache.spark/PklProject.deps.json b/packages/org.apache.spark/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/org.apache.spark/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/org.apache.spark/Properties.pkl b/packages/org.apache.spark/Properties.pkl new file mode 100644 index 0000000..c7dea85 --- /dev/null +++ b/packages/org.apache.spark/Properties.pkl @@ -0,0 +1,2370 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +open module org.apache.spark.Properties + +extends "PropertiesBase.pkl" + +/// The number of executors to use. +/// +/// Default: (undefined) +`spark.executor.instances`: UInt? + +/// The name of your application. +/// +/// This will appear in the UI and in log data. +/// +/// Default: `null` +@Reserved +@Since { version = "0.9.0" } +`spark.app.name`: Null? + +/// Number of cores to use for the driver process, only in cluster mode. +/// +/// Default: `1` +@Since { version = "1.3.0" } +`spark.driver.cores`: Int? + +/// Limit of total size of serialized results of all partitions for each Spark action (e.g. +/// +/// collect) in bytes. +/// Should be at least 1M, or 0 for unlimited. +/// Jobs will be aborted if the total size is above this limit. +/// Having a high limit may cause out-of-memory errors in driver (depends on spark.driver.memory and memory overhead of objects in JVM). +/// Setting a proper limit can protect the driver from out-of-memory errors. +/// +/// Default: `1.gib` +@Since { version = "1.2.0" } +`spark.driver.maxResultSize`: DataSize? + +/// Amount of memory to use for the driver process, i.e. +/// +/// where SparkContext is initialized, in the same format as JVM memory strings with a size unit suffix ("k", "m", "g" or "t") (e.g. `512m`, `2g`). +/// *Note:* In client mode, this config must not be set through the `SparkConf` directly in your application, because the driver JVM has already started at that point. +/// Instead, please set this through the `--driver-memory` command line option or in your default properties file. +/// +/// Default: `1.gib` +@Since { version = "1.1.1" } +`spark.driver.memory`: DataSize? + +/// Amount of non-heap memory to be allocated per driver process in cluster mode, in MiB unless otherwise specified. +/// +/// This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. +/// This tends to grow with the container size (typically 6-10%). +/// This option is currently supported on YARN, Mesos and Kubernetes. +/// *Note:* Non-heap memory includes off-heap memory (when `spark.memory.offHeap.enabled=true`) and memory used by other driver processes (e.g. python process that goes with a PySpark driver) and memory used by other non-driver processes running in the same container. +/// The maximum memory size of container to running driver is determined by the sum of `spark.driver.memoryOverhead` and `spark.driver.memory`. +/// +/// Default: driverMemory * spark.driver.memoryOverheadFactor, with minimum of 384 +@Since { version = "2.3.0" } +`spark.driver.memoryOverhead`: DataSize? + +/// Fraction of driver memory to be allocated as additional non-heap memory per driver process in cluster mode. +/// +/// This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. +/// This tends to grow with the container size. 
+/// This value defaults to 0.10 except for Kubernetes non-JVM jobs, which defaults to 0.40. This is done as non-JVM tasks need more non-JVM heap space and such tasks commonly fail with "Memory Overhead Exceeded" errors. +/// This preempts this error with a higher default. +/// This value is ignored if `spark.driver.memoryOverhead` is set directly. +/// +/// Default: `0.1` +@Since { version = "3.3.0" } +`spark.driver.memoryOverheadFactor`: DataSize? + +/// Amount of a particular resource type to use on the driver. +/// +/// If this is used, you must also specify the `spark.driver.resource.{resourceName}.discoveryScript` for the driver to find the resource on startup. +/// +/// Default: `0` +@Since { version = "3.0.0" } +`spark.driver.resource.{resourceName}.amount`: Mapping? + +/// A script for the driver to run to discover a particular resource type. +/// +/// This should write to STDOUT a JSON string in the format of the ResourceInformation class. +/// This has a name and an array of addresses. +/// For a client-submitted driver, discovery script must assign different resource addresses to this driver comparing to other drivers on the same host. +/// +/// Default: `null` +@Since { version = "3.0.0" } +`spark.driver.resource.{resourceName}.discoveryScript`: Mapping? + +/// Vendor of the resources to use for the driver. +/// +/// This option is currently only supported on Kubernetes and is actually both the vendor and domain following the Kubernetes device plugin naming convention. +/// (e.g. For GPUs on Kubernetes this config would be set to nvidia.com or amd.com) +/// +/// Default: `null` +@Since { version = "3.0.0" } +`spark.driver.resource.{resourceName}.vendor`: Mapping? + +/// Comma-separated list of class names implementing org.apache.spark.api.resource.ResourceDiscoveryPlugin to load into the application. +/// +/// This is for advanced users to replace the resource discovery class with a custom implementation. +/// Spark will try each class specified until one of them returns the resource information for that resource. +/// It tries the discovery script last if none of the plugins return information for that resource. +/// +/// Default: `"org.apache.spark.resource.ResourceDiscoveryScriptPlugin"` +@Since { version = "3.0.0" } +`spark.resources.discoveryPlugin`: String? + +/// Amount of memory to use per executor process, in the same format as JVM memory strings with a size unit suffix ("k", "m", "g" or "t") (e.g. +/// +/// `512m`, `2g`). +/// +/// Default: `1.gib` +@Since { version = "0.7.0" } +`spark.executor.memory`: DataSize? + +/// The amount of memory to be allocated to PySpark in each executor, in MiB unless otherwise specified. +/// +/// If set, PySpark memory for an executor will be limited to this amount. +/// If not set, Spark will not limit Python's memory use and it is up to the application to avoid exceeding the overhead memory space shared with other non-JVM processes. +/// When PySpark is run in YARN or Kubernetes, this memory is added to executor resource requests. +/// *Note:* This feature is dependent on Python's \`resource\` module; therefore, the behaviors and limitations are inherited. +/// For instance, Windows does not support resource limiting and actual resource is not limited on MacOS. +/// +/// Default: `null` +@Since { version = "2.4.0" } +`spark.executor.pyspark.memory`: DataSize? + +/// Amount of additional memory to be allocated per executor process, in MiB unless otherwise specified. 
+/// +/// This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. +/// This tends to grow with the executor size (typically 6-10%). +/// This option is currently supported on YARN and Kubernetes. +/// *Note:* Additional memory includes PySpark executor memory (when `spark.executor.pyspark.memory` is not configured) and memory used by other non-executor processes running in the same container. +/// The maximum memory size of container to running executor is determined by the sum of `spark.executor.memoryOverhead`, `spark.executor.memory`, `spark.memory.offHeap.size` and `spark.executor.pyspark.memory`. +/// +/// Default: executorMemory * spark.executor.memoryOverheadFactor, with minimum of 384 +@Since { version = "2.3.0" } +`spark.executor.memoryOverhead`: DataSize? + +/// Fraction of executor memory to be allocated as additional non-heap memory per executor process. +/// +/// This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. +/// This tends to grow with the container size. +/// This value defaults to 0.10 except for Kubernetes non-JVM jobs, which defaults to 0.40. This is done as non-JVM tasks need more non-JVM heap space and such tasks commonly fail with "Memory Overhead Exceeded" errors. +/// This preempts this error with a higher default. +/// This value is ignored if `spark.executor.memoryOverhead` is set directly. +/// +/// Default: `0.1` +@Since { version = "3.3.0" } +`spark.executor.memoryOverheadFactor`: DataSize? + +/// Amount of a particular resource type to use per executor process. +/// +/// If this is used, you must also specify the `spark.executor.resource.{resourceName}.discoveryScript` for the executor to find the resource on startup. +/// +/// Default: `0` +@Since { version = "3.0.0" } +`spark.executor.resource.{resourceName}.amount`: Mapping? + +/// A script for the executor to run to discover a particular resource type. +/// +/// This should write to STDOUT a JSON string in the format of the ResourceInformation class. +/// This has a name and an array of addresses. +/// +/// Default: `null` +@Since { version = "3.0.0" } +`spark.executor.resource.{resourceName}.discoveryScript`: Mapping? + +/// Vendor of the resources to use for the executors. +/// +/// This option is currently only supported on Kubernetes and is actually both the vendor and domain following the Kubernetes device plugin naming convention. +/// (e.g. For GPUs on Kubernetes this config would be set to nvidia.com or amd.com) +/// +/// Default: `null` +@Since { version = "3.0.0" } +`spark.executor.resource.{resourceName}.vendor`: Mapping? + +/// A comma-separated list of classes that implement `SparkListener`; when initializing SparkContext, instances of these classes will be created and registered with Spark's listener bus. +/// +/// If a class has a single-argument constructor that accepts a SparkConf, that constructor will be called; otherwise, a zero-argument constructor will be called. +/// If no valid constructor can be found, the SparkContext creation will fail with an exception. +/// +/// Default: `null` +@Since { version = "1.3.0" } +`spark.extraListeners`: String? + +/// Directory to use for "scratch" space in Spark, including map output files and RDDs that get stored on disk. +/// +/// This should be on a fast, local disk in your system. +/// It can also be a comma-separated list of multiple directories on different disks. +/// +///
+/// Note: This will be overridden by SPARK_LOCAL_DIRS (Standalone), MESOS_SANDBOX (Mesos) or +/// LOCAL_DIRS (YARN) environment variables set by the cluster manager. +/// +/// Default: `"/tmp"` +@Since { version = "0.5.0" } +`spark.local.dir`: String? + +/// Logs the effective SparkConf as INFO when a SparkContext is started. +/// +/// Default: `false` +@Since { version = "0.9.0" } +`spark.logConf`: Boolean? + +/// The cluster manager to connect to. +/// +/// See the list of [allowed master URL's](https://spark.apache.org/docs/latest/submitting-applications.html#master-urls). +/// +/// Default: `null` +@Since { version = "0.9.0" } +`spark.master`: String? + +/// The deploy mode of Spark driver program, either "client" or "cluster", Which means to launch driver program locally ("client") or remotely ("cluster") on one of the nodes inside the cluster. +/// +/// Default: `null` +@Since { version = "1.5.0" } +`spark.submit.deployMode`: String? + +/// Application information that will be written into Yarn RM log/HDFS audit log when running on Yarn/HDFS. +/// +/// Its length depends on the Hadoop configuration `hadoop.caller.context.max.size`. +/// It should be concise, and typically can have up to 50 characters. +/// +/// Default: `null` +@Since { version = "2.2.0" } +`spark.log.callerContext`: String? + +/// If true, restarts the driver automatically if it fails with a non-zero exit status. +/// +/// Only has effect in Spark standalone mode or Mesos cluster deploy mode. +/// +/// Default: `false` +@Since { version = "1.3.0" } +`spark.driver.supervise`: Boolean? + +/// Base directory in which Spark driver logs are synced, if `spark.driver.log.persistToDfs.enabled` is true. +/// +/// Within this base directory, each application logs the driver logs to an application specific file. +/// Users may want to set this to a unified location like an HDFS directory so driver log files can be persisted for later usage. +/// This directory should allow any Spark user to read/write files and the Spark History Server user to delete files. +/// Additionally, older logs from this directory are cleaned by the [Spark History Server](https://spark.apache.org/docs/latest/monitoring.html#spark-history-server-configuration-options) if `spark.history.fs.driverlog.cleaner.enabled` is true and, if they are older than max age configured by setting `spark.history.fs.driverlog.cleaner.maxAge`. +/// +/// Default: `null` +@Since { version = "3.0.0" } +`spark.driver.log.dfsDir`: String? + +/// If true, spark application running in client mode will write driver logs to a persistent storage, configured in `spark.driver.log.dfsDir`. +/// +/// If `spark.driver.log.dfsDir` is not configured, driver logs will not be persisted. +/// Additionally, enable the cleaner by setting `spark.history.fs.driverlog.cleaner.enabled` to true in [Spark History Server](https://spark.apache.org/docs/latest/monitoring.html#spark-history-server-configuration-options). +/// +/// Default: `false` +@Since { version = "3.0.0" } +`spark.driver.log.persistToDfs.enabled`: Boolean? + +/// The layout for the driver logs that are synced to `spark.driver.log.dfsDir`. +/// +/// If this is not configured, it uses the layout for the first appender defined in log4j2.properties. +/// If that is also not configured, driver logs use the default layout. +/// +/// Default: %d{yy/MM/dd HH:mm:ss.SSS} %t %p %c{1}: %m%n%ex +@Since { version = "3.0.0" } +`spark.driver.log.layout`: String? + +/// Whether to allow driver logs to use erasure coding. 
+///
+/// On HDFS, erasure coded files will not update as quickly as regular replicated files, so they may take longer to reflect changes written by the application.
+/// Note that even if this is true, Spark will still not force the file to use erasure coding; it will simply use file system defaults.
+///
+/// Default: `false`
+@Since { version = "3.0.0" }
+`spark.driver.log.allowErasureCoding`: Boolean?
+
+/// Extra classpath entries to prepend to the classpath of the driver.
+///
+///
+/// Note: In client mode, this config must not be set through the SparkConf
+/// directly in your application, because the driver JVM has already started at that point.
+/// Instead, please set this through the --driver-class-path command line option or in
+/// your default properties file.
+///
+/// Default: `null`
+@Since { version = "1.0.0" }
+`spark.driver.extraClassPath`: String?
+
+/// A string of default JVM options to prepend to `spark.driver.extraJavaOptions`.
+///
+/// This is intended to be set by administrators.
+///
+/// For instance, GC settings or other logging.
+/// Note that it is illegal to set maximum heap size (-Xmx) settings with this option.
+/// Maximum heap
+/// size settings can be set with spark.driver.memory in the cluster mode and through
+/// the --driver-memory command line option in the client mode.
+///
+/// Note: In client mode, this config must not be set through the SparkConf
+/// directly in your application, because the driver JVM has already started at that point.
+/// Instead, please set this through the --driver-java-options command line option or in
+/// your default properties file.
+///
+/// Default: `null`
+@Since { version = "3.0.0" }
+`spark.driver.defaultJavaOptions`: String?
+
+/// A string of extra JVM options to pass to the driver.
+///
+/// This is intended to be set by users.
+///
+/// For instance, GC settings or other logging.
+/// Note that it is illegal to set maximum heap size (-Xmx) settings with this option.
+/// Maximum heap
+/// size settings can be set with spark.driver.memory in the cluster mode and through
+/// the --driver-memory command line option in the client mode.
+///
+/// Note: In client mode, this config must not be set through the SparkConf
+/// directly in your application, because the driver JVM has already started at that point.
+/// Instead, please set this through the --driver-java-options command line option or in
+/// your default properties file.
+///
+/// spark.driver.defaultJavaOptions will be prepended to this configuration.
+///
+/// Default: `null`
+@Since { version = "1.0.0" }
+`spark.driver.extraJavaOptions`: String?
+
+/// Set a special library path to use when launching the driver JVM.
+///
+///
Note: In client mode, this config must not be set through the SparkConf +/// directly in your application, because the driver JVM has already started at that point. +/// Instead, please set this through the --driver-library-path command line option or in +/// your default properties file. +/// +/// Default: `null` +@Since { version = "1.0.0" } +`spark.driver.extraLibraryPath`: String? + +/// (Experimental) Whether to give user-added jars precedence over Spark's own jars when loading classes in the driver. +/// +/// This feature can be used to mitigate conflicts between Spark's dependencies and user dependencies. +/// It is currently an experimental feature. +/// +/// This is used in cluster mode only. +/// +/// Default: `false` +@Since { version = "1.3.0" } +`spark.driver.userClassPathFirst`: Boolean? + +/// Extra classpath entries to prepend to the classpath of executors. +/// +/// This exists primarily for backwards-compatibility with older versions of Spark. +/// Users typically should not need to set this option. +/// +/// Default: `null` +@Since { version = "1.0.0" } +`spark.executor.extraClassPath`: String? + +/// A string of default JVM options to prepend to `spark.executor.extraJavaOptions`. +/// +/// This is intended to be set by administrators. +/// +/// For instance, GC settings or other logging. +/// Note that it is illegal to set Spark properties or maximum heap size (-Xmx) settings with this +/// option. +/// Spark properties should be set using a SparkConf object or the spark-defaults.conf file +/// used with the spark-submit script. +/// Maximum heap size settings can be set with spark.executor.memory. +/// +/// The following symbols, if present will be interpolated: {{APP_ID}} will be replaced by +/// application ID and {{EXECUTOR_ID}} will be replaced by executor ID. For example, to enable +/// verbose gc logging to a file named for the executor ID of the app in /tmp, pass a 'value' of: +/// -verbose:gc -Xloggc:/tmp/{{APP_ID}}-{{EXECUTOR_ID}}.gc +/// +/// Default: `null` +@Since { version = "3.0.0" } +`spark.executor.defaultJavaOptions`: String? + +/// A string of extra JVM options to pass to executors. +/// +/// This is intended to be set by users. +/// +/// For instance, GC settings or other logging. +/// Note that it is illegal to set Spark properties or maximum heap size (-Xmx) settings with this +/// option. +/// Spark properties should be set using a SparkConf object or the spark-defaults.conf file +/// used with the spark-submit script. +/// Maximum heap size settings can be set with spark.executor.memory. +/// +/// The following symbols, if present will be interpolated: {{APP_ID}} will be replaced by +/// application ID and {{EXECUTOR_ID}} will be replaced by executor ID. For example, to enable +/// verbose gc logging to a file named for the executor ID of the app in /tmp, pass a 'value' of: +/// -verbose:gc -Xloggc:/tmp/{{APP_ID}}-{{EXECUTOR_ID}}.gc +/// +/// spark.executor.defaultJavaOptions will be prepended to this configuration. +/// +/// Default: `null` +@Since { version = "1.0.0" } +`spark.executor.extraJavaOptions`: String? + +/// Set a special library path to use when launching executor JVM's. +/// +/// Default: `null` +@Since { version = "1.0.0" } +`spark.executor.extraLibraryPath`: String? + +/// Sets the number of latest rolling log files that are going to be retained by the system. +/// +/// Older log files will be deleted. +/// Disabled by default. 
+/// +/// Default: `null` +@Since { version = "1.1.0" } +`spark.executor.logs.rolling.maxRetainedFiles`: String? + +/// Enable executor log compression. +/// +/// If it is enabled, the rolled executor logs will be compressed. +/// Disabled by default. +/// +/// Default: `false` +@Since { version = "2.0.2" } +`spark.executor.logs.rolling.enableCompression`: Boolean? + +/// Set the max size of the file in bytes by which the executor logs will be rolled over. +/// +/// Rolling is disabled by default. +/// See `spark.executor.logs.rolling.maxRetainedFiles` for automatic cleaning of old logs. +/// +/// Default: `null` +@Since { version = "1.4.0" } +`spark.executor.logs.rolling.maxSize`: String? + +/// Set the strategy of rolling of executor logs. +/// +/// By default it is disabled. +/// It can be set to "time" (time-based rolling) or "size" (size-based rolling). +/// For "time", use `spark.executor.logs.rolling.time.interval` to set the rolling interval. +/// For "size", use `spark.executor.logs.rolling.maxSize` to set the maximum file size for rolling. +/// +/// Default: `null` +@Since { version = "1.1.0" } +`spark.executor.logs.rolling.strategy`: UInt? + +/// Set the time interval by which the executor logs will be rolled over. +/// +/// Rolling is disabled by default. +/// Valid values are `daily`, `hourly`, `minutely` or any interval in seconds. +/// See `spark.executor.logs.rolling.maxRetainedFiles` for automatic cleaning of old logs. +/// +/// Default: `"daily"` +@Since { version = "1.1.0" } +`spark.executor.logs.rolling.time.interval`: String? + +/// (Experimental) Same functionality as `spark.driver.userClassPathFirst`, but applied to executor instances. +/// +/// Default: `false` +@Since { version = "1.3.0" } +`spark.executor.userClassPathFirst`: Boolean? + +/// Add the environment variable specified by `EnvironmentVariableName` to the Executor process. +/// +/// The user can specify multiple of these to set multiple environment variables. +/// +/// Default: `null` +@Since { version = "0.9.0" } +`spark.executorEnv.[EnvironmentVariableName]`: Mapping? + +/// Regex to decide which Spark configuration properties and environment variables in driver and executor environments contain sensitive information. +/// +/// When this regex matches a property key or value, the value is redacted from the environment UI and various logs like YARN and event logs. +/// +/// Default: `"(?i)secret|password|token"` +@Since { version = "2.1.2" } +`spark.redaction.regex`: String? + +/// Enable profiling in Python worker, the profile result will show up by `sc.show_profiles()`, or it will be displayed before the driver exits. +/// +/// It also can be dumped into disk by `sc.dump_profiles(path)`. +/// If some of the profile results had been displayed manually, they will not be displayed automatically before driver exiting. +/// +/// By default the pyspark.profiler.BasicProfiler will be used, but this can be overridden by +/// passing a profiler class in as a parameter to the SparkContext constructor. +/// +/// Default: `false` +@Since { version = "1.2.0" } +`spark.python.profile`: Boolean? + +/// The directory which is used to dump the profile result before driver exiting. +/// +/// The results will be dumped as separated file for each RDD. +/// They can be loaded by `pstats.Stats()`. If this is specified, the profile result will not be displayed automatically. +/// +/// Default: `null` +@Since { version = "1.2.0" } +`spark.python.profile.dump`: String? 
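The properties above are declared with Pkl's typed literals (`DataSize`, `Duration`, `Boolean`) rather than the raw strings Spark ultimately consumes. As a rough illustration of how a downstream config might use them, here is a minimal sketch that amends this module directly; the file name and the relative `amends` path are assumptions (a real consumer would depend on the published `org.apache.spark` package), and how the values are rendered into Spark's string format is determined by `PropertiesBase.pkl`, which is not part of this diff.

```pkl
// my-spark-job.pkl (hypothetical) -- amends the Properties module defined above.
amends "Properties.pkl"

`spark.executor.instances` = 4
`spark.driver.cores` = 2
`spark.driver.memory` = 2.gib          // DataSize literal
`spark.driver.maxResultSize` = 1.gib
`spark.executor.memory` = 4.gib
`spark.executor.logs.rolling.enableCompression` = true
```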
+ +/// Amount of memory to use per python worker process during aggregation, in the same format as JVM memory strings with a size unit suffix ("k", "m", "g" or "t") (e.g. +/// +/// `512m`, `2g`). +/// If the memory used during aggregation goes above this amount, it will spill the data into disks. +/// +/// Default: `512.mib` +@Since { version = "1.1.0" } +`spark.python.worker.memory`: DataSize? + +/// Reuse Python worker or not. +/// +/// If yes, it will use a fixed number of Python workers, does not need to fork() a Python process for every task. +/// It will be very useful if there is a large broadcast, then the broadcast will not need to be transferred from JVM to Python worker for every task. +/// +/// Default: `true` +@Since { version = "1.2.0" } +`spark.python.worker.reuse`: Boolean? + +/// Comma-separated list of files to be placed in the working directory of each executor. +/// +/// Globs are allowed. +/// +/// Default: `null` +@Since { version = "1.0.0" } +`spark.files`: String? + +/// Comma-separated list of .zip, .egg, or .py files to place on the PYTHONPATH for Python apps. +/// +/// Globs are allowed. +/// +/// Default: `null` +@Since { version = "1.0.1" } +`spark.submit.pyFiles`: String? + +/// Comma-separated list of jars to include on the driver and executor classpaths. +/// +/// Globs are allowed. +/// +/// Default: `null` +@Since { version = "0.9.0" } +`spark.jars`: String? + +/// Comma-separated list of Maven coordinates of jars to include on the driver and executor classpaths. +/// +/// The coordinates should be groupId:artifactId:version. +/// If `spark.jars.ivySettings` is given artifacts will be resolved according to the configuration in the file, otherwise artifacts will be searched for in the local maven repo, then maven central and finally any additional remote repositories given by the command-line option `--repositories`. +/// For more details, see [Advanced Dependency Management](https://spark.apache.org/docs/latest/submitting-applications.html#advanced-dependency-management). +/// +/// Default: `null` +@Since { version = "1.5.0" } +`spark.jars.packages`: String? + +/// Comma-separated list of groupId:artifactId, to exclude while resolving the dependencies provided in `spark.jars.packages` to avoid dependency conflicts. +/// +/// Default: `null` +@Since { version = "1.5.0" } +`spark.jars.excludes`: String? + +/// Path to specify the Ivy user directory, used for the local Ivy cache and package files from `spark.jars.packages`. +/// +/// This will override the Ivy property `ivy.default.ivy.user.dir` which defaults to \~/.ivy2. +/// +/// Default: `null` +@Since { version = "1.3.0" } +`spark.jars.ivy`: String? + +/// Path to an Ivy settings file to customize resolution of jars specified using `spark.jars.packages` instead of the built-in defaults, such as maven central. +/// +/// Additional repositories given by the command-line option `--repositories` or `spark.jars.repositories` will also be included. +/// Useful for allowing Spark to resolve artifacts from behind a firewall e.g. via an in-house artifact server like Artifactory. +/// Details on the settings file format can be found at [Settings Files](http://ant.apache.org/ivy/history/latest-milestone/settings.html). +/// Only paths with `file://` scheme are supported. +/// Paths without a scheme are assumed to have a `file://` scheme. +/// +/// +///
+/// +/// When running in YARN cluster mode, this file will also be localized to the remote driver for dependency resolution within `SparkContext#addJar` +/// +/// Default: `null` +@Since { version = "2.2.0" } +`spark.jars.ivySettings`: String? + +/// Comma-separated list of additional remote repositories to search for the maven coordinates given with `--packages` or `spark.jars.packages`. +/// +/// Default: `null` +@Since { version = "2.3.0" } +`spark.jars.repositories`: String? + +/// Comma-separated list of archives to be extracted into the working directory of each executor. +/// +/// .jar, .tar.gz, .tgz and .zip are supported. +/// You can specify the directory name to unpack via adding `#` after the file name to unpack, for example, `file.zip#directory`. +/// This configuration is experimental. +/// +/// Default: `null` +@Since { version = "3.1.0" } +`spark.archives`: String? + +/// Python binary executable to use for PySpark in driver. +/// +/// (default is `spark.pyspark.python`) +/// +/// Default: `null` +@Since { version = "2.1.0" } +`spark.pyspark.driver.python`: String? + +/// Python binary executable to use for PySpark in both driver and executors. +/// +/// Default: `null` +@Since { version = "2.1.0" } +`spark.pyspark.python`: String? + +/// Maximum size of map outputs to fetch simultaneously from each reduce task, in MiB unless otherwise specified. +/// +/// Since each output requires us to create a buffer to receive it, this represents a fixed memory overhead per reduce task, so keep it small unless you have a large amount of memory. +/// +/// Default: `48.mib` +@Since { version = "1.4.0" } +`spark.reducer.maxSizeInFlight`: DataSize? + +/// This configuration limits the number of remote requests to fetch blocks at any given point. +/// +/// When the number of hosts in the cluster increase, it might lead to very large number of inbound connections to one or more nodes, causing the workers to fail under load. +/// By allowing it to limit the number of fetch requests, this scenario can be mitigated. +/// +/// Default: `2147483647` +@Since { version = "2.0.0" } +`spark.reducer.maxReqsInFlight`: Int? + +/// This configuration limits the number of remote blocks being fetched per reduce task from a given host port. +/// +/// When a large number of blocks are being requested from a given address in a single fetch or simultaneously, this could crash the serving executor or Node Manager. +/// This is especially useful to reduce the load on the Node Manager when external shuffle is enabled. +/// You can mitigate this issue by setting it to a lower value. +/// +/// Default: `2147483647` +@Since { version = "2.2.1" } +`spark.reducer.maxBlocksInFlightPerAddress`: Int? + +/// Whether to compress map output files. +/// +/// Generally a good idea. +/// Compression will use `spark.io.compression.codec`. +/// +/// Default: `true` +@Since { version = "0.6.0" } +`spark.shuffle.compress`: Boolean? + +/// Size of the in-memory buffer for each shuffle file output stream, in KiB unless otherwise specified. +/// +/// These buffers reduce the number of disk seeks and system calls made in creating intermediate shuffle files. +/// +/// Default: `32.kib` +@Since { version = "1.4.0" } +`spark.shuffle.file.buffer`: DataSize? + +/// (Netty only) Fetches that fail due to IO-related exceptions are automatically retried if this is set to a non-zero value. +/// +/// This retry logic helps stabilize large shuffles in the face of long GC pauses or transient network connectivity issues. 
+/// +/// Default: `3` +@Since { version = "1.2.0" } +`spark.shuffle.io.maxRetries`: Int? + +/// (Netty only) Connections between hosts are reused in order to reduce connection buildup for large clusters. +/// +/// For clusters with many hard disks and few hosts, this may result in insufficient concurrency to saturate all disks, and so users may consider increasing this value. +/// +/// Default: `1` +@Since { version = "1.2.1" } +`spark.shuffle.io.numConnectionsPerPeer`: Int? + +/// (Netty only) Off-heap buffers are used to reduce garbage collection during shuffle and cache block transfer. +/// +/// For environments where off-heap memory is tightly limited, users may wish to turn this off to force all allocations from Netty to be on-heap. +/// +/// Default: `true` +@Since { version = "1.2.0" } +`spark.shuffle.io.preferDirectBufs`: Boolean? + +/// (Netty only) How long to wait between retries of fetches. +/// +/// The maximum delay caused by retrying is 15 seconds by default, calculated as `maxRetries * retryWait`. +/// +/// Default: `5.s` +@Since { version = "1.2.1" } +`spark.shuffle.io.retryWait`: Duration? + +/// Length of the accept queue for the shuffle service. +/// +/// For large applications, this value may need to be increased, so that incoming connections are not dropped if the service cannot keep up with a large number of connections arriving in a short period of time. +/// This needs to be configured wherever the shuffle service itself is running, which may be outside of the application (see `spark.shuffle.service.enabled` option below). +/// If set below 1, will fallback to OS default defined by Netty's `io.netty.util.NetUtil#SOMAXCONN`. +/// +/// Default: `-1` +@Since { version = "1.1.1" } +`spark.shuffle.io.backLog`: Int? + +/// Timeout for the established connections between shuffle servers and clients to be marked as idled and closed if there are still outstanding fetch requests but no traffic no the channel for at least \`connectionTimeout\`. +/// +/// Default: value of spark.network.timeout +@Since { version = "1.2.0" } +`spark.shuffle.io.connectionTimeout`: Duration? + +/// Enables the external shuffle service. +/// +/// This service preserves the shuffle files written by executors e.g. so that executors can be safely removed, or so that shuffle fetches can continue in the event of executor failure. +/// The external shuffle service must be set up in order to enable it. See [dynamic allocation configuration and setup documentation](https://spark.apache.org/docs/latest/job-scheduling.html#configuration-and-setup) for more information. +/// +/// Default: `false` +@Since { version = "1.2.0" } +`spark.shuffle.service.enabled`: Boolean? + +/// Port on which the external shuffle service will run. +/// +/// Default: `7337` +@Since { version = "1.2.0" } +`spark.shuffle.service.port`: UInt16? + +/// Cache entries limited to the specified memory footprint, in bytes unless otherwise specified. +/// +/// Default: `100.mib` +@Since { version = "2.3.0" } +`spark.shuffle.service.index.cache.size`: DataSize? + +/// Whether to use the ExternalShuffleService for deleting shuffle blocks for deallocated executors when the shuffle is no longer needed. +/// +/// Without this enabled, shuffle data on executors that are deallocated will remain on disk until the application ends. +/// +/// Default: `false` +@Since { version = "3.3.0" } +`spark.shuffle.service.removeShuffle`: Boolean? + +/// The max number of chunks allowed to be transferred at the same time on shuffle service. 
+/// +/// Note that new incoming connections will be closed when the max number is hit. +/// The client will retry according to the shuffle retry configs (see `spark.shuffle.io.maxRetries` and `spark.shuffle.io.retryWait`), if those limits are reached the task will fail with fetch failure. +/// +/// Default: `"Long.MAX_VALUE"` +@Since { version = "2.3.0" } +`spark.shuffle.maxChunksBeingTransferred`: String? + +/// (Advanced) In the sort-based shuffle manager, avoid merge-sorting data if there is no map-side aggregation and there are at most this many reduce partitions. +/// +/// Default: `200` +@Since { version = "1.1.1" } +`spark.shuffle.sort.bypassMergeThreshold`: Int? + +/// Whether to compress data spilled during shuffles. +/// +/// Compression will use `spark.io.compression.codec`. +/// +/// Default: `true` +@Since { version = "0.9.0" } +`spark.shuffle.spill.compress`: Boolean? + +/// Threshold in bytes above which the size of shuffle blocks in HighlyCompressedMapStatus is accurately recorded. +/// +/// This helps to prevent OOM by avoiding underestimating shuffle block size when fetch shuffle blocks. +/// +/// Default: `100 * 1024 * 1024` +@Since { version = "2.2.1" } +`spark.shuffle.accurateBlockThreshold`: Number? + +/// Timeout in milliseconds for registration to the external shuffle service. +/// +/// Default: `5000` +@Since { version = "2.3.0" } +`spark.shuffle.registration.timeout`: Int? + +/// When we fail to register to the external shuffle service, we will retry for maxAttempts times. +/// +/// Default: `3` +@Since { version = "2.3.0" } +`spark.shuffle.registration.maxAttempts`: Int? + +/// Timeout for the established connections for fetching files in Spark RPC environments to be marked as idled and closed if there are still outstanding files being downloaded but no traffic no the channel for at least \`connectionTimeout\`. +/// +/// Default: value of spark.network.timeout +@Since { version = "1.6.0" } +`spark.files.io.connectionTimeout`: Duration? + +/// Whether to calculate the checksum of shuffle data. +/// +/// If enabled, Spark will calculate the checksum values for each partition data within the map output file and store the values in a checksum file on the disk. +/// When there's shuffle data corruption detected, Spark will try to diagnose the cause (e.g., network issue, disk issue, etc.) of the corruption by using the checksum file. +/// +/// Default: `true` +@Since { version = "3.2.0" } +`spark.shuffle.checksum.enabled`: Boolean? + +/// The algorithm is used to calculate the shuffle checksum. +/// +/// Currently, it only supports built-in algorithms of JDK, e.g., ADLER32, CRC32. +/// +/// Default: `"ADLER32"` +@Since { version = "3.2.0" } +`spark.shuffle.checksum.algorithm`: String? + +/// Whether to use the ExternalShuffleService for fetching disk persisted RDD blocks. +/// +/// In case of dynamic allocation if this feature is enabled executors having only disk persisted blocks are considered idle after `spark.dynamicAllocation.executorIdleTimeout` and will be released accordingly. +/// +/// Default: `false` +@Since { version = "3.0.0" } +`spark.shuffle.service.fetch.rdd.enabled`: Boolean? + +/// Whether to log events for every block update, if `spark.eventLog.enabled` is true. +/// +/// \*Warning\*: This will increase the size of the event log considerably. +/// +/// Default: `false` +@Since { version = "2.3.0" } +`spark.eventLog.logBlockUpdates.enabled`: Boolean? + +/// If true, use the long form of call sites in the event log. 
+/// +/// Otherwise use the short form. +/// +/// Default: `false` +@Since { version = "2.4.0" } +`spark.eventLog.longForm.enabled`: Boolean? + +/// Whether to compress logged events, if `spark.eventLog.enabled` is true. +/// +/// Default: `false` +@Since { version = "1.0.0" } +`spark.eventLog.compress`: Boolean? + +/// The codec to compress logged events. +/// +/// By default, Spark provides four codecs: `lz4`, `lzf`, `snappy`, and `zstd`. +/// You can also use fully qualified class names to specify the codec, e.g. `org.apache.spark.io.LZ4CompressionCodec`, +/// `org.apache.spark.io.LZFCompressionCodec`, +/// `org.apache.spark.io.SnappyCompressionCodec`, +/// and `org.apache.spark.io.ZStdCompressionCodec`. +/// +/// Default: `"zstd"` +@Since { version = "3.0.0" } +`spark.eventLog.compression.codec`: String? + +/// Whether to allow event logs to use erasure coding, or turn erasure coding off, regardless of filesystem defaults. +/// +/// On HDFS, erasure coded files will not update as quickly as regular replicated files, so the application updates will take longer to appear in the History Server. +/// Note that even if this is true, Spark will still not force the file to use erasure coding, it will simply use filesystem defaults. +/// +/// Default: `false` +@Since { version = "3.0.0" } +`spark.eventLog.erasureCoding.enabled`: Boolean? + +/// Base directory in which Spark events are logged, if `spark.eventLog.enabled` is true. +/// +/// Within this base directory, Spark creates a sub-directory for each application, and logs the events specific to the application in this directory. +/// Users may want to set this to a unified location like an HDFS directory so history files can be read by the history server. +/// +/// Default: `"file:///tmp/spark-events"` +@Since { version = "1.0.0" } +`spark.eventLog.dir`: String? + +/// Whether to log Spark events, useful for reconstructing the Web UI after the application has finished. +/// +/// Default: `false` +@Since { version = "1.0.0" } +`spark.eventLog.enabled`: Boolean? + +/// Whether to overwrite any existing files. +/// +/// Default: `false` +@Since { version = "1.0.0" } +`spark.eventLog.overwrite`: Boolean? + +/// Buffer size to use when writing to output streams, in KiB unless otherwise specified. +/// +/// Default: `100.kib` +@Since { version = "1.0.0" } +`spark.eventLog.buffer.kb`: DataSize? + +/// Whether rolling over event log files is enabled. +/// +/// If set to true, it cuts down each event log file to the configured size. +/// +/// Default: `false` +@Since { version = "3.0.0" } +`spark.eventLog.rolling.enabled`: Boolean? + +/// When `spark.eventLog.rolling.enabled=true`, specifies the max size of event log file before it's rolled over. +/// +/// Default: `128.mib` +@Since { version = "3.0.0" } +`spark.eventLog.rolling.maxFileSize`: DataSize? + +/// How many DAG graph nodes the Spark UI and status APIs remember before garbage collecting. +/// +/// Default: `2147483647` +@Since { version = "2.1.0" } +`spark.ui.dagGraph.retainedRootRDDs`: Int? + +/// Whether to run the web UI for the Spark application. +/// +/// Default: `true` +@Since { version = "1.1.1" } +`spark.ui.enabled`: Boolean? + +/// Allows jobs and stages to be killed from the web UI. +/// +/// Default: `true` +@Reserved +@Since { version = "1.0.0" } +`spark.ui.killEnabled`: Null? + +/// How often to update live entities. +/// +/// -1 means "never update" when replaying applications, meaning only the last write will happen. 
+/// For live applications, this avoids a few operations that we can live without when rapidly processing incoming task events. +/// +/// Default: `100.ms` +@Since { version = "2.3.0" } +`spark.ui.liveUpdate.period`: Duration? + +/// Minimum time elapsed before stale UI data is flushed. +/// +/// This avoids UI staleness when incoming task events are not fired frequently. +/// +/// Default: `1.s` +@Since { version = "2.4.2" } +`spark.ui.liveUpdate.minFlushPeriod`: Duration? + +/// Port for your application's dashboard, which shows memory and workload data. +/// +/// Default: `4040` +@Since { version = "0.7.0" } +`spark.ui.port`: UInt16? + +/// How many jobs the Spark UI and status APIs remember before garbage collecting. +/// +/// This is a target maximum, and fewer elements may be retained in some circumstances. +/// +/// Default: `1000` +@Since { version = "1.2.0" } +`spark.ui.retainedJobs`: Int? + +/// How many stages the Spark UI and status APIs remember before garbage collecting. +/// +/// This is a target maximum, and fewer elements may be retained in some circumstances. +/// +/// Default: `1000` +@Since { version = "0.9.0" } +`spark.ui.retainedStages`: Int? + +/// How many tasks in one stage the Spark UI and status APIs remember before garbage collecting. +/// +/// This is a target maximum, and fewer elements may be retained in some circumstances. +/// +/// Default: `100000` +@Since { version = "2.0.1" } +`spark.ui.retainedTasks`: Int? + +/// Enable running Spark Master as reverse proxy for worker and application UIs. +/// +/// In this mode, Spark master will reverse proxy the worker and application UIs to enable access without requiring direct access to their hosts. +/// Use it with caution, as worker and application UI will not be accessible directly, you will only be able to access them through spark master/proxy public URL. +/// This setting affects all the workers and application UIs running in the cluster and must be set on all the workers, drivers and masters. +/// +/// Default: `false` +@Since { version = "2.1.0" } +`spark.ui.reverseProxy`: Boolean? + +/// If the Spark UI should be served through another front-end reverse proxy, this is the URL for accessing the Spark master UI through that reverse proxy. +/// +/// This is useful when running proxy for authentication e.g. an OAuth proxy. +/// The URL may contain a path prefix, like `http://mydomain.com/path/to/spark/`, allowing you to serve the UI for multiple Spark clusters and other web applications through the same virtual host and port. +/// Normally, this should be an absolute URL including scheme (http/https), host and port. +/// It is possible to specify a relative URL starting with "/" here. +/// In this case, all URLs generated by the Spark UI and Spark REST APIs will be server-relative links -- this will still work, as the entire Spark UI is served through the same host and port. +/// The setting affects link generation in the Spark UI, but the front-end reverse proxy is responsible for +/// +/// * stripping a path prefix before forwarding the request, +/// * rewriting redirects which point directly to the Spark master, +/// * redirecting access from `http://mydomain.com/path/to/spark` to `http://mydomain.com/path/to/spark/` (trailing slash after path prefix); otherwise relative links on the master page do not work correctly. +/// +/// +/// This setting affects all the workers and application UIs running in the cluster and must be set identically on all the workers, drivers and masters. 
+/// In is only effective when `spark.ui.reverseProxy` is turned on. This setting is not needed when the Spark master web UI is directly reachable. +/// +/// Default: `null` +@Since { version = "2.1.0" } +`spark.ui.reverseProxyUrl`: String? + +/// Where to address redirects when Spark is running behind a proxy. +/// +/// This will make Spark modify redirect responses so they point to the proxy server, instead of the Spark UI's own address. +/// This should be only the address of the server, without any prefix paths for the application; the prefix should be set either by the proxy server itself (by adding the `X-Forwarded-Context` request header), or by setting the proxy base in the Spark app's configuration. +/// +/// Default: `null` +@Since { version = "3.0.0" } +`spark.ui.proxyRedirectUri`: String? + +/// Show the progress bar in the console. +/// +/// The progress bar shows the progress of stages that run for longer than 500ms. +/// If multiple stages run at the same time, multiple progress bars will be displayed on the same line. +/// *Note:* In shell environment, the default value of spark.ui.showConsoleProgress is true. +/// +/// Default: `false` +@Since { version = "1.2.1" } +`spark.ui.showConsoleProgress`: Boolean? + +/// Specifies custom spark executor log URL for supporting external log service instead of using cluster managers' application log URLs in Spark UI. +/// +/// Spark will support some path variables via patterns which can vary on cluster manager. +/// Please check the documentation for your cluster manager to see which patterns are supported, if any. +/// +/// +///
+///
+/// Please note that this configuration also replaces original log urls in the event log, which will also be effective when accessing the application on the history server.
+/// The new log urls must be permanent, otherwise you might have dead links for executor log urls.
+///
+///
+/// +/// For now, only YARN mode supports this configuration +/// +/// Default: `null` +@Since { version = "3.0.0" } +`spark.ui.custom.executor.log.url`: String? + +/// How many finished executors the Spark UI and status APIs remember before garbage collecting. +/// +/// Default: `1000` +@Since { version = "1.5.0" } +`spark.worker.ui.retainedExecutors`: Int? + +/// How many finished drivers the Spark UI and status APIs remember before garbage collecting. +/// +/// Default: `1000` +@Since { version = "1.5.0" } +`spark.worker.ui.retainedDrivers`: Int? + +/// How many finished executions the Spark UI and status APIs remember before garbage collecting. +/// +/// Default: `1000` +@Since { version = "1.5.0" } +`spark.sql.ui.retainedExecutions`: Int? + +/// How many finished batches the Spark UI and status APIs remember before garbage collecting. +/// +/// Default: `1000` +@Since { version = "1.0.0" } +`spark.streaming.ui.retainedBatches`: Int? + +/// How many dead executors the Spark UI and status APIs remember before garbage collecting. +/// +/// Default: `100` +@Since { version = "2.0.0" } +`spark.ui.retainedDeadExecutors`: Int? + +/// Comma separated list of filter class names to apply to the Spark Web UI. +/// +/// The filter should be a standard [javax servlet Filter](http://docs.oracle.com/javaee/6/api/javax/servlet/Filter.html). +/// +///
+/// Filter parameters can also be specified in the configuration, by setting config entries
+/// of the form spark.<class name of filter>.param.<param name>=<value>
+/// For example:
+/// spark.ui.filters=com.test.filter1
+/// spark.com.test.filter1.param.name1=foo
+///
spark.com.test.filter1.param.name2=bar +/// +/// Default: `null` +@Since { version = "1.0.0" } +`spark.ui.filters`: String? + +/// The maximum allowed size for a HTTP request header, in bytes unless otherwise specified. +/// +/// This setting applies for the Spark History Server too. +/// +/// Default: `8.kib` +@Since { version = "2.2.3" } +`spark.ui.requestHeaderSize`: DataSize? + +/// The maximum number of executors shown in the event timeline. +/// +/// Default: `250` +@Since { version = "3.2.0" } +`spark.ui.timeline.executors.maximum`: Int? + +/// The maximum number of jobs shown in the event timeline. +/// +/// Default: `500` +@Since { version = "3.2.0" } +`spark.ui.timeline.jobs.maximum`: Int? + +/// The maximum number of stages shown in the event timeline. +/// +/// Default: `500` +@Since { version = "3.2.0" } +`spark.ui.timeline.stages.maximum`: Int? + +/// The maximum number of tasks shown in the event timeline. +/// +/// Default: `1000` +@Since { version = "1.4.0" } +`spark.ui.timeline.tasks.maximum`: Int? + +/// Whether to compress broadcast variables before sending them. +/// +/// Generally a good idea. +/// Compression will use `spark.io.compression.codec`. +/// +/// Default: `true` +@Since { version = "0.6.0" } +`spark.broadcast.compress`: Boolean? + +/// Whether to compress RDD checkpoints. +/// +/// Generally a good idea. +/// Compression will use `spark.io.compression.codec`. +/// +/// Default: `false` +@Since { version = "2.2.0" } +`spark.checkpoint.compress`: Boolean? + +/// The codec used to compress internal data such as RDD partitions, event log, broadcast variables and shuffle outputs. +/// +/// By default, Spark provides four codecs: `lz4`, `lzf`, `snappy`, and `zstd`. +/// You can also use fully qualified class names to specify the codec, e.g. `org.apache.spark.io.LZ4CompressionCodec`, `org.apache.spark.io.LZFCompressionCodec`, `org.apache.spark.io.SnappyCompressionCodec`, and `org.apache.spark.io.ZStdCompressionCodec`. +/// +/// Default: `"lz4"` +@Since { version = "0.8.0" } +`spark.io.compression.codec`: String? + +/// Block size used in LZ4 compression, in the case when LZ4 compression codec is used. +/// +/// Lowering this block size will also lower shuffle memory usage when LZ4 is used. +/// Default unit is bytes, unless otherwise specified. +/// +/// Default: `32.kib` +@Since { version = "1.4.0" } +`spark.io.compression.lz4.blockSize`: DataSize? + +/// Block size in Snappy compression, in the case when Snappy compression codec is used. +/// +/// Lowering this block size will also lower shuffle memory usage when Snappy is used. +/// Default unit is bytes, unless otherwise specified. +/// +/// Default: `32.kib` +@Since { version = "1.4.0" } +`spark.io.compression.snappy.blockSize`: DataSize? + +/// Compression level for Zstd compression codec. +/// +/// Increasing the compression level will result in better compression at the expense of more CPU and memory. +/// +/// Default: `1` +@Since { version = "2.3.0" } +`spark.io.compression.zstd.level`: Int? + +/// Buffer size in bytes used in Zstd compression, in the case when Zstd compression codec is used. +/// +/// Lowering this size will lower the shuffle memory usage when Zstd is used, but it might increase the compression cost because of excessive JNI call overhead. +/// +/// Default: `32.kib` +@Since { version = "2.3.0" } +`spark.io.compression.zstd.bufferSize`: DataSize? + +/// If you use Kryo serialization, give a comma-separated list of custom class names to register with Kryo. 
+/// +/// See the [tuning guide](https://spark.apache.org/docs/latest/tuning.html#data-serialization) for more details. +/// +/// Default: `null` +@Since { version = "1.2.0" } +`spark.kryo.classesToRegister`: String? + +/// Whether to track references to the same object when serializing data with Kryo, which is necessary if your object graphs have loops and useful for efficiency if they contain multiple copies of the same object. +/// +/// Can be disabled to improve performance if you know this is not the case. +/// +/// Default: `true` +@Since { version = "0.8.0" } +`spark.kryo.referenceTracking`: Boolean? + +/// Whether to require registration with Kryo. +/// +/// If set to 'true', Kryo will throw an exception if an unregistered class is serialized. +/// If set to false (the default), Kryo will write unregistered class names along with each object. +/// Writing class names can cause significant performance overhead, so enabling this option can enforce strictly that a user has not omitted classes from registration. +/// +/// Default: `false` +@Since { version = "1.1.0" } +`spark.kryo.registrationRequired`: Boolean? + +/// If you use Kryo serialization, give a comma-separated list of classes that register your custom classes with Kryo. +/// +/// This property is useful if you need to register your classes in a custom way, e.g. to specify a custom field serializer. +/// Otherwise `spark.kryo.classesToRegister` is simpler. +/// It should be set to classes that extend [`KryoRegistrator`](https://spark.apache.org/docs/latest/api/scala/org/apache/spark/serializer/KryoRegistrator.html). +/// See the [tuning guide](https://spark.apache.org/docs/latest/tuning.html#data-serialization) for more details. +/// +/// Default: `null` +@Since { version = "0.5.0" } +`spark.kryo.registrator`: String? + +/// Whether to use unsafe based Kryo serializer. +/// +/// Can be substantially faster by using Unsafe Based IO. +/// +/// Default: `false` +@Since { version = "2.1.0" } +`spark.kryo.unsafe`: Boolean? + +/// Maximum allowable size of Kryo serialization buffer, in MiB unless otherwise specified. +/// +/// This must be larger than any object you attempt to serialize and must be less than 2048m. +/// Increase this if you get a "buffer limit exceeded" exception inside Kryo. +/// +/// Default: `64.mib` +@Since { version = "1.4.0" } +`spark.kryoserializer.buffer.max`: DataSize? + +/// Initial size of Kryo's serialization buffer, in KiB unless otherwise specified. +/// +/// Note that there will be one buffer *per core* on each worker. +/// This buffer will grow up to `spark.kryoserializer.buffer.max` if needed. +/// +/// Default: `64.kib` +@Since { version = "1.4.0" } +`spark.kryoserializer.buffer`: DataSize? + +/// Whether to compress serialized RDD partitions (e.g. +/// +/// for `StorageLevel.MEMORY_ONLY_SER` in Java and Scala or `StorageLevel.MEMORY_ONLY` in Python). +/// Can save substantial space at the cost of some extra CPU time. +/// Compression will use `spark.io.compression.codec`. +/// +/// Default: `false` +@Since { version = "0.6.0" } +`spark.rdd.compress`: Boolean? + +/// Class to use for serializing objects that will be sent over the network or need to be cached in serialized form. +/// +/// The default of Java serialization works with any Serializable Java object but is quite slow, so we recommend [using `org.apache.spark.serializer.KryoSerializer` and configuring Kryo serialization](https://spark.apache.org/docs/latest/tuning.html) when speed is necessary. 
+/// Can be any subclass of [`org.apache.spark.Serializer`](https://spark.apache.org/docs/latest/api/scala/org/apache/spark/serializer/Serializer.html).
+///
+/// Default: org.apache.spark.serializer.JavaSerializer
+@Since { version = "0.5.0" }
+`spark.serializer`: String?
+
+/// When serializing using org.apache.spark.serializer.JavaSerializer, the serializer caches objects to prevent writing redundant data; however, that stops garbage collection of those objects.
+///
+/// By calling 'reset' you flush that info from the serializer, and allow old objects to be collected.
+/// To turn off this periodic reset, set it to -1. By default it will reset the serializer every 100 objects.
+///
+/// Default: `100`
+@Since { version = "1.0.0" }
+`spark.serializer.objectStreamReset`: Int?
+
+/// Fraction of (heap space - 300MB) used for execution and storage.
+///
+/// The lower this is, the more frequently spills and cached data eviction occur.
+/// The purpose of this config is to set aside memory for internal metadata, user data structures, and imprecise size estimation in the case of sparse, unusually large records.
+/// Leaving this at the default value is recommended.
+/// For more detail, including important information about correctly tuning JVM garbage collection when increasing this value, see [this description](https://spark.apache.org/docs/latest/tuning.html#memory-management-overview).
+///
+/// Default: `0.6`
+@Since { version = "1.6.0" }
+`spark.memory.fraction`: Float?
+
+/// Amount of storage memory immune to eviction, expressed as a fraction of the size of the region set aside by `spark.memory.fraction`.
+///
+/// The higher this is, the less working memory may be available to execution, and tasks may spill to disk more often.
+/// Leaving this at the default value is recommended.
+/// For more detail, see [this description](https://spark.apache.org/docs/latest/tuning.html#memory-management-overview).
+///
+/// Default: `0.5`
+@Since { version = "1.6.0" }
+`spark.memory.storageFraction`: Float?
+
+/// If true, Spark will attempt to use off-heap memory for certain operations.
+///
+/// If off-heap memory use is enabled, then `spark.memory.offHeap.size` must be positive.
+///
+/// Default: `false`
+@Since { version = "1.6.0" }
+`spark.memory.offHeap.enabled`: Boolean?
+
+/// The absolute amount of memory which can be used for off-heap allocation, in bytes unless otherwise specified.
+///
+/// This setting has no impact on heap memory usage, so if your executors' total memory consumption must fit within some hard limit then be sure to shrink your JVM heap size accordingly.
+/// This must be set to a positive value when `spark.memory.offHeap.enabled=true`.
+///
+/// Default: `0`
+@Since { version = "1.6.0" }
+`spark.memory.offHeap.size`: DataSize?
+
+/// Enables proactive block replication for RDD blocks.
+///
+/// Cached RDD block replicas lost due to executor failures are replenished if there are any existing available replicas.
+/// This tries to get the replication level of the block to the initial number.
+///
+/// Default: `false`
+@Since { version = "2.2.0" }
+`spark.storage.replication.proactive`: Boolean?
+
+/// Controls how often to trigger a garbage collection.
+///
+/// This context cleaner triggers cleanups only when weak references are garbage collected.
+/// In long-running applications with large driver JVMs, where there is little memory pressure on the driver, this may happen very occasionally or not at all.
+/// Not cleaning at all may lead to executors running out of disk space after a while. +/// +/// Default: `30.min` +@Since { version = "1.6.0" } +`spark.cleaner.periodicGC.interval`: Duration? + +/// Enables or disables context cleaning. +/// +/// Default: `true` +@Since { version = "1.0.0" } +`spark.cleaner.referenceTracking`: Boolean? + +/// Controls whether the cleaning thread should block on cleanup tasks (other than shuffle, which is controlled by `spark.cleaner.referenceTracking.blocking.shuffle` Spark property). +/// +/// Default: `true` +@Since { version = "1.0.0" } +`spark.cleaner.referenceTracking.blocking`: Boolean? + +/// Controls whether the cleaning thread should block on shuffle cleanup tasks. +/// +/// Default: `false` +@Since { version = "1.1.1" } +`spark.cleaner.referenceTracking.blocking.shuffle`: Boolean? + +/// Controls whether to clean checkpoint files if the reference is out of scope. +/// +/// Default: `false` +@Since { version = "1.4.0" } +`spark.cleaner.referenceTracking.cleanCheckpoints`: Boolean? + +/// Size of each piece of a block for `TorrentBroadcastFactory`, in KiB unless otherwise specified. +/// +/// Too large a value decreases parallelism during broadcast (makes it slower); however, if it is too small, `BlockManager` might take a performance hit. +/// +/// Default: `4.mib` +@Since { version = "0.5.0" } +`spark.broadcast.blockSize`: DataSize? + +/// Whether to enable checksum for broadcast. +/// +/// If enabled, broadcasts will include a checksum, which can help detect corrupted blocks, at the cost of computing and sending a little more data. +/// It's possible to disable it if the network has other mechanisms to guarantee data won't be corrupted during broadcast. +/// +/// Default: `true` +@Since { version = "2.1.1" } +`spark.broadcast.checksum`: Boolean? + +/// The number of cores to use on each executor. +/// +/// +/// In standalone and Mesos coarse-grained modes, for more detail, see +/// this description. +/// +/// Default: `1 in YARN mode, all the available cores on the worker in standalone and Mesos coarse-grained modes.` +@Since { version = "1.0.0" } +`spark.executor.cores`: Number? + +/// Default number of partitions in RDDs returned by transformations like `join`, `reduceByKey`, and `parallelize` when not set by user. +/// +/// Default: For distributed shuffle operations like reduceByKey and join, the largest number of partitions in a parent RDD. For operations like parallelize with no parent RDDs, it depends on the cluster manager: Local mode: number of cores on the local machine Mesos fine grained mode: 8 Others: total number of cores on all executor nodes or 2, whichever is larger +@Since { version = "0.5.0" } +`spark.default.parallelism`: String? + +/// Interval between each executor's heartbeats to the driver. +/// +/// Heartbeats let the driver know that the executor is still alive and update it with metrics for in-progress tasks. +/// spark.executor.heartbeatInterval should be significantly less than spark.network.timeout +/// +/// Default: `10.s` +@Since { version = "1.1.0" } +`spark.executor.heartbeatInterval`: Duration? + +/// Communication timeout to use when fetching files added through SparkContext.addFile() from the driver. +/// +/// Default: `60.s` +@Since { version = "1.0.0" } +`spark.files.fetchTimeout`: Duration? 
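As a quick, non-authoritative illustration of how the Duration-typed settings above are meant to be consumed, the sketch below amends the concrete property template added in this diff; the file name and the relative path "Properties.pkl" are assumptions for the example, not part of this change.

// executor-timeouts.pkl (hypothetical example, not part of this diff)
amends "Properties.pkl" // assumed location of the concrete template defined above

// Keep the heartbeat well below the network timeout, as the doc comment above advises.
`spark.executor.heartbeatInterval` = 10.s
`spark.network.timeout` = 120.s
`spark.files.fetchTimeout` = 90.s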
+ +/// If set to true (default), file fetching will use a local cache that is shared by executors that belong to the same application, which can improve task launching performance when running many executors on the same host. +/// +/// If set to false, these caching optimizations will be disabled and all executors will fetch their own copies of files. +/// This optimization may be disabled in order to use Spark local directories that reside on NFS filesystems (see [SPARK-6313](https://issues.apache.org/jira/browse/SPARK-6313) for more details). +/// +/// Default: `true` +@Since { version = "1.2.2" } +`spark.files.useFetchCache`: Boolean? + +/// Whether to overwrite any files which exist at the startup. +/// +/// Users can not overwrite the files added by `SparkContext.addFile` or `SparkContext.addJar` before even if this option is set `true`. +/// +/// Default: `false` +@Since { version = "1.0.0" } +`spark.files.overwrite`: Boolean? + +/// The maximum number of bytes to pack into a single partition when reading files. +/// +/// Default: `134217728 (128 MiB)` +@Since { version = "2.1.0" } +`spark.files.maxPartitionBytes`: Number? + +/// The estimated cost to open a file, measured by the number of bytes could be scanned at the same time. +/// +/// This is used when putting multiple files into a partition. +/// It is better to overestimate, then the partitions with small files will be faster than partitions with bigger files. +/// +/// Default: `4194304 (4 MiB)` +@Since { version = "2.1.0" } +`spark.files.openCostInBytes`: Number? + +/// If set to true, clones a new Hadoop `Configuration` object for each task. +/// +/// This option should be enabled to work around `Configuration` thread-safety issues (see [SPARK-2546](https://issues.apache.org/jira/browse/SPARK-2546) for more details). +/// This is disabled by default in order to avoid unexpected performance regressions for jobs that are not affected by these issues. +/// +/// Default: `false` +@Since { version = "1.0.3" } +`spark.hadoop.cloneConf`: Boolean? + +/// If set to true, validates the output specification (e.g. +/// +/// checking if the output directory already exists) used in saveAsHadoopFile and other variants. +/// This can be disabled to silence exceptions due to pre-existing output directories. +/// We recommend that users do not disable this except if trying to achieve compatibility with previous versions of Spark. +/// Simply use Hadoop's FileSystem API to delete output directories by hand. +/// This setting is ignored for jobs generated through Spark Streaming's StreamingContext, since data may need to be rewritten to pre-existing output directories during checkpoint recovery. +/// +/// Default: `true` +@Since { version = "1.0.1" } +`spark.hadoop.validateOutputSpecs`: Boolean? + +/// Size of a block above which Spark memory maps when reading a block from disk. +/// +/// Default unit is bytes, unless specified otherwise. +/// This prevents Spark from memory mapping very small blocks. +/// In general, memory mapping has high overhead for blocks close to or below the page size of the operating system. +/// +/// Default: `2.mib` +@Since { version = "0.9.2" } +`spark.storage.memoryMapThreshold`: DataSize? + +/// The file output committer algorithm version, valid algorithm version number: 1 or 2. +/// +/// Note that 2 may cause a correctness issue like MAPREDUCE-7282. +/// +/// Default: `1` +@Since { version = "2.2.0" } +`spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version`: Int? 
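In the same spirit, a hedged sketch for the file- and Hadoop-related settings above, using the Boolean, Number, and DataSize literals these properties are typed with (module path again assumed):

// hadoop-io.pkl (hypothetical example)
amends "Properties.pkl" // assumed

`spark.files.useFetchCache` = false         // e.g. when Spark local dirs live on NFS (SPARK-6313)
`spark.files.maxPartitionBytes` = 134217728 // typed as Number, so plain bytes
`spark.storage.memoryMapThreshold` = 4.mib  // DataSize literal
`spark.hadoop.cloneConf` = true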
+ +/// Whether to write per-stage peaks of executor metrics (for each executor) to the event log. +/// +/// *Note:* The metrics are polled (collected) and sent in the executor heartbeat, and this is always done; this configuration is only to determine if aggregated metric peaks are written to the event log. +/// +/// Default: `false` +@Since { version = "3.0.0" } +`spark.eventLog.logStageExecutorMetrics`: Boolean? + +/// Whether to collect process tree metrics (from the /proc filesystem) when collecting executor metrics. +/// +/// *Note:* The process tree metrics are collected only if the /proc filesystem exists. +/// +/// Default: `false` +@Since { version = "3.0.0" } +`spark.executor.processTreeMetrics.enabled`: Boolean? + +/// How often to collect executor metrics (in milliseconds). +/// +/// If 0, the polling is done on executor heartbeats (thus at the heartbeat interval, specified by `spark.executor.heartbeatInterval`). +/// If positive, the polling is done at this interval. +/// +/// Default: `0` +@Since { version = "3.0.0" } +`spark.executor.metrics.pollingInterval`: Int? + +/// Maximum message size (in MiB) to allow in "control plane" communication; generally only applies to map output size information sent between executors and the driver. +/// +/// Increase this if you are running jobs with many thousands of map and reduce tasks and see messages about the RPC message size. +/// +/// Default: `128` +@Since { version = "2.0.0" } +`spark.rpc.message.maxSize`: Int? + +/// Port for all block managers to listen on. +/// +/// These exist on both the driver and the executors. +/// +/// Default: (random) +@Since { version = "1.1.0" } +`spark.blockManager.port`: UInt16? + +/// Driver-specific port for the block manager to listen on, for cases where it cannot use the same configuration as executors. +/// +/// Default: (value of spark.blockManager.port) +@Since { version = "2.1.0" } +`spark.driver.blockManager.port`: UInt16? + +/// Hostname or IP address where to bind listening sockets. +/// +/// This config overrides the SPARK_LOCAL_IP environment variable (see below). +/// +///
It also allows a different address from the local one to be advertised to executors or external systems. +/// This is useful, for example, when running containers with bridged networking. +/// For this to properly work, +/// the different ports used by the driver (RPC, block manager and UI) need to be forwarded from the +/// container's host. +/// +/// Default: (value of spark.driver.host) +@Since { version = "2.1.0" } +`spark.driver.bindAddress`: String? + +/// Hostname or IP address for the driver. +/// +/// This is used for communicating with the executors and the standalone Master. +/// +/// Default: (local hostname) +@Since { version = "0.7.0" } +`spark.driver.host`: String? + +/// Port for the driver to listen on. +/// +/// This is used for communicating with the executors and the standalone Master. +/// +/// Default: (random) +@Since { version = "0.7.0" } +`spark.driver.port`: UInt16? + +/// Length of the accept queue for the RPC server. +/// +/// For large applications, this value may need to be increased, so that incoming connections are not dropped when a large number of connections arrives in a short period of time. +/// +/// Default: `64` +@Since { version = "3.0.0" } +`spark.rpc.io.backLog`: Int? + +/// Default timeout for all network interactions. +/// +/// This config will be used in place of `spark.storage.blockManagerHeartbeatTimeoutMs`, `spark.shuffle.io.connectionTimeout`, `spark.rpc.askTimeout` or `spark.rpc.lookupTimeout` if they are not configured. +/// +/// Default: `120.s` +@Since { version = "1.3.0" } +`spark.network.timeout`: Duration? + +/// If enabled then off-heap buffer allocations are preferred by the shared allocators. +/// +/// Off-heap buffers are used to reduce garbage collection during shuffle and cache block transfer. +/// For environments where off-heap memory is tightly limited, users may wish to turn this off to force all allocations to be on-heap. +/// +/// Default: `true` +@Since { version = "3.0.0" } +`spark.network.io.preferDirectBufs`: Boolean? + +/// Maximum number of retries when binding to a port before giving up. +/// +/// When a port is given a specific value (non 0), each subsequent retry will increment the port used in the previous attempt by 1 before retrying. +/// This essentially allows it to try a range of ports from the start port specified to port + maxRetries. +/// +/// Default: `16` +@Since { version = "1.1.1" } +`spark.port.maxRetries`: Int? + +/// Number of times to retry before an RPC task gives up. +/// +/// An RPC task will run at most times of this number. +/// +/// Default: `3` +@Since { version = "1.4.0" } +`spark.rpc.numRetries`: Int? + +/// Duration for an RPC ask operation to wait before retrying. +/// +/// Default: `3.s` +@Since { version = "1.4.0" } +`spark.rpc.retry.wait`: Duration? + +/// Duration for an RPC ask operation to wait before timing out. +/// +/// Default: `spark.network.timeout` +@Since { version = "1.4.0" } +`spark.rpc.askTimeout`: Duration? + +/// Duration for an RPC remote endpoint lookup operation to wait before timing out. +/// +/// Default: `120.s` +@Since { version = "1.4.0" } +`spark.rpc.lookupTimeout`: Duration? + +/// Remote block will be fetched to disk when size of the block is above this threshold in bytes. +/// +/// This is to avoid a giant request takes too much memory. +/// Note this configuration will affect both shuffle fetch and block manager remote block fetch. 
+/// For users who enabled external shuffle service, this feature can only work when external shuffle service is at least 2.3.0. +/// +/// Default: `200.mib` +@Since { version = "3.0.0" } +`spark.network.maxRemoteBlockSizeFetchToMem`: DataSize? + +/// Timeout for the established connections between RPC peers to be marked as idled and closed if there are outstanding RPC requests but no traffic on the channel for at least \`connectionTimeout\`. +/// +/// Default: value of spark.network.timeout +@Since { version = "1.2.0" } +`spark.rpc.io.connectionTimeout`: Duration? + +/// When running on a [standalone deploy cluster](https://spark.apache.org/docs/latest/spark-standalone.html) or a [Mesos cluster in "coarse-grained" sharing mode](https://spark.apache.org/docs/latest/running-on-mesos.html#mesos-run-modes), the maximum amount of CPU cores to request for the application from across the cluster (not from each machine). +/// +/// If not set, the default will be `spark.deploy.defaultCores` on Spark's standalone cluster manager, or infinite (all available cores) on Mesos. +/// +/// Default: (not set) +@Reserved +@Since { version = "0.6.0" } +`spark.cores.max`: Null? + +/// How long to wait to launch a data-local task before giving up and launching it on a less-local node. +/// +/// The same wait will be used to step through multiple locality levels (process-local, node-local, rack-local and then any). +/// It is also possible to customize the waiting time for each level by setting `spark.locality.wait.node`, etc. +/// You should increase this setting if your tasks are long and see poor locality, but the default usually works well. +/// +/// Default: `3.s` +@Since { version = "0.5.0" } +`spark.locality.wait`: Duration? + +/// Customize the locality wait for node locality. +/// +/// For example, you can set this to 0 to skip node locality and search immediately for rack locality (if your cluster has rack information). +/// +/// Default: `spark.locality.wait` +@Since { version = "0.8.0" } +`spark.locality.wait.node`: Duration? + +/// Customize the locality wait for process locality. +/// +/// This affects tasks that attempt to access cached data in a particular executor process. +/// +/// Default: `spark.locality.wait` +@Since { version = "0.8.0" } +`spark.locality.wait.process`: Duration? + +/// Customize the locality wait for rack locality. +/// +/// Default: `spark.locality.wait` +@Since { version = "0.8.0" } +`spark.locality.wait.rack`: Duration? + +/// Maximum amount of time to wait for resources to register before scheduling begins. +/// +/// Default: `30.s` +@Since { version = "1.1.1" } +`spark.scheduler.maxRegisteredResourcesWaitingTime`: Duration? + +/// The minimum ratio of registered resources (registered resources / total expected resources) (resources are executors in yarn mode and Kubernetes mode, CPU cores in standalone mode and Mesos coarse-grained mode \['spark.cores.max' value is total expected resources for Mesos coarse-grained mode\] ) to wait for before scheduling begins. +/// +/// Specified as a double between 0.0 and 1.0. Regardless of whether the minimum ratio of resources has been reached, the maximum amount of time it will wait before scheduling begins is controlled by config `spark.scheduler.maxRegisteredResourcesWaitingTime`. +/// +/// Default: 0.8 for KUBERNETES mode; 0.8 for YARN mode; 0.0 for standalone mode and Mesos coarse-grained mode +@Since { version = "1.1.1" } +`spark.scheduler.minRegisteredResourcesRatio`: Float? 
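The locality and resource-registration settings above compose the same way; this sketch (paths assumed, values illustrative only) skips node locality as the `spark.locality.wait.node` doc comment describes:

// locality.pkl (hypothetical example)
amends "Properties.pkl" // assumed

`spark.locality.wait` = 3.s
`spark.locality.wait.node` = 0.s  // search immediately for rack locality
`spark.scheduler.maxRegisteredResourcesWaitingTime` = 45.s
`spark.scheduler.minRegisteredResourcesRatio` = 0.9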
+ +/// The [scheduling mode](https://spark.apache.org/docs/latest/job-scheduling.html#scheduling-within-an-application) between jobs submitted to the same SparkContext. +/// +/// Can be set to `FAIR` to use fair sharing instead of queueing jobs one after another. +/// Useful for multi-user services. +/// +/// Default: `"FIFO"` +@Since { version = "0.8.0" } +`spark.scheduler.mode`: String? + +/// The interval length for the scheduler to revive the worker resource offers to run tasks. +/// +/// Default: `1.s` +@Since { version = "0.8.1" } +`spark.scheduler.revive.interval`: Duration? + +/// The default capacity for event queues. +/// +/// Spark will try to initialize an event queue using capacity specified by \`spark.scheduler.listenerbus.eventqueue.queueName.capacity\` first. +/// If it's not configured, Spark will use the default capacity specified by this config. +/// Note that capacity must be greater than 0. Consider increasing value (e.g. 20000) if listener events are dropped. +/// Increasing this value may result in the driver using more memory. +/// +/// Default: `10000` +@Since { version = "2.3.0" } +`spark.scheduler.listenerbus.eventqueue.capacity`: Int? + +/// Capacity for shared event queue in Spark listener bus, which hold events for external listener(s) that register to the listener bus. +/// +/// Consider increasing value, if the listener events corresponding to shared queue are dropped. +/// Increasing this value may result in the driver using more memory. +/// +/// Default: `spark.scheduler.listenerbus.eventqueue.capacity` +@Since { version = "3.0.0" } +`spark.scheduler.listenerbus.eventqueue.shared.capacity`: Int? + +/// Capacity for appStatus event queue, which hold events for internal application status listeners. +/// +/// Consider increasing value, if the listener events corresponding to appStatus queue are dropped. +/// Increasing this value may result in the driver using more memory. +/// +/// Default: `spark.scheduler.listenerbus.eventqueue.capacity` +@Since { version = "3.0.0" } +`spark.scheduler.listenerbus.eventqueue.appStatus.capacity`: Int? + +/// Capacity for executorManagement event queue in Spark listener bus, which hold events for internal executor management listeners. +/// +/// Consider increasing value if the listener events corresponding to executorManagement queue are dropped. +/// Increasing this value may result in the driver using more memory. +/// +/// Default: `spark.scheduler.listenerbus.eventqueue.capacity` +@Since { version = "3.0.0" } +`spark.scheduler.listenerbus.eventqueue.executorManagement.capacity`: Int? + +/// Capacity for eventLog queue in Spark listener bus, which hold events for Event logging listeners that write events to eventLogs. +/// +/// Consider increasing value if the listener events corresponding to eventLog queue are dropped. +/// Increasing this value may result in the driver using more memory. +/// +/// Default: `spark.scheduler.listenerbus.eventqueue.capacity` +@Since { version = "3.0.0" } +`spark.scheduler.listenerbus.eventqueue.eventLog.capacity`: Int? + +/// Capacity for streams queue in Spark listener bus, which hold events for internal streaming listener. +/// +/// Consider increasing value if the listener events corresponding to streams queue are dropped. +/// Increasing this value may result in the driver using more memory. +/// +/// Default: `spark.scheduler.listenerbus.eventqueue.capacity` +@Since { version = "3.0.0" } +`spark.scheduler.listenerbus.eventqueue.streams.capacity`: Int? 
+ +/// If set to "true", Spark will merge ResourceProfiles when different profiles are specified in RDDs that get combined into a single stage. +/// +/// When they are merged, Spark chooses the maximum of each resource and creates a new ResourceProfile. +/// The default of false results in Spark throwing an exception if multiple different ResourceProfiles are found in RDDs going into the same stage. +/// +/// Default: `false` +@Since { version = "3.1.0" } +`spark.scheduler.resource.profileMergeConflicts`: Boolean? + +/// The timeout in seconds to wait to acquire a new executor and schedule a task before aborting a TaskSet which is unschedulable because all executors are excluded due to task failures. +/// +/// Default: `120.s` +@Since { version = "2.4.1" } +`spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout`: Duration? + +/// If set to "true", prevent Spark from scheduling tasks on executors that have been excluded due to too many task failures. +/// +/// The algorithm used to exclude executors and nodes can be further controlled by the other "spark.excludeOnFailure" configuration options. +/// +/// Default: `false` +@Since { version = "2.1.0" } +`spark.excludeOnFailure.enabled`: Boolean? + +/// (Experimental) How long a node or executor is excluded for the entire application, before it is unconditionally removed from the excludelist to attempt running new tasks. +/// +/// Default: `1.h` +@Since { version = "2.1.0" } +`spark.excludeOnFailure.timeout`: Duration? + +/// (Experimental) For a given task, how many times it can be retried on one executor before the executor is excluded for that task. +/// +/// Default: `1` +@Since { version = "2.1.0" } +`spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor`: Int? + +/// (Experimental) For a given task, how many times it can be retried on one node, before the entire node is excluded for that task. +/// +/// Default: `2` +@Since { version = "2.1.0" } +`spark.excludeOnFailure.task.maxTaskAttemptsPerNode`: Int? + +/// (Experimental) How many different tasks must fail on one executor, within one stage, before the executor is excluded for that stage. +/// +/// Default: `2` +@Since { version = "2.1.0" } +`spark.excludeOnFailure.stage.maxFailedTasksPerExecutor`: Int? + +/// (Experimental) How many different executors are marked as excluded for a given stage, before the entire node is marked as failed for the stage. +/// +/// Default: `2` +@Since { version = "2.1.0" } +`spark.excludeOnFailure.stage.maxFailedExecutorsPerNode`: Int? + +/// (Experimental) How many different tasks must fail on one executor, in successful task sets, before the executor is excluded for the entire application. +/// +/// Excluded executors will be automatically added back to the pool of available resources after the timeout specified by `spark.excludeOnFailure.timeout`. +/// Note that with dynamic allocation, though, the executors may get marked as idle and be reclaimed by the cluster manager. +/// +/// Default: `2` +@Since { version = "2.2.0" } +`spark.excludeOnFailure.application.maxFailedTasksPerExecutor`: Int? + +/// (Experimental) How many different executors must be excluded for the entire application, before the node is excluded for the entire application. +/// +/// Excluded nodes will be automatically added back to the pool of available resources after the timeout specified by `spark.excludeOnFailure.timeout`. +/// Note that with dynamic allocation, though, the executors on the node may get marked as idle and be reclaimed by the cluster manager. 
+/// +/// Default: `2` +@Since { version = "2.2.0" } +`spark.excludeOnFailure.application.maxFailedExecutorsPerNode`: Int? + +/// (Experimental) If set to "true", allow Spark to automatically kill the executors when they are excluded on fetch failure or excluded for the entire application, as controlled by spark.killExcludedExecutors.application.\*. +/// +/// Note that, when an entire node is added excluded, all of the executors on that node will be killed. +/// +/// Default: `false` +@Since { version = "2.2.0" } +`spark.excludeOnFailure.killExcludedExecutors`: Boolean? + +/// (Experimental) If set to "true", Spark will exclude the executor immediately when a fetch failure happens. +/// +/// If external shuffle service is enabled, then the whole node will be excluded. +/// +/// Default: `false` +@Since { version = "2.3.0" } +`spark.excludeOnFailure.application.fetchFailure.enabled`: Boolean? + +/// If set to "true", performs speculative execution of tasks. +/// +/// This means if one or more tasks are running slowly in a stage, they will be re-launched. +/// +/// Default: `false` +@Since { version = "0.6.0" } +`spark.speculation`: Boolean? + +/// How often Spark will check for tasks to speculate. +/// +/// Default: `100.ms` +@Since { version = "0.6.0" } +`spark.speculation.interval`: Duration? + +/// How many times slower a task is than the median to be considered for speculation. +/// +/// Default: `1.5` +@Since { version = "0.6.0" } +`spark.speculation.multiplier`: Float? + +/// Fraction of tasks which must be complete before speculation is enabled for a particular stage. +/// +/// Default: `0.75` +@Since { version = "0.6.0" } +`spark.speculation.quantile`: Float? + +/// Minimum amount of time a task runs before being considered for speculation. +/// +/// This can be used to avoid launching speculative copies of tasks that are very short. +/// +/// Default: `100.ms` +@Since { version = "3.2.0" } +`spark.speculation.minTaskRuntime`: Duration? + +/// Task duration after which scheduler would try to speculative run the task. +/// +/// If provided, tasks would be speculatively run if current stage contains less tasks than or equal to the number of slots on a single executor and the task is taking longer time than the threshold. +/// This config helps speculate stage with very few tasks. +/// Regular speculation configs may also apply if the executor slots are large enough. +/// E.g. tasks might be re-launched if there are enough successful runs even though the threshold hasn't been reached. +/// The number of slots is computed based on the conf values of spark.executor.cores and spark.task.cpus minimum 1. Default unit is bytes, unless otherwise specified. +/// +/// Default: `null` +@Since { version = "3.0.0" } +`spark.speculation.task.duration.threshold`: String? + +/// Number of cores to allocate for each task. +/// +/// Default: `1` +@Since { version = "0.5.0" } +`spark.task.cpus`: Int? + +/// Amount of a particular resource type to allocate for each task, note that this can be a double. +/// +/// If this is specified you must also provide the executor config `spark.executor.resource.{resourceName}.amount` and any corresponding discovery configs so that your executors are created with that resource type. +/// In addition to whole amounts, a fractional amount (for example, 0.25, which means 1/4th of a resource) may be specified. +/// Fractional amounts must be less than or equal to 0.5, or in other words, the minimum amount of resource sharing is 2 tasks per resource. 
+/// Additionally, fractional amounts are floored in order to assign resource slots (e.g. a 0.2222 configuration, or 1/0.2222 slots will become 4 tasks/resource, not 5). +/// +/// Default: `1` +@Since { version = "3.0.0" } +`spark.task.resource.{resourceName}.amount`: Mapping? + +/// Number of continuous failures of any particular task before giving up on the job. +/// +/// The total number of failures spread across different tasks will not cause the job to fail; a particular task has to fail this number of attempts continuously. +/// If any attempt succeeds, the failure count for the task will be reset. +/// Should be greater than or equal to 1. Number of allowed retries = this value - 1. +/// +/// Default: `4` +@Since { version = "0.8.0" } +`spark.task.maxFailures`: Int? + +/// Enables monitoring of killed / interrupted tasks. +/// +/// When set to true, any task which is killed will be monitored by the executor until that task actually finishes executing. +/// See the other `spark.task.reaper.*` configurations for details on how to control the exact behavior of this monitoring. +/// When set to false (the default), task killing will use an older code path which lacks such monitoring. +/// +/// Default: `false` +@Since { version = "2.0.3" } +`spark.task.reaper.enabled`: Boolean? + +/// When `spark.task.reaper.enabled = true`, this setting controls the frequency at which executors will poll the status of killed tasks. +/// +/// If a killed task is still running when polled then a warning will be logged and, by default, a thread-dump of the task will be logged (this thread dump can be disabled via the `spark.task.reaper.threadDump` setting, which is documented below). +/// +/// Default: `10.s` +@Since { version = "2.0.3" } +`spark.task.reaper.pollingInterval`: Duration? + +/// When `spark.task.reaper.enabled = true`, this setting controls whether task thread dumps are logged during periodic polling of killed tasks. +/// +/// Set this to false to disable collection of thread dumps. +/// +/// Default: `true` +@Since { version = "2.0.3" } +`spark.task.reaper.threadDump`: Boolean? + +/// When `spark.task.reaper.enabled = true`, this setting specifies a timeout after which the executor JVM will kill itself if a killed task has not stopped running. +/// +/// The default value, -1, disables this mechanism and prevents the executor from self-destructing. +/// The purpose of this setting is to act as a safety-net to prevent runaway noncancellable tasks from rendering an executor unusable. +/// +/// Default: `-1` +@Since { version = "2.0.3" } +`spark.task.reaper.killTimeout`: Int? + +/// Number of consecutive stage attempts allowed before a stage is aborted. +/// +/// Default: `4` +@Since { version = "2.2.0" } +`spark.stage.maxConsecutiveAttempts`: Int? + +/// The timeout in seconds for each `barrier()` call from a barrier task. +/// +/// If the coordinator didn't receive all the sync messages from barrier tasks within the configured time, throw a SparkException to fail all the tasks. +/// The default value is set to 31536000(3600 \* 24 \* 365) so the `barrier()` call shall wait for one year. +/// +/// Default: `365.0` +@Since { version = "2.4.0" } +`spark.barrier.sync.timeout`: Float? + +/// Time in seconds to wait between a max concurrent tasks check failure and the next check. +/// +/// A max concurrent tasks check ensures the cluster can launch more concurrent tasks than required by a barrier stage on job submitted. 
+/// The check can fail in case a cluster has just started and not enough executors have registered, so we wait for a little while and try to perform the check again. +/// If the check fails more than a configured max failure times for a job then fail current job submission. +/// Note this config only applies to jobs that contain one or more barrier stages, we won't perform the check on non-barrier jobs. +/// +/// Default: `15.s` +@Since { version = "2.4.0" } +`spark.scheduler.barrier.maxConcurrentTasksCheck.interval`: Duration? + +/// Number of max concurrent tasks check failures allowed before fail a job submission. +/// +/// A max concurrent tasks check ensures the cluster can launch more concurrent tasks than required by a barrier stage on job submitted. +/// The check can fail in case a cluster has just started and not enough executors have registered, so we wait for a little while and try to perform the check again. +/// If the check fails more than a configured max failure times for a job then fail current job submission. +/// Note this config only applies to jobs that contain one or more barrier stages, we won't perform the check on non-barrier jobs. +/// +/// Default: `40` +@Since { version = "2.4.0" } +`spark.scheduler.barrier.maxConcurrentTasksCheck.maxFailures`: Int? + +/// Whether to use dynamic resource allocation, which scales the number of executors registered with this application up and down based on the workload. +/// +/// For more detail, see the description [here](https://spark.apache.org/docs/latest/job-scheduling.html#dynamic-resource-allocation). +/// +/// This requires `spark.shuffle.service.enabled` or `spark.dynamicAllocation.shuffleTracking.enabled` to be set. +/// The following configurations are also relevant: `spark.dynamicAllocation.minExecutors`, `spark.dynamicAllocation.maxExecutors`, and `spark.dynamicAllocation.initialExecutors` `spark.dynamicAllocation.executorAllocationRatio` +/// +/// Default: `false` +@Since { version = "1.2.0" } +`spark.dynamicAllocation.enabled`: Boolean? + +/// If dynamic allocation is enabled and an executor has been idle for more than this duration, the executor will be removed. +/// +/// For more detail, see this [description](https://spark.apache.org/docs/latest/job-scheduling.html#resource-allocation-policy). +/// +/// Default: `60.s` +@Since { version = "1.2.0" } +`spark.dynamicAllocation.executorIdleTimeout`: Duration? + +/// If dynamic allocation is enabled and an executor which has cached data blocks has been idle for more than this duration, the executor will be removed. +/// +/// For more details, see this [description](https://spark.apache.org/docs/latest/job-scheduling.html#resource-allocation-policy). +/// +/// Default: `infinity` +@Since { version = "1.4.0" } +`spark.dynamicAllocation.cachedExecutorIdleTimeout`: Number? + +/// Initial number of executors to run if dynamic allocation is enabled. +/// +/// +/// If \`--num-executors\` (or \`spark.executor.instances\`) is set and larger than this value, it will be used as the initial number of executors. +/// +/// Default: `spark.dynamicAllocation.minExecutors` +@Since { version = "1.3.0" } +`spark.dynamicAllocation.initialExecutors`: Int? + +/// Upper bound for the number of executors if dynamic allocation is enabled. +/// +/// Default: `infinity` +@Since { version = "1.2.0" } +`spark.dynamicAllocation.maxExecutors`: Number? + +/// Lower bound for the number of executors if dynamic allocation is enabled. 
+/// +/// Default: `0` +@Since { version = "1.2.0" } +`spark.dynamicAllocation.minExecutors`: Int? + +/// By default, the dynamic allocation will request enough executors to maximize the parallelism according to the number of tasks to process. +/// +/// While this minimizes the latency of the job, with small tasks this setting can waste a lot of resources due to executor allocation overhead, as some executor might not even do any work. +/// This setting allows to set a ratio that will be used to reduce the number of executors w.r.t. full parallelism. +/// Defaults to 1.0 to give maximum parallelism. +/// 0.5 will divide the target number of executors by 2 The target number of executors computed by the dynamicAllocation can still be overridden by the `spark.dynamicAllocation.minExecutors` and `spark.dynamicAllocation.maxExecutors` settings +/// +/// Default: `1` +@Since { version = "2.4.0" } +`spark.dynamicAllocation.executorAllocationRatio`: Float? + +/// If dynamic allocation is enabled and there have been pending tasks backlogged for more than this duration, new executors will be requested. +/// +/// For more detail, see this [description](https://spark.apache.org/docs/latest/job-scheduling.html#resource-allocation-policy). +/// +/// Default: `1.s` +@Since { version = "1.2.0" } +`spark.dynamicAllocation.schedulerBacklogTimeout`: Duration? + +/// Same as `spark.dynamicAllocation.schedulerBacklogTimeout`, but used only for subsequent executor requests. +/// +/// For more detail, see this [description](https://spark.apache.org/docs/latest/job-scheduling.html#resource-allocation-policy). +/// +/// Default: `"schedulerBacklogTimeout"` +@Since { version = "1.2.0" } +`spark.dynamicAllocation.sustainedSchedulerBacklogTimeout`: String? + +/// Enables shuffle file tracking for executors, which allows dynamic allocation without the need for an external shuffle service. +/// +/// This option will try to keep alive executors that are storing shuffle data for active jobs. +/// +/// Default: `false` +@Since { version = "3.0.0" } +`spark.dynamicAllocation.shuffleTracking.enabled`: Boolean? + +/// When shuffle tracking is enabled, controls the timeout for executors that are holding shuffle data. +/// +/// The default value means that Spark will rely on the shuffles being garbage collected to be able to release executors. +/// If for some reason garbage collection is not cleaning up shuffles quickly enough, this option can be used to control when to time out executors even when they are storing shuffle data. +/// +/// Default: `infinity` +@Since { version = "3.0.0" } +`spark.dynamicAllocation.shuffleTracking.timeout`: Number? + +/// Number of threads used in the server thread pool +/// +/// Default: Fall back on spark.rpc.io.serverThreads +@Since { version = "1.6.0" } +`spark.{driver|executor}.rpc.io.serverThreads`: Mapping<"driver"|"executor", UInt>? + +/// Number of threads used in the client thread pool +/// +/// Default: Fall back on spark.rpc.io.clientThreads +@Since { version = "1.6.0" } +`spark.{driver|executor}.rpc.io.clientThreads`: Mapping<"driver"|"executor", UInt>? + +/// Number of threads used in RPC message dispatcher thread pool +/// +/// Default: Fall back on spark.rpc.netty.dispatcher.numThreads +@Since { version = "3.0.0" } +`spark.{driver|executor}.rpc.netty.dispatcher.numThreads`: Mapping<"driver"|"executor", UInt>? + +/// Enables or disables Spark Streaming's internal backpressure mechanism (since 1.5). 
+/// +/// This enables the Spark Streaming to control the receiving rate based on the current batch scheduling delays and processing times so that the system receives only as fast as the system can process. +/// Internally, this dynamically sets the maximum receiving rate of receivers. +/// This rate is upper bounded by the values `spark.streaming.receiver.maxRate` and `spark.streaming.kafka.maxRatePerPartition` if they are set (see below). +/// +/// Default: `false` +@Since { version = "1.5.0" } +`spark.streaming.backpressure.enabled`: Boolean? + +/// This is the initial maximum receiving rate at which each receiver will receive data for the first batch when the backpressure mechanism is enabled. +/// +/// Default: `null` +@Since { version = "2.0.0" } +`spark.streaming.backpressure.initialRate`: UInt? + +/// Interval at which data received by Spark Streaming receivers is chunked into blocks of data before storing them in Spark. +/// +/// Minimum recommended - 50 ms. See the [performance tuning](https://spark.apache.org/docs/latest/streaming-programming-guide.html#level-of-parallelism-in-data-receiving) section in the Spark Streaming programming guide for more details. +/// +/// Default: `200.ms` +@Since { version = "0.8.0" } +`spark.streaming.blockInterval`: Duration? + +/// Maximum rate (number of records per second) at which each receiver will receive data. +/// +/// Effectively, each stream will consume at most this number of records per second. +/// Setting this configuration to 0 or a negative number will put no limit on the rate. +/// See the [deployment guide](https://spark.apache.org/docs/latest/streaming-programming-guide.html#deploying-applications) in the Spark Streaming programming guide for mode details. +/// +/// Default: `null` +@Since { version = "1.0.2" } +`spark.streaming.receiver.maxRate`: UInt? + +/// Enable write-ahead logs for receivers. +/// +/// All the input data received through receivers will be saved to write-ahead logs that will allow it to be recovered after driver failures. +/// See the [deployment guide](https://spark.apache.org/docs/latest/streaming-programming-guide.html#deploying-applications) in the Spark Streaming programming guide for more details. +/// +/// Default: `false` +@Since { version = "1.2.1" } +`spark.streaming.receiver.writeAheadLog.enable`: Boolean? + +/// Force RDDs generated and persisted by Spark Streaming to be automatically unpersisted from Spark's memory. +/// +/// The raw input data received by Spark Streaming is also automatically cleared. +/// Setting this to false will allow the raw data and persisted RDDs to be accessible outside the streaming application as they will not be cleared automatically. +/// But it comes at the cost of higher memory usage in Spark. +/// +/// Default: `true` +@Since { version = "0.9.0" } +`spark.streaming.unpersist`: Boolean? + +/// If `true`, Spark shuts down the `StreamingContext` gracefully on JVM shutdown rather than immediately. +/// +/// Default: `false` +@Since { version = "1.4.0" } +`spark.streaming.stopGracefullyOnShutdown`: Boolean? + +/// Maximum rate (number of records per second) at which data will be read from each Kafka partition when using the new Kafka direct stream API. +/// +/// See the [Kafka Integration guide](https://spark.apache.org/docs/latest/streaming-kafka-0-10-integration.html) for more details. +/// +/// Default: `null` +@Since { version = "1.3.0" } +`spark.streaming.kafka.maxRatePerPartition`: UInt? 
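For the streaming settings above, a final hedged sketch (template path assumed) that enables backpressure and caps the receiver rate; evaluating such a module with `pkl eval` emits flat `key = value` lines via the PropertiesRenderer configured in PropertiesBase.pkl later in this diff:

// streaming.pkl (hypothetical example)
amends "Properties.pkl" // assumed

`spark.streaming.backpressure.enabled` = true
`spark.streaming.receiver.maxRate` = 10000  // records per second, per receiver
`spark.streaming.blockInterval` = 200.ms
`spark.streaming.stopGracefullyOnShutdown` = true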
+ +/// Minimum rate (number of records per second) at which data will be read from each Kafka partition when using the new Kafka direct stream API. +/// +/// Default: `1` +@Since { version = "2.4.0" } +`spark.streaming.kafka.minRatePerPartition`: Int? + +/// Whether to close the file after writing a write-ahead log record on the driver. +/// +/// Set this to 'true' when you want to use S3 (or any file system that does not support flushing) for the metadata WAL on the driver. +/// +/// Default: `false` +@Since { version = "1.6.0" } +`spark.streaming.driver.writeAheadLog.closeFileAfterWrite`: Boolean? + +/// Whether to close the file after writing a write-ahead log record on the receivers. +/// +/// Set this to 'true' when you want to use S3 (or any file system that does not support flushing) for the data WAL on the receivers. +/// +/// Default: `false` +@Since { version = "1.6.0" } +`spark.streaming.receiver.writeAheadLog.closeFileAfterWrite`: Boolean? + +/// Number of threads used by RBackend to handle RPC calls from SparkR package. +/// +/// Default: `2` +@Since { version = "1.4.0" } +`spark.r.numRBackendThreads`: Int? + +/// Executable for executing R scripts in cluster modes for both driver and workers. +/// +/// Default: `"Rscript"` +@Since { version = "1.5.3" } +`spark.r.command`: String? + +/// Executable for executing R scripts in client modes for driver. +/// +/// Ignored in cluster modes. +/// +/// Default: `spark.r.command` +@Since { version = "1.5.3" } +`spark.r.driver.command`: String? + +/// Executable for executing sparkR shell in client modes for driver. +/// +/// Ignored in cluster modes. +/// It is the same as environment variable `SPARKR_DRIVER_R`, but take precedence over it. `spark.r.shell.command` is used for sparkR shell while `spark.r.driver.command` is used for running R script. +/// +/// Default: `"R"` +@Since { version = "2.1.0" } +`spark.r.shell.command`: String? + +/// Connection timeout set by R process on its connection to RBackend in seconds. +/// +/// Default: `6000` +@Since { version = "2.1.0" } +`spark.r.backendConnectionTimeout`: Int? + +/// Interval for heartbeats sent from SparkR backend to R process to prevent connection timeout. +/// +/// Default: `100` +@Since { version = "2.1.0" } +`spark.r.heartBeatInterval`: Int? + +/// Checkpoint interval for graph and message in Pregel. +/// +/// It used to avoid stackOverflowError due to long lineage chains after lots of iterations. +/// The checkpoint is disabled by default. +/// +/// Default: `-1` +@Since { version = "2.2.0" } +`spark.graphx.pregel.checkpointInterval`: Int? + +/// The recovery mode setting to recover submitted Spark jobs with cluster mode when it failed and relaunches. +/// +/// This is only applicable for cluster mode when running with Standalone or Mesos. +/// +/// Default: `null` +@Since { version = "0.8.1" } +`spark.deploy.recoveryMode`: String? + +/// When \`spark.deploy.recoveryMode\` is set to ZOOKEEPER, this configuration is used to set the zookeeper URL to connect to. +/// +/// Default: `null` +@Since { version = "0.8.1" } +`spark.deploy.zookeeper.url`: String? + +/// When \`spark.deploy.recoveryMode\` is set to ZOOKEEPER, this configuration is used to set the zookeeper directory to store recovery state. +/// +/// Default: `null` +@Since { version = "0.8.1" } +`spark.deploy.zookeeper.dir`: String? + +/// Class name of the implementation of `MergedShuffleFileManager` that manages push-based shuffle. +/// +/// This acts as a server side config to disable or enable push-based shuffle. 
+/// By default, push-based shuffle is disabled at the server side. +/// +/// +/// To enable push-based shuffle on the server side, set this config to `org.apache.spark.network.shuffle.RemoteBlockPushResolver` +/// +/// Default: org.apache.spark.network.shuffle.NoOpMergedShuffleFileManager +@Since { version = "3.2.0" } +`spark.shuffle.push.server.mergedShuffleFileManagerImpl`: String? + +/// The minimum size of a chunk when dividing a merged shuffle file into multiple chunks during push-based shuffle. +/// +/// A merged shuffle file consists of multiple small shuffle blocks. +/// Fetching the complete merged shuffle file in a single disk I/O increases the memory requirements for both the clients and the external shuffle services. +/// Instead, the external shuffle service serves the merged file in `MB-sized chunks`. +/// +/// This configuration controls how big a chunk can get. +/// A corresponding index file for each merged shuffle file will be generated indicating chunk boundaries. +/// +/// +/// Setting this too high would increase the memory requirements on both the clients and the external shuffle service. +/// +/// +/// Setting this too low would increase the overall number of RPC requests to external shuffle service unnecessarily. +/// +/// Default: `2.mib` +@Since { version = "3.2.0" } +`spark.shuffle.push.server.minChunkSizeInMergedShuffleFile`: DataSize? + +/// The maximum size of cache in memory which could be used in push-based shuffle for storing merged index files. +/// +/// This cache is in addition to the one configured via `spark.shuffle.service.index.cache.size`. +/// +/// Default: `100.mib` +@Since { version = "3.2.0" } +`spark.shuffle.push.server.mergedIndexCacheSize`: DataSize? + +/// Set to true to enable push-based shuffle on the client side and works in conjunction with the server side flag `spark.shuffle.push.server.mergedShuffleFileManagerImpl`. +/// +/// Default: `false` +@Since { version = "3.2.0" } +`spark.shuffle.push.enabled`: Boolean? + +/// The amount of time driver waits in seconds, after all mappers have finished for a given shuffle map stage, before it sends merge finalize requests to remote external shuffle services. +/// +/// This gives the external shuffle services extra time to merge blocks. +/// Setting this too long could potentially lead to performance regression. +/// +/// Default: `10.s` +@Since { version = "3.2.0" } +`spark.shuffle.push.finalize.timeout`: Duration? + +/// Maximum number of merger locations cached for push-based shuffle. +/// +/// Currently, merger locations are hosts of external shuffle services responsible for handling pushed blocks, merging them and serving merged blocks for later shuffle fetch. +/// +/// Default: `500` +@Since { version = "3.2.0" } +`spark.shuffle.push.maxRetainedMergerLocations`: Int? + +/// Ratio used to compute the minimum number of shuffle merger locations required for a stage based on the number of partitions for the reducer stage. +/// +/// For example, a reduce stage which has 100 partitions and uses the default value 0.05 requires at least 5 unique merger locations to enable push-based shuffle. +/// +/// Default: `0.05` +@Since { version = "3.2.0" } +`spark.shuffle.push.mergersMinThresholdRatio`: Float? + +/// The static threshold for number of shuffle push merger locations should be available in order to enable push-based shuffle for a stage. +/// +/// Note this config works in conjunction with `spark.shuffle.push.mergersMinThresholdRatio`. 
+/// Maximum of `spark.shuffle.push.mergersMinStaticThreshold` and `spark.shuffle.push.mergersMinThresholdRatio` ratio number of mergers needed to enable push-based shuffle for a stage. +/// For example: with 1000 partitions for the child stage with spark.shuffle.push.mergersMinStaticThreshold as 5 and spark.shuffle.push.mergersMinThresholdRatio set to 0.05, we would need at least 50 mergers to enable push-based shuffle for that stage. +/// +/// Default: `5` +@Since { version = "3.2.0" } +`spark.shuffle.push.mergersMinStaticThreshold`: Int? + +/// The max size of an individual block to push to the remote external shuffle services. +/// +/// Blocks larger than this threshold are not pushed to be merged remotely. +/// These shuffle blocks will be fetched in the original manner. +/// +/// +/// Setting this too high would result in more blocks to be pushed to remote external shuffle services but those are already efficiently fetched with the existing mechanisms resulting in additional overhead of pushing the large blocks to remote external shuffle services. +/// It is recommended to set `spark.shuffle.push.maxBlockSizeToPush` lesser than `spark.shuffle.push.maxBlockBatchSize` config's value. +/// +/// +/// Setting this too low would result in lesser number of blocks getting merged and directly fetched from mapper external shuffle service results in higher small random reads affecting overall disk I/O performance. +/// +/// Default: `1.mib` +@Since { version = "3.2.0" } +`spark.shuffle.push.maxBlockSizeToPush`: DataSize? + +/// The max size of a batch of shuffle blocks to be grouped into a single push request. +/// +/// Default is set to `3m` in order to keep it slightly higher than `spark.storage.memoryMapThreshold` default which is `2m` as it is very likely that each batch of block gets memory mapped which incurs higher overhead. +/// +/// Default: `3.mib` +@Since { version = "3.2.0" } +`spark.shuffle.push.maxBlockBatchSize`: DataSize? + +/// Driver will wait for merge finalization to complete only if total shuffle data size is more than this threshold. +/// +/// If total shuffle size is less, driver will immediately finalize the shuffle output. +/// +/// Default: `500.mib` +@Since { version = "3.3.0" } +`spark.shuffle.push.minShuffleSizeToWait`: DataSize? + +/// Fraction of minimum map partitions that should be push complete before driver starts shuffle merge finalization during push based shuffle. +/// +/// Default: `1.0` +@Since { version = "3.3.0" } +`spark.shuffle.push.minCompletedPushRatio`: Float? + +typealias ResourceName = String diff --git a/packages/org.apache.spark/PropertiesBase.pkl b/packages/org.apache.spark/PropertiesBase.pkl new file mode 100644 index 0000000..522ee88 --- /dev/null +++ b/packages/org.apache.spark/PropertiesBase.pkl @@ -0,0 +1,42 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===//
+abstract module org.apache.spark.PropertiesBase
+
+import "pkl:semver"
+import "utils.pkl"
+
+/// The Spark version to use these properties with.
+hidden targetSparkVersion: String?
+
+function since(sparkVersion: String): Boolean =
+  if (targetSparkVersion == null) true
+  else
+    let (parsedVersion = semver.Version(sparkVersion))
+      if (parsedVersion.isGreaterThan(semver.Version(targetSparkVersion)))
+        throw("This property exists since Spark version \(parsedVersion), but target Spark version is \(targetSparkVersion).")
+      else
+        true
+
+hidden effectiveProperties: Map =
+  module.toMap().flatMap((key, value) -> utils.convertProperty(key, value))
+
+/// Indicates that the annotated property cannot be set by the user.
+class Reserved extends Annotation
+
+output {
+  value = effectiveProperties
+  renderer = new PropertiesRenderer {}
+}
diff --git a/packages/org.apache.spark/tests/PropertiesBase.pkl b/packages/org.apache.spark/tests/PropertiesBase.pkl
new file mode 100644
index 0000000..c1d8c81
--- /dev/null
+++ b/packages/org.apache.spark/tests/PropertiesBase.pkl
@@ -0,0 +1,47 @@
+//===----------------------------------------------------------------------===//
+// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//===----------------------------------------------------------------------===//
+module org.apache.spark.tests.PropertiesBase
+
+amends "pkl:test"
+
+import "../PropertiesBase.pkl"
+
+local class SampleSparkProperties extends PropertiesBase {
+  `spark.core.numbers`: Int?
+  `spark.{driver|executor}.something`: Mapping?
+  `spark.shards.[0-9].setting.[node].memory`: Mapping<String, Mapping<String, DataSize>>?
+  `spark.kubernetes.some.type.of.memory`: DataSize?
+  `spark.some.type.of.duration`: Duration?
+} + +examples { + ["rendering properties"] { + new SampleSparkProperties { + `spark.{driver|executor}.something` { + ["driver"] = "some driver setting" + ["executor"] = "some executor setting" + } + `spark.core.numbers` = 5 + `spark.shards.[0-9].setting.[node].memory` { + ["1"] { + ["this-node"] = 5.gib + } + } + `spark.kubernetes.some.type.of.memory` = 10.gib + `spark.some.type.of.duration` = 5.min + }.output.text + } +} diff --git a/packages/org.apache.spark/tests/PropertiesBase.pkl-expected.pcf b/packages/org.apache.spark/tests/PropertiesBase.pkl-expected.pcf new file mode 100644 index 0000000..b925451 --- /dev/null +++ b/packages/org.apache.spark/tests/PropertiesBase.pkl-expected.pcf @@ -0,0 +1,13 @@ +examples { + ["rendering properties"] { + """ + spark.core.numbers = 5 + spark.driver.something = some driver setting + spark.executor.something = some executor setting + spark.shards.1.setting.this-node.memory = 5g + spark.kubernetes.some.type.of.memory = 10Gi + spark.some.type.of.duration = 5min + + """ + } +} diff --git a/packages/org.apache.spark/tests/utils.pkl b/packages/org.apache.spark/tests/utils.pkl new file mode 100644 index 0000000..7cbe96c --- /dev/null +++ b/packages/org.apache.spark/tests/utils.pkl @@ -0,0 +1,56 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module org.apache.spark.tests.utils + +amends "pkl:test" + +import "../utils.pkl" + +examples { + ["convertIndexedProperty"] { + utils.convertIndexedProperty("foo.bar.[baz].biz.[buz]", new Mapping { + ["1"] = new Mapping { + ["11"] = "eleven" + ["12"] = "twelve" + } + ["2"] = new Mapping { + ["22"] = "two" + } + }) + utils.convertIndexedProperty("foo.bar.[baz].biz", new Mapping { + ["1"] = 1 + ["2"] = 2 + }) + } + ["flattenMapKeys"] { + utils.flattenMapKeys(new Mapping { + ["1"] = new Mapping { + ["11"] = "eleven" + ["12"] = "twelve" + } + ["2"] = new Mapping { + ["22"] = "two" + } + }, List()) + } + ["convertProperty"] { + utils.convertProperty("foo.bar.baz", 1) + utils.convertProperty("foo.env.[baz]", new Mapping { ["FOO"] = "foo"; ["BAR"] = "bar" }) + utils.convertProperty("foo.env.[baz].buz.{baz}", new Mapping { ["FOO"] = new Mapping { ["BAR"] = "bar" } }) + utils.convertProperty("spark.kubernetes.baz", 5.gib) + utils.convertProperty("spark.memory", 5.gib) + } +} diff --git a/packages/org.apache.spark/tests/utils.pkl-expected.pcf b/packages/org.apache.spark/tests/utils.pkl-expected.pcf new file mode 100644 index 0000000..7a65ca3 --- /dev/null +++ b/packages/org.apache.spark/tests/utils.pkl-expected.pcf @@ -0,0 +1,16 @@ +examples { + ["convertIndexedProperty"] { + Map("foo.bar.1.biz.11", "eleven", "foo.bar.1.biz.12", "twelve", "foo.bar.2.biz.22", "two") + Map("foo.bar.1.biz", 1, "foo.bar.2.biz", 2) + } + ["flattenMapKeys"] { + Map(List(".1", ".11"), "eleven", List(".1", ".12"), "twelve", List(".2", ".22"), "two") + } + ["convertProperty"] { + Map("foo.bar.baz", 1) + Map("foo.env.FOO", "foo", "foo.env.BAR", "bar") + Map("foo.env.FOO.buz.BAR", "bar") + Map("spark.kubernetes.baz", "5Gi") + Map("spark.memory", "5g") + } +} diff --git a/packages/org.apache.spark/utils.pkl b/packages/org.apache.spark/utils.pkl new file mode 100644 index 0000000..8b92457 --- /dev/null +++ b/packages/org.apache.spark/utils.pkl @@ -0,0 +1,104 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module org.apache.spark.utils + +/// Match indexed positions in a property. +/// +/// For example matches `.[0-9]` or `.[Foo]` or `.{Foo}` +// language=regexp +indexRegex: Regex = Regex(#""" + (?x) # turn on extended mode + \. # '.' 
literal + (?: + \{ # '{' literal + ([^}]+) # capture: any character except for '}' + } # followed by '}' + | # OR + \[ # '[' literal + ([^]]+) # capture: any character except for ']' + ] # folowed by ']' + ) + """#) + +// https://spark.apache.org/docs/latest/configuration.html#spark-properties +function convertDuration(dur: Duration): String = + if (dur.unit == "ns" || dur.unit == "us") convertDuration(dur.toUnit("ms")) + else "\(dur.value)\(dur.unit)" + +function convertValue(propName: String, propValue: Any): Any = + if (propValue is Duration) + convertDuration(propValue) + else if (propValue is DataSize) + if (propName.contains(".kubernetes.")) + // taken from io.k8s.K8sObject + let (unit = propValue.unit) + let (k8sUnit = + if (unit.length == 3) unit[0].toUpperCase() + unit[1] + else if (unit.length == 2) unit[0].toUpperCase() + else "" + ) + "\(propValue.value)\(k8sUnit)" + else + // https://spark.apache.org/docs/latest/configuration.html#spark-properties + let (bin = propValue.toBinaryUnit()) "\(bin.value)\(bin.unit.take(1))" + else propValue + +/// Converts a property to its expanded set of properties. +function convertProperty(propName: String, propValue: Any): Map = + if (propValue is Mapping) + convertIndexedProperty(propName, propValue) + else + Map(propName, convertValue(propName, propValue)) + +/// Flatten a deeply nested Mapping into a Map where the keys becomes a list +/// +/// For example: +/// +/// new Mapping { ["foo"] = new Mapping { ["bar"] = 100 } } +/// +/// Becomes: +/// +/// Map(List(".foo", ".bar"), 100) +/// +function flattenMapKeys(value: Mapping, path: List): Map, unknown> = + value + .toMap() + .flatMap((key, _value) -> + if (_value is Mapping) flattenMapKeys(_value, path.add("." + key)) + else Map(path.add("." + key), _value) + ) + +/// Performs substituting of indexed positions with the corresponding Mapping keys. +/// +/// Given prop `foo.bar.[baz].biz.[buz]` and value `new { [1] { [11] = "one" }; [2] { [22] = "two" } }`, +/// produces: +/// +/// Map( +/// "foo.bar.1.biz.11", "one", +/// "foo.bar.2.biz.22", "two" +/// ) +function convertIndexedProperty(propName: String, propValue: Mapping): Map = + let (propNameParts = propName.split(indexRegex)) + let (flattenedMap = flattenMapKeys(propValue, List())) + flattenedMap.map((key, value) -> + let (convertedProperty = propNameParts + .zip(key) + .flatMap((pair) -> List(pair.first, pair.second)) + .join("") + + if (propNameParts.length > key.length) propNameParts.last else "" + ) + Pair(convertedProperty, convertValue(convertedProperty, value)) + ) diff --git a/packages/org.openapis.v3/PklProject b/packages/org.openapis.v3/PklProject new file mode 100644 index 0000000..7f4d069 --- /dev/null +++ b/packages/org.openapis.v3/PklProject @@ -0,0 +1,21 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +/// Modules for describing [OpenAPI](https://www.openapis.org/) v3.x +amends "../basePklProject.pkl" + +package { + version = "1.0.0" +} diff --git a/packages/org.openapis.v3/PklProject.deps.json b/packages/org.openapis.v3/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/org.openapis.v3/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/org.openapis.v3/Reference.pkl b/packages/org.openapis.v3/Reference.pkl new file mode 100644 index 0000000..e503f3a --- /dev/null +++ b/packages/org.openapis.v3/Reference.pkl @@ -0,0 +1,29 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// A reference to other components in the specification, internally and externally. +/// +/// The Reference Object is defined by [JSON Reference](https://datatracker.ietf.org/doc/html/draft-pbryan-zyp-json-ref-03) +/// and follows the same structure, behavior and rules. +/// +/// For this specification, reference resolution is accomplished as defined by the JSON Reference specification +/// and not by the JSON Schema specification. +/// +/// https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.3.md#reference-object +@ModuleInfo { minPklVersion = "0.25.0" } +module org.openapis.v3.Reference + +/// The reference URI +`$ref`: Uri diff --git a/packages/org.openapis.v3/Schema.pkl b/packages/org.openapis.v3/Schema.pkl new file mode 100644 index 0000000..0954664 --- /dev/null +++ b/packages/org.openapis.v3/Schema.pkl @@ -0,0 +1,383 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// The Schema object as defined by the OpenAPI v3.0 Specification. +/// +/// The [Schema] object allows the definition of input and output data types. +/// These types can be objects, but also primitives and arrays. 
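+///
+/// A minimal usage sketch (illustrative only; the package import path is assumed):
+/// ```
+/// import "package://pkg.pkl-lang.org/pkl-pantry/org.openapis.v3@1.0.0#/Schema.pkl"
+///
+/// nameSchema: Schema = new {
+///   type = "string"
+///   minLength = 1
+///   description = "A non-empty name."
+/// }
+/// ```
+///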
+/// This object is an extended subset of +/// [JSON Schema Specification Wright Draft 00](http://json-schema.org/specification-links.html#draft-5) +/// (a.k.a. JSON Schema Draft 5). +/// +/// For more information about the properties, see +/// [JSON Schema Core](https://tools.ietf.org/html/draft-wright-json-schema-00) and +/// [JSON Schema Validation](https://tools.ietf.org/html/draft-wright-json-schema-validation-00). +/// +/// **NOTE**: OpenAPI v3.0's schema object is incompatible with OpenAPI v3.1's schema object. +/// For example, [nullable] is valid on v3.0, but is invalid in v3.1. +/// +/// +@ModuleInfo { minPklVersion = "0.25.0" } +open module org.openapis.v3.Schema + +import "Schema.pkl" +import "Reference.pkl" + +/// The basic type of the value represented by this schema. +/// +/// If this property is not defined, the value may be of any type. +type: ("string"|"number"|"integer"|"boolean"|"object"|"array")? + +/// An additional descriptor for the value represented by this schema. +/// +/// OAS uses several known formats to define in fine detail the data type being used. +/// +/// However, to support documentation needs, the [format] property is an open `string`-valued property, and can have any value. +/// Formats such as `"email"`, `"uuid"`, and so on, *may* be used even though undefined by this specification. +/// Types that are not accompanied by a `format` property follow the type definition in the JSON Schema. Tools that do +/// not recognize a specific [format] *may* default to the [type] alone, as if the [format] is not specified. +/// +/// The formats defined by the OAS are: +/// +/// Common Name | [type] | [format] | Comments +/// ----------- | ------ | -------- | -------- +/// integer | `integer` | `int32` | signed 32 bits +/// long | `integer` | `int64` | signed 64 bits +/// float | `number` | `float` | | +/// double | `number` | `double` | | +/// string | `string` | | | +/// byte | `string` | `byte` | base64 encoded characters +/// binary | `string` | `binary` | any sequence of octets +/// boolean | `boolean` | | | +/// date | `string` | `date` | As defined by `full-date` - [RFC3339](http://xml2rfc.ietf.org/public/rfc/html/rfc3339.html#anchor14) +/// dateTime | `string` | `date-time` | As defined by `date-time` - [RFC3339](http://xml2rfc.ietf.org/public/rfc/html/rfc3339.html#anchor14) +/// password | `string` | `password` | A hint to UIs to obscure input. +/// +/// +format: ("int32"|"int64"|"float"|"double"|"byte"|"binary"|"date"|"date-time"|"password"|String)? + +/// A short descriptor of this schema. +/// +/// +title: String? + +/// A long descriptor of this schema. Maybe written in Markdown. +/// +/// +description: String? + +/// Specifies a default value. +/// +/// This value is not used to fill in missing values during the validation process. +/// Non-validation tools such as documentation generators or form +/// generators may use this value to give hints to users about how to use +/// a value. However, [default] is typically used to express that if a +/// value is missing, then the value is semantically the same as if the +/// value was present with the default value. The value of [default] +/// should validate against the schema in which it resides, but that isn't +/// required. +/// +/// +default: Any? + +/// Restricts the value specified by this schema to a fixed set of values. +/// +/// It must be an array with at least one element, where each element is unique. +/// +/// You can use enum even without a type, to accept values of different types. 
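+///
+/// For illustration, a schema restricted to a small fixed set of mixed-type values:
+/// ```
+/// new Schema {
+///   enum {
+///     "auto"
+///     0
+///     null
+///   }
+/// }
+/// ```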
+/// +/// Elements in the array might be of any type, including [null]. +/// +enum: Listing(!isEmpty && isDistinct)? + +// === Numeric type validators === + +/// Restricts to a number that is a multiple of this value. +/// +/// It may be set to any positive number. +/// +/// +multipleOf: Number(type is ("number"|"integer") && isPositive)? + +/// Represent a number that is greater or equal to this value. +/// +/// +minimum: Number(type is ("number"|"integer"))? + +/// Represent a number that is greater than this value. +/// +/// +exclusiveMinimum: Number(type is ("number"|"integer"))? + +/// Represent a number that is less than or equal to this value. +/// +/// +maximum: Number(type is ("number"|"integer"))? + +/// Represent a number that is less than this value. +/// +/// +exclusiveMaximum: Number(type is ("number"|"integer"))? + +// === String validators === + +/// Represent a string that adheres to a regex pattern. +/// +/// The regular expression must conform to [ECMA 262](https://262.ecma-international.org/5.1/#sec-7.8.5). +/// +/// +pattern: String(isRegex && type == "string")? + +/// Represent a string that has a minimum length. +/// +/// +minLength: UInt(type == "string")? + +/// Represent a string that has a maximum length. +/// +/// +maxLength: UInt(type == "string")? + +// == Array validators == + +/// Represent that each item in an array must conform to the specified schema. +/// +/// If the type is array, [items] must be specified. +items: (Schema|Reference)(type == "array")? + +/// Represent an array that has a minumum length. +/// +/// +minItems: UInt(type == "array")? + +/// Represent an array that has a maximum length. +/// +/// +maxItems: UInt(type == "array")? + +/// Represent an array where each item is unique. +/// +/// +uniqueItems: Boolean? + +// == Objects == + +/// Represent an object that must have at minimum a certain number of properties. +/// +/// +minProperties: UInt(type == "object")? + +/// Represent an object that must have at maximum a certain number of properties. +maxProperties: UInt(type == "object")? + +/// Represent an object that have properties that conform to a +/// certain schema. +/// +/// +properties: Mapping(type == "object")? + +/// Represent an object that has additional properties. +/// +/// The value of [additionalProperties] is a schema that +/// will be used to validate any properties in the instance that are not +/// matched by [properties]. Setting the +/// [additionalProperties] schema to [false] means no additional +/// properties will be allowed. +/// +/// +additionalProperties: (Boolean|PropertySchema|Reference)(type == "object")? + +/// Represent an object that has certain properties defined on it. +/// +/// By default, no properties are required. +/// +/// +required: Listing(type == "object", isDistinct)? + +// == Composition == + +/// Represent a value that must match against **exactly** one of the subschemas. +/// +/// +oneOf: Listing(length > 0)? + +/// Represent a value that must match ay least one of the subschemas. +/// +/// +anyOf: Listing(length > 0)? + +/// Represent a value that must match **all** of the subschemas. +/// +/// +allOf: Listing(length > 0)? + +/// Represent a value that must not match the given schema. +/// +/// +not: (Schema|Reference)? + +// == OpenAPI v3.0 specific fields == + +/// Represent that the value may optionally be [null]. +/// +/// Default value is [false]. +/// +/// +nullable: Boolean? + +/// Helps inform of alternative schemas. 
+/// +/// When request bodies or response payloads may be one of a number of different +/// schemas, a discriminator object can be used to aid in serialization, +/// deserialization, and validation. The discriminator is a specific object in +/// a schema which is used to inform the consumer of the specification of an +/// alternative schema based on the value associated with it. +/// +/// When using the discriminator, inline schemas will not be considered. +/// +/// +discriminator: Discriminator? + +/// Declares the property as "read only". +/// +/// Relevant only for Schema [properties] definitions. This means that +/// it *may* be sent as part of a response but *should not* be sent as part +/// of the request. If the property is marked as [readOnly] and is in the +/// [required] list, [required] only effects the response. A property +/// *may not* be marked as both [readOnly] and [writeOnly]. +/// +/// Default value is [false]. +/// +/// +readOnly: Boolean? + +/// Declares the property as "write only". +/// +/// This is relevant only for Schema [properties] definitions. Therefore, it +/// it *may* be sent as part of a response but *should not* be sent as part +/// If the property is marked as [writeOnly] being [true] and is in the [required] list, +/// the [required] will take effect on the request only. A property *may not* be marked +/// as both [readOnly] and [writeOnly] being [true]. +/// +/// Default value is [false]. +/// +/// +writeOnly: Boolean(implies(readOnly != true))? + +/// Additional external documentation for this schema. +externalDocs: ExternalDocumentation? + +/// Indicates that this property is deprecated. +/// +/// Default value is [false]. +/// +/// +deprecated: Boolean? + +/// A free-form property to include an example of an instance for this schema. +/// +/// To represent examples that cannot be naturally represented in JSON or YAML, a string +/// value can be used to contain the example with escaping where necessary. +example: Any? + +/// Helps inform of alternative schemas. +/// +/// When request bodies or response payloads may be one of a number of different +/// schemas, a discriminator object can be used to aid in serialization, +/// deserialization, and validation. The discriminator is a specific object in +/// a schema which is used to inform the consumer of the specification of an +/// alternative schema based on the value associated with it. +/// +/// When using the discriminator, inline schemas will not be considered. +/// +/// +class Discriminator { + /// The name of the property in the payload that holds the discriminator value. + propertyName: String + + /// A mapping from payload values to schema names or references. + mapping: Mapping? +} + +/// Reference to an external resource for extended documentation. +/// +/// +class ExternalDocumentation { + /// A short description of the target documentation. + /// + /// [CommonMark syntax](https://spec.commonmark.org) *may* be used for rich text representation. + description: String? + + /// The URL for the target documentation. + /// + /// Value *must* be in the format of a URL. + uri: Uri +} + +/// A metadata object that allows for more fine-tuned XML model definitions. +/// +/// When using arrays, XML element names are not inferred (for singular/plural +/// forms) and the [name] property **should** be used to add that information. +/// +/// +class Xml { + /// Replaces the name of the element/attribute used for the described schema property. 
+ /// + /// When defined within [items], it affects the name of the individual XML elements within the list. + /// When defined alongside [type] being `array` (outside the [items]), it affects the wrapping element + /// and only if [wrapped] is [true]. If [wrapped] is [false], it is ignored. + name: String? + + /// The URI of the namespace definition. Value MUST be in the form of an absolute URI. + namespace: Uri? + + /// The prefix used for the [name]. + prefix: String? + + /// Declares whether the property definition translates to an attribute instead of an element. + /// + /// Default value is [false]. + attribute: Boolean? + + /// Signifies whether the array is wrapped (for example, ``) + /// or unwrapped (``). + /// + /// *May* be used only for an array definition. Default value is [false]. + /// + /// The definition takes effect only when defined alongside [type] being `array` (outside the items). + wrapped: Boolean? +} + +/// Property schemas are [Schema]s that optionally include XML metadata. +class PropertySchema extends Schema { + /// Adds additional metadata to describe the XML representation of this property. + /// + /// This *may* be used only on properties schemas. It has no effect on root schemas. + xml: Xml? +} + +hidden renderers: Mapping = new { + ["json"] = new JsonRenderer {} + ["yaml"] = new YamlRenderer {} + ["pcf"] = new PcfRenderer {} +} + +output { + // It's pretty common to have OpenAPI written in YAML. Therefore, we support the YAML format too. + renderer = let (format = read?("prop:pkl.outputFormat") ?? "json") + if (renderers.containsKey(format)) + renderers[format] + else throw("Unsupported output format: `\(format)`. Supported formats are `json`, `yaml` and `pcf`.") +} diff --git a/packages/org.openapis.v3/SchemaGenerator.pkl b/packages/org.openapis.v3/SchemaGenerator.pkl new file mode 100644 index 0000000..13aaca1 --- /dev/null +++ b/packages/org.openapis.v3/SchemaGenerator.pkl @@ -0,0 +1,299 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Enables the generation of a [Schema] from a Pkl module, class, or typealias. +@ModuleInfo { minPklVersion = "0.25.0" } +module org.openapis.v3.SchemaGenerator + +import "pkl:reflect" +import "pkl:math" +import "Schema.pkl" + +/// Given either a module, class, or typealias, generates the equivalent OpenAPI v3.0 [Schema]. +/// +/// Example: +/// ``` +/// import "package://pkg.pkl-lang.org/pkl-pantry/org.openapis.v3@1.0.0#/SchemaGenerator.pkl" +/// +/// class Person { +/// /// The person's legal name. +/// name: String? 
+/// } +/// +/// personSchema = SchemaGenerator.generate(Person) +/// ``` +/// +/// `personSchema` generates a schema that looks like so: +/// +/// ```json +/// { +/// "type": "object", +/// "title": "Person", +/// "properties": { +/// "name": { +/// "type": "string", +/// "description": "The person's legal name.", +/// "nullable": true +/// } +/// }, +/// "additionalProperties": false, +/// "required": [] +/// } +/// ``` +/// +/// Known limitations: +/// * This generator cannot generate schemas that contain recursive descriptors, +/// because a single [Schema] lacks the ability to reuse definitions. +/// * [Type Constraints](https://pkl-lang.org/main/current/language-reference/index.html#type-constraints) +/// do not get encoded into the [Schema]. The only exceptions are well-known type constraints for +/// numbers (for example, [Int8] and [UInt8]). +function generate(value: Module|Class|TypeAlias): Schema = + let (ref = if (value is Module) + reflect.Module(value) + else if (value is TypeAlias) + reflect.TypeAlias(value) + else + reflect.Class(value) + ) + convertDeclaration(ref, Set()) + +/// Customize how the generator produces JSON Schema. This is useful if a Pkl type is expected +/// to map to a (non-standard) JSON Schema type. +/// +/// To use converters, a new instance of [SchemaGenerator] must be initialized. +/// +/// Example: +/// ``` +/// import "package://pkg.pkl-lang.org/pkl-pantry/org.openapis.v3@1.0.0#/SchemaGenerator.pkl" +/// +/// k8sGenerator = new SchemaGenerator { +/// converters { +/// [DataSize] { +/// type = "string" +/// description = "A Kubernetes data size representation. For instance, 5Mi" +/// pattern = "^\d+(?:[EPTGMK]i?)?$" +/// } +/// } +/// } +/// ``` +hidden converters: Mapping + +/// Converts a [reflect.Declaration] into the equivalent [Schema]. +local function convertDeclaration(declaration: reflect.Declaration, seenClasses: Set): Schema = + if (declaration is reflect.Class) + convertType(reflect.DeclaredType(declaration), seenClasses) + else if (declaration is reflect.Module) + (convertDeclaration(declaration.moduleClass, seenClasses)) { + // We need to amend title because it would otherwise be `"ModuleClass"`. + title = declaration.name + } + else if (declaration is reflect.Property) + convertType(declaration.type, seenClasses) |> addBaseSchema(declaration) + else + convertType((declaration as reflect.TypeAlias).referent, seenClasses) |> addBaseSchema(declaration) + +/// Adds common base values to a [Schema]. +local function addBaseSchema(declaration: reflect.Declaration): Mixin = new { + description = declaration.docComment + // If this is a `Property`, the name is the name of the property, which is not a good + // indication of the underlying type. Therefore, we only apply the title when the + // declaration is not a property. 
+ when (!(declaration is reflect.Property)) { + title = declaration.name + } + when (!declaration.annotations.filter((a) -> a is Deprecated).isEmpty) { + deprecated = true + } +} + +local function convertDeclaredType(typ: reflect.DeclaredType, seenClasses: Set): Schema = + let (reflectee = typ.referent.reflectee) + if (reflectee == Any || typ == reflect.unknownType) + new {} + else if (reflectee == Int) + new { + type = "integer" + } + else if (reflectee == Int8) + new { + type = "integer" + minimum = math.minInt8 + maximum = math.maxInt8 + } + else if (reflectee == Int16) + new { + type = "integer" + minimum = math.minInt16 + maximum = math.maxInt16 + } + else if (reflectee == Int32) + new { + type = "integer" + format = "int32" + } + else if (reflectee == UInt8) + new { + type = "integer" + minimum = 0 + maximum = math.maxUInt8 + } + else if (reflectee == UInt16) + new { + type = "integer" + minimum = 0 + maximum = math.maxUInt16 + } + else if (reflectee == UInt32) + new { + type = "integer" + minimum = 0 + maximum = math.maxUInt32 + } + else if (reflectee == Float) + new { + type = "number" + format = "float" + } + else if (reflectee == Dynamic) + new { + oneOf { + new { + type = "array" + } + new { + type = "object" + } + } + } + else if (reflectee == Boolean) + new { + type = "boolean" + } + else if (reflectee == Null) + new { + enum { + null + } + } + else if (reflectee == Number) + new { + type = "number" + } + else if (reflectee == String) + new { + type = "string" + } + else if (typ == reflect.nothingType) + // This type is `NOT ANY`, ergo, nothing. + new { + not {} + } + else if (reflectee == Listing || reflectee == List) + let (elementType = typ.typeArguments[0]) + new { + type = "array" + items = convertType(elementType, seenClasses) + } + else if (reflectee == Mapping || reflectee == Map) + let (keyType = typ.typeArguments[0]) + let (valueType = typ.typeArguments[1]) + if (keyType != reflect.stringType) + throw("Invalid schema: Mappings can only be a string type when converting to JSON schema. Received: \(keyType)") + else + new { + type = "object" + additionalProperties = convertTypeForProperty(valueType, seenClasses) + } + else if (reflectee is Class) + if (seenClasses.findOrNull((c) -> c == typ.referent.reflectee) != null) + throw("Invalid Schema: Unable to convert a schema that refers to itself. This is because OpenAPI v3.0 does not have a way to provide definitions. Recurring class: \(typ.referent.reflectee)") + else + converters.getOrNull(typ.referent.reflectee) ?? + let (reflectedClass = reflect.Class(reflectee)) + let (_properties = getProperties(reflectedClass)) + new Schema { + type = "object" + properties { + for (_, property in _properties) { + [property.name] = + convertDeclarationForProperty(property, seenClasses.add(reflectee)) + } + } + additionalProperties = false + required { + for (_, property in _properties) { + when (property.type.nullable != property.type) { + property.name + } + } + } + } |> addBaseSchema(reflectedClass) + else + convertDeclaration(reflect.TypeAlias(reflectee), seenClasses) + +/// Given a `reflect.Type`, produce a matching [Schema]. 
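+///
+/// Nullable types set `nullable = true` on the converted member schema, union types become `oneOf`,
+/// string-literal types become single-value `enum`s, and declared types are delegated to [convertDeclaredType].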
+local function convertType(typ: reflect.Type, seenClasses: Set): Schema = + if (typ is reflect.NullableType) + (convertType(typ.member, seenClasses)) { + nullable = true + } + else if (typ is reflect.UnionType) + new { + oneOf { + for (member in typ.members) { + convertType(member, seenClasses) + } + } + } + else if (typ is reflect.StringLiteralType) + new { + enum { + typ.value + } + } + else if (typ is reflect.DeclaredType) + convertDeclaredType(typ, seenClasses) + else throw("Unsure how to cast this type: \(typ)") + +/// Same as [convertType], except casts results to [Schema.PropertySchema] +local function convertTypeForProperty(typ: reflect.Type, seenClasses: Set): Schema.PropertySchema = + convertType(typ, seenClasses).toDynamic().toTyped(Schema.PropertySchema) + +/// Same as [convertDeclaration], except casts results to [Schema.PropertySchema]. +local function convertDeclarationForProperty(declaration: reflect.Declaration, seenClasses: Set): Schema.PropertySchema = + convertDeclaration(declaration, seenClasses).toDynamic().toTyped(Schema.PropertySchema) + +/// Given a class, return all of its properties, including properties of all inherited classes. +/// +/// Excludes properties that are functions, and any hidden properties. Also, excludes properties on [Module]. +/// +/// **NOTE**: JSON Schema has a way to combine schemas together using `allOf`. At first glance, this might +/// seem like a good way to model inheritance; the schema is all of the child properties and the parent +/// properties. However, this mechanism breaks if a child class overrides the property of a parent class. +/// If an overridden property is mutually exclusive with the parent property, `allOf` would produce an +/// invalid JSON schema because it is impossible for any object to match all of the specified constraints. +local function getProperties(clazz: reflect.Class): Map = + // Edge case: Modules implicitly inherit the `Module` class. We do not want to capture this as part of the + // schema. + if (clazz.reflectee == Module) + Map() + else + let (properties = clazz.properties + .filter((_, prop) -> !(prop.type is reflect.FunctionType)) + .filter((_, prop) -> !prop.modifiers.contains("hidden"))) + if (clazz.superclass == clazz || clazz.superclass == null) + properties + else + getProperties(clazz.superclass!!) + properties diff --git a/packages/org.openapis.v3/examples/SwallowSchema.pkl b/packages/org.openapis.v3/examples/SwallowSchema.pkl new file mode 100644 index 0000000..66a8ad0 --- /dev/null +++ b/packages/org.openapis.v3/examples/SwallowSchema.pkl @@ -0,0 +1,100 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module org.openapis.v3_0.examples.SwallowSchema + +import "../SchemaGenerator.pkl" + +/// A friend of a Swallow. 
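+///
+/// Used by `Swallow.bestFriend` and `Swallow.friends` below, so the generated schema exercises both a nested
+/// object and an array of objects.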
+class SwallowFriend { + /// The name of the friend. + name: String + + /// Is this friend also a Swallow? + isSwallow: Boolean + + /// Any extra attributes for the friend. + tags: Mapping +} + +/// This is a Swallow +class Swallow { + + /// Does this Swallow have a gulp? + hasGulp: Boolean + + /// One of the variants for this swallow. + variants: Listing<"African"|"European"> + + /// What is the air speed of this swallow? + airSpeed: Number? + + /// Is this Swallow a Duck? + /// + /// This is deprecated because no swallows are ever ducks. + @Deprecated + isDuck: Boolean? + + /// How old is this Swallow? + age: UInt8 + + /// What are the nicknames? + nicknames: Listing + + /// Who is the best friend of this Swallow? + bestFriend: SwallowFriend + + /// Who are the friends of this Swallow? + friends: Listing + + /// Any extra attributes. + tags: Mapping + + /// The typical size of a flight path of this Swallow. + typicalFlightPathSize: DataSize + + /// This is a list. It really should not be used, but sometimes people make mistakes. Regardless, + /// it should turn into an array type. + list: List + + /// Swoop! + /// + /// This should not appear in the schema output. + swoop: (String) -> String = (input) -> "SWOOP \(input)" + + /// Swoopwoop! + /// + /// This should also not appear in the schema output. + function swoopwoop() = "swoopwoop!" + + /// soar! + /// + /// Don't show me in the schema because I don't show up in any rendered output. + hidden soar: Boolean? +} + +generator = new SchemaGenerator { + converters { + [DataSize] { + type = "string" + description = "A data size description" + } + } +} + +output { + value = generator.generate(Swallow) + renderer = new JsonRenderer {} +} diff --git a/packages/org.openapis.v3/tests/SchemaGenerator.pkl b/packages/org.openapis.v3/tests/SchemaGenerator.pkl new file mode 100644 index 0000000..7d3b355 --- /dev/null +++ b/packages/org.openapis.v3/tests/SchemaGenerator.pkl @@ -0,0 +1,72 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module org.openapis.v3.tests.SchemaGenerator + +amends "pkl:test" + +import "fixtures/SelfReference.pkl" +import "../examples/SwallowSchema.pkl" +import "../SchemaGenerator.pkl" +import "../Schema.pkl" + +local schema = SchemaGenerator.generate(SwallowSchema.Swallow) + +facts { + ["recursive references throw an error"] { + module.catch(() -> SchemaGenerator.generate(SelfReference).output.text) + .startsWith("Invalid Schema: Unable to convert a schema that refers to itself.") + } + ["nullable values show up as nullable"] { + (schema.properties!!["airSpeed"] as Schema).nullable == true + } + ["deprecated fields show up as deprecated"] { + (schema.properties!!["isDuck"] as Schema).deprecated == true + } + ["generates basic metadata"] { + schema.title == "Swallow" + schema.description == "This is a Swallow" + } + ["listings are encoded as arrays"] { + local nicknameSchema = schema.properties!!["nicknames"] as Schema + nicknameSchema.type == "array" + nicknameSchema.items.type == "string" + } + ["mappings are encoded as objects"] { + local tagsSchema = schema.properties!!["tags"] as Schema + tagsSchema.type == "object" + tagsSchema.additionalProperties.type == "string" + } + ["classes are encoded as objects"] { + local friendSchema = schema.properties!!["bestFriend"] as Schema + friendSchema.type == "object" + friendSchema.properties.toMap().keys == Set("name", "isSwallow", "tags") + } +} + +examples { + ["converts a module"] { + let (schema = new SchemaGenerator { + converters { + [DataSize] { + title = "DataSize" + type = "string" + description = "The size of data in [quantity][unit] representation." + } + } + }.generate(SwallowSchema.Swallow)) + new JsonRenderer {}.renderDocument(schema) + } +} diff --git a/packages/org.openapis.v3/tests/SchemaGenerator.pkl-expected.pcf b/packages/org.openapis.v3/tests/SchemaGenerator.pkl-expected.pcf new file mode 100644 index 0000000..37ff6b8 --- /dev/null +++ b/packages/org.openapis.v3/tests/SchemaGenerator.pkl-expected.pcf @@ -0,0 +1,151 @@ +examples { + ["converts a module"] { + """ + { + "type": "object", + "title": "Swallow", + "description": "This is a Swallow", + "properties": { + "hasGulp": { + "type": "boolean", + "description": "Does this Swallow have a gulp?" + }, + "variants": { + "type": "array", + "description": "One of the variants for this swallow.", + "items": { + "oneOf": [ + { + "enum": [ + "African" + ] + }, + { + "enum": [ + "European" + ] + } + ] + } + }, + "airSpeed": { + "type": "number", + "description": "What is the air speed of this swallow?", + "nullable": true + }, + "isDuck": { + "type": "boolean", + "description": "Is this Swallow a Duck?\\n\\nThis is deprecated because no swallows are ever ducks.", + "nullable": true, + "deprecated": true + }, + "age": { + "type": "integer", + "description": "How old is this Swallow?", + "minimum": 0, + "maximum": 255 + }, + "nicknames": { + "type": "array", + "description": "What are the nicknames?", + "items": { + "type": "string" + } + }, + "bestFriend": { + "type": "object", + "title": "SwallowFriend", + "description": "Who is the best friend of this Swallow?", + "properties": { + "name": { + "type": "string", + "description": "The name of the friend." + }, + "isSwallow": { + "type": "boolean", + "description": "Is this friend also a Swallow?" 
+ }, + "tags": { + "type": "object", + "description": "Any extra attributes for the friend.", + "additionalProperties": { + "type": "string" + } + } + }, + "additionalProperties": false, + "required": [ + "name", + "isSwallow", + "tags" + ] + }, + "friends": { + "type": "array", + "description": "Who are the friends of this Swallow?", + "items": { + "type": "object", + "title": "SwallowFriend", + "description": "A friend of a Swallow.", + "properties": { + "name": { + "type": "string", + "description": "The name of the friend." + }, + "isSwallow": { + "type": "boolean", + "description": "Is this friend also a Swallow?" + }, + "tags": { + "type": "object", + "description": "Any extra attributes for the friend.", + "additionalProperties": { + "type": "string" + } + } + }, + "additionalProperties": false, + "required": [ + "name", + "isSwallow", + "tags" + ] + } + }, + "tags": { + "type": "object", + "description": "Any extra attributes.", + "additionalProperties": { + "type": "string" + } + }, + "typicalFlightPathSize": { + "type": "string", + "title": "DataSize", + "description": "The typical size of a flight path of this Swallow." + }, + "list": { + "type": "array", + "description": "This is a list. It really should not be used, but sometimes people make mistakes. Regardless,\\nit should turn into an array type.", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false, + "required": [ + "hasGulp", + "variants", + "age", + "nicknames", + "bestFriend", + "friends", + "tags", + "typicalFlightPathSize", + "list" + ] + } + + """ + } +} diff --git a/packages/org.openapis.v3/tests/fixtures/SelfReference.pkl b/packages/org.openapis.v3/tests/fixtures/SelfReference.pkl new file mode 100644 index 0000000..21dbf5a --- /dev/null +++ b/packages/org.openapis.v3/tests/fixtures/SelfReference.pkl @@ -0,0 +1,20 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module SelfReference + +import "SelfReference.pkl" + +myself: SelfReference diff --git a/packages/pkl.csv/PklProject b/packages/pkl.csv/PklProject new file mode 100644 index 0000000..b7c00e3 --- /dev/null +++ b/packages/pkl.csv/PklProject @@ -0,0 +1,21 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// A [RFC4180](https://www.ietf.org/rfc/rfc4180.txt) compliant CSV [Renderer] and [Parser]. +amends "../basePklProject.pkl" + +package { + version = "1.0.0" +} diff --git a/packages/pkl.csv/PklProject.deps.json b/packages/pkl.csv/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/pkl.csv/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/pkl.csv/csv.pkl b/packages/pkl.csv/csv.pkl new file mode 100644 index 0000000..106b71a --- /dev/null +++ b/packages/pkl.csv/csv.pkl @@ -0,0 +1,268 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// A renderer for Comma Separated Values (CSV) files, following [RFC 4180](https://www.ietf.org/rfc/rfc4180.txt). +/// +/// Basic usage: +/// ``` +/// import "package:#/csv.pkl" +/// +/// output { +/// renderer = new csv.Renderer {} +/// } +/// ``` +@ModuleInfo { minPklVersion = "0.25.0" } +module pkl.csv.csv + +import "pkl:reflect" + +typealias Value = Null|Number|String|Boolean + +typealias ListLike = List|Listing|Dynamic(toMap().isEmpty) + +const local listLikeDescription = "`List`s, `Listing`s, or `Dynamic`s with only elements" + +const local function mapOf(thing): Map = + if (thing is Map) thing else thing?.toMap() ?? Map() + +/// Renders values as CSV. +class Renderer extends ValueRenderer { + /// Value converters to apply before values are rendered. + /// + /// For further information see [PcfRenderer.converters]. + converters: Mapping Any> + + function renderValue(value: Any) = + if (value is Null|Number|String|Boolean) + new Mapping { + [Null] = "" + [String] = + if (value.contains(charactersToWrap)) + #""\#(value.replaceAll("\"", "\"\""))""# + else + value as String + }.getOrNull(value.getClass()) ?? "\(value)" + else + throw("The CSV renderer only supports primitive values in `renderValue`.") + +function renderDocument(value: Any) = + let (table = + if (value is ListLike?) + (value as ListLike? ?? List()).toList() + else + throw("Only \(listLikeDescription) values can be rendered as CSV. 
Instead, found a \(value.getClass()).\n\nValue:\n\(value)") + ) + let (violations = table.filter((it) -> !(if (table.firstOrNull is Value) it is Value else it is Typed|Dynamic|Mapping|Map))) + let (headerKeys = + if (unification == "pad") + table.fold(Set(), (acc: Set, row) -> acc + row.toMap().keys) + else + mapOf(table.firstOrNull).keys + ) + new Listing { + when (!violations.isEmpty) { + throw("The CSV renderer only supports rows consisting of primitive values, or of type `Typed|Dynamic|Mapping`.\nValue: \(violations.first)") + } + when (includeHeader) { + headerKeys.map((it) -> renderValue(it)).join(",") + } + for (row in table) { + new Listing { + when (unification == "error" && mapOf(row).keys != headerKeys) { + throw("Invalid input: CSV can only render rows with all the same properties. Expecting keys: \(headerKeys.join(",")). Received: \(row).") + } + when (includeHeader) { + for (column in headerKeys) { + renderValue(mapOf(row).getOrNull(column)) + } + } else { + for (column in if (row is ListLike) row.toList() else row.toMap().values) { + renderValue(column) + } + } + }.join(",") + } + "" + }.join(lineBreak) + + /// The line break to use. + /// + /// [RFC 4180](https://www.ietf.org/rfc/rfc4180.txt) states that line breaks are carriage-return-line-feed, but also: + /// > As per section 4.1.1. of RFC 2046, this media type uses CRLF to denote line breaks. + /// > However, implementors should be aware that some implementations may use other values. + /// + /// This property can be used to define which encoding to use. + /// + /// (Default: `"\r\n"`) + lineBreak: String = "\r\n" + + local charactersToWrap = Regex("[\",]|\(lineBreak)") + + /// How to handle polymorphic rows. + /// + /// When rendering a table of `Listing`, which includes elements of type `DerivedX` and `DerivedY` (where both + /// `extends Base`), how should this table be rendered? There are three options: + /// - `"error"` throws an error when any row has property names that are not in the header. + /// - `"drop"` only renders properties with names in the header and ignores any other properties. + /// - `"pad"` gathers property names from the entire table and inserts (pads) empty values when properties are missing. + /// + /// (Default: `"error"`) + unification: *"error"|"drop"|"pad" + + /// Tells whether to include a (first) line with the names of the columns. + includeHeader: Boolean = true +} + +class Parser { + /// The expected type of the rows to parse. + rowClass: Class? + + /// The [String] to parse. + input: String + + /// Tells whether the first row contains names of columns. + includeHeader: Boolean = false + + /// The line break to use. + /// + /// [RFC 4180](https://www.ietf.org/rfc/rfc4180.txt) states that line breaks are carriage-return-line-feed, but also: + /// > As per section 4.1.1. of RFC 2046, this media type uses CRLF to denote line breaks. + /// > However, implementors should be aware that some implementations may use other values. + /// + /// This property can be used to define which encoding to use. + /// + /// (Default: `"\r\n"`) + lineBreak: String = "\r\n" + + converters: Mapping unknown> = new { + [Int] = (it) -> it.toInt() + [Float] = (it) -> it.toFloat() + [Number] = (it) -> it.toFloat() + [Boolean] = (it) -> it.toBoolean() + } + + function parse(source: Resource|String): + List(rowClass == null)|List(rowClass != null) = + (this) { input = if (source is String) source else source.text }.parsed + + /// The result of parsing [input] as CSV. 
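+  ///
+  /// For illustration (assuming this module is imported as `csv`, with a hypothetical row class `Point`):
+  /// ```
+  /// class Point {
+  ///   x: Int
+  ///   y: Int
+  /// }
+  ///
+  /// points = new csv.Parser {
+  ///   rowClass = Point
+  ///   includeHeader = true
+  ///   input = "x,y\r\n1,2\r\n3,4\r\n"
+  /// }.parsed // a List of two typed `Point`s
+  /// ```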
+ /// + /// This is a "final" property, because it is derived from the input properties [input], [includeHeader] and [rowClass]. + parsed: (*List(rowClass == null)|List(rowClass != null))(this == _parsed) = + _parsed + + local function convert(type: reflect.Type|Class|TypeAlias): (String) -> unknown = + new Mapping unknown)?> { + [reflect.DeclaredType] = convert((type as reflect.DeclaredType).referent.reflectee) + [Class] = converters.getOrNull(type) + [TypeAlias] = convert(reflect.TypeAlias(type as TypeAlias).referent) + // Support for (type aliases that are) union types is rather ad hoc; pick the first class for which there exists a converter. + // TODO: Improve this. + [reflect.UnionType] = (type as reflect.UnionType).members.fold(null, (acc, ty) -> acc ?? convert(ty)) + }.getOrNull(type.getClass()) ?? (it) -> it + + local function allProps(clazz: reflect.Class): Map = + (clazz.superclass.ifNonNull((zuper) -> allProps(zuper as reflect.Class)) ?? Map()) + + clazz.properties.filter((_, p) -> !p.modifiers.contains("hidden")) + + local _parsed = + let (self = this) + let (stringyResult = new StringyTableParser { + rowClass = self.rowClass + input = self.input + lineBreak = self.lineBreak + }.parseResult.rows.toList()) + let (properties: Map? = rowClass.ifNonNull((clazz) -> allProps(reflect.Class(clazz as Class)))) + let (header: List? = (if (includeHeader) stringyResult[0] else properties?.keys)?.toList() as List?) + if (header == null) stringyResult.map((rawRow) -> rawRow.toDynamic()) else + stringyResult.toList().drop(if (includeHeader) 1 else 0).map((rawRow) -> + let (row = + header + .zip(rawRow.toList()) + .toMap((entry) -> entry.first, (entry) -> entry.second) + ) + if (properties == null) row.toDynamic() else + let (spuriousKeys = row.keys.filter((key) -> !properties.containsKey(key))) + let (_ = if (spuriousKeys.isEmpty) "ok" else throw("Unrecognized keys found in row: \(spuriousKeys.join(", "))")) + properties + .filter((name, _) -> row.containsKey(name)) + .mapValues((name, property) -> row[name].ifNonNull((value) -> convert(property.type).apply(value as String))) + .toTyped(rowClass!!) + ) +} + +local class StringyTableParser { + rowClass: Class? + input: String + + position: Int = 0 + currentRow: Listing + rows: Listing> + + lineBreak: String + + local length: Int = input.length - position + + parseResult: StringyTableParser = if (length <= 0) this else + let (isEscapedField = input.getOrNull(position) == "\"") + + let (idxValueStart = if (isEscapedField) position + 1 else position) + let (idxValueEnd = + List( + input.length, + if (isEscapedField) + findValueEndIndex(idxValueStart) + else + position + (input.drop(position).indexOfOrNull(Regex(",|\(lineBreak)")) ?? length) + ).min + ) + let (idxFieldEnd = idxValueEnd + if (isEscapedField) 1 else 0) + let (delimiter = input.getOrNull(idxFieldEnd)) + + // Ugly corner-case for CSV, if the input ends with a `,`, there is still a `null` value "after" that, but no `,` or lineBreak to signal as much. 
+ let (nullEnd = delimiter == "," && idxFieldEnd == input.length - 1) + + let (value = input.substring(idxValueStart, idxValueEnd)) + let (newRow = (currentRow) { + if (value.isEmpty) null else if (isEscapedField) value.replaceAll("\"\"", "\"") else value + when (nullEnd) { + null + } + }) + + (this) { + position = idxFieldEnd + if (delimiter == ",") 1 else lineBreak.length + when (delimiter != "," || nullEnd) { + rows { + newRow.toList() + } + currentRow = new {} + } else { + currentRow = newRow + } + }.parseResult + + function findValueEndIndex(position: UInt): UInt = + let (characterAtPosition = input.getOrNull(position)) + if (characterAtPosition == "\"") + if (input.getOrNull(position + 1) == "\"") + findValueEndIndex(position + 2) + else + position + else if (characterAtPosition == null) + throw("Premature end of quoted field") + else + findValueEndIndex(position + 1) +} diff --git a/packages/pkl.csv/tests/csv_test.pkl b/packages/pkl.csv/tests/csv_test.pkl new file mode 100644 index 0000000..e2d92ea --- /dev/null +++ b/packages/pkl.csv/tests/csv_test.pkl @@ -0,0 +1,250 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module pkl.csv.tests.csv_test + +amends "pkl:test" + +import "../csv.pkl" + +// Rendering functions +local renderer = new csv.Renderer {} +local function value(val: csv.Value): String = renderer.renderValue(val) +local function doc(val: csv.ListLike): String = renderer.renderDocument(val) +local function anon(val: csv.ListLike): String = (renderer) { includeHeader = false }.renderDocument(val) +local function pad(val: csv.ListLike) = (renderer) { unification = "pad" }.renderDocument(val) +local function drop(val: csv.ListLike) = (renderer) { unification = "drop" }.renderDocument(val) +local function error(val: csv.ListLike) = (renderer) { unification = "error" }.renderDocument(val) + +local parser = new csv.Parser {} +local function pValue(_input: String): csv.Value = + let (lineBreak = "\r\n") + let (table = (parser) { input = _input + lineBreak }.parsed.toList()) + if (table.length != 1 || table.first.toList().length != 1) + throw("When parsing '\(_input)', expecting a single row, with a single column, but found \(table.length) rows and \(table.firstOrNull?.toList()?.length) columns") + else + table.first.toList().first + +local values = new Mapping> { + ["string"] = new Mapping { + ["some string"] = "some string" + [#"string with "quotes""#] = #""string with ""quotes""""# + ["a string, with a comma"] = #""a string, with a comma""# + ["a multiline\nstring"] = "a multiline\nstring" + ["a multiline\r\nstring with carriage return"] = #""a multiline\#r\#nstring with carriage return""# + ["\""] = "\"\"\"\"" + ["\t\\\r"] = "\t\\\r" + } + ["number"] = new Mapping { + [0] = "0" + [42] = "42" + [-1] = "-1" + [0.42] = "0.42" + [-1.0] = "-1.0" + } + ["other"] = new Mapping { + [true] = "true" + [false] = "false" + [null] = "" + } +} + +local exampleTable = new Listing { + default = (i) -> new Dynamic { lineNumber = i description = "This is line \(i) of the table" } + for (_ in IntSeq(1, 5)) { + new {} + } +} + +local typealias MyTypeAlias = *String|Int + +local open class Base { + hidden quoted: Boolean = false + local quote = if (quoted) "\"" else "" + foo: MyTypeAlias = "FOO!" 
+ bar: String = "Just wanted to say \(quote)hello\(quote)" + baz: Int = -42 + qux: Float = 0.1337 +} + +local class DerivedOne extends Base { + quux: Number = -0.42 +} + +local class DerivedTwo extends Base { + corge: Boolean = false +} + +local homogeneousListing = new Listing { + new { foo = "first row" } + new { foo = "second\nrow" } + new { baz = 0 } + new { qux = 1.337 } +} + +local heterogeneousListing = new Listing { + new DerivedTwo {} + new Base { quoted = true } + new DerivedOne {} + new Base {} +} + +facts { + for (category, cases in values) { + ["Rendering \(category) values"] { + for (value, rendered in cases) { + if (value(value) == rendered) true else let (_ = trace("\(value(value)) != \(rendered)")) false + } + } + + ["Parsing \(category) values"] { + for (value, rendered in cases) { + pValue(rendered) == value?.toString() // because the parser does not have type information + } + } + } + + ["Rendering list(like) values"] { + anon(new Dynamic { new Dynamic { "foo" 1 false "bar" } }) == """ + foo,1,false,bar + + """.replaceAll("\n", "\r\n") + + doc(new Dynamic { new Dynamic { foo = 42 bar = false } }) == """ + foo,bar + 42,false + + """.replaceAll("\n", "\r\n") + + doc(exampleTable) == """ + lineNumber,description + 0,This is line 0 of the table + 1,This is line 1 of the table + 2,This is line 2 of the table + 3,This is line 3 of the table + 4,This is line 4 of the table + + """.replaceAll("\n", "\r\n") + + anon(exampleTable) == """ + 0,This is line 0 of the table + 1,This is line 1 of the table + 2,This is line 2 of the table + 3,This is line 3 of the table + 4,This is line 4 of the table + + """.replaceAll("\n", "\r\n") + } + + ["Rendering polymorphic lists; unifications"] { + pad(heterogeneousListing) == #""" + foo,bar,baz,qux,corge,quux + FOO!,Just wanted to say hello,-42,0.1337,false, + FOO!,"Just wanted to say ""hello""",-42,0.1337,, + FOO!,Just wanted to say hello,-42,0.1337,,-0.42 + FOO!,Just wanted to say hello,-42,0.1337,, + + """#.replaceAll("\n", "\r\n") + + drop(heterogeneousListing) == #""" + foo,bar,baz,qux,corge + FOO!,Just wanted to say hello,-42,0.1337,false + FOO!,"Just wanted to say ""hello""",-42,0.1337, + FOO!,Just wanted to say hello,-42,0.1337, + FOO!,Just wanted to say hello,-42,0.1337, + + """#.replaceAll("\n", "\r\n") + + error(exampleTable) == doc(exampleTable) + + module.catch(() -> error(heterogeneousListing)) + .startsWith("Invalid input: CSV can only render rows with all the same properties.") + } + + ["Rendering errors"] { + module.catch(() -> doc(new Dynamic { Map("foo", 1, "bar", 2) Map("foo", 1) })) + .startsWith("Invalid input: CSV can only render rows with all the same properties.") + } + + ["Parsing with header information"] { + // A little finicky, but if the parser doesn't have type information, every _value_ comes out stringy. 
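To make the "stringy values" caveat concrete before the test itself, here is a minimal sketch (not part of the change set; `Point` and `sketchInput` are hypothetical names, and the expected results are inferred from the Parser behaviour exercised by the tests below):

```
import "../csv.pkl"

local class Point {
  x: Int
  y: Boolean
}

local sketchInput = "x,y\r\n1,true\r\n"

// Without a rowClass the parser has no type information, so every value comes out
// as a String keyed by the header names.
stringy = new csv.Parser { input = sketchInput; includeHeader = true }.parsed
// expected: List(new Dynamic { x = "1"; y = "true" })

// With a rowClass, values are converted to the declared property types.
typed = new csv.Parser { input = sketchInput; includeHeader = true; rowClass = Point }.parsed
// expected: List(new Point { x = 1; y = true })
```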
+ new csv.Parser { + input = """ + foo,bar,baz,qux,corge\r + first row,Just wanted to say hello,-42,0.1337,false\r + second + row,Just wanted to say hello,-42,0.1337,false\r + FOO!,Just wanted to say hello,0,0.1337,false\r + FOO!,Just wanted to say hello,-42,1.337,false\r + + """ + includeHeader = true + }.parsed == new Listing { + for (row in homogeneousListing) { + row.toDynamic().toMap().mapValues((_, v) -> v.toString()).toDynamic() + } + }.toList() + + new csv.Parser { + input = """ + foo,bar,baz,qux,corge\r + first row,Just wanted to say hello,-42,0.1337,false\r + second + row,Just wanted to say hello,-42,0.1337,false\r + FOO!,Just wanted to say hello,0,0.1337,false\r + FOO!,Just wanted to say hello,-42,1.337,false\r + + """ + rowClass = DerivedTwo + includeHeader = true + }.parsed == homogeneousListing.toList() + } + + ["Parsing with class, without header information"] { + new csv.Parser { + input = anon(homogeneousListing) + rowClass = DerivedTwo + includeHeader = false + }.parsed == homogeneousListing.toList() + } + + ["Rendering and parsing CRLF alternatives"] { + for (break in List("\n", "\r", "\t", "😀", "\r\r\n\n")) { + ...new Listing { + local quote = if (break == "\n") "\"" else "" + local source = """ + foo,bar,baz,qux,corge\r + first row,Just wanted to say hello,-42,0.1337,false\r + \(quote)second + row\(quote),Just wanted to say hello,-42,0.1337,false\r + FOO!,Just wanted to say hello,0,0.1337,false\r + FOO!,Just wanted to say hello,-42,1.337,false\r + + """.replaceAll("\r\n", break) + + new csv.Renderer { + lineBreak = break + }.renderDocument(homogeneousListing) == source + + new csv.Parser { + input = source + rowClass = DerivedTwo + includeHeader = true + lineBreak = break + }.parsed == homogeneousListing.toList() + } + } + } +} diff --git a/packages/pkl.experimental.deepToTyped/PklProject b/packages/pkl.experimental.deepToTyped/PklProject new file mode 100644 index 0000000..3ca4e0b --- /dev/null +++ b/packages/pkl.experimental.deepToTyped/PklProject @@ -0,0 +1,21 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// An experimental module that does a deep-conversion of [Dynamic] into [Typed]. 
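As a quick illustration of the conversion this package performs, a sketch follows (the relative import mirrors how the sibling tests import the module; `Port` and `Ports` are hypothetical names, and the expected value is inferred from the handlers in deepToTyped.pkl):

```
import "../deepToTyped.pkl" as deepToTyped

local class Port { number: Int }
local typealias Ports = Listing<Port>

// An untyped Dynamic listing is coerced, element by element, into a Listing<Port>.
ports = deepToTyped.apply(Ports, new Dynamic { new { number = 8080 } new { number = 9090 } })
// expected: new Listing { new Port { number = 8080 } new Port { number = 9090 } }
```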
+amends "../basePklProject.pkl" + +package { + version = "1.0.0" +} diff --git a/packages/pkl.experimental.deepToTyped/PklProject.deps.json b/packages/pkl.experimental.deepToTyped/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/pkl.experimental.deepToTyped/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/pkl.experimental.deepToTyped/deepToTyped.pkl b/packages/pkl.experimental.deepToTyped/deepToTyped.pkl new file mode 100644 index 0000000..fee7b49 --- /dev/null +++ b/packages/pkl.experimental.deepToTyped/deepToTyped.pkl @@ -0,0 +1,225 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module pkl.experimental.deepToTyped.deepToTyped + +import "pkl:reflect" + +/// Takes a given structure `Any` and coerces it into a concrete type `Class|TypeAlias`. +/// +/// Facts: +/// ``` +/// local class Foo { x: Int } +/// local class Bar { foo: Foo } +/// local dynamicBar = new Dynamic { foo { x = 1 } } +/// apply(Bar, dynamicBar) == new Bar { foo = new Foo { x = 1 } } +/// ``` +function apply(type: Class|TypeAlias, value: Any) = + let (result = + if (type is Class) + applyClass(reflect.Class(type), List(), value) + else + applyType(reflect.TypeAlias(type).referent, value) + ) + if (result is ConversionFailure) throw(result.message) else result + +local class ConversionFailure { + message: String + + function toMapping(): ConversionFailure = this + function toListing(): ConversionFailure = this +} + +function Fail(_message: String): ConversionFailure = new { + message = _message +} + +function Unexpected(expected: String, actual: String): ConversionFailure = + Fail(#"Expected "\#(expected)" but got "\#(actual)""#) + +hidden classHandlers: Mapping, Any) -> Any|ConversionFailure> = new { + [Mapping] = (typeArguments, value) -> + if (value is Dynamic|Map|Mapping) + applyMapping(typeArguments.firstOrNull ?? reflect.unknownType, typeArguments.getOrNull(1) ?? reflect.unknownType, value) + else + Unexpected("Dynamic|Map|Mapping", value.getClass().simpleName) + + [Map] = (typeArguments, value) -> + if (value is Dynamic|Map|Mapping) + applyMap(typeArguments.firstOrNull ?? reflect.unknownType, typeArguments.getOrNull(1) ?? reflect.unknownType, value) + else + Unexpected("Dynamic|Map|Mapping", value.getClass().simpleName) + + [Listing] = (typeArguments, value) -> + if (value is Dynamic|Collection|Listing) + applyListing(typeArguments.firstOrNull ?? reflect.unknownType, value.toList()) + else + Unexpected("Dynamic|Collection|Listing", value.getClass().simpleName) + + [List] = (typeArguments, value) -> + if (value is Dynamic|Collection) + applyList(typeArguments.firstOrNull ?? 
reflect.unknownType, value) + else + Unexpected("Dynamic|Collection", value.getClass().simpleName) + + [Set] = (typeArguments, value) -> + if (value is Dynamic|Collection) + applyList(typeArguments.firstOrNull ?? reflect.unknownType, value).toSet() + else + Unexpected("Dynamic|Collection", value.getClass().simpleName) + + [Collection] = (typeArguments, value) -> + if (value is Dynamic|Collection) + applyList(typeArguments.firstOrNull ?? reflect.unknownType, value) + else + Unexpected("Dynamic|Collection", value.getClass().simpleName) + + [Int] = (_, value) -> + if (value is Int) value else Unexpected("Int", value.getClass().simpleName) + + [String] = (_, value) -> + if (value is String) value else Unexpected("String", value.getClass().simpleName) + + [Float] = (_, value) -> + if (value is Number) value.toFloat() else Unexpected("Float", value.getClass().simpleName) + + [Number] = (_, value) -> + if (value is Number) value else Unexpected("Number", value.getClass().simpleName) + + [Boolean] = (_, value) -> + if (value is Boolean) value else Unexpected("Boolean", value.getClass().simpleName) +} + +local function applyClass(type: reflect.Class, typeArguments: List, value: Any): Any|ConversionFailure = + let (clazz = type.reflectee) + if (classHandlers.containsKey(clazz)) + classHandlers[clazz].apply(typeArguments, value) + else if (type.isSubclassOf(reflect.Class(Typed))) + applyTyped(type, value) + else + Fail("Unsupported type for conversion: \(type.reflectee.simpleName)") + +local function doesNotInherit(clazz: reflect.Class) = + clazz.superclass == null || clazz.superclass.reflectee == Module || clazz.superclass.reflectee == Typed + +local function getAllProperties(clazz: reflect.Class?): Map = + if (clazz == null) Map() + else if (doesNotInherit(clazz)) clazz.properties + else getAllProperties(clazz.superclass!!) + clazz.properties + +local function applyProperty(valueAsMap: Map, prop: reflect.Property) = + if (valueAsMap.containsKey(prop.name)) applyType(prop.type, valueAsMap[prop.name]) + else if (!(prop.type is reflect.NullableType) && prop.defaultValue != null) prop.defaultValue + else null + +local function applyDynamicOrMapping(type: reflect.Class, value: Dynamic|Mapping): Typed|ConversionFailure = + let (valueAsMap = value.toMap()) + let (converted = getAllProperties(type) + .fold( + Map(), + (acc: Map|ConversionFailure, name: String, prop: reflect.Property) -> + if (acc is ConversionFailure) + acc + else + let (result = applyProperty(valueAsMap, prop)) + if (result is ConversionFailure) result else acc.put(name, result) + ) + ) + if (converted is ConversionFailure) converted else converted.toTyped(type.reflectee) + +local function applyTyped(type: reflect.Class, value: Any): Typed|ConversionFailure = + if (value is Dynamic|Mapping) applyDynamicOrMapping(type, value) + else if (value is Typed && value.getClass() == type.reflectee) value + else Unexpected(type.name, value.getClass().simpleName) + +hidden reflectTypeHandlers: Mapping Any|ConversionFailure> = new Mapping { + [reflect.DeclaredType] = (type, value) -> + let (_type = type as reflect.DeclaredType) + let (reflectee = _type.referent.reflectee) + if (reflectee is Class) + applyClass(reflect.Class(reflectee), _type.typeArguments, value) + else + // TODO: Assert that typeParameters.isEmpty?? 
+ applyType(reflect.TypeAlias(reflectee).referent, value) + + [reflect.StringLiteralType] = (type, value) -> + let (expected = (type as reflect.StringLiteralType).value) + if (value is String) + if (value == expected) + value + else + Unexpected(expected, value) + else + Unexpected(expected, value.getClass().simpleName) + + [reflect.UnionType] = (type, value) -> + (type as reflect.UnionType).members.fold( + Fail("No member of union type matched value '\(value)'"), + (acc, _type) -> + if (acc is ConversionFailure) + let (attempt = applyType(_type, value)) + if (attempt is ConversionFailure) acc else attempt + else + acc + ) + + [reflect.NullableType] = (type, value) -> + if (value == null) null else applyType((type as reflect.NullableType).member, value) + + [reflect.FunctionType] = (_, _) -> + Fail("Cannot convert function types") + + [reflect.ModuleType] = (_, _) -> + Fail("unimplmeneted") + + [reflect.UnknownType] = (_, value) -> + value + + [reflect.NothingType] = (_, _) -> + Fail("Cannot convert anything to `nothing`") + + [reflect.TypeVariable] = (type, value) -> + Fail("The type variable `\(type)` is unbound when trying to convert `\(value)`") +} + +local function applyType(type: reflect.Type, value: Any): Any|ConversionFailure = + let (clazz = type.getClass()) + if (reflectTypeHandlers.containsKey(clazz)) + reflectTypeHandlers[clazz].apply(type, value) + else + Fail("Unknown reflect.Type: \(type.getClass().simpleName)") + +local function applyMap(keyType: reflect.Type, valueType: reflect.Type, value: Dynamic|Map|Mapping): Map|ConversionFailure = + let (_value = if (value is Map) value else value.toMap()) + _value.fold(Map(), (acc, k, v) -> if (acc is ConversionFailure) acc else + let (_k = applyType(keyType, k)) + if (_k is ConversionFailure) _k else + let (_v = applyType(valueType, v)) + if (_v is ConversionFailure) _v else + acc.put(_k, _v) + ) + +local function applyMapping(keyType: reflect.Type, valueType: reflect.Type, value: Dynamic|Map|Mapping): Mapping|ConversionFailure = + applyMap(keyType, valueType, value).toMapping() + +local function applyList(type: reflect.Type, value: Dynamic|Collection): List|ConversionFailure = + value.toList().fold(List(), (acc: List|ConversionFailure, v) -> if (acc is ConversionFailure) acc else + let (_v = applyType(type, v)) + if (_v is ConversionFailure) _v else + acc.add(_v) + ) + +local function applyListing(type: reflect.Type, value: Dynamic|Collection): Listing|ConversionFailure = + applyList(type, value).toListing() diff --git a/packages/pkl.experimental.deepToTyped/tests/deepToTyped.pkl b/packages/pkl.experimental.deepToTyped/tests/deepToTyped.pkl new file mode 100644 index 0000000..01c4657 --- /dev/null +++ b/packages/pkl.experimental.deepToTyped/tests/deepToTyped.pkl @@ -0,0 +1,299 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
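The `reflect.UnionType` handler above tries the union's members in declaration order and keeps the first successful conversion, otherwise surfacing a single "no member matched" failure. A small sketch of the resulting behaviour (hypothetical typealias and import path; expected results are inferred from the handler, not separately verified):

```
import "../deepToTyped.pkl" as t

local typealias IntOrString = Int|String

first = t.apply(IntOrString, 42)     // expected: 42   (the Int member matches)
second = t.apply(IntOrString, "42")  // expected: "42" (Int fails, so String is tried next)
// t.apply(IntOrString, true)        // expected to throw:
//   "No member of union type matched value 'true'"
```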
+//===----------------------------------------------------------------------===// +module pkl.experimental.deepToTyped.tests.deepToTyped + +amends "pkl:test" + +import "../deepToTyped.pkl" as t + +local class Foo { + x: Int +} + +local class Bar { + foo: Foo +} + +local class Baz { + y: Int + baz: Baz? +} + +local dynamic: Dynamic = new Dynamic { + foo { + x = 1 + } +} + +local typealias FooBarMapping = Mapping//|Listing|Foo +local exMapping: FooBarMapping = new { + [new Foo { x = 42 }] { + foo { + x = 1337 + } + } +} + +local typealias BarList = List +local typealias BarListing = Listing +local exListing: BarListing = new { + new Bar { + foo { + x = -1 + } + } +} + +local typealias BarSet = Set +local exSet: BarSet = Set(new Bar { foo { x = 1 } }) + +local typealias BarUnionSetOrList = *BarSet|BarList + +local class MyService { + metadata: MyMetadata + spec: MySpec +} + +local class MyMetadata { + name: String + namespace: String + labels: Mapping +} + +local class MySpec { + type: String + ports: Listing + selector: Mapping +} + +local class Port { + port: Int + targetPort: Int + protocol: String +} + +local service: MyService = new { + metadata { + name = "example-service" + namespace = "example-namespace" + labels { + ["app"] = "example" + ["env"] = "prod" + ["prtn"] = "p102" + } + } + spec { + type = "ClusterIP" + ports { + new { + port = 8080 + targetPort = 8080 + protocol = "TCP" + } + } + selector { + ["app"] = "example" + ["env"] = "prod" + ["prtn"] = "p102" + } + } +} + +local class OverConstrained { + // k8s Deployment refers to k8s Probe, which has a type constraint on `exec` that excludes the default value of + // the Probe class; only one field of three may be `null` and all are, by default. + foo: String?(this != null || bar != null) + bar: Int? +} + +local typealias fooLiteral = "foo" +local typealias enumType = *"foo"|"bar"|"baz" + +local class ClassWithDefaultAttribute { + what: String = "The default" + who: String? + where: String +} + +local open class BaseClass { + a: String + b: String +} + +local class SubClass extends BaseClass { + // purposefully override `b`'s type to make sure we are converting correctly. + b: Int + c: Int +} + +local class Storage { + size: DataSize +} + +facts { + ["Basic types"] { + t.apply(Int, 1) == 1 + t.apply(Float, 1) == 1 + t.apply(Float, 1.1) == 1.1 + t.apply(Number, 1) == 1 + t.apply(Boolean, true) == true + t.apply(String, "hello") == "hello" + t.apply(fooLiteral, "foo") == "foo" + t.apply(enumType, "bar") == "bar" + } + + ["Listy types"] { + local list = new Listing { + "foo" + 42 + }.toList() + t.apply(List, list) == list + t.apply(Listing, list) == list.toListing() + t.apply(Set, list) == list.toSet() + // There is a choice here: Abstract (!) 
supertype Collection defaults to List + t.apply(Collection, list) == list + } + + ["Mappy types"] { + local map = new Mapping { + ["bar"] = "foo" + [0] = 42 + }.toMap() + t.apply(Map, map) == map + t.apply(Mapping, map) == map.toMapping() + } + + ["Values of the correct Class type are returned"] { + t.apply(Foo, new Foo { x = 1 }) == new Foo { x = 1 } + } + + ["Dynamic to user-defined class"] { + t.apply(Foo, dynamic.foo) == new Foo { x = 1 } + t.apply(Bar, dynamic) == new Bar { foo = new { x = 1 } } + } + + ["Dynamic to concrete typealias"] { + t.apply(FooBarMapping, exMapping.toMap().toDynamic()) == exMapping + t.apply(BarListing, exListing.toList().toDynamic()) == exListing + t.apply(BarList, exListing.toList().toDynamic()) == exListing.toList() + t.apply(BarSet, exSet.toDynamic()) == exSet + } + + ["Dynamic to union typealias"] { + t.apply(BarUnionSetOrList, exSet) == exSet + t.apply(BarUnionSetOrList, exListing.toList()) is Collection + } + + ["Service.toDynamic() to MyService"] { + t.apply(MyService, service.toDynamic()) == service + } + + ["Service nested Dynamic to k8s Service"] { + local nestedDynamicService = new Dynamic { + apiVersion = "v1" + kind = "Service" + metadata { + name = "example-service" + namespace = "example-namespace" + labels { + ["app"] = "example" + ["env"] = "prod" + ["prtn"] = "p102" + } + } + spec { + type = "ClusterIP" + ports { + new Dynamic { + port = 8080 + targetPort = 8080 + protocol = "TCP" + } + } + selector { + ["app"] = "example" + ["env"] = "prod" + ["prtn"] = "p102" + } + } + } + t.apply(MyService, nestedDynamicService) == service + } + + ["Negative cases"] { + // Something to think about: `y` is not in Foo; it quietly falls through here. + t.apply(Foo, new Dynamic { x = 42 y = 1337 }) == new Foo { x = 42 } + } + + ["Type with fields that have dependent constraints"] { + local expectedOverconstrained = new OverConstrained { + foo = "test" + bar = null + } + + t.apply(OverConstrained, new Dynamic { foo = "test" }) == expectedOverconstrained + } + + ["Class with default attribute gets set correctly"] { + local expectedClassWithDefault = new ClassWithDefaultAttribute { + who = "you!" + where = "here" + } + + expectedClassWithDefault.what == "The default" + + local input = new Dynamic { + who = "you!" + where = "here" + } + + t.apply(ClassWithDefaultAttribute, input) == expectedClassWithDefault + } + + ["Correctly converts parent properties on a class"] { + local expectedResult: SubClass = new { + a = "a" + b = 2 + c = 3 + } + + local value = new { + a = "a" + b = 2 + c = 3 + } + + t.apply(SubClass, value) == expectedResult + } + + ["Report missing union type value"] { + module.catch(() -> t.apply(enumType, "non")).contains("'non'") + } + ["Custom handler allows converting custom types"] { + local expectedResult: Storage = new { + size = 1024.b + } + local value = new { + size = 1024 + } + // instantiate module as we need to amend it + new t { + classHandlers { + [DataSize] = (_, value) -> (value as Int).toDataSize("b") + } + }.apply(Storage, value) == expectedResult + } +} diff --git a/packages/pkl.experimental.net/PklProject b/packages/pkl.experimental.net/PklProject new file mode 100644 index 0000000..963198d --- /dev/null +++ b/packages/pkl.experimental.net/PklProject @@ -0,0 +1,21 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Experimental library for handling network (IP and MAC) addresses. +amends "../basePklProject.pkl" + +package { + version = "1.0.0" +} diff --git a/packages/pkl.experimental.net/PklProject.deps.json b/packages/pkl.experimental.net/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/pkl.experimental.net/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/pkl.experimental.net/net.pkl b/packages/pkl.experimental.net/net.pkl new file mode 100644 index 0000000..9701b5d --- /dev/null +++ b/packages/pkl.experimental.net/net.pkl @@ -0,0 +1,305 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// We sell IP addresses and IP address accessories +/// This module contains types and functions for handling network (IP and MAC) addresses +@ModuleInfo { minPklVersion = "0.25.0" } +module pkl.experimental.net.net + +import "pkl:math" +import "./u128.pkl" +import "./net.pkl" + +// language=RegExp +const hidden hex: String = "[0-9a-fA-F]" +// language=RegExp +const hidden decByte: String = #"(25[0-5]|2[0-4]\d|[01]?\d\d?)"# +// language=RegExp +const hidden ipv4String = #"(\#(decByte)\.){3}\#(decByte)"# +// language=RegExp +const hidden ipv6String = #"((\#(hex){1,4}:){7}\#(hex){1,4}|(\#(hex){1,4}:){1,7}:|(\#(hex){1,4}:){1,6}:\#(hex){1,4}|(\#(hex){1,4}:){1,5}(:\#(hex){1,4}){1,2}|(\#(hex){1,4}:){1,4}(:\#(hex){1,4}){1,3}|(\#(hex){1,4}:){1,3}(:\#(hex){1,4}){1,4}|(\#(hex){1,4}:){1,2}(:\#(hex){1,4}){1,5}|\#(hex){1,4}:((:\#(hex){1,4}){1,6})|:((:\#(hex){1,4}){1,7}|:)|fe80:(:\#(hex){0,4}){0,4}%\#(hex){1,}|::(ffff(:0{1,4}){0,1}:){0,1}(\#(decByte)\.){3,3}\#(decByte)|(\#(hex){1,4}:){1,4}:(\#(decByte)\.){3,3}\#(decByte))"# + +/// A string that contains a MAC address +// language=RegExp +typealias MACAddressString = String(matches(Regex(#"(\#(net.hex){1,2}[\.:-]){5}(\#(net.hex){1,2})"#))) + +/// A string that contains either an IPv4 or IPv6 address. +typealias IPAddressString = IPv4AddressString|IPv6AddressString +/// A string that contains either an IPv4 or IPv6 address and port. 
+typealias IPAddressPortString = IPv4AddressPortString|IPv6AddressPortString +/// A string that contains either an IPv4 or IPv6 CIDR range. +typealias IPCIDRString = IPv4CIDRString|IPv6CIDRString +/// An IPv4 or IPv6 address. +typealias IPAddress = IPv4Address|IPv6Address +/// An IPv4 or IPv6 network. +typealias IPNetwork = IPv4Network|IPv6Network + +/// A string that contains an IPv4 address. +// language=RegExp +typealias IPv4AddressString = String(matches(Regex(net.ipv4String))) +/// A string that contains an IPv6 address. +// language=RegExp +typealias IPv6AddressString = String(matches(Regex(net.ipv6String))) + +/// A string that contains an IPv4 address and port. +// language=RegExp +typealias IPv4AddressPortString = String(matches(Regex(#"\#(net.ipv4String):\d{1,5}"#))) +/// A string that contains an IPv6 address and port. +// language=RegExp +typealias IPv6AddressPortString = String(matches(Regex(#"\[\#(net.ipv6String)\]:\d{1,5}"#))) + +/// A string that contains an IPv4 address. +// language=RegExp +typealias IPv4CIDRString = String(matches(Regex(#"\#(net.ipv4String)/\d{1,2}"#))) +/// A string that contains an IPv6 address. +// language=RegExp +typealias IPv6CIDRString = String(matches(Regex(#"\#(net.ipv6String)/\d{1,3}"#))) + +/// Creates an [IPAddress] from an [IPAddressString]. +const function IP(ip: IPAddressString): IPAddress = + if (ip is IPv6AddressString) IPv6Address(ip) + else if (ip is IPv4AddressString) IPv4Address(ip) + else throw("Invalid IP: \(ip)") + +/// Creates an [IPv4Address] from an [IPv4AddressString]. +const function IPv4Address(ip: IPv4AddressString): IPv4Address = new { + local parts = ip.split(".") + repr = parts[0].toInt().shl(24) + .or(parts[1].toInt().shl(16)) + .or(parts[2].toInt().shl(8)) + .or(parts[3].toInt()) +} + +/// An IPv4 address. +class IPv4Address { + hidden repr: UInt32 + const hidden bitWidth: UInt = 32 + + local self = this + + /// maskHi generates a mask of 1s in the top [prefix] bits of a [UInt32]. + function maskHi(prefix: UInt(isBetween(0, bitWidth))): UInt32 = mask32Hi(prefix) + /// maskLo generates a mask of 1s in the bottom [suffix] bits of a [UInt32]. + function maskLo(suffix: UInt(isBetween(0, bitWidth))): UInt32 = mask32Lo(suffix) + + /// reverse returns the PTR record name for this address. + function reverse(): String = new IPv4Network { base = self; prefix = self.bitWidth }.reverse() + + /// return the ip address immediately after this one + function next(): IPv4Address = new { repr = self.repr + 1 } + + function toString(): IPv4AddressString = new Listing { + repr.ushr(24).and(math.maxUInt8).toString() + repr.ushr(16).and(math.maxUInt8).toString() + repr.ushr(8).and(math.maxUInt8).toString() + repr.and(math.maxUInt8).toString() + }.join(".") +} + +/// Creates an [IPv6Address] from an [IPv6AddressString]. +const function IPv6Address(ip: IPv6AddressString): IPv6Address = + let (_ip = expandIPv6AddressString(ip).toLowerCase().split(":")) + new { + repr = u128.UInt128( + parseHex32(_ip[0] + _ip[1]), + parseHex32(_ip[2] + _ip[3]), + parseHex32(_ip[4] + _ip[5]), + parseHex32(_ip[6] + _ip[7]) + ) + } + +/// An IPv6 address. +class IPv6Address { + hidden repr: u128.UInt128 + const hidden bitWidth: UInt = 128 + + local self = this + + /// maskHi generates a mask of 1s in the top [prefix] bits of a [u128.UInt128]. 
+ function maskHi(prefix: UInt(isBetween(0, bitWidth))): u128.UInt128 = + if (prefix <= 32) u128.UInt128(mask32Hi(prefix), 0, 0, 0) + else if (prefix <= 64) u128.UInt128(math.maxUInt32, mask32Hi(prefix - 32), 0, 0) + else if (prefix <= 96) u128.UInt128(math.maxUInt32, math.maxUInt32, mask32Hi(prefix - 64), 0) + else u128.UInt128(math.maxUInt32, math.maxUInt32, math.maxUInt32, mask32Hi(prefix - 96)) + /// maskLo generates a mask of 1s in the bottom [suffix] bits of a [u128.UInt128]. + function maskLo(suffix: UInt(isBetween(0, bitWidth))): u128.UInt128 = + if (suffix <= 32) u128.UInt128(0, 0, 0, mask32Lo(suffix)) + else if (suffix <= 64) u128.UInt128(0, 0, mask32Lo(suffix - 32), math.maxUInt32) + else if (suffix <= 96) u128.UInt128(0, mask32Lo(suffix - 64), math.maxUInt32, math.maxUInt32) + else u128.UInt128(mask32Lo(suffix - 96), math.maxUInt32, math.maxUInt32, math.maxUInt32) + + /// reverse returns the PTR record name for this address. + function reverse(): String = new IPv6Network { base = self; prefix = self.bitWidth }.reverse() + + /// return the ip address immediately after this one + function next(): IPv6Address = new { repr = self.repr.add(u128.one) } + + function toString(): IPv6AddressString = expandIPv6AddressString(new Listing { + repr.hihi.ushr(16).toRadixString(16) + repr.hihi.and(math.maxUInt16).toRadixString(16) + repr.hilo.ushr(16).toRadixString(16) + repr.hilo.and(math.maxUInt16).toRadixString(16) + repr.lohi.ushr(16).toRadixString(16) + repr.lohi.and(math.maxUInt16).toRadixString(16) + repr.lolo.ushr(16).toRadixString(16) + repr.lolo.and(math.maxUInt16).toRadixString(16) + }.join(":")) +} + +/// Creates an [IPNetwork] from an IPv4 or IPv6 CIDR block string +const function IPNetwork(cidr: String): IPNetwork = + if (cidr.split("/").first is IPv4AddressString) IPv4Network(cidr) + else if (cidr.split("/").first is IPv6AddressString) IPv6Network(cidr) + else throw("Invalid network CIDR: \(cidr)") + +/// Creates an [IPv4Network] from an IPv4 CIDR block string +const function IPv4Network(cidr: String): IPv4Network = new { + base = IPv4Address(cidr.split("/").first) + prefix = cidr.split("/").last.toInt() +} + +/// An IPv4 network. +class IPv4Network { + /// The base address of this network + base: IPv4Address + /// The CIDR prefix of this network + prefix: UInt(isBetween(0, bitWidth)) + fixed hidden bitWidth = base.bitWidth + const hidden reverseBitResolution: UInt = 8 + + local self = this + + /// The first address in this network. + /// Will be equivalent to [lastAddress] when [prefix] is equal to [bitWidth]. + fixed firstAddress: IPv4Address = new { repr = base.repr.and(base.maskHi(prefix)) } + /// The last address in this network. + /// Will be equivalent to [firstAddress] when [prefix] is equal to [bitWidth]. + fixed lastAddress: IPv4Address = new { repr = base.repr.or(base.maskLo(bitWidth - prefix)) } + + /// Return the subnet-mask for this network. + function getSubnetMask(): IPv4AddressString = new IPv4Address { repr = base.maskHi(prefix) }.toString() + + /// Return true if this network contains [ip]. + function contains(ip: IPv4Address): Boolean = firstAddress.repr <= ip.repr && ip.repr <= lastAddress.repr + /// Generate the name of the reverse DNS zone for this network. + function reverse(): String = base.toString().split(".").take(prefix ~/ reverseBitResolution).reverse().join(".") + ".in-addr.arpa" + + /// Calculate all subnets of this network with prefix [target]. 
+ /// For example, given IPv4Network("10.53.120.0/21").subdivideTo(24), it outputs 8 /24 networks + function subdivideTo(target: UInt(isBetween(0, bitWidth) && this >= prefix)): Listing = + if (prefix == target) new { self } + else new { + ...new IPv4Network { base = self.base; prefix = self.prefix + 1 }.subdivideTo(target) + ...new IPv4Network { base = new { repr = self.base.repr + 1.shl(bitWidth - self.prefix - 1) }; prefix = self.prefix + 1 }.subdivideTo(target) + } + + function toString(): String = "\(base.toString())/\(prefix)" +} + +/// Produces a listing of IPv4 addresses between [start] and [end], inclusive. +const function IPv4Range(start: IPv4Address, end: IPv4Address): Listing = new { + for (ipu in IntSeq(start.repr, end.repr)) { + new { repr = ipu } + } +} + +/// Creates an [IPv6Network] from an IPv6 CIDR block string +const function IPv6Network(cidr: String): IPv6Network = new { + base = IPv6Address(cidr.split("/").first) + prefix = cidr.split("/").last.toInt() +} + +/// An IPv6 network. +class IPv6Network { + /// The base address of this network + base: IPv6Address + /// The CIDR prefix of this network + prefix: UInt(isBetween(0, bitWidth)) + fixed hidden bitWidth = base.bitWidth + const hidden reverseBitResolution: UInt = 4 + + local self = this + + /// The first address in this network. + /// Will be equivalent to [lastAddress] when [prefix] is equal to [bitWidth]. + fixed firstAddress: IPv6Address = new { repr = base.repr.and(base.maskHi(prefix)) } + /// The last address in this network. + /// Will be equivalent to [firstAddress] when [prefix] is equal to [bitWidth]. + fixed lastAddress: IPv6Address = new { repr = base.repr.or(base.maskLo(bitWidth - prefix)) } + + /// Return true if this network contains [ip]. + function contains(ip: IPv6Address): Boolean = firstAddress.repr.le(ip.repr) && ip.repr.le(lastAddress.repr) + /// Generate the name of the reverse DNS zone for this network. + function reverse(): String = expandIPv6AddressString(base.toString()).replaceAll(":", "").chars.take(prefix ~/ reverseBitResolution).reverse().join(".") + ".ip6.arpa" + + /// Calculate all subnets of this network with prefix [target]. + /// For example, given IPv6Network("2620:149:a:960::/61").subdivideTo(64), it outputs 8 /64 networks + function subdivideTo(target: UInt(isBetween(0, bitWidth) && this >= prefix)): Listing = + if (prefix == target) new { self } + else new { + ...new IPv6Network { base = self.base; prefix = self.prefix + 1 }.subdivideTo(target) + ...new IPv6Network { base = new { repr = self.base.repr.add(u128.one.shl(bitWidth - self.prefix - 1)) }; prefix = self.prefix + 1 }.subdivideTo(target) + } + + function toString(): String = "\(compressIPv6AddressString(base.toString()))/\(prefix)" +} + +/// Produces a listing of IPv6 addresses between [start] and [end], inclusive. 
+const function IPv6Range(start: IPv6Address, end: IPv6Address): Listing = new { + for (ipu in start.repr.seq(end.repr)) { + new { repr = ipu } + } +} + +/// Canonicalizes IPv6 addresses by expanding each component to be 4 digits (zero-padded) and expanding :: +/// Given input "123:45:6::7890", the output is "0123:0045:0006:0000:0000:0000:0000:7890" +const function expandIPv6AddressString(_ip: IPv6AddressString): IPv6AddressString = + if (_ip.length == 39) _ip // assume it's already canonicalized + else + let (ip = if (_ip.endsWith("::")) _ip + "0" else _ip) + let (stuff = ip.split("::").map((half) -> half.split(":").map((octet) -> octet.padStart(4, "0")))) + if (stuff.length == 1) stuff.first.join(":") + else if (stuff.length == 2) (stuff.first + List("0000").repeat(8 - stuff.first.length - stuff.last.length) + stuff.last).join(":") + else throw("unintelligible IPv6 address: " + ip) + +/// Compresses IPv6 addresses by stripping leading zeros from each component and collapsing repeated zero components to :: +/// Given input "0123:0045:0006:0000:0000:0000:0000:7890", the output is "123:45:6::7890" +/// Note that this may not produce optimally compressed addresses if there are multiple spans of repeated zero components +const function compressIPv6AddressString(ip: IPv6AddressString): IPv6AddressString = + let (trimmed = ip.split(":").map((octet) -> + if (octet == "0000") "0" + else octet.dropWhile((c) -> c == "0") + ).join(":")) + trimmed.replaceFirst(Regex("(:|^)(0:)+0(:|$)"), "::") + +/// parseHex tranforms a single hexadecimal character into its unsigned integer representation. +const function parseHex(digit: Char): UInt8 = + let (d = digit.toLowerCase()) + "0123456789abcdef".chars.findIndexOrNull((it) -> it == d) ?? + throw("Unrecognized hex digit: \(d)") + +/// parseHex32 transforms an 8 character hexidecimal string into its UInt32 representation. +const function parseHex32(s: String(this.length == 8)): UInt32 = + IntSeq(0, 7) + .step(2) + .map((it) -> s.substring(it, it + 2)) + .fold(0, (acc, it) -> acc.shl(8) + byteLut[it.toLowerCase()]) + +const local byteLut = IntSeq(0, 255).map((it) -> it).toMap((it) -> it.toRadixString(16).padStart(2, "0"), (it) -> it) + +/// mask32Hi generates a mask of 1s in the top [prefix] bits of a [UInt32]. +const function mask32Hi(prefix: UInt(this.isBetween(0, 32))): UInt32 = math.maxUInt32.ushr(32-prefix).shl(32-prefix) +/// mask32Lo generates a mask of 1s in the bottom [suffix] bits of a [UInt32]. +const function mask32Lo(suffix: UInt(this.isBetween(0, 32))): UInt32 = math.maxUInt32.ushr(32-suffix) diff --git a/packages/pkl.experimental.net/tests/u128.pkl b/packages/pkl.experimental.net/tests/u128.pkl new file mode 100644 index 0000000..4e55260 --- /dev/null +++ b/packages/pkl.experimental.net/tests/u128.pkl @@ -0,0 +1,94 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
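A short usage sketch for the net module added above (the relative import and property names are illustrative; the expected values follow from the mask, CIDR, and canonicalization logic in net.pkl, including the worked examples in its doc comments):

```
import "../net.pkl"

local lan = net.IPv4Network("10.53.120.0/21")

subnetMask = lan.getSubnetMask()                        // expected: "255.255.248.0"
inRange = lan.contains(net.IPv4Address("10.53.121.9"))  // expected: true
slash24Count = lan.subdivideTo(24).toList().length      // expected: 8

expanded = net.expandIPv6AddressString("123:45:6::7890")
// expected: "0123:0045:0006:0000:0000:0000:0000:7890"
compressed = net.compressIPv6AddressString(expanded)
// expected: "123:45:6::7890"
```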
+//===----------------------------------------------------------------------===// +module pkl.experimental.net.tests.u128 + +amends "pkl:test" + +import "pkl:math" +import "../u128.pkl" + +local someValue = u128.UInt128(0x74ab3ef8, 0x09bfce41, 0xab38bef1, 0x10efba83) + +facts { + ["le"] { + u128.zero.le(u128.zero) + u128.zero.le(u128.one) + u128.zero.le(u128.maxUInt128) + someValue.le(u128.maxUInt128) + !u128.maxUInt128.le(someValue) + u128.zero.le(someValue) + someValue.le(someValue) + } + + ["lt"] { + !u128.zero.lt(u128.zero) + u128.zero.lt(u128.one) + u128.zero.lt(u128.maxUInt128) + someValue.lt(u128.maxUInt128) + !u128.maxUInt128.lt(someValue) + u128.zero.lt(someValue) + !someValue.lt(someValue) + } +} + +examples { + ["toString"] { + u128.one.toString() + u128.maxUInt128.toString() + module.catch(() -> + u128.UInt128(math.maxUInt32 + 1, math.maxUInt32 + 2, math.maxUInt32 + 3, math.maxUInt32 + 4).toString() + ) + } + + ["and"] { + someValue.and(u128.zero).toString() + u128.zero.and(someValue).toString() + u128.maxUInt128.and(someValue).toString() + someValue.and(u128.maxUInt128).toString() + } + + ["or"] { + someValue.or(u128.zero).toString() + u128.zero.or(someValue).toString() + u128.maxUInt128.or(someValue).toString() + someValue.or(u128.maxUInt128).toString() + } + + ["shl"] { + u128.one.shl(0).toString() + u128.one.shl(1).toString() + u128.one.shl(19).toString() + u128.one.shl(32).toString() + u128.one.shl(33).toString() + u128.one.shl(51).toString() + u128.one.shl(64).toString() + u128.one.shl(65).toString() + u128.one.shl(84).toString() + } + + ["add"] { + u128.one.add(u128.one).toString() + someValue.add(someValue).toString() + someValue.add(u128.zero).toString() + } + + ["seq"] { + u128.zero.seq(u128.UInt128(0, 0, 0, 64)).length// + // u128.zero.seq(u128.UInt128(0, 0, 1, 64)).length + // womp womp + // java.lang.NegativeArraySizeException: -2147483648 + } +} diff --git a/packages/pkl.experimental.net/tests/u128.pkl-expected.pcf b/packages/pkl.experimental.net/tests/u128.pkl-expected.pcf new file mode 100644 index 0000000..cec49dd --- /dev/null +++ b/packages/pkl.experimental.net/tests/u128.pkl-expected.pcf @@ -0,0 +1,38 @@ +examples { + ["toString"] { + "00000000000000000000000000000001" + "ffffffffffffffffffffffffffffffff" + "Type constraint `isBetween(0, 4294967295)` violated. Value: 4294967296" + } + ["and"] { + "00000000000000000000000000000000" + "00000000000000000000000000000000" + "74ab3ef809bfce41ab38bef110efba83" + "74ab3ef809bfce41ab38bef110efba83" + } + ["or"] { + "74ab3ef809bfce41ab38bef110efba83" + "74ab3ef809bfce41ab38bef110efba83" + "ffffffffffffffffffffffffffffffff" + "ffffffffffffffffffffffffffffffff" + } + ["shl"] { + "00000000000000000000000000000001" + "00000000000000000000000000000002" + "00000000000000000000000000080000" + "00000000000000000000000100000000" + "00000000000000000000000200000000" + "00000000000000000008000000000000" + "00000000000000010000000000000000" + "00000000000000020000000000000000" + "00000000001000000000000000000000" + } + ["add"] { + "00000000000000000000000000000002" + "e9567df0137f9c8356717de221df7506" + "74ab3ef809bfce41ab38bef110efba83" + } + ["seq"] { + 65 + } +} diff --git a/packages/pkl.experimental.net/u128.pkl b/packages/pkl.experimental.net/u128.pkl new file mode 100644 index 0000000..465a967 --- /dev/null +++ b/packages/pkl.experimental.net/u128.pkl @@ -0,0 +1,150 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Who was it that said we'd never need more than 64 bits?? +/// Some tasks work best with unsigned 128-bit integers, eg. working with IPv6 addresses. +/// This module implements a [UInt128] type composed out of 4 [UInt]s that can handle limited bit-wise and arithmetic operations. +/// NB: this task is a better fit for [UInt32], but it cannot be used here as we rely on shifting bits off the significant end and Pkl's implementation doesn't permit this +module pkl.experimental.net.u128 + +import "pkl:math" + +/// A [UInt128] with value 0. +const zero: UInt128 = UInt128(0, 0, 0, 0) +/// A [UInt128] with value 1. +const one: UInt128 = UInt128(0, 0, 0, 1) +/// The maximum [UInt128] value. +const maxUInt128: UInt128 = UInt128(math.maxUInt32, math.maxUInt32, math.maxUInt32, math.maxUInt32) + +/// Creates a [UInt128] with components [UInt32]s [high], [midHigh], [midLow], and [low] +const function UInt128(high: UInt32, midHigh: UInt32, midLow: UInt32, low: UInt32): UInt128 = new { + words = List(low, midLow, midHigh, high) +} + +/// An unsigned 128-bit integer. +class UInt128 { + hidden words: List(length == 4) + hidden hihi: UInt = words[3] + hidden hilo: UInt = words[2] + hidden lohi: UInt = words[1] + hidden lolo: UInt = words[0] + + local self = this + + function toString(): String = new Listing { + hihi.toRadixString(16).padStart(8, "0") + hilo.toRadixString(16).padStart(8, "0") + lohi.toRadixString(16).padStart(8, "0") + lolo.toRadixString(16).padStart(8, "0") + }.join("") + + /// Bitwise AND of this integer and [n]. + function and(other: UInt128): UInt128 = UInt128( + self.hihi.and(other.hihi), + self.hilo.and(other.hilo), + self.lohi.and(other.lohi), + self.lolo.and(other.lolo) + ) + + /// Bitwise OR of this integer and [n]. + function or(other: UInt128): UInt128 = UInt128( + self.hihi.or(other.hihi), + self.hilo.or(other.hilo), + self.lohi.or(other.lohi), + self.lolo.or(other.lolo) + ) + + /// Returns true if this integer is less than or equal to [other]. + function le(other: UInt128): Boolean = self.words == other.words || self.lt(other) + + /// Returns true if this integer is strictly less than [other]. + function lt(other: UInt128): Boolean = + self.hihi < other.hihi || self.hihi == other.hihi && ( + self.hilo < other.hilo || self.hilo == other.hilo && ( + self.lohi < other.lohi || self.lohi == other.lohi && ( + self.lolo < other.lolo + ))) + + /// Shifts this integer left by [n] bits. + function shl(n: UInt): UInt128 = + if (n == 0) self + else if (n > 32) self.shl(32).shl(n - 32) // just recurse, 5head! 
+ // not only is this n == 32 case more efficient, but it protects us from the fact that Pkl has no UInt64 type + // we would be in for a bad time if we shifted a [math.maxUInt32] left 32 bits into the sign bit of [Int] + else if (n == 32) UInt128(self.hilo, self.lohi, self.lolo, 0) + else UInt128( + self.hihi.shl(n).and(math.maxUInt32).or(self.hilo.ushr(32 - n)), + self.hilo.shl(n).and(math.maxUInt32).or(self.lohi.ushr(32 - n)), + self.lohi.shl(n).and(math.maxUInt32).or(self.lolo.ushr(32 - n)), + self.lolo.shl(n).and(math.maxUInt32) + ) + + /// Return the sum of this integer and [other]. + function add(other: UInt128): UInt128 = new { + local complement = math.maxUInt32 - other.lolo + local loloOverflows = self.lolo > complement + local lolo = + if (loloOverflows) self.lolo - math.maxUInt32 + other.lolo - 1 + else self.lolo + other.lolo + + local lohiComplement = math.maxUInt32 - other.lohi + local lohiCarry = if (loloOverflows) 1 else 0 + local lohiOverflows = (self.lohi + lohiCarry) > lohiComplement + local lohi = + if (lohiOverflows) self.lohi - math.maxUInt32 + other.lohi - 1 + lohiCarry + else self.lohi + other.lohi + lohiCarry + + local hiloComplement = math.maxUInt32 - other.hilo + local hiloCarry = if (lohiOverflows) 1 else 0 + local hiloOverflows = (self.hilo + hiloCarry) > hiloComplement + local hilo = + if (hiloOverflows) self.hilo - math.maxUInt32 + other.hilo - 1 + hiloCarry + else self.hilo + other.hilo + hiloCarry + + local hihiCarry = if (hiloOverflows) 1 else 0 + local hihi = (self.hihi + other.hihi + hihiCarry).and(math.maxUInt32) + words = List(lolo, lohi, hilo, hihi) + } + + /// Generate a sequence of [UInt128] between this integer and [other], inclusive. + /// Behaves like [IntSeq]. + function seq(other: UInt128): Listing = + if (self.hihi == other.hihi) + if (self.hilo == other.hilo) + if (self.lohi == other.lohi) new { + for (ll in IntSeq(self.lolo, other.lolo)) { UInt128(self.hihi, self.hilo, self.lohi, ll) } + } + else new { + ...UInt128(self.hihi, self.hilo, self.lohi, self.lolo).seq(UInt128(self.hihi, self.hilo, self.lohi, math.maxUInt32)) + for (lh in IntSeq(self.lohi, other.lohi)) { + ...UInt128(self.hihi, self.hilo, lh, 0).seq(UInt128(self.hihi, self.hilo, lh, math.maxUInt32)) + } + ...UInt128(other.hihi, other.hilo, other.lohi, 0).seq(UInt128(other.hihi, other.hilo, other.lohi, other.lolo)) + } + else new { + ...UInt128(self.hihi, self.hilo, self.lohi, self.lolo).seq(UInt128(self.hihi, self.hilo, math.maxUInt32, math.maxUInt32)) + for (hl in IntSeq(self.hilo, other.hilo)) { + ...UInt128(self.hihi, hl, 0, 0).seq(UInt128(self.hihi, hl, math.maxUInt32, math.maxUInt32)) + } + ...UInt128(other.hihi, other.hilo, 0, 0).seq(UInt128(other.hihi, other.hilo, other.lohi, other.lolo)) + } + else new { + ...UInt128(self.hihi, self.hilo, self.lohi, self.lolo).seq(UInt128(self.hihi, math.maxUInt32, math.maxUInt32, math.maxUInt32)) + for (hh in IntSeq(self.hilo, other.hilo)) { + ...UInt128(hh, 0, 0, 0).seq(UInt128(hh, math.maxUInt32, math.maxUInt32, math.maxUInt32)) + } + ...UInt128(other.hihi, 0, 0, 0).seq(UInt128(other.hihi, other.hilo, other.lohi, other.lolo)) + } +} diff --git a/packages/pkl.experimental.syntax/AnnotationNode.pkl b/packages/pkl.experimental.syntax/AnnotationNode.pkl new file mode 100644 index 0000000..77c1087 --- /dev/null +++ b/packages/pkl.experimental.syntax/AnnotationNode.pkl @@ -0,0 +1,29 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.AnnotationNode + +extends "Node.pkl" + +import "QualifiedIdentifierNode.pkl" +import "ObjectBodyNode.pkl" + +identifier: QualifiedIdentifierNode + +body: ObjectBodyNode? + +function render(currentIndent: String) = + "\(currentIndent)@\(identifier.render(currentIndent))" + + if (body == null) "" else " " + body.render(currentIndent) diff --git a/packages/pkl.experimental.syntax/ClassNode.pkl b/packages/pkl.experimental.syntax/ClassNode.pkl new file mode 100644 index 0000000..7bb65b8 --- /dev/null +++ b/packages/pkl.experimental.syntax/ClassNode.pkl @@ -0,0 +1,59 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.ClassNode + +extends "ClassOrModuleNode.pkl" + +import "Node.pkl" +import "IdentifierNode.pkl" +import "AnnotationNode.pkl" +import "DocCommentNode.pkl" +import "TypeNode.pkl" + +docComment: DocCommentNode? + +annotations: Listing? + +classHeader: ClassHeader + +class ClassHeader extends Node { + modifiers: Listing<"abstract"|"external"|"local"|"open">(isDistinct)? + name: IdentifierNode + extendsClause: ClassExtendsClause? 
+ + function render(currentIndent: String) = currentIndent + new Listing { + modifiers?.join(" ") + "class \(name.render(currentIndent))" + extendsClause?.render(currentIndent) + "{" + }.toList().filterNonNull().join(" ") +} + +class ClassExtendsClause extends Node { + type: TypeNode.DeclaredTypeNode + + function render(currentIndent: String) = "extends " + type.render(currentIndent) +} + +function render(currentIndent: String) = List( + docComment?.render(currentIndent), + annotations?.toList()?.map((a) -> a.render(currentIndent))?.join("\n"), + classHeader.render(currentIndent), + super.render(currentIndent + indent), + "\(currentIndent)}" +) + .filterNonNull() + .join("\n") diff --git a/packages/pkl.experimental.syntax/ClassOrModuleNode.pkl b/packages/pkl.experimental.syntax/ClassOrModuleNode.pkl new file mode 100644 index 0000000..51742f6 --- /dev/null +++ b/packages/pkl.experimental.syntax/ClassOrModuleNode.pkl @@ -0,0 +1,120 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +abstract module pkl.experimental.syntax.ClassOrModuleNode + +extends "Node.pkl" + +import "Node.pkl" +import "DocCommentNode.pkl" +import "AnnotationNode.pkl" +import "ExpressionNode.pkl" +import "TypeAnnotationNode.pkl" +import "IdentifierNode.pkl" +import "ObjectBodyNode.pkl" +import "ParameterNode.pkl" + +/// The properties as defined in the class or module. +properties: Listing? + +/// The methods as defined in the class or module. +methods: Listing? + +/// A node representing the definition of a new property. +class PropertyDefinitionNode extends Node { + docComment: DocCommentNode? + annotations: Listing? + modifiers: Listing<"abstract"|"const"|"external"|"fixed"|"hidden"|"local">(isDistinct)? + name: IdentifierNode + typeAnnotation: TypeAnnotationNode? + + defaultValue: ExpressionNode? + + /// The object body for a property definition. + /// + /// * `foo: Bar { ... }` is invalid syntax. + /// * `foo = myBar { ... }` is technically valid syntax but should be acheived using an amends expression node. + objectBody: ObjectBodyNode(typeAnnotation == null && defaultValue == null)? 
+ + local function renderPropertyLine(currentIndent: String) = currentIndent + List( + if (modifiers != null) modifiers.join(" ") + " " else null, + name.render(currentIndent), + typeAnnotation?.render(currentIndent), + if (defaultValue != null) " = " + defaultValue.render(currentIndent) else null, + if (objectBody != null) " " + objectBody.render(currentIndent) else null + ).filterNonNull().join("") + + function render(currentIndent: String) = List( + docComment?.render(currentIndent), + annotations?.toList()?.map((a) -> a.render(currentIndent))?.join("\n"), + renderPropertyLine(currentIndent) + ) + .filter((it) -> it != null && !it.isEmpty) + .join("\n") +} + +class MethodNode extends Node { + docComment: DocCommentNode? + annotations: Listing? + modifiers: Listing<"abstract"|"const"|"external"|"local">(isDistinct)? + name: IdentifierNode + // TODO typeParameters? + parameters: Listing + returnTypeAnnotation: TypeAnnotationNode? + body: ExpressionNode + + local function renderMethodHeader(currentIndent: String) = List( + currentIndent, + if (modifiers != null) modifiers.join(" ") + " " else null, + "function ", + name.render(currentIndent), + "(", + parameters?.toList()?.map((p) -> p.render(currentIndent)).join(", "), + ")", + returnTypeAnnotation?.render(currentIndent), + " =" + ).filterNonNull().join("") + + function renderMethodBody(currentIndent: String) = + let (header = renderMethodHeader(currentIndent)) + let (inlineBody = body.render(currentIndent)) + let (firstInlineBodyLine = inlineBody.takeWhile((it) -> !it.endsWith("\n"))) + if ("\(header) \(firstInlineBodyLine)".length <= maxColumnWidth) + "\(header) \(inlineBody)" + else + "\(header)\n\(currentIndent + indent)\(body.render(currentIndent + indent))" + + function render(currentIndent: String) = List( + docComment?.render(currentIndent), + annotations?.toList()?.map((a) -> a.render(currentIndent))?.join("\n"), + renderMethodBody(currentIndent) + ) + .filter((it) -> it != null && !it.isEmpty) + .join("\n") +} + +function renderProperties(currentIndent: String) = properties + ?.toList() + ?.map((p) -> p.render(currentIndent)) + ?.join("\n\n") + +function renderMethods(currentIndent: String) = methods + ?.toList() + ?.map((m) -> m.render(currentIndent)) + ?.join("\n\n") + +function render(currentIndent: String) = List(renderProperties(currentIndent), renderMethods(currentIndent)) + .filterNonNull() + .join("\n\n") diff --git a/packages/pkl.experimental.syntax/DocCommentNode.pkl b/packages/pkl.experimental.syntax/DocCommentNode.pkl new file mode 100644 index 0000000..542d2b0 --- /dev/null +++ b/packages/pkl.experimental.syntax/DocCommentNode.pkl @@ -0,0 +1,82 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.DocCommentNode + +extends "Node.pkl" + +/// The doc comment itself, without the comment tokens (`///`). +value: String + +local function wrapTextImpl(words: List, width: Int) = + words.fold(Pair("", ""), (pair, word) -> + let (result: String = pair.first) + let (currentLine: String = pair.second) + if (currentLine.length + word.length > width) + Pair( + if (result == "") currentLine + else result + "\n" + currentLine, + word + ) + else + Pair( + result, + if (currentLine == "") word + else currentLine + " " + word + ) + ) + +/// Wrap [text] at the specified column [width]. +local function wrapParagraph(text: String, width: Int) = + let (words = text.split(Regex("\\s+"))) + let (result = words.fold(Pair("", ""), (pair, word) -> + let (aggregate: String = pair.first) + let (currentLine: String = pair.second) + if (currentLine.length + word.length > width) + Pair( + if (aggregate == "") currentLine + else aggregate + "\n" + currentLine, + word + ) + else + Pair( + aggregate, + if (currentLine == "") word + else currentLine + " " + word + ) + )) + List(result.first, result.second).filter((elem) -> !elem.isEmpty).join("\n") + +local function wrapText(text: String, width: Int) = + let (paragraphs = text.split("\n\n")) + paragraphs + .map((it) -> wrapParagraph(it, width)) + .join("\n\n") + +/// Configures this node to wrap lines when rendering. +hidden autoWrap: Boolean = false + +function render(currentIndent: String) = + let (wrapped = + if (autoWrap) wrapText(value, maxColumnWidth - currentIndent.length) + else value + ) + wrapped + .split("\n") + .map((line) -> + if (line.isEmpty) "\(currentIndent)///" + else "\(currentIndent)/// \(line)" + ) + .join("\n") diff --git a/packages/pkl.experimental.syntax/ExpressionNode.pkl b/packages/pkl.experimental.syntax/ExpressionNode.pkl new file mode 100644 index 0000000..3d582b6 --- /dev/null +++ b/packages/pkl.experimental.syntax/ExpressionNode.pkl @@ -0,0 +1,261 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===//
+abstract module pkl.experimental.syntax.ExpressionNode
+
+extends "Node.pkl"
+
+import "ExpressionNode.pkl"
+import "ObjectBodyNode.pkl"
+import "IdentifierNode.pkl"
+import "TypeNode.pkl"
+import "operators.pkl"
+
+typealias CompoundExpressionNode = BinaryOperatorExpressionNode|PrefixOperatorExpressionNode|IfElseExpressionNode
+
+class BinaryOperatorExpressionNode extends ExpressionNode {
+  local precedences: Mapping<String, Int> = new {
+    [operators.MULTIPLY] = 1
+    [operators.DIVIDE] = 1
+    [operators.INTEGER_DIVIDE] = 1
+    [operators.MODULO] = 1
+    [operators.PLUS] = 2
+    [operators.BINARY_MINUS] = 2
+    [operators.GREATER_THAN] = 3
+    [operators.LESS_THAN] = 3
+    [operators.GREATER_THAN_OR_EQUALS] = 3
+    [operators.LESS_THAN_OR_EQUALS] = 3
+    [operators.IS] = 4
+    [operators.AS] = 4
+    [operators.EQUALS] = 5
+    [operators.NOT_EQUALS] = 5
+    [operators.AND] = 6
+    [operators.OR] = 7
+    [operators.PIPE] = 8
+  }
+
+  local function hasHigherPrecedence(a: String, b: String) = precedences[a] >= precedences[b]
+
+  operator: operators.BinaryOperator = Undefined()
+
+  /// The left hand side of the expression
+  lhs: ExpressionNode
+
+  /// The right hand side of the expression
+  rhs: ExpressionNode
+
+  local function renderChild(child: ExpressionNode, currentIndent: String) =
+    let (rendered = child.render(currentIndent))
+    if (
+      !(child is CompoundExpressionNode)
+      || child is BinaryOperatorExpressionNode && hasHigherPrecedence(operator, child.operator)
+    )
+      rendered
+    else
+      parenthesize(rendered)
+
+  function render(currentIndent: String) =
+    "\(renderChild(lhs, currentIndent)) \(operator) \(renderChild(rhs, currentIndent))"
+}
+
+class BuiltInKeywordExpressionNode extends ExpressionNode {
+  keyword: "this"|"outer"|"module" = Undefined()
+
+  function render(_) = keyword
+}
+
+/// Unary operators in the prefix position: "!" and "-".
+class PrefixOperatorExpressionNode extends ExpressionNode {
+  operator: operators.PrefixOperator = Undefined()
+
+  expression: ExpressionNode
+
+  function render(currentIndent: String) =
+    let (body = expression.render(currentIndent))
+    if (expression is CompoundExpressionNode) operator + parenthesize(body)
+    else operator + body
+}
+
+/// Unary operators in the postfix position: "!!"
+class PostfixOperatorExpressionNode extends ExpressionNode {
+  operator: operators.PostfixOperator = Undefined()
+
+  expression: ExpressionNode
+
+  function render(currentIndent: String) =
+    let (body = expression.render(currentIndent))
+    if (expression is CompoundExpressionNode) parenthesize(body) + operator
+    else body + operator
+}
+
+class MemberAccessExpressionNode extends ExpressionNode {
+  identifier: IdentifierNode
+
+  arguments: Listing<ExpressionNode>?
+
+  function renderArguments(currentIndent: String) =
+    if (arguments == null) ""
+    else
+      let (args = arguments.toList().map((arg) -> arg.render(currentIndent)))
+      parenthesize(args.join(", "))
+
+  function render(currentIndent: String) = "\(identifier.render(currentIndent))\(renderArguments(currentIndent))"
+}
+
+/// Can either be an identifier or a function call.
+///
+/// ```
+/// foo.bar
+/// foo.bar()
+/// ```
+class QualifiedMemberAccessExpressionNode extends ExpressionNode {
+  lhs: ExpressionNode
+
+  symbol: "."|"?." = "."
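+
+  // Illustrative sketch: with `lhs` rendering as `foo` and `rhs` naming `bar`, for example
+  //
+  //   new QualifiedMemberAccessExpressionNode {
+  //     lhs = new MemberAccessExpressionNode { identifier { value = "foo" } }
+  //     rhs { identifier { value = "bar" } }
+  //   }
+  //
+  // render("") is expected to produce `foo.bar`; setting `symbol` to "?." yields `foo?.bar`.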
+ + rhs: MemberAccessExpressionNode + + local function renderLhs(currentIndent: String) = + if (lhs is CompoundExpressionNode) + parenthesize(lhs.render(currentIndent)) + else lhs.render(currentIndent) + + function render(currentIndent: String) = "\(renderLhs(currentIndent))\(symbol)\(rhs.render(currentIndent))" +} + +class SuperAccessExpressionNode extends ExpressionNode { + rhs: MemberAccessExpressionNode + + function render(currentIndent) = "super.\(rhs.render(currentIndent))" +} + +class SuperSubscriptExpressionNode extends ExpressionNode { + subscript: ExpressionNode + + function render(currentIndent) = "super[\(subscript.render(currentIndent))]" +} + +class SubscriptExpressionNode extends ExpressionNode { + lhs: ExpressionNode + + subscript: ExpressionNode + + local function renderLhs(currentIndent: String) = + if (lhs is CompoundExpressionNode) + parenthesize(lhs.render(currentIndent)) + else lhs.render(currentIndent) + + function render(currentIndent: String): String = "\(renderLhs(currentIndent))[\(subscript.render(currentIndent))]" +} + +/// Node representing a literal value; e.g. `true` and `"hello"`. +class LiteralValueExpressionNode extends ExpressionNode { + value: String|Boolean|Number|Null + + function render(_) = pcfRenderer.renderValue(value) +} + +/// Node representing `new { ... }` or `new Type { ... }`. +class ObjectExpressionNode extends ExpressionNode { + body: ObjectBodyNode + + type: TypeNode? + + function render(currentIndent: String) = List( + "new", + type?.render(currentIndent), + body.render(currentIndent) + ).filterNonNull().join(" ") +} + +/// `if (...) ... else ...` +/// +/// To express `else if`, [elseBranch] should also be an [IfElseExpressionNode]. +class IfElseExpressionNode extends ExpressionNode { + condition: ExpressionNode + + ifBranch: ExpressionNode + + elseBranch: ExpressionNode + + function renderElseBranch(currentIndent: String) = + if (elseBranch is IfElseExpressionNode) + "\(currentIndent)else \(elseBranch.render(currentIndent))" + else + """ + \(currentIndent)else + \(currentIndent + indent)\(elseBranch.render(currentIndent + indent)) + """ + + function render(currentIndent: String) = """ + if \(parenthesize(condition.render(currentIndent))) + \(currentIndent + indent)\(ifBranch.render(currentIndent)) + \(renderElseBranch(currentIndent)) + """ +} + +class ImportExpressionNode extends ExpressionNode { + value: String + + keyword: "import"|"import*" = "import" + + fixed stringLiteralNode: LiteralValueExpressionNode = new { value = outer.value } + + function render(currentIndent: String) = "\(keyword)(\(stringLiteralNode.render(currentIndent)))" +} + +class ReadExpressionNode extends ExpressionNode { + value: ExpressionNode + + keyword: "read"|"read*"|"read?" = "read" + + function render(currentIndent: String) = "\(keyword)(\(value.render(currentIndent)))" +} + +class TraceExpressionNode extends ExpressionNode { + value: ExpressionNode + + function render(currentIndent: String) = "trace(\(value.render(currentIndent)))" +} + +/// Provides interpolation and multi-line strings that are not available using [LiteralValueExpressionNode]. 
+class StringExpressionNode extends ExpressionNode { + isMultiLine: Boolean = false + + stringParts: Listing + + function renderStringContents(currentIndent: String) = new Listing { + for (part in stringParts) { + when (part is ExpressionNode) { + #"\("# + part.render(currentIndent) + ")" + } else { + if (isMultiLine) part.replaceAll("\n", "\n\(currentIndent)") + else part.replaceAll("\n", "\\n") + } + } + }.join("") + + function render(currentIndent: String) = + if (!isMultiLine) #""\#(renderStringContents(currentIndent))""# + else + let (newIndent = currentIndent + indent) + new Listing { + "\"\"\"" + newIndent + renderStringContents(newIndent) + "\(newIndent)\"\"\"" + }.join("\n") +} diff --git a/packages/pkl.experimental.syntax/IdentifierNode.pkl b/packages/pkl.experimental.syntax/IdentifierNode.pkl new file mode 100644 index 0000000..5878001 --- /dev/null +++ b/packages/pkl.experimental.syntax/IdentifierNode.pkl @@ -0,0 +1,80 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// A token representing a name. +/// +/// Identifiers get surrounded by backticks (`) if they conflict with a keyword, or if +/// they don't match the normal identifier pattern. +@Unlisted +module pkl.experimental.syntax.IdentifierNode + +extends "Node.pkl" + +/// The raw value of the identifier. +/// +/// It's not possible to encode a backtick into an identifier. +value: String(!contains("`")) + +local keywords: Set = new Listing { + "abstract" + "amends" + "as" + "case" + "class" + "const" + "delete" + "else" + "extends" + "external" + "false" + "final" + "fixed" + "for" + "function" + "hidden" + "if" + "import" + "in" + "is" + "let" + "local" + "module" + "new" + "nothing" + "null" + "open" + "out" + "outer" + "override" + "protected" + "read" + "record" + "super" + "switch" + "this" + "throw" + "trace" + "true" + "typealias" + "unknown" + "vararg" + "when" +}.toSet() + +function render(_) = + if (value.matches(Regex(#"[A-Za-z$_][A-Za-z\d$_]*"#)) && !keywords.contains(value)) + value + else + "`" + value + "`" diff --git a/packages/pkl.experimental.syntax/ModuleNode.pkl b/packages/pkl.experimental.syntax/ModuleNode.pkl new file mode 100644 index 0000000..333a293 --- /dev/null +++ b/packages/pkl.experimental.syntax/ModuleNode.pkl @@ -0,0 +1,125 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Node representing a Pkl module. +@Unlisted +module pkl.experimental.syntax.ModuleNode + +extends "ClassOrModuleNode.pkl" + +import "AnnotationNode.pkl" +import "DocCommentNode.pkl" +import "Node.pkl" +import "ClassNode.pkl" +import "QualifiedIdentifierNode.pkl" +import "TypeAliasNode.pkl" + +/// The top-level section of a module +/// +/// E.g. `module com.package.MyModule` +declaration: ModuleDeclarationNode? + +/// The list of imports for a module. +imports: Listing? + +/// The classes as defined in the module. +classes: Listing? + +/// The typealiases as defined in the module. +typealiases: Listing? + +class ModuleDeclarationNode extends Node { + /// Content for the first line of the module following "#!" + /// Enables modules to be evaluated as executable scripts + /// + /// Example: "/usr/bin/env pkl eval" + shebang: String? + + /// Annotations for the module itself, for instance, `@ModuleInfo` + annotations: Listing? + + docComment: DocCommentNode? + + moduleHeader: ModuleHeaderNode? + + function render(currentIndent: String) = + List( + shebang.ifNonNull((it) -> "#!\(it)"), + docComment?.render(currentIndent), + annotations?.toList()?.map((a) -> a.render(currentIndent))?.join("\n"), + moduleHeader?.render(currentIndent) + ).filterNonNull().join("\n") +} + +local const renderer = new PcfRenderer {} + +class ModuleHeaderNode extends Node { + modifiers: Listing<"abstract"|"open">(isDistinct, name != null)? + + name: QualifiedIdentifierNode? + + moduleExtendsOrAmendsClause: ModuleExtendsOrAmendsClauseNode? + + function render(header) = + let (moduleHeader = new Listing { + when (modifiers != null) { + modifiers.join(" ") + " " + } + when (name != null) { + "module " + name.render(header) + } + }.join("")) + List( + if (moduleHeader.isEmpty) null else moduleHeader, + moduleExtendsOrAmendsClause?.render() + ).filterNonNull().join("\n\n") +} + +class ModuleExtendsOrAmendsClauseNode extends Node { + type: "extends"|"amends" = Undefined() + extendedModule: String + function render() = + """ + \(type) \(renderer.renderValue(extendedModule)) + """ +} + +class ImportNode extends Node { + value: String + alias: String? + keyword: "import"|"import*" = "import" + function render() = new Listing { + keyword + " " + renderer.renderValue(value) + when (alias != null) { + " as " + alias + } + }.join("") +} + +function renderImports() = + (imports?.toList() ?? 
List()).map((it) -> it.render()).join("\n") + +function render(currentIndent: String) = List( + declaration?.render(currentIndent), + renderImports(), + super.render(currentIndent), + classes?.toList()?.map((c) -> c.render(currentIndent))?.join("\n\n"), + typealiases?.toList()?.map((t) -> t.render(currentIndent))?.join("\n\n") +) + .filter((line) -> line != null && !line.isEmpty) + .join("\n\n") + "\n" diff --git a/packages/pkl.experimental.syntax/Node.pkl b/packages/pkl.experimental.syntax/Node.pkl new file mode 100644 index 0000000..caa454e --- /dev/null +++ b/packages/pkl.experimental.syntax/Node.pkl @@ -0,0 +1,37 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// An abstraction for a Pkl code snippet. +/// +/// Nodes are used for building syntax trees, which can be used to render Pkl source code. +@ModuleInfo { minPklVersion = "0.25.0" } +@Unlisted +abstract module pkl.experimental.syntax.Node + +hidden pcfRenderer: ValueRenderer = new PcfRenderer { useCustomStringDelimiters = true } + +/// The indentation when rendering. +hidden indent: String = " " + +/// The max column width to use when rendering. +hidden maxColumnWidth: Int = 100 + +abstract function render(currentIndent: String): String + +function parenthesize(str: String) = "(" + str + ")" + +output { + text = render("") +} diff --git a/packages/pkl.experimental.syntax/ObjectBodyNode.pkl b/packages/pkl.experimental.syntax/ObjectBodyNode.pkl new file mode 100644 index 0000000..89fb7bd --- /dev/null +++ b/packages/pkl.experimental.syntax/ObjectBodyNode.pkl @@ -0,0 +1,132 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Nodes with curly braces, e.g. { foo = "bar" }. +/// +/// Objects can have properties, entries, and elements. 
+/// ``` +/// myObj = new { +/// // property +/// foo = "bar" +/// +/// // entry +/// ["baz"] = "biz" +/// +/// // element +/// 5 +/// } +/// ``` +/// +/// To do: For generators, when generators, object spread, member predicate +@Unlisted +module pkl.experimental.syntax.ObjectBodyNode + +extends "Node.pkl" + +import "IdentifierNode.pkl" +import "Node.pkl" +import "ObjectBodyNode.pkl" +import "ExpressionNode.pkl" +import "DocCommentNode.pkl" +import "ParameterNode.pkl" + +members: Listing + +/// Renders all members inline instead of on newlines +renderInline: Boolean = false + +typealias MemberNode = PropertyMemberNode|ElementMemberNode|EntryMemberNode|ForGeneratorNode|WhenGeneratorNode + +class PropertyMemberNode extends Node { + docComment: DocCommentNode? + + modifiers: Listing<"local">(isDistinct)? + + propertyName: IdentifierNode + + body: ObjectBodyNode? + + // The right hand side of a property cannot be both an assignment and an object body (e.g. `foo = { ... }` is illegal) + assignment: ExpressionNode(body == null)? + + local function renderRhs(currentIndent: String) = if (body != null) body.render(currentIndent) else "= " + assignment.render(currentIndent) + + local function renderPropertyLine(currentIndent: String) = new Listing { + ...?modifiers + propertyName.render(currentIndent) + renderRhs(currentIndent) + }.join(" ") + + function render(currentIndent: String) = List( + docComment?.render(currentIndent), + renderPropertyLine(currentIndent) + ) + .filterNonNull() + .join("\n") +} + +class ElementMemberNode extends Node { + value: ExpressionNode + + function render(currentIndent: String) = value.render(currentIndent) +} + +class EntryMemberNode extends Node { + keyValue: ExpressionNode + + body: ObjectBodyNode? + + // The right hand side of an entry cannot be both an assignment and an object body (e.g. `foo = { ... }` is illegal) + propertyValue: ExpressionNode(body == null)? + + local function renderRhs(currentIndent: String) = if (body != null) body.render(currentIndent) else "= " + propertyValue.render(currentIndent) + + function render(currentIndent: String) = "[\(keyValue.render(currentIndent))] \(renderRhs(currentIndent))" +} + +class ForGeneratorNode extends Node { + keyParameter: ParameterNode? + + valueParameter: ParameterNode + + collection: ExpressionNode + + body: ObjectBodyNode? + + function doRenderLoopControl(currentIndent: String): String = List( + keyParameter?.render(currentIndent)?.ifNonNull((it) -> it + ","), + valueParameter.render(currentIndent), + "in", + collection.render(currentIndent) + ).filterNonNull().join(" ") + + function render(currentIndent: String): String = "for (\(doRenderLoopControl(currentIndent))) \(body.render(currentIndent))" +} + +class WhenGeneratorNode extends Node { + condition: ExpressionNode + + body: ObjectBodyNode? 
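+
+  // Illustrative sketch (names made up): a condition rendering as `isProd` and a body containing
+  // a single property member are expected to render roughly as
+  //
+  //   when (isProd) {
+  //     replicas = 3
+  //   }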
+ + function render(currentIndent: String): String = "when (\(condition.render(currentIndent))) \(body.render(currentIndent))" +} + +local function doRenderInline(currentIndent: String) = "{ " + members.toList().map((m) -> m.render(currentIndent)).join("; ") + " }" + +local function doRenderMultiline(currentIndent: String) = + let (innerIndent = currentIndent + indent) + "{\n" + members.toList().map((m) -> innerIndent + m.render(innerIndent)).join("\n") + "\n\(currentIndent)}" + +function render(currentIndent: String) = if (renderInline) doRenderInline(currentIndent) else doRenderMultiline(currentIndent) diff --git a/packages/pkl.experimental.syntax/ParameterNode.pkl b/packages/pkl.experimental.syntax/ParameterNode.pkl new file mode 100644 index 0000000..2513c2a --- /dev/null +++ b/packages/pkl.experimental.syntax/ParameterNode.pkl @@ -0,0 +1,33 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// Node representing a method or function argument declaration. +@Unlisted +module pkl.experimental.syntax.ParameterNode + +extends "Node.pkl" + +import "TypeAnnotationNode.pkl" +import "IdentifierNode.pkl" + +/// The name of the parameter. May be `_` for an unbound parameter. +name: IdentifierNode + +/// The type of the parameter. +typeAnnotation: TypeAnnotationNode? + +function render(currentIndent: String) = List(name.render(currentIndent), typeAnnotation?.render(currentIndent)) + .filterNonNull() + .join("") diff --git a/packages/pkl.experimental.syntax/PklProject b/packages/pkl.experimental.syntax/PklProject new file mode 100644 index 0000000..de51cff --- /dev/null +++ b/packages/pkl.experimental.syntax/PklProject @@ -0,0 +1,22 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +/// Experimental Pkl syntax library +amends "../basePklProject.pkl" + +package { + version = "1.0.0" + apiTests = import*("tests/*.pkl").keys.toListing() +} diff --git a/packages/pkl.experimental.syntax/PklProject.deps.json b/packages/pkl.experimental.syntax/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/pkl.experimental.syntax/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/pkl.experimental.syntax/QualifiedIdentifierNode.pkl b/packages/pkl.experimental.syntax/QualifiedIdentifierNode.pkl new file mode 100644 index 0000000..2cb197d --- /dev/null +++ b/packages/pkl.experimental.syntax/QualifiedIdentifierNode.pkl @@ -0,0 +1,26 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// One or more identifiers, separated by dots, e.g. `foo.bar.baz` +@Unlisted +module pkl.experimental.syntax.QualifiedIdentifierNode + +extends "Node.pkl" + +import "IdentifierNode.pkl" + +parts: Listing(!isEmpty) + +function render(currentIndent: String) = parts.toList().map((p) -> p.render(currentIndent)).join(".") diff --git a/packages/pkl.experimental.syntax/TypeAliasNode.pkl b/packages/pkl.experimental.syntax/TypeAliasNode.pkl new file mode 100644 index 0000000..3410393 --- /dev/null +++ b/packages/pkl.experimental.syntax/TypeAliasNode.pkl @@ -0,0 +1,47 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.TypeAliasNode + +extends "Node.pkl" + +import "IdentifierNode.pkl" +import "TypeNode.pkl" +import "DocCommentNode.pkl" +import "AnnotationNode.pkl" + +docComment: DocCommentNode? + +name: IdentifierNode + +type: TypeNode + +annotations: Listing? + +modifiers: Listing<"external"|"local">(isDistinct)? 
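+
+// Illustrative sketch (the alias name and constraint are made up): a TypeAliasNode with
+// `modifiers { "local" }`, name `Port`, and a constrained `Int` type is expected to render as
+//
+//   local typealias Port = Int(isBetween(0, 65535))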
+ +local function renderAlias(currentIndent: String) = new Listing { + ...?modifiers + "typealias" + name.render(currentIndent) + "=" + type.render(currentIndent) +}.join(" ") + +function render(currentIndent: String) = List( + docComment?.render(currentIndent), + annotations?.toList()?.map((an) -> an.render(currentIndent))?.join("\n"), + currentIndent + renderAlias(currentIndent) +).filterNonNull().join("\n") diff --git a/packages/pkl.experimental.syntax/TypeAnnotationNode.pkl b/packages/pkl.experimental.syntax/TypeAnnotationNode.pkl new file mode 100644 index 0000000..d7da91a --- /dev/null +++ b/packages/pkl.experimental.syntax/TypeAnnotationNode.pkl @@ -0,0 +1,24 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.TypeAnnotationNode + +extends "Node.pkl" + +import "TypeNode.pkl" + +type: TypeNode + +function render(currentIndent: String) = ": \(type.render(currentIndent))" diff --git a/packages/pkl.experimental.syntax/TypeNode.pkl b/packages/pkl.experimental.syntax/TypeNode.pkl new file mode 100644 index 0000000..c626d6a --- /dev/null +++ b/packages/pkl.experimental.syntax/TypeNode.pkl @@ -0,0 +1,84 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +abstract module pkl.experimental.syntax.TypeNode + +extends "Node.pkl" + +import "QualifiedIdentifierNode.pkl" +import "ExpressionNode.pkl" +import "TypeNode.pkl" + +class NullableTypeNode extends TypeNode { + typeNode: TypeNode + function render(currentIndent: String) = + let (underlyingRendered = if (typeNode is UnionTypeNode) parenthesize(typeNode.render(currentIndent)) else typeNode.render(currentIndent)) + underlyingRendered + "?" 
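+
+  // For example, wrapping a DeclaredTypeNode for `String` renders as `String?`, while wrapping a
+  // UnionTypeNode for `"a"|"b"` renders as `("a"|"b")?`, since the union is parenthesized first.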
+} + +class BuiltInTypeNode extends TypeNode { + type: "unknown"|"nothing"|"module" = Undefined() + function render(_) = type +} + +class StringLiteralTypeNode extends TypeNode { + value: String + + function render(_) = pcfRenderer.renderValue(value) +} + +class DeclaredTypeNode extends TypeNode { + name: QualifiedIdentifierNode + + typeArguments: Listing + + local function renderTypeArguments(currentIndent: String) = + if (typeArguments.isEmpty) "" + else "<" + typeArguments.toList().map((t) -> t.render(currentIndent)).join(", ") + ">" + + function render(currentIndent: String) = name.render(currentIndent) + renderTypeArguments(currentIndent) +} + +class ConstrainedTypeNode extends TypeNode { + /// The underlying type + typeNode: TypeNode + + constraints: Listing + + local function renderConstraints(currentIndent: String) = + "(" + constraints.toList().map((c) -> c.render(currentIndent)).join(", ") + ")" + + function render(currentIndent: String) = + let (renderedUnderlyingType = if (typeNode is UnionTypeNode) parenthesize(typeNode.render(currentIndent)) else typeNode.render(currentIndent)) + renderedUnderlyingType + renderConstraints(currentIndent) +} + +class UnionTypeNode extends TypeNode { + members: Listing + + function render(currentIndent: String) = + let (childrenRendered = members.toList().map((t) -> t.render(currentIndent))) + // Multiline if length exceeds `maxColumnWidth` chars. + // If multiline, indent one level deeper. + if (childrenRendered.fold(0, (acc, elem) -> acc + elem.length) > maxColumnWidth) + "\n\(currentIndent + indent)" + childrenRendered.join("\n\(currentIndent + indent)|") + else + childrenRendered.join("|") +} + +// TODO fill me in +class FunctionTypeNode extends TypeNode { + +} diff --git a/packages/pkl.experimental.syntax/operators.pkl b/packages/pkl.experimental.syntax/operators.pkl new file mode 100644 index 0000000..4fd2534 --- /dev/null +++ b/packages/pkl.experimental.syntax/operators.pkl @@ -0,0 +1,139 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.operators + +typealias Multiply = "*" + +typealias Divide = "/" + +typealias Plus = "+" + +typealias BinaryMinus = "-" + +typealias Modulo = "%" + +typealias GreaterThan = ">" + +typealias LessThan = "<" + +typealias GreaterThanOrEquals = ">=" + +typealias LessThanOrEquals = "<=" + +typealias IntegerDivide = "~/" + +typealias Equals = "==" + +typealias NotEquals = "!=" + +typealias NullishCoalesce = "??" + +typealias Pipe = "|>" + +typealias BitwiseAnd = "&" + +typealias BitwiseOr = "|" + +typealias Exponent = "**" + +typealias Is = "is" + +typealias As = "as" + +typealias And = "&&" + +typealias Or = "||" + +typealias Not = "!" + +typealias UnaryMinus = "-" + +typealias NonNullAssertion = "!!" 
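+
+// Illustrative note: downstream nodes are expected to reference the constants defined below
+// rather than raw strings, e.g. `operator = operators.PLUS` on a BinaryOperatorExpressionNode,
+// which renders as `+`.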
+ +typealias BinaryOperator = + Multiply + |Divide + |Plus + |BinaryMinus + |Modulo + |GreaterThan + |GreaterThanOrEquals + |LessThan + |LessThanOrEquals + |IntegerDivide + |Equals + |NotEquals + |NullishCoalesce + |Pipe + |BitwiseAnd + |BitwiseOr + |Exponent + |Is + |As + |And + |Or + +typealias PrefixOperator = Not|UnaryMinus + +typealias PostfixOperator = NonNullAssertion + +MULTIPLY: Multiply + +DIVIDE: Divide + +PLUS: Plus + +BINARY_MINUS: BinaryMinus + +MODULO: Modulo + +GREATER_THAN: GreaterThan + +LESS_THAN: LessThan + +GREATER_THAN_OR_EQUALS: GreaterThanOrEquals + +LESS_THAN_OR_EQUALS: LessThanOrEquals + +INTEGER_DIVIDE: IntegerDivide + +EQUALS: Equals + +NOT_EQUALS: NotEquals + +NULLISH_COALESCE: NullishCoalesce + +PIPE: Pipe + +BITWISE_AND: BitwiseAnd + +BITWISE_OR: BitwiseOr + +EXPONENT: Exponent + +IS: Is + +AS: As + +AND: And + +OR: Or + +NOT: Not + +NON_NULL_ASSERTION: NonNullAssertion + +UNARY_MINUS: UnaryMinus diff --git a/packages/pkl.experimental.syntax/tests/ClassNode.pkl b/packages/pkl.experimental.syntax/tests/ClassNode.pkl new file mode 100644 index 0000000..a0fdaa5 --- /dev/null +++ b/packages/pkl.experimental.syntax/tests/ClassNode.pkl @@ -0,0 +1,48 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.tests.ClassNode + +amends "pkl:test" + +import "../ClassNode.pkl" + +facts { + ["extending"] { + new ClassNode { + classHeader { + name { + value = "MyClass" + } + extendsClause { + type { + name { + parts { + new { + value = "Base" + } + } + } + } + } + } + + }.render("") == """ + class MyClass extends Base { + + } + """ + } +} diff --git a/packages/pkl.experimental.syntax/tests/ClassOrModuleNode.pkl b/packages/pkl.experimental.syntax/tests/ClassOrModuleNode.pkl new file mode 100644 index 0000000..45c0d24 --- /dev/null +++ b/packages/pkl.experimental.syntax/tests/ClassOrModuleNode.pkl @@ -0,0 +1,84 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.tests.ClassOrModuleNode + +amends "pkl:test" + +import "../ClassOrModuleNode.pkl" +import "../TypeNode.pkl" + +facts { + ["property definitions"] { + new ClassOrModuleNode.PropertyDefinitionNode { + name { + value = "myProperty" + } + typeAnnotation { + type = new TypeNode.StringLiteralTypeNode { + value = "my string" + } + } + }.render("") == """ + myProperty: "my string" + """ + } + ["property definitions - annotations"] { + local prop = new ClassOrModuleNode.PropertyDefinitionNode { + name { + value = "myProperty" + } + typeAnnotation { + type = new TypeNode.DeclaredTypeNode { + name { + parts { + new { value = "MyType" } + } + } + } + } + docComment { + value = """ + This has been deprecated + + because of stuff. + """ + } + annotations { + new { + identifier { + parts { + new { value = "Deprecated" } + } + } + } + } + } + prop.render("") == """ + /// This has been deprecated + /// + /// because of stuff. + @Deprecated + myProperty: MyType + """ + prop.render(" ") == """ + /// This has been deprecated + /// + /// because of stuff. + @Deprecated + myProperty: MyType + """ + } +} diff --git a/packages/pkl.experimental.syntax/tests/ExpressionNode.pkl b/packages/pkl.experimental.syntax/tests/ExpressionNode.pkl new file mode 100644 index 0000000..7f147ce --- /dev/null +++ b/packages/pkl.experimental.syntax/tests/ExpressionNode.pkl @@ -0,0 +1,225 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.tests.ExpressionNode + +amends "pkl:test" + +import "../ExpressionNode.pkl" + +examples { + ["binary operators - basic"] { + new ExpressionNode.BinaryOperatorExpressionNode { + lhs = new ExpressionNode.LiteralValueExpressionNode { + value = "foo" + } + operator = "+" + rhs = new ExpressionNode.LiteralValueExpressionNode { + value = "bar" + } + }.render("") + } + ["binary operators - precedence: no parentheses when child precedence is higher"] { + new ExpressionNode.BinaryOperatorExpressionNode { + operator = "+" + lhs = new ExpressionNode.BinaryOperatorExpressionNode { + lhs = new ExpressionNode.LiteralValueExpressionNode { + value = 5 + } + rhs = new ExpressionNode.LiteralValueExpressionNode { + value = 5 + } + operator = "*" + } + rhs = new ExpressionNode.BinaryOperatorExpressionNode { + lhs = new ExpressionNode.LiteralValueExpressionNode { + value = 5 + } + rhs = new ExpressionNode.LiteralValueExpressionNode { + value = 5 + } + operator = "*" + } + }.render("") + } + ["binary operators - precedence: parenthese added when child precedence is lower"] { + new ExpressionNode.BinaryOperatorExpressionNode { + operator = "*" + lhs = new ExpressionNode.BinaryOperatorExpressionNode { + lhs = new ExpressionNode.LiteralValueExpressionNode { + value = 5 + } + rhs = new ExpressionNode.LiteralValueExpressionNode { + value = 5 + } + operator = "+" + } + rhs = new ExpressionNode.BinaryOperatorExpressionNode { + lhs = new ExpressionNode.LiteralValueExpressionNode { + value = 5 + } + rhs = new ExpressionNode.LiteralValueExpressionNode { + value = 5 + } + operator = "+" + } + }.render("") + } + ["unary operators"] { + new ExpressionNode.PrefixOperatorExpressionNode { + operator = "!" 
+ expression = new ExpressionNode.LiteralValueExpressionNode { value = false } + }.render("") + } + ["unary operators - parenthesizes binary expressions"] { + new ExpressionNode.PrefixOperatorExpressionNode { + operator = "-" + expression = new ExpressionNode.BinaryOperatorExpressionNode { + operator = "+" + lhs = new ExpressionNode.LiteralValueExpressionNode { + value = 5 + } + rhs = new ExpressionNode.LiteralValueExpressionNode { + value = 10 + } + } + }.render("") + } + ["member access - single identifier"] { + new ExpressionNode.MemberAccessExpressionNode { + identifier { + value = "foo" + } + }.render("") + } + ["member access - method call"] { + new ExpressionNode.MemberAccessExpressionNode { + identifier { + value = "foo" + } + arguments { + new ExpressionNode.MemberAccessExpressionNode { + identifier { + value = "bar" + } + } + new ExpressionNode.MemberAccessExpressionNode { + identifier { + value = "biz" + } + } + } + }.render("") + } + ["if/else"] { + new ExpressionNode.IfElseExpressionNode { + condition = new ExpressionNode.BinaryOperatorExpressionNode { + lhs = new ExpressionNode.MemberAccessExpressionNode { + identifier { + value = "foo" + } + } + operator = "==" + rhs = new ExpressionNode.LiteralValueExpressionNode { + value = "foo" + } + } + ifBranch = new ExpressionNode.LiteralValueExpressionNode { value = "foo" } + elseBranch = new ExpressionNode.LiteralValueExpressionNode { value = "bar" } + }.render("") + + new ExpressionNode.IfElseExpressionNode { + condition = new ExpressionNode.BinaryOperatorExpressionNode { + lhs = new ExpressionNode.MemberAccessExpressionNode { + identifier { + value = "foo" + } + } + operator = "==" + rhs = new ExpressionNode.LiteralValueExpressionNode { + value = "foo" + } + } + ifBranch = new ExpressionNode.LiteralValueExpressionNode { value = "foo" } + elseBranch = new ExpressionNode.LiteralValueExpressionNode { value = "bar" } + }.render(" ") + } + ["super access"] { + new ExpressionNode.SuperAccessExpressionNode { + rhs { + identifier { value = "someProperty" } + } + }.render("") + new ExpressionNode.SuperAccessExpressionNode { + rhs { + identifier { value = "someProperty" } + arguments { + new ExpressionNode.LiteralValueExpressionNode { value = true } + } + } + }.render("") + } + ["super subscript"] { + new ExpressionNode.SuperSubscriptExpressionNode { + subscript = new ExpressionNode.LiteralValueExpressionNode { value = "test" } + }.render("") + } + ["subscript"] { + new ExpressionNode.SubscriptExpressionNode { + lhs = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "test" } + } + subscript = new ExpressionNode.LiteralValueExpressionNode { value = "key" } + }.render("") + new ExpressionNode.SubscriptExpressionNode { + lhs = new ExpressionNode.IfElseExpressionNode { + condition = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "test" } + } + ifBranch = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "testTrue" } + } + elseBranch = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "testFalse" } + } + } + subscript = new ExpressionNode.BinaryOperatorExpressionNode { + operator = "+" + lhs = new ExpressionNode.LiteralValueExpressionNode { value = "test" } + rhs = new ExpressionNode.LiteralValueExpressionNode { value = "Key" } + } + }.render("") + } + ["read"] { + new ExpressionNode.ReadExpressionNode { + value = new ExpressionNode.LiteralValueExpressionNode { value = "env:HOME" } + }.render("") + new ExpressionNode.ReadExpressionNode { + keyword = "read*" + 
value = new ExpressionNode.LiteralValueExpressionNode { value = "env:HOME" } + }.render("") + new ExpressionNode.ReadExpressionNode { + keyword = "read?" + value = new ExpressionNode.LiteralValueExpressionNode { value = "env:HOME" } + }.render("") + } + ["trace"] { + new ExpressionNode.TraceExpressionNode { + value = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "test" } + } + }.render("") + } +} diff --git a/packages/pkl.experimental.syntax/tests/ExpressionNode.pkl-expected.pcf b/packages/pkl.experimental.syntax/tests/ExpressionNode.pkl-expected.pcf new file mode 100644 index 0000000..c090987 --- /dev/null +++ b/packages/pkl.experimental.syntax/tests/ExpressionNode.pkl-expected.pcf @@ -0,0 +1,61 @@ +examples { + ["binary operators - basic"] { + "\"foo\" + \"bar\"" + } + ["binary operators - precedence: no parentheses when child precedence is higher"] { + "5 * 5 + 5 * 5" + } + ["binary operators - precedence: parenthese added when child precedence is lower"] { + "(5 + 5) * (5 + 5)" + } + ["unary operators"] { + "!false" + } + ["unary operators - parenthesizes binary expressions"] { + "-(5 + 10)" + } + ["member access - single identifier"] { + "foo" + } + ["member access - method call"] { + "foo(bar, biz)" + } + ["if/else"] { + """ + if (foo == "foo") + "foo" + else + "bar" + """ + """ + if (foo == "foo") + "foo" + else + "bar" + """ + } + ["super access"] { + "super.someProperty" + "super.someProperty(true)" + } + ["super subscript"] { + "super[\"test\"]" + } + ["subscript"] { + "test[\"key\"]" + """ + (if (test) + testTrue + else + testFalse)["test" + "Key"] + """ + } + ["read"] { + "read(\"env:HOME\")" + "read*(\"env:HOME\")" + "read?(\"env:HOME\")" + } + ["trace"] { + "trace(test)" + } +} diff --git a/packages/pkl.experimental.syntax/tests/ModuleNode.pkl b/packages/pkl.experimental.syntax/tests/ModuleNode.pkl new file mode 100644 index 0000000..de09f3d --- /dev/null +++ b/packages/pkl.experimental.syntax/tests/ModuleNode.pkl @@ -0,0 +1,170 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.tests.ModuleNode + +amends "pkl:test" + +import "../ModuleNode.pkl" +import "../TypeNode.pkl" +import "../ExpressionNode.pkl" +import "../ObjectBodyNode.pkl" +import "../operators.pkl" + +facts { + ["new line at the end"] { + new ModuleNode { + declaration { + moduleHeader { + name { + parts { + new { value = "modulename" } + } + } + } + } + }.output.text == """ + module modulename + + """ + } +} + +examples { + ["full declaration"] { + new ModuleNode { + declaration { + shebang = "/usr/bin/env pkl eval" + annotations { + new { + identifier { parts { new { value = "test" } } } + body { + renderInline = true + members { + new ObjectBodyNode.PropertyMemberNode { + propertyName { value = "annotation" } + assignment = new ExpressionNode.LiteralValueExpressionNode { value = true } + } + } + } + } + } + docComment { + value = """ + This is a module doc comment! + """ + } + moduleHeader { + modifiers { "open" } + name { + parts { + new { value = "pkl" } + new { value = "experimental" } + new { value = "syntax" } + new { value = "tests" } + new { value = "ModuleNode" } + } + } + moduleExtendsOrAmendsClause { + type = "amends" + extendedModule = "pkl:test" + } + } + } + }.output.text + } + ["short methods are rendered inline"] { + new ModuleNode { + methods { + new { + docComment { value = "Adds five to its argument." } + modifiers { "const"; "local" } + name { value = "addFive" } + parameters { + new { + name { value = "input" } + typeAnnotation { + type = new TypeNode.DeclaredTypeNode { name { parts { new { value = "Int" } } } } + } + } + } + body = new ExpressionNode.BinaryOperatorExpressionNode { + lhs = new ExpressionNode.MemberAccessExpressionNode { identifier { value = "input" } } + operator = operators.PLUS + rhs = new ExpressionNode.LiteralValueExpressionNode { value = 5 } + } + } + } + }.output.text + } + ["methods with long values are rendered indented"] { + new ModuleNode { + methods { + new { + docComment { value = "Adds five to its argument." } + modifiers { "const"; "local" } + name { value = "myReallyLongMethodForAddingFiveToAnInteger" } + parameters { + new { + name { value = "inputIntegerValue" } + typeAnnotation { + type = new TypeNode.DeclaredTypeNode { name { parts { new { value = "Int" } } } } + } + } + } + body = new ExpressionNode.BinaryOperatorExpressionNode { + lhs = new ExpressionNode.MemberAccessExpressionNode { identifier { value = "inputIntegerValue" } } + operator = operators.PLUS + rhs = new ExpressionNode.LiteralValueExpressionNode { value = 5 } + } + } + } + }.output.text + } + ["methods with long values but short first lines are rendered inline"] { + new ModuleNode { + methods { + new { + docComment { value = "Adds five to its argument and returns a dynamic object containing the result." 
} + modifiers { "const"; "local" } + name { value = "myLongMethodForReturningADynamicObject" } + parameters { + new { + name { value = "inputIntegerValue" } + typeAnnotation { + type = new TypeNode.DeclaredTypeNode { name { parts { new { value = "Int" } } } } + } + } + } + body = new ExpressionNode.ObjectExpressionNode { + type = new TypeNode.DeclaredTypeNode { name { parts { new { value = "Dynamic" } } } } + body { + members { + new ObjectBodyNode.PropertyMemberNode { + propertyName { value = "value" } + assignment = new ExpressionNode.BinaryOperatorExpressionNode { + lhs = new ExpressionNode.MemberAccessExpressionNode { identifier { value = "inputIntegerValue" } } + operator = operators.PLUS + rhs = new ExpressionNode.LiteralValueExpressionNode { value = 5 } + } + } + } + } + } + } + } + }.output.text + } +} diff --git a/packages/pkl.experimental.syntax/tests/ModuleNode.pkl-expected.pcf b/packages/pkl.experimental.syntax/tests/ModuleNode.pkl-expected.pcf new file mode 100644 index 0000000..16810f6 --- /dev/null +++ b/packages/pkl.experimental.syntax/tests/ModuleNode.pkl-expected.pcf @@ -0,0 +1,37 @@ +examples { + ["full declaration"] { + """ + #!/usr/bin/env pkl eval + /// This is a module doc comment! + @test { annotation = true } + open module pkl.experimental.syntax.tests.ModuleNode + + amends "pkl:test" + + """ + } + ["short methods are rendered inline"] { + """ + /// Adds five to its argument. + const local function addFive(input: Int) = input + 5 + + """ + } + ["methods with long values are rendered indented"] { + """ + /// Adds five to its argument. + const local function myReallyLongMethodForAddingFiveToAnInteger(inputIntegerValue: Int) = + inputIntegerValue + 5 + + """ + } + ["methods with long values but short first lines are rendered inline"] { + """ + /// Adds five to its argument and returns a dynamic object containing the result. + const local function myLongMethodForReturningADynamicObject(inputIntegerValue: Int) = new Dynamic { + value = inputIntegerValue + 5 + } + + """ + } +} diff --git a/packages/pkl.experimental.syntax/tests/ObjectBodyNode.pkl b/packages/pkl.experimental.syntax/tests/ObjectBodyNode.pkl new file mode 100644 index 0000000..b978b65 --- /dev/null +++ b/packages/pkl.experimental.syntax/tests/ObjectBodyNode.pkl @@ -0,0 +1,162 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.tests.ObjectBodyNode + +amends "pkl:test" + +import "../ObjectBodyNode.pkl" +import "../ExpressionNode.pkl" +import "../TypeNode.pkl" + +local personDotName = new ExpressionNode.QualifiedMemberAccessExpressionNode { + lhs = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "person" } + } + rhs { + identifier { value = "name" } + } +} + +examples { + ["for generator"] { + // only value w/o type + new ObjectBodyNode { + members { + new ObjectBodyNode.ForGeneratorNode { + valueParameter { + name { value = "person" } + } + collection = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "people" } + } + body { + members { + new ObjectBodyNode.ElementMemberNode { + value = personDotName + } + } + } + } + } + }.render("") + // only value w/ type + new ObjectBodyNode { + members { + new ObjectBodyNode.ForGeneratorNode { + valueParameter { + name { value = "person" } + typeAnnotation { + type = new TypeNode.DeclaredTypeNode { + name { + parts { new { value = "Person" } } + } + } + } + } + collection = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "people" } + } + body { + members { + new ObjectBodyNode.ElementMemberNode { + value = personDotName + } + } + } + } + } + }.render("") + // key and value w/o types + new ObjectBodyNode { + members { + new ObjectBodyNode.ForGeneratorNode { + keyParameter { + name { value = "personIndex" } + } + valueParameter { + name { value = "person" } + } + collection = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "people" } + } + body { + members { + new ObjectBodyNode.ElementMemberNode { + value = personDotName + } + } + } + } + } + }.render("") + // key and value w/ types + new ObjectBodyNode { + members { + new ObjectBodyNode.ForGeneratorNode { + keyParameter { + name { value = "personIndex" } + typeAnnotation { + type = new TypeNode.DeclaredTypeNode { + name { + parts { new { value = "Int" } } + } + } + } + } + valueParameter { + name { value = "person" } + typeAnnotation { + type = new TypeNode.DeclaredTypeNode { + name { + parts { new { value = "Person" } } + } + } + } + } + collection = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "people" } + } + body { + members { + new ObjectBodyNode.ElementMemberNode { + value = personDotName + } + } + } + } + } + }.render("") + } + ["when generator"] { + new ObjectBodyNode { + members { + new ObjectBodyNode.WhenGeneratorNode { + condition = new ExpressionNode.MemberAccessExpressionNode { + identifier { value = "test" } + } + body { + members { + new ObjectBodyNode.PropertyMemberNode { + propertyName { value = "conditionalProperty" } + assignment = new ExpressionNode.LiteralValueExpressionNode { value = true } + } + } + } + } + } + }.render("") + } +} diff --git a/packages/pkl.experimental.syntax/tests/ObjectBodyNode.pkl-expected.pcf b/packages/pkl.experimental.syntax/tests/ObjectBodyNode.pkl-expected.pcf new file mode 100644 index 0000000..5f75915 --- /dev/null +++ b/packages/pkl.experimental.syntax/tests/ObjectBodyNode.pkl-expected.pcf @@ -0,0 +1,41 @@ +examples { + ["for generator"] { + """ + { + for (person in people) { + person.name + } + } + """ + """ + { + for (person: Person in people) { + person.name + } + } + """ + """ + { + for (personIndex, person in people) { + person.name + } + } + """ + """ + { + for (personIndex: Int, person: Person in people) { + person.name + } + } + """ + } + 
["when generator"] { + """ + { + when (test) { + conditionalProperty = true + } + } + """ + } +} diff --git a/packages/pkl.experimental.syntax/tests/TypeNode.pkl b/packages/pkl.experimental.syntax/tests/TypeNode.pkl new file mode 100644 index 0000000..0acc38f --- /dev/null +++ b/packages/pkl.experimental.syntax/tests/TypeNode.pkl @@ -0,0 +1,110 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module pkl.experimental.syntax.tests.TypeNode + +amends "pkl:test" + +import "../TypeNode.pkl" +import "../ExpressionNode.pkl" + +facts { + ["declared type"] { + new TypeNode.DeclaredTypeNode { + name { + parts { + new { value = "String" } + } + } + }.render("") == "String" + new TypeNode.DeclaredTypeNode { + name { + parts { + new { value = "myModule" } + new { value = "Bar" } + } + } + }.render("") == "myModule.Bar" + } + ["declared type - type arguments"] { + new TypeNode.DeclaredTypeNode { + name { + parts { + new { value = "Listing" } + } + } + typeArguments { + new TypeNode.DeclaredTypeNode { + name { + parts { + new { value = "Bar" } + } + } + } + new TypeNode.DeclaredTypeNode { + name { + parts { + new { value = "Baz" } + } + } + } + } + }.render("") == "Listing" + } + ["constrained"] { + new TypeNode.ConstrainedTypeNode { + typeNode = new TypeNode.DeclaredTypeNode { + name { + parts { + new { value = "Bar" } + } + } + } + constraints { + new ExpressionNode.MemberAccessExpressionNode { + identifier { + value = "isLessThan" + } + arguments { + new ExpressionNode.LiteralValueExpressionNode { + value = 5 + } + } + } + } + }.render("") == "Bar(isLessThan(5))" + } + ["string literal"] { + new TypeNode.StringLiteralTypeNode { + value = "my string type" + }.render("") == """ + "my string type" + """ + } + ["union"] { + new TypeNode.UnionTypeNode { + members { + new TypeNode.StringLiteralTypeNode { + value = "one" + } + new TypeNode.StringLiteralTypeNode { + value = "two" + } + } + }.render("") == """ + "one"|"two" + """ + } +} diff --git a/packages/pkl.experimental.uri/PklProject b/packages/pkl.experimental.uri/PklProject new file mode 100644 index 0000000..f6a42de --- /dev/null +++ b/packages/pkl.experimental.uri/PklProject @@ -0,0 +1,20 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +amends "../basePklProject.pkl" + +package { + version = "1.0.0" +} diff --git a/packages/pkl.experimental.uri/PklProject.deps.json b/packages/pkl.experimental.uri/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/pkl.experimental.uri/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/pkl.experimental.uri/URI.pkl b/packages/pkl.experimental.uri/URI.pkl new file mode 100644 index 0000000..8eb40d0 --- /dev/null +++ b/packages/pkl.experimental.uri/URI.pkl @@ -0,0 +1,385 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// A Uniform Resource Identifier as defined by +/// [RFC-3986](https://datatracker.ietf.org/doc/html/rfc3986). +/// +/// [URI]s may be constructed literally, or parsed from a string via [parse]. +@ModuleInfo { minPklVersion = "0.25.0" } +module pkl.experimental.uri.URI + +import "URI.pkl" + +/// The scheme component. +scheme: String? + +/// The user information portion of the authority component. +userInfo: String? + +/// The host portion of the authority component. +host: String? + +/// The port portion of the authority component. +port: UInt16? + +/// The path component, URI-encoded. +/// +/// Access the decoded form of the path via [pathSegments]. +path: UriString = "" + +/// The authority component of the URI. +function authority(): String? = + if (hasAuthority()) + List( + "//", + if (userInfo != null) "\(encode(userInfo))@" else "", + encode(host!!), + if (port != null) ":\(port)" else "" + ) + .join("") + else null + +/// The URI path split into its segments. +/// +/// Each segment is decoded. +fixed pathSegments: List = + let (parts = path.split("/").map((it) -> percentDecode(it))) + if (hasAbsolutePath) parts.drop(1) + else parts + +/// The query component, URI-encoded. +query: UriString? + +/// The fragment component. +fragment: String? + +/// The base path portion of this URI. +/// +/// This equivalent to the current URI without its query and fragment components. +hidden basePath: URI = (module) { + query = null + fragment = null +} + +/// Tells if this URI is an [absolute URI](https://datatracker.ietf.org/doc/html/rfc3986#section-4.3). 
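+///
+/// For example (illustrative facts, derived from [parse] below and this package's tests):
+/// ```
+/// parse("https://www.example.com")!!.isAbsolute == true
+/// parse("//www.example.com/foo")!!.isAbsolute == false
+/// ```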
+hidden isAbsolute: Boolean = scheme != null + +/// Tells if the [path] is absolute. +hidden hasAbsolutePath: Boolean = path?.startsWith("/") ?? false + +/// Tells if this URI has an [authority](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). +function hasAuthority(): Boolean = (userInfo ?? host ?? port) != null + +// +local function _removeDotSegments(input: List, result: List): List = + if (input.isEmpty) result + else + let (currentSegment = input.first) + if (currentSegment == ".") + _removeDotSegments(input.drop(1), result) + else if (currentSegment == "..") + _removeDotSegments(input.drop(1), result.dropLast(1)) + else + _removeDotSegments(input.drop(1), result.add(currentSegment)) + +local function removeDotSegments(input: String): String = + _removeDotSegments(input.split("/"), List()).join("/") + + if (input.endsWith("/")) "/" else "" + +// +local function mergePaths(base: URI, other: URI): String = + if (other.hasAbsolutePath) + other.path + else if (base.hasAuthority() && base.path == "") + "/" + other.path + else + let (basePath = if (base.path.contains("/")) base.path.substring(0, base.path.lastIndexOf("/") + 1) else "/") + removeDotSegments(basePath + other.path) + +/// Resolves [other] as a URI reference to this URI. +/// +/// Follows the rules described in +/// [RFC-3986 Section 5.2](https://www.rfc-editor.org/rfc/rfc3986#section-5.2). +function resolveUri(other: URI): URI = + let (self = this) + new { + when (other.scheme != null) { + scheme = other.scheme + userInfo = other.userInfo + host = other.host + port = other.port + path = other.path + query = other.query + } else { + scheme = self.scheme + when (other.hasAuthority()) { + userInfo = other.userInfo + host = other.host + port = other.port + path = removeDotSegments(other.path) + } else { + userInfo = self.userInfo + host = self.host + port = self.port + when (other.path == "") { + path = self.path + query = other.query ?? self.query + } else { + path = mergePaths(self, other) + query = other.query + } + } + } + fragment = other.fragment + } + +/// Resolves [other] as a URI string to this URI. +function resolve(other: String): URI? = + let (parsed = parse(other)) + if (parsed == null) null + else resolveUri(parsed) + +function toString(): String = new Listing { + when (scheme != null) { "\(encode(scheme)):" } + when (hasAuthority()) { + authority() + } + path + when (query != null) { "?\(query)" } + when (fragment != null) { "#\(encode(fragment))" } +}.join("") + +/// Tells if [str] is a valid URI string. +local isUriString = (str: String) -> + str + .replaceAll(PERCENT_REGEX, "") + .codePoints + .every((it) -> isUriSafe(it)) + +/// A string that has valid URI characters. +typealias UriString = String(isUriString) + +// alphanumeric or !#$&'()*+,-./:;=?@_~ +local function isUriSafe(codePoint: Int) = + isAlphaNumeric(codePoint) + // ! + || codePoint == 33 + // #$ + || codePoint.isBetween(35, 36) + // &'()*+,-./ + || codePoint.isBetween(38, 47) + // :; + || codePoint.isBetween(58, 59) + // = + || codePoint == 61 + // ?@ + || codePoint.isBetween(63, 64) + // _ + || codePoint == 95 + // ~ + || codePoint == 126 + +// alphanumeric or !'()*-._~ +local function isUriComponentSafe(codePoint: Int) = + isAlphaNumeric(codePoint) + // ! + || codePoint == 33 + // '()* + || codePoint.isBetween(39, 42) + // -. 
+  || codePoint.isBetween(45, 46)
+  // _
+  || codePoint == 95
+  // ~
+  || codePoint == 126
+
+local function getUtf8Bytes(codePoint: Int): List<Int> =
+  if (codePoint <= 0x7f)
+    List(codePoint)
+  else if (codePoint <= 0x7ff)
+    List(
+      0xc0.or(codePoint.shr(6)),
+      0x80.or(codePoint.and(0x3f))
+    )
+  else if (codePoint <= 0xffff)
+    List(
+      0xe0.or(codePoint.shr(12)),
+      0x80.or(codePoint.shr(6).and(0x3f)),
+      0x80.or(codePoint.and(0x3f))
+    )
+  else
+    List(
+      0xf0.or(codePoint.shr(18)),
+      0x80.or(codePoint.shr(12).and(0x3f)),
+      0x80.or(codePoint.shr(6).and(0x3f)),
+      0x80.or(codePoint.and(0x3f))
+    )
+
+/// Percent-encodes a code point by encoding it as UTF-8 bytes.
+///
+/// Facts:
+/// ```
+/// percentEncode(" ".codePoints.first) == "%20"
+/// percentEncode("/".codePoints.first) == "%2F"
+/// ```
+local function percentEncode(codePoint: Int) =
+  getUtf8Bytes(codePoint)
+    .map((it) -> "%" + it.toRadixString(16).toUpperCase())
+    .join("")
+
+local hexDigits = "0123456789ABCDEF"
+
+/// Facts:
+/// ```
+/// getBytes("%20") == List(32)
+/// getBytes("%7F") == List(127)
+/// getBytes("%20%7F") == List(32, 127)
+/// ```
+local function getBytes(str: String): List<Int> =
+  str
+    .split("%")
+    .drop(1)
+    .map((it) ->
+      let (msb = hexDigits.indexOf(it[0].toUpperCase()))
+      let (lsb = hexDigits.indexOf(it[1].toUpperCase()))
+      lsb + (msb * 16)
+    )
+
+local function isAlphaNumeric(codePoint: Int) =
+  codePoint.isBetween(48, 57) // 0-9
+  || codePoint.isBetween(65, 90) // A-Z
+  || codePoint.isBetween(97, 122) // a-z
+
+/// Encodes [str] using percent-encoding to make it safe for literal use as a URI.
+///
+/// All characters except for alphanumeric characters, and the characters `!#$&'()*+,-./:;=?@_~`
+/// are percent-encoded.
+///
+/// Follows the rules for the `encodeURI` function as described by
+/// [ECMA-262](https://262.ecma-international.org/5.1/#sec-15.1.3.3).
+///
+/// Facts:
+/// ```
+/// encode("https://example.com/some path") == "https://example.com/some%20path"
+/// ```
+///
+function encode(str: String): String =
+  str.codePoints
+    .map((codePoint) ->
+      if (isUriSafe(codePoint)) codePoint.toChar()
+      else percentEncode(codePoint)
+    )
+    .join("")
+
+/// Encodes [str] using percent-encoding to make it safe for literal use as a URI component.
+///
+/// All characters except for alphanumeric characters, and the characters `-_.!~*'()` are
+/// percent-encoded.
+///
+/// Follows the rules for the `encodeURIComponent` function as described by
+/// [ECMA-262](https://262.ecma-international.org/5.1/#sec-15.1.3.4).
+///
+/// Facts:
+/// ```
+/// encodeComponent("https://example.com/some path") == "https%3A%2F%2Fexample.com%2Fsome%20path"
+/// ```
+function encodeComponent(str: String): String =
+  str.codePoints
+    .map((codePoint) ->
+      if (isUriComponentSafe(codePoint)) codePoint.toChar()
+      else percentEncode(codePoint)
+    )
+    .join("")
+
+local PERCENT_REGEX = Regex(#"(?:%[\da-fA-F]{2})+"#)
+
+/// Decodes any percent-encoded sequences in [str].
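+///
+/// Facts (mirroring this package's own tests):
+/// ```
+/// percentDecode("foo%20bar") == "foo bar"
+/// percentDecode("%F0%9F%8F%80") == "🏀"
+/// ```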
+function percentDecode(str: String): String = + str.replaceAllMapped(PERCENT_REGEX, (match) -> + let (bytes = getBytes(match.value)) + doPercentDecode(bytes) + ) + +local function doPercentDecode(bytes: List): String = _doPercentDecode(bytes, "") + +local function _doPercentDecode(bytes: List, ret: String) = + if (bytes.length == 0) ret + else if (bytes[0] < 0x80) + _doPercentDecode(bytes.drop(1), ret + bytes[0].toChar()) + else if (bytes[0] < 0xE0) + let (b0 = bytes[0].and(0x1f).shl(6)) + let (b1 = bytes[1].and(0x3f)) + _doPercentDecode(bytes.drop(2), ret + b0.or(b1).toChar()) + else if (bytes[0] < 0xF0) + let (b0 = bytes[0].and(0xf).shl(12)) + let (b1 = bytes[1].and(0x3f).shl(6)) + let (b2 = bytes[2].and(0x3f)) + _doPercentDecode(bytes.drop(3), ret + b0.or(b1).or(b2).toChar()) + else + let (b0 = bytes[0].and(0x7).shl(18)) + let (b1 = bytes[1].and(0x3f).shl(12)) + let (b2 = bytes[2].and(0x3f).shl(6)) + let (b3 = bytes[3].and(0x3f)) + _doPercentDecode(bytes.drop(4), ret + b0.or(b1).or(b2).or(b3).toChar()) + +/// Regex to parse URI's. +/// +/// From . +// language=RegExp +local URI_REGEX: Regex = Regex(#""" + ^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))? + """#) + +// language=RegExp +local AUTHORITY_REGEX: Regex = Regex(#""" + (?:([^@]+)@)?([^:]*)(?::(\d+))? + """#) + +// noinspection TypeMismatch +function parseAuthority(authority: String): Dynamic = + let (matches = AUTHORITY_REGEX.findMatchesIn(authority)) + let (groups = matches[0].groups) + new { + userInfo = groups.getOrNull(1)?.value?.ifNonNull((it) -> percentDecode(it)) + host = groups.getOrNull(2)?.value?.ifNonNull((it) -> percentDecode(it)) + port = groups.getOrNull(3)?.value?.toInt() + } + +/// Parses the input string as a [URI]. +/// +/// If the input is not valid, returns `null`. +function parse(str: String): URI? = + let (matches = URI_REGEX.findMatchesIn(str)) + if (matches.isEmpty) null + else + let (groups = matches[0].groups) + let (schemePart = groups.getOrNull(2)?.value) + let (authorityPart = groups.getOrNull(4)?.value) + let (pathPart = groups[5].value) + let (queryPart = groups.getOrNull(7)?.value) + let (fragmentPart = groups.getOrNull(9)?.value) + new URI { + when (schemePart != null) { + scheme = percentDecode(schemePart) + } + when (authorityPart != null) { + ...parseAuthority(authorityPart) + } + path = pathPart + query = queryPart + when (fragmentPart != null) { + fragment = percentDecode(fragmentPart) + } + } diff --git a/packages/pkl.experimental.uri/tests/URI.pkl b/packages/pkl.experimental.uri/tests/URI.pkl new file mode 100644 index 0000000..2548ef1 --- /dev/null +++ b/packages/pkl.experimental.uri/tests/URI.pkl @@ -0,0 +1,196 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module pkl.experimental.uri.tests.URI + +amends "pkl:test" + +import "../URI.pkl" + +local alphaLower = "abcdefghijklmnopqrstuvwxyz" +local alphaUpper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +local nums = "0123456789" + +facts { + ["encode"] { + URI.encode("https://example.com/some path") == "https://example.com/some%20path" + URI.encode(alphaLower) == alphaLower + URI.encode(alphaUpper) == alphaUpper + URI.encode(nums) == nums + + local safeChars = "!#$&'()*+,-./:;=?@_~" + URI.encode(safeChars) == safeChars + URI.encode("\u{ffff}") == "%EF%BF%BF" + URI.encode("🏀") == "%F0%9F%8F%80" + } + ["encodeComponent"] { + URI.encodeComponent("https://example.com/some path") == "https%3A%2F%2Fexample.com%2Fsome%20path" + URI.encode(alphaLower) == alphaLower + URI.encode(alphaUpper) == alphaUpper + URI.encode(nums) == nums + local safeChars = "-_.!~*'()" + URI.encode(safeChars) == safeChars + } + ["percentDecode"] { + URI.percentDecode("foo%20bar") == "foo bar" + URI.percentDecode("foo%20bar") == "foo bar" + URI.percentDecode("foo%20%20bar") == "foo bar" + URI.percentDecode("%F0%9F%8F%80") == "🏀" + URI.percentDecode("%C2%A3") == "£" + URI.percentDecode("%EF%BF%BF") == "\u{ffff}" + URI.percentDecode("%2Fbar%2Fbaz") == "/bar/baz" + } + ["parse"] { + local uri1 = URI.parse("https://www.example.com") + uri1 == new URI { + scheme = "https" + host = "www.example.com" + path = "" + } + uri1.pathSegments == List("") + + local uri2 = URI.parse("https://bilbo@www.example.com:1234/my/path?search=mysearch#fragment-value") + uri2 == new URI { + scheme = "https" + userInfo = "bilbo" + host = "www.example.com" + port = 1234 + path = "/my/path" + query = "search=mysearch" + fragment = "fragment-value" + } + uri2.pathSegments == List("my", "path") + + local uri3 = URI.parse("pkl:base?mybase") + uri3 == new URI { + scheme = "pkl" + path = "base" + query = "mybase" + } + uri3.pathSegments == List("base") + + local uri4 = URI.parse("pkl:base?mybase#fragment") + uri4 == new URI { + scheme = "pkl" + path = "base" + query = "mybase" + fragment = "fragment" + } + uri4.pathSegments == List("base") + + local uri5 = URI.parse("https://www.example.com/my%20path") + uri5 == new URI { + scheme = "https" + host = "www.example.com" + path = "/my%20path" + } + uri5.pathSegments == List("my path") + + local uri6 = URI.parse("https://www.example.com/search?q=%F0%9F%8F%80") + uri6 == new URI { + scheme = "https" + host = "www.example.com" + path = "/search" + query = "q=%F0%9F%8F%80" + } + uri6.pathSegments == List("search") + + local uri7 = URI.parse("myscheme:%F0%9F%8F%80#myfragment") + uri7 == new URI { + scheme = "myscheme" + path = "%F0%9F%8F%80" + fragment = "myfragment" + } + uri7.pathSegments == List("🏀") + + local uri8 = URI.parse("file:/my%20file.txt") + uri8 == new URI { + scheme = "file" + path = "/my%20file.txt" + } + uri8.pathSegments == List("my file.txt") + + local uri9 = URI.parse("file:///my%20file.txt") + uri9 == new URI { + scheme = "file" + path = "/my%20file.txt" + host = "" + } + uri9.pathSegments == List("my file.txt") + + local uri10 = URI.parse("schemey:///first%2Fsegment/second") + uri10 == new URI { + scheme = "schemey" + path = "/first%2Fsegment/second" + host = "" + } + uri10.pathSegments == List("first/segment", "second") + + local uri11 = URI.parse("../bar/baz") + uri11 == new URI { + path = "../bar/baz" + } + uri11.pathSegments == List("..", "bar", "baz") + + local uri12 = URI.parse("//www.example.com/foo") + uri12 == new URI { + 
host = "www.example.com" + path = "/foo" + } + uri12.pathSegments == List("foo") + + local uri13 = URI.parse("#") + uri13 == new URI { + path = "" + fragment = "" + } + uri13.pathSegments == List("") + } + ["toString()"] { + URI.parse("https://www.example.com").toString() == "https://www.example.com" + URI.parse("https://bilbo@www.example.com:1234/my/path?search=mysearch#fragment-value").toString() == "https://bilbo@www.example.com:1234/my/path?search=mysearch#fragment-value" + URI.parse("pkl:base?mybase").toString() == "pkl:base?mybase" + URI.parse("pkl:base?mybase#fragment").toString() == "pkl:base?mybase#fragment" + URI.parse("https://www.example.com/my%20path").toString() == "https://www.example.com/my%20path" + URI.parse("https://www.example.com/search?q=%F0%9F%8F%80").toString() == "https://www.example.com/search?q=%F0%9F%8F%80" + URI.parse("pkl:%F0%9F%8F%80#myfragment").toString() == "pkl:%F0%9F%8F%80#myfragment" + URI.parse("file:/my%20file.txt").toString() == "file:/my%20file.txt" + URI.parse("file:///my%20file.txt").toString() == "file:///my%20file.txt" + } + ["resolve"] { + URI.parse("schemey:/first/second/").resolve("../third").toString() + == "schemey:/first/third" + URI.parse("schemey:/first/second/").resolve("../../third").toString() + == "schemey:/third" + URI.parse("schemey:/first/second/").resolve("./").toString() + == "schemey:/first/second/" + URI.parse("schemey:/first/second").resolve("./").toString() + == "schemey:/first/" + URI.parse("schemey:/first/second/").resolve("/some/other/path").toString() + == "schemey:/some/other/path" + URI.parse("schemey:/first/second/").resolve("otherscheme:/some/other/path").toString() + == "otherscheme:/some/other/path" + URI.parse("schemey:/first/leaf").resolve("./").toString() + == "schemey:/first/" + URI.parse("schemey:/first/leaf").resolve("./second").toString() + == "schemey:/first/second" + URI.parse("https://www.example.com").resolve("some/relative/path").toString() + == "https://www.example.com/some/relative/path" + URI.parse("https://www.example.com").resolve("some/relative/path").toString() + == "https://www.example.com/some/relative/path" + URI.parse("https://www.example.com").resolve("//foo.com/some/relative/path").toString() + == "https://foo.com/some/relative/path" + } +} diff --git a/packages/pkl.table/PklProject b/packages/pkl.table/PklProject new file mode 100644 index 0000000..2f3ed82 --- /dev/null +++ b/packages/pkl.table/PklProject @@ -0,0 +1,22 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// [TableRenderer] generates pretty human-readable and markdown-compatible tables! +amends "../basePklProject.pkl" + +package { + version = "1.0.0" + description = "Generates pretty human-readable and markdown-compatible tables." 
+} diff --git a/packages/pkl.table/PklProject.deps.json b/packages/pkl.table/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/pkl.table/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/pkl.table/table.pkl b/packages/pkl.table/table.pkl new file mode 100644 index 0000000..d301c80 --- /dev/null +++ b/packages/pkl.table/table.pkl @@ -0,0 +1,251 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// [TableRenderer] generates pretty tables! +@ModuleInfo { minPklVersion = "0.25.0" } +module pkl.table.table + +import "pkl:math" + +typealias ColumnKey = String +typealias ListLike = List|Listing|Dynamic(toMap().isEmpty) +const local listLikeDescription = "`List`s, `Listing`s, or `Dynamic`s with only elements" + +typealias HorizontalPosition = "left"|"inner"|"right" +typealias VerticalPosition = "top"|"inner"|"bottom" +typealias Alignment = "left"|"right" + +const function Column(_key: String): Column = new { key = _key } +class Column { + /// The key used to extract property values from rows + key: ColumnKey + + /// The column header title + title: String = key + + /// The direction to align and pad column contents within a cell + align: Alignment = "left" +} + +/// Style options used to draw the table +/// Default values produce a table consisting of simple ASCII characters +class TableStyle { + /// Placeholder to use in place of null property values + nullPlaceholder: String = "null" + + /// Include a header in the table + includeHeader: Boolean = true + + /// Default character to use for horizontal rules + defaultHorizontal: Char = "-" + + /// Characters to use for specific horizontal rules + horizontals: Mapping = new { + ["top"] = defaultHorizontal + ["inner"] = defaultHorizontal + ["bottom"] = defaultHorizontal + } + + /// Default character to use for vertical rules + defaultVertical: Char = "|" + + /// Characters to use for specific horizontal rules + verticals: Mapping = new { + ["left"] = defaultVertical + ["inner"] = defaultVertical + ["right"] = defaultVertical + } + + /// Default character to use for corners + defaultCorner: Char = "+" + + /// Characters to use for specific corners + corners: Mapping> = new { + ["left"] { + ["top"] = defaultCorner + ["inner"] = defaultCorner + ["bottom"] = defaultCorner + } + ["inner"] { + ["top"] = defaultCorner + ["inner"] = defaultCorner + ["bottom"] = defaultCorner + } + ["right"] { + ["top"] = defaultCorner + ["inner"] = defaultCorner + ["bottom"] = defaultCorner + } + } +} + +local class InterimTable { + + style: TableStyle + converters: Mapping Any> + + columns: Listing + rows: List + + columnWidths: Mapping = new { + for (column in columns) { + [column.key] 
= math.max( + column.title.length, + renderedCells.fold(0, (acc, it) -> + math.max(acc, it.getOrNull(column.key)?.length ?? 0) + ) + ) as UInt + } + } + + renderedCells: Listing> = new { + for (row in rows) { + new { + for (column in columns) { + [column.key] = renderCell(column.key, row.getPropertyOrNull(column.key)) + } + } + } + } + + rendered = new Listing { + when (style.horizontals["top"] != null) { + renderHorizontalRule("top") + } + when (style.includeHeader) { + renderRow(columns.toList().toMap((col) -> col.key, (col) -> col.title)) + when (style.horizontals["inner"] != null) { + renderHorizontalRule("inner") + } + } + for (row in renderedCells) { + renderRow(row.toMap()) + } + when (style.horizontals["bottom"] != null) { + renderHorizontalRule("bottom") + } + }.join("\n") + "\n" + + function renderCell(col: String, value: Any): String = + (converters.getOrNull(col)?.apply(value)?.toString()) ?? + (converters.getOrNull(value.getClass())?.apply(value)?.toString()) ?? + value?.toString() ?? + style.nullPlaceholder + + function renderRow(renderedCells: Map) = new Listing { + style.verticals["left"] ?? "" + for (i, col in columns) { + " " + if (col.align == "left") + renderedCells[col.key].padEnd(columnWidths[col.key], " ") + else if (col.align == "right") + renderedCells[col.key].padStart(columnWidths[col.key], " ") + else "" + " " + style.verticals[if (i == columns.length - 1) "right" else "inner"] ?? "" + } + }.join("") + + function renderHorizontalRule(verticalPosition: VerticalPosition): String = new Listing { + style.corners["left"][verticalPosition] + for (i, col in columns) { + style.horizontals[verticalPosition].repeat(columnWidths[col.key] + 2) + style.corners[if (i == columns.length - 1) "right" else "inner"][verticalPosition] + } + }.join("") +} + +/// [pkl.ValueRenderer] subclass used to render pretty-printed tables +class TableRenderer extends ValueRenderer { + /// Style to use for table rendering + style: TableStyle + + /// Listing of properties that should be rendered as table columns + columns: Listing(!isEmpty || throw("TableRenderer must be configured to render at least one column")) + + local self = this + + /// Renders [value] as a pretty-printed table. + function renderDocument(value: Any): String = new InterimTable { + columns { + for (col in self.columns) { + if (col is Column) col else Column(col) + } + } + rows = + if (value is ListLike?) value.toList() + else throw("Only \(listLikeDescription) values can be rendered as a table. Instead, found a \(value.getClass()).\n\nValue:\n\(value)") + style = self.style + converters = self.converters + }.rendered + + /// Unsupported. 
[renderCell] is used instead as the column name must be known to look up the relevant converter + function renderValue(_): String = throw("TableRenderer can only render documents, not values") +} + +/// [TableStyle] using unicode box drawing characters (light variants) +const boxDrawingLightStyle: TableStyle = new { + defaultHorizontal = "─" + defaultVertical = "│" + corners { + ["left"] { + ["top"] = "┌" + ["inner"] = "├" + ["bottom"] = "└" + } + ["inner"] { + ["top"] = "┬" + ["inner"] = "┼" + ["bottom"] = "┴" + } + ["right"] { + ["top"] = "┐" + ["inner"] = "┤" + ["bottom"] = "┘" + } + } +} + +/// [TableStyle] using unicode box drawing characters (heavy variants) +const boxDrawingHeavyStyle: TableStyle = new { + defaultHorizontal = "━" + defaultVertical = "┃" + corners { + ["left"] { + ["top"] = "┏" + ["inner"] = "┣" + ["bottom"] = "┗" + } + ["inner"] { + ["top"] = "┳" + ["inner"] = "╋" + ["bottom"] = "┻" + } + ["right"] { + ["top"] = "┓" + ["inner"] = "┫" + ["bottom"] = "┛" + } + } +} + +/// [TableStyle] that renders Markdown-compatible tables +const markdownStyle: TableStyle = new { + defaultCorner = "|" + horizontals { + ["top"] = null + ["bottom"] = null + } +} diff --git a/packages/pkl.table/tests/table.pkl b/packages/pkl.table/tests/table.pkl new file mode 100644 index 0000000..5aa00d2 --- /dev/null +++ b/packages/pkl.table/tests/table.pkl @@ -0,0 +1,81 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module pkl.table.tests.table + +amends "pkl:test" + +import "../table.pkl" + +local data = new Listing { + new { alpha = "123"; beta = 123; gamma = 123.h; delta = List("1", "2", "3") } + new { alpha = "456"; beta = 456; gamma = 456.h; delta = List("4", "5", "6") } + new { alpha = "789loooooong"; beta = 789; gamma = 789.h; delta = List("7", "8", "9") } +} + +examples { + ["simple"] { + new table.TableRenderer { + columns { "alpha"; "beta"; "gamma" } + }.renderDocument(data) + } + ["fancy column"] { + new table.TableRenderer { + style { + nullPlaceholder = "" + } + columns { + (table.Column("alpha")) { + title = "a" + align = "right" + } + (table.Column("beta")) { + title = "b" + } + (table.Column("gamma")) { + title = "c" + } + "unknown" + } + }.renderDocument(data) + } + ["coverter"] { + new table.TableRenderer { + columns { table.Column("alpha"); table.Column("beta"); table.Column("gamma"); table.Column("delta") } + converters { + [Duration] = (it) -> it.toUnit("s") + ["delta"] = (it) -> it.join(",") + } + }.renderDocument(data) + } + ["boxDrawingLight"] { + new table.TableRenderer { + style = table.boxDrawingLightStyle + columns { table.Column("alpha"); table.Column("beta"); table.Column("gamma") } + }.renderDocument(data) + } + ["boxDrawingHeavy"] { + new table.TableRenderer { + style = table.boxDrawingHeavyStyle + columns { table.Column("alpha"); table.Column("beta"); table.Column("gamma") } + }.renderDocument(data) + } + ["markdown"] { + new table.TableRenderer { + style = table.markdownStyle + columns { table.Column("alpha"); table.Column("beta"); table.Column("gamma") } + }.renderDocument(data) + } +} diff --git a/packages/pkl.table/tests/table.pkl-expected.pcf b/packages/pkl.table/tests/table.pkl-expected.pcf new file mode 100644 index 0000000..32ed18e --- /dev/null +++ b/packages/pkl.table/tests/table.pkl-expected.pcf @@ -0,0 +1,72 @@ +examples { + ["simple"] { + """ + +--------------+------+-------+ + | alpha | beta | gamma | + +--------------+------+-------+ + | 123 | 123 | 123.h | + | 456 | 456 | 456.h | + | 789loooooong | 789 | 789.h | + +--------------+------+-------+ + + """ + } + ["fancy column"] { + """ + +--------------+-----+-------+---------+ + | a | b | c | unknown | + +--------------+-----+-------+---------+ + | 123 | 123 | 123.h | | + | 456 | 456 | 456.h | | + | 789loooooong | 789 | 789.h | | + +--------------+-----+-------+---------+ + + """ + } + ["coverter"] { + """ + +--------------+------+-----------+-------+ + | alpha | beta | gamma | delta | + +--------------+------+-----------+-------+ + | 123 | 123 | 442800.s | 1,2,3 | + | 456 | 456 | 1641600.s | 4,5,6 | + | 789loooooong | 789 | 2840400.s | 7,8,9 | + +--------------+------+-----------+-------+ + + """ + } + ["boxDrawingLight"] { + """ + ┌──────────────┬──────┬───────┐ + │ alpha │ beta │ gamma │ + ├──────────────┼──────┼───────┤ + │ 123 │ 123 │ 123.h │ + │ 456 │ 456 │ 456.h │ + │ 789loooooong │ 789 │ 789.h │ + └──────────────┴──────┴───────┘ + + """ + } + ["boxDrawingHeavy"] { + """ + ┏━━━━━━━━━━━━━━┳━━━━━━┳━━━━━━━┓ + ┃ alpha ┃ beta ┃ gamma ┃ + ┣━━━━━━━━━━━━━━╋━━━━━━╋━━━━━━━┫ + ┃ 123 ┃ 123 ┃ 123.h ┃ + ┃ 456 ┃ 456 ┃ 456.h ┃ + ┃ 789loooooong ┃ 789 ┃ 789.h ┃ + ┗━━━━━━━━━━━━━━┻━━━━━━┻━━━━━━━┛ + + """ + } + ["markdown"] { + """ + | alpha | beta | gamma | + |--------------|------|-------| + | 123 | 123 | 123.h | + | 456 | 456 | 456.h | + | 789loooooong | 789 | 789.h | + + """ + } +} diff --git a/packages/pkl.toml/PklProject 
b/packages/pkl.toml/PklProject new file mode 100644 index 0000000..4604280 --- /dev/null +++ b/packages/pkl.toml/PklProject @@ -0,0 +1,21 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// A [TOML](https://toml.io/en/) [Renderer]. +amends "../basePklProject.pkl" + +package { + version = "1.0.0" +} diff --git a/packages/pkl.toml/PklProject.deps.json b/packages/pkl.toml/PklProject.deps.json new file mode 100644 index 0000000..836079a --- /dev/null +++ b/packages/pkl.toml/PklProject.deps.json @@ -0,0 +1,4 @@ +{ + "schemaVersion": 1, + "resolvedDependencies": {} +} \ No newline at end of file diff --git a/packages/pkl.toml/examples/basic.pkl b/packages/pkl.toml/examples/basic.pkl new file mode 100644 index 0000000..db87eae --- /dev/null +++ b/packages/pkl.toml/examples/basic.pkl @@ -0,0 +1,128 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module pkl.toml.examples.basic + +import "../toml.pkl" + +title = "TOML Example" + +owner { + name = "Tom Preston-Werner" + organization = "Github" + bio = """ + GitHub Cofounder & CEO + Likes tater tots and beer + """ + dob = new toml.DateTime { value = "1979-05-27T07:32:00Z" } +} + +database { + server = "192.168.1.1" + ports { + 8001 + 8001 + 8002 + } + connection_max = 5000 + enabled = true +} + +servers { + alpha { + ip = "10.0.0.1" + dc = "eqdc10" + } + beta { + ip = "10.0.0.2" + dc = "eqdc10" + country = "中国" + } +} + +clients { + data { + new { + "gamma" + "delta" + } + new { + 1 + 2 + } + } + hosts { + "alpha" + "omega" + } +} + + +products { + new { + name = "Hammer Bro" + sku = 738594937 + } + new {} + new { + name = "Nail" + sku = 284758393 + color = "gray" + `1-1` = "〇😀" + } +} + +fruits { + new { + name = "apple" + physical { + color = "red" + shape = "round" + } + varieties { + new { name = "red delicious" } + new { name = "granny smith" } + } + } + new { + name = "banana" + varieties { + new { name = "plantain" } + } + } +} + +contributors { + "Foo Bar " + new { + name = "Baz Qux" + email = "bazqux@example.com" + url = "https://example.com/bazqux" + } +} + +dog { + `tater.man` { + type { + name = "pug" + } + age = NaN + maxAge = Infinity + } +} + +output { + renderer = new toml.Renderer {} +} diff --git a/packages/pkl.toml/examples/converters.pkl b/packages/pkl.toml/examples/converters.pkl new file mode 100644 index 0000000..d824343 --- /dev/null +++ b/packages/pkl.toml/examples/converters.pkl @@ -0,0 +1,49 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module pkl.toml.examples.converters + +import "../toml.pkl" + +class Dog { + breed: String + sleepTime: Duration +} + +dogs { + new Dog { + breed = "Golden Retreiver" + sleepTime = 12.h + } + new Dog { + breed = "GERMAN SHEPHERD" + sleepTime = 10.h + } + new Dog { + breed = "greyhound" + sleepTime = 18.h + } +} + +output { + renderer = new toml.Renderer { + converters { + [Dog] = (dog: Dog) -> (dog) { + breed = dog.breed.toLowerCase() + } + [Duration] = (dur: Duration) -> "\(dur.value)\(dur.unit)" + } + } +} diff --git a/packages/pkl.toml/examples/dates.pkl b/packages/pkl.toml/examples/dates.pkl new file mode 100644 index 0000000..215360d --- /dev/null +++ b/packages/pkl.toml/examples/dates.pkl @@ -0,0 +1,39 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +module pkl.toml.examples.dates + +import "../toml.pkl" + +// Offset Date-Time +odt1 = new toml.DateTime { value = "1979-05-27T07:32:00Z" } +odt2 = new toml.DateTime { value = "1979-05-27T00:32:00-07:00" } +odt3 = new toml.DateTime { value = "1979-05-27T00:32:00.999999-07:00" } +odt4 = new toml.DateTime { value = "1979-05-27 07:32:00Z" } + +// Local Date-Time +ldt1 = new toml.DateTime { value = "1979-05-27T07:32:00" } +ldt2 = new toml.DateTime { value = "1979-05-27T00:32:00.999999" } + +// Local Date +ld1 = new toml.Date { value = "1979-05-27" } + +// Local Time +lt1 = new toml.Time { value = "07:32:00" } +lt2 = new toml.Time { value = "00:32:00.999999" } + +output { + renderer = new toml.Renderer {} +} diff --git a/packages/pkl.toml/tests/toml.pkl b/packages/pkl.toml/tests/toml.pkl new file mode 100644 index 0000000..ea624f4 --- /dev/null +++ b/packages/pkl.toml/tests/toml.pkl @@ -0,0 +1,30 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//===----------------------------------------------------------------------===// +module pkl.toml.tests.toml + +amends "pkl:test" + +examples { + ["basic.pkl"] { + import("../examples/basic.pkl").output.text + } + ["converters.pkl"] { + import("../examples/converters.pkl").output.text + } + ["dates.pkl"] { + import("../examples/dates.pkl").output.text + } +} diff --git a/packages/pkl.toml/tests/toml.pkl-expected.pcf b/packages/pkl.toml/tests/toml.pkl-expected.pcf new file mode 100644 index 0000000..e1a4f6d --- /dev/null +++ b/packages/pkl.toml/tests/toml.pkl-expected.pcf @@ -0,0 +1,101 @@ +examples { + ["basic.pkl"] { + """ + title = "TOML Example" + contributors = [ "Foo Bar ", { name = "Baz Qux", email = "bazqux@example.com", url = "https://example.com/bazqux" } ] + + [owner] + name = "Tom Preston-Werner" + organization = "Github" + bio = ""\" + GitHub Cofounder & CEO + Likes tater tots and beer""\" + dob = 1979-05-27T07:32:00Z + + [database] + server = "192.168.1.1" + ports = [ 8001, 8001, 8002 ] + connection_max = 5000 + enabled = true + + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + country = "中国" + + [clients] + data = [ [ "gamma", "delta" ], [ 1, 2 ] ] + hosts = [ "alpha", "omega" ] + + [[products]] + name = "Hammer Bro" + sku = 738594937 + + [[products]] + + [[products]] + name = "Nail" + sku = 284758393 + color = "gray" + 1-1 = "〇😀" + + [[fruits]] + name = "apple" + + [fruits.physical] + color = "red" + shape = "round" + + [[fruits.varieties]] + name = "red delicious" + + [[fruits.varieties]] + name = "granny smith" + + [[fruits]] + name = "banana" + + [[fruits.varieties]] + name = "plantain" + + [dog."tater.man"] + age = nan + maxAge = inf + + [dog."tater.man".type] + name = "pug" + """ + } + ["converters.pkl"] { + """ + [[dogs]] + breed = "golden retreiver" + sleepTime = "12h" + + [[dogs]] + breed = "german shepherd" + sleepTime = "10h" + + [[dogs]] + breed = "greyhound" + sleepTime = "18h" + """ + } + ["dates.pkl"] { + """ + odt1 = 1979-05-27T07:32:00Z + odt2 = 1979-05-27T00:32:00-07:00 + odt3 = 1979-05-27T00:32:00.999999-07:00 + odt4 = 1979-05-27 07:32:00Z + ldt1 = 1979-05-27T07:32:00 + ldt2 = 1979-05-27T00:32:00.999999 + ld1 = 1979-05-27 + lt1 = 07:32:00 + lt2 = 00:32:00.999999 + """ + } +} diff --git a/packages/pkl.toml/toml.pkl b/packages/pkl.toml/toml.pkl new file mode 100644 index 0000000..b92ff3c --- /dev/null +++ b/packages/pkl.toml/toml.pkl @@ -0,0 +1,217 @@ +//===----------------------------------------------------------------------===// +// Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//===----------------------------------------------------------------------===// +/// A renderer for [TOML](https://toml.io) configuration files. 
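+///
+/// Values are passed through [Renderer.converters] before rendering. `Dynamic` values that mix
+/// elements with properties or entries are rejected, because it is ambiguous whether they should
+/// be rendered as a TOML table or as an array.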
+/// +/// Basic usage: +/// ``` +/// import "package://pkg.pkl-lang.org/pantry/pkl.toml@1.0.0" +/// +/// output { +/// renderer = new toml.Renderer {} +/// } +/// ``` +/// +/// To render TOML dates and times, use [Date], [Time], and [DateTime]. +@ModuleInfo { minPklVersion = "0.25.0" } +module pkl.toml.toml + +abstract class AbstractDateTime { + value: String +} + +/// A TOML [Local Date](https://toml.io/en/v1.0.0#local-date) value. +class Date extends AbstractDateTime { + value: String(matches(Regex(#"(\d{4})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])"#))) +} + +/// A TOML [Local Time](https://toml.io/en/v1.0.0#local-time) value. +class Time extends AbstractDateTime { + value: String(matches(Regex(#"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?"#))) +} + +/// A TOML [Offset Date-Time](https://toml.io/en/v1.0.0#offset-date-time) +/// or [Local Date-Time](https://toml.io/en/v1.0.0#local-date-time) value. +class DateTime extends AbstractDateTime { + value: String(matches(Regex(#"(\d{4})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])[T ]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(Z|[+-]([01][0-9]|2[0-3]):([0-5][0-9]))?"#))) +} + +/// Renders values as TOML. +class Renderer extends ValueRenderer { + /// Value converters to apply before values are rendered. + /// + /// For further information see [PcfRenderer.converters]. + /// For path converters, only "*" is supported. + converters: Mapping Any> + + function renderValue(value: Any) = + let (_value = getBasicValue(value, false)) + doRenderValue(_value, List()).trim() + + function renderDocument(value: Any) = + if (!isTableLike(value)) + throw(""" + Invalid input: TOML can only render object-types at the root level. Received: \(value) + """) + else + renderValue(value) + + local jsonRenderer = new JsonRenderer {} + + local function getConvertersForValue(value: Any): List<(Any) -> unknown> = new Listing { + when (convertersMap.containsKey(value.getClass())) { + convertersMap[value.getClass()] + } + when (convertersMap.containsKey("*")) { + convertersMap["*"] + } + }.toList() + + local function applyConverters(value: Any) = + let (converters = getConvertersForValue(value)) + converters.fold(value, (acc, converter) -> converter.apply(acc)) + + /// Traverses the object and casts it down to its basic type: Map, List, or the primitive value. Runs each + /// value through the converter if there is a match. + /// `skipConversion` is a helper flag to avoid infinite recursion in case the converter returns the same type. + local function getBasicValue(value: Any, skipConversion: Boolean) = + if (!skipConversion && !getConvertersForValue(value).isEmpty) + getBasicValue(applyConverters(value), true) + // If the value is Dynamic, and we have both elements and properties, it's ambiguous whether we should + // render as a table or an array. + else if (value is Dynamic && isTableLike(value) && isArrayLike(value)) + throw(""" + Cannot render object with both properties/entries and elements as TOML. Received: \(value) + """) + else if (isTableLike(value)) + getMap(value) + .mapValues((_, elem) -> getBasicValue(elem, false)) + else if (isArrayLike(value)) getList(value).map((elem) -> getBasicValue(elem, false)) + else value + + /// Underlying implementation for rendering values as toml + local function doRenderValue(value: Any, path: List): String = + if (isTableArray(value)) + renderTableArray(value, path) + else if (value is Map) + renderTable(value, path) + else + renderInlineValue(value) + + /// Determine whether an object is map-like. 
We'll consider any Dynamic that doesn't have any elements as map-like. + local function isTableLike(obj: Any) = !(obj is AbstractDateTime) && ((obj is Dynamic && obj.toList().isEmpty) || obj is MapLike) + + /// Determine whether an object is list-like. We'll consider any Dynamic that has elements as list-like. + local function isArrayLike(obj: Any) = (obj is Dynamic && !obj.toList().isEmpty) || obj is ListLike + + /// Convert an object to its Map representation. Toml doesn't include null so we should filter out null properties. + local function getMap(obj: MapLike|Dynamic) = (if (obj is Map) obj else obj.toMap()).filter((_, elem) -> elem != null) + + /// Convert an object to its List representation. + local function getList(obj: ListLike|Dynamic) = if (obj is List) obj else obj.toList() + + /// Determine if we should render this value as an array of tables or not. + /// A value is an array of tables if all of the inhabitants are table-like. + local function isTableArray(value: Any) = + value is List && value.every((elem) -> elem is Map) + + local function isTableTypeProp(value: Any) = value is Map || isTableArray(value) + + local convertersMap = converters.toMap() + + /// Render the value as an inline value (e.g. inline array, object, or primitive) + local function renderInlineValue(value: Any) = + if (value is Number && value.isNaN) + "nan" + else if (value == Infinity) + "inf" + else if (value is String) + renderString(value) + else if (value is Number|Boolean) + jsonRenderer.renderValue(value) + else if (value is AbstractDateTime) + value.value + else if (value is Map) + "{ " + new Listing { + for (k, v in value) { + "\(makeSingleKey(k)) = \(renderInlineValue(v))" + } + }.toList().join(", ") + " }" + else if (value is List) + "[ " + value.map((elem) -> renderInlineValue(elem)).join(", ") + " ]" + else + throw("Not sure how to render value: \(value). Try defining a converter for this type.") + + /// Render a string making sure multi-line use the """ multi-line syntax for better readability. + local function renderString(value: String) = + if (value.contains("\n")) + ("\"\"\"\n" + + value.split("\n") + .map((line) -> jsonRenderer.renderValue(line).drop(1).dropLast(1)) + .join("\n") + + "\"\"\"") + else jsonRenderer.renderValue(value) + + local function renderSingleTableArray(map: Map, path: List) = + let (nativeProps = map.filter((_, value) -> !isTableTypeProp(value))) + let (tableProps = map.filter((_, value) -> isTableTypeProp(value))) + new Listing { + """ + + [[\(makeKey(path))]] + """ + for (k, v in nativeProps) { + "\(makeSingleKey(k)) = \(renderInlineValue(v))" + } + for (k, v in tableProps) { + doRenderValue(v, path.add(k)) + } + }.toList().join("\n") + + local function renderTableArray(value: List, path: List) = + value.map((elem) -> renderSingleTableArray(getMap(elem), path)).join("\n") + + local function makeSingleKey(key: String) = if (key.matches(Regex(#"[A-Za-z0-9_-]+"#))) + key + else + jsonRenderer.renderValue(key) + + local function makeKey(path: List): String = path.map((k) -> makeSingleKey(k)).join(".") + + local function renderTable(m: Map, path: List): String = + let (nativeProps = m.filter((_, value) -> !isTableTypeProp(value))) + let (tableProps = m.filter((_, value) -> isTableTypeProp(value))) + new Listing { + // If we are in an object's context, render the object header. Skip if all children are also objects. 
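+      // nativeProps render as inline `key = value` pairs (after the `[section]` header when one is
+      // emitted); tableProps recurse into nested `[table]` / `[[array-of-tables]]` sections below.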
+ when (!path.isEmpty && nativeProps.length > 0) { + """ + + [\(makeKey(path))] + """ + } + for (k, v in nativeProps) { + "\(makeSingleKey(k)) = \(renderInlineValue(v))" + } + for (k, v in tableProps) { + doRenderValue(v, path.add(k)) + } + } + .toList() + .join("\n") +} + +local typealias MapLike = Typed|Map|Mapping + +local typealias ListLike = List|Listing diff --git a/settings.gradle.kts b/settings.gradle.kts new file mode 100644 index 0000000..2d6048d --- /dev/null +++ b/settings.gradle.kts @@ -0,0 +1,12 @@ +pluginManagement { + repositories { + mavenCentral() + gradlePluginPortal() + } +} + +plugins { + id("org.gradle.toolchains.foojay-resolver-convention") version "0.5.0" +} + +rootProject.name = "pkl-pantry" diff --git a/src/test/kotlin/com/apple/pkl/apple_pantry/ModuleNamesTest.kt b/src/test/kotlin/com/apple/pkl/apple_pantry/ModuleNamesTest.kt new file mode 100644 index 0000000..54f7614 --- /dev/null +++ b/src/test/kotlin/com/apple/pkl/apple_pantry/ModuleNamesTest.kt @@ -0,0 +1,99 @@ +/** + * Copyright © 2024 Apple Inc. and the Pkl project authors. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.apple.pkl.apple_pantry + +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource +import org.junit.jupiter.api.condition.DisabledIf +import org.pkl.core.EvaluatorBuilder +import org.pkl.core.ModuleSource +import org.pkl.core.parser.Lexer +import org.pkl.core.project.Project +import java.nio.file.Files +import java.nio.file.Path +import kotlin.io.path.exists +import kotlin.io.path.isDirectory +import kotlin.io.path.name +import kotlin.io.path.relativeTo + +class ModuleNamesTest { + companion object { + private val currentWorkingDir: Path + get() = Path.of(System.getProperty("user.dir")) + + private val rootProjectDir: Path by lazy { + val workingDir = currentWorkingDir + workingDir.takeIf { it.resolve("settings.gradle.kts").exists() } + ?: workingDir.parent.takeIf { it.resolve("settings.gradle.kts").exists() } + ?: workingDir.parent.parent.takeIf { it.resolve("settings.gradle.kts").exists() } + ?: throw AssertionError("Failed to locate root project directory.") + } + private val packagesDir = rootProjectDir.resolve("packages") + + private val packageDirs by lazy { + Files.list(packagesDir).filter(Path::isDirectory).toList() + } + + private val evaluators by lazy { + packageDirs.associate { dir -> + val project = Project.loadFromPath(dir.resolve("PklProject")) + dir.name to EvaluatorBuilder.preconfigured().applyFromProject(project).build() + } + } + + @JvmStatic + fun discoverPklModules(): List> { + return packageDirs + .flatMap { packageDir -> + val pklModules = Files.walk(packageDir) + .filter { it.name.endsWith(".pkl") } + .filter { !it.contains(Path.of("fixtures")) } + .filter { !it.contains(Path.of("examples")) } + .toList() + pklModules.map { packageDir to it } + } + } + } + + @ParameterizedTest + @MethodSource("discoverPklModules") + fun testModuleName(packageAndModule: Pair) { + val 
(packagePath, modulePath) = packageAndModule + val packageName = packagePath.name + val evaluator = evaluators[packageName]!! + val schema = evaluator.evaluateSchema(ModuleSource.path(modulePath)) + val relativePath = modulePath.relativeTo(packagePath.toAbsolutePath()) + val expectedName = packageName + "." + relativePath.toString().dropLast(4).replace('/', '.') + + val exceptionList = listOf("S008wrong") + + if (schema.moduleName != expectedName && !exceptionList.contains(schema.moduleName)) { + val expectedNameQuoted = expectedName + .split(".") + .joinToString(".", transform = Lexer::maybeQuoteIdentifier) + throw AssertionError( + """ + Expected module name $expectedName, but was ${schema.moduleName} in file ${modulePath.toUri()} + + To fix, replace the following: + + - module ${schema.moduleName} + + module $expectedNameQuoted + """ + ) + } + } +}