From d757364b4e854e88125f84ec73d476fcbc4f19ce Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?=
Date: Wed, 20 Nov 2024 13:13:01 +0100
Subject: [PATCH] refactor issue templates to be component-specific

---
 .../ISSUE_TEMPLATE/010-bug-compilation.yml    | 73 ++++++++++++++
 .../{01-bug-low.yml => 011-bug-results.yml}   | 67 ++++++-------
 .github/ISSUE_TEMPLATE/019-bug-misc.yml       | 63 ++++++++++++
 .github/ISSUE_TEMPLATE/02-bug-medium.yml      | 97 -------------------
 ...05-enhancement.yml => 020-enhancement.yml} |  2 +-
 .github/ISSUE_TEMPLATE/03-bug-high.yml        | 97 -------------------
 .../{06-research.yml => 030-research.yml}     |  2 +-
 .github/ISSUE_TEMPLATE/04-bug-critical.yml    | 97 -------------------
 .../{07-refactor.yml => 040-refactor.yml}     |  2 +-
 9 files changed, 173 insertions(+), 327 deletions(-)
 create mode 100644 .github/ISSUE_TEMPLATE/010-bug-compilation.yml
 rename .github/ISSUE_TEMPLATE/{01-bug-low.yml => 011-bug-results.yml} (59%)
 create mode 100644 .github/ISSUE_TEMPLATE/019-bug-misc.yml
 delete mode 100644 .github/ISSUE_TEMPLATE/02-bug-medium.yml
 rename .github/ISSUE_TEMPLATE/{05-enhancement.yml => 020-enhancement.yml} (97%)
 delete mode 100644 .github/ISSUE_TEMPLATE/03-bug-high.yml
 rename .github/ISSUE_TEMPLATE/{06-research.yml => 030-research.yml} (97%)
 delete mode 100644 .github/ISSUE_TEMPLATE/04-bug-critical.yml
 rename .github/ISSUE_TEMPLATE/{07-refactor.yml => 040-refactor.yml} (95%)

diff --git a/.github/ISSUE_TEMPLATE/010-bug-compilation.yml b/.github/ISSUE_TEMPLATE/010-bug-compilation.yml
new file mode 100644
index 00000000000000..550ee1b498ab9f
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/010-bug-compilation.yml
@@ -0,0 +1,73 @@
+name: Bug (compilation)
+description: Something goes wrong when trying to compile llama.cpp.
+title: "Compile bug: "
+labels: ["bug-unconfirmed", "compilation"]
+body:
+  - type: markdown
+    attributes:
+      value: >
+        Thanks for taking the time to fill out this bug report!
+        This issue template is intended for bug reports where the compilation of llama.cpp fails.
+        Before opening an issue, please confirm that the compilation still fails with `-DGGML_CCACHE=OFF`.
+        If the compilation succeeds with ccache disabled, you should be able to permanently fix the issue
+        by clearing `~/.cache/ccache` (on Linux).
+  - type: textarea
+    id: commit
+    attributes:
+      label: Git commit
+      description: Which commit are you trying to compile?
+      placeholder: |
+        $git rev-parse HEAD
+        84a07a17b1b08cf2b9747c633a2372782848a27f
+    validations:
+      required: true
+  - type: dropdown
+    id: operating-system
+    attributes:
+      label: Which operating systems do you know to be affected?
+      multiple: true
+      options:
+        - Linux
+        - Mac
+        - Windows
+        - BSD
+        - Other? (Please let us know in description)
+    validations:
+      required: true
+  - type: dropdown
+    id: backends
+    attributes:
+      label: GGML backends
+      description: Which GGML backends do you know to be affected?
+      options: [AMX, BLAS, CPU, CUDA, HIP, Kompute, Metal, Musa, RPC, SYCL, Vulkan]
+      multiple: true
+  - type: textarea
+    id: steps_to_reproduce
+    attributes:
+      label: Steps to Reproduce
+      description: >
+        Please tell us how to reproduce the bug and include any additional information that you think could be useful for fixing it.
+        If you can narrow down the bug to specific compile flags, that information would be very much appreciated by us.
+      placeholder: >
+        Here are the exact commands that I used: ...
+    validations:
+      required: true
+  - type: textarea
+    id: first_bad_commit
+    attributes:
+      label: First Bad Commit
+      description: >
+        If the bug was not present on an earlier version: when did it start appearing?
+        If possible, please do a git bisect and identify the exact commit that introduced the bug.
+    validations:
+      required: false
+  - type: textarea
+    id: logs
+    attributes:
+      label: Relevant log output
+      description: >
+        Please copy and paste any relevant log output, including the command that you entered and any generated text.
+        This will be automatically formatted into code, so no need for backticks.
+      render: shell
+    validations:
+      required: true
diff --git a/.github/ISSUE_TEMPLATE/01-bug-low.yml b/.github/ISSUE_TEMPLATE/011-bug-results.yml
similarity index 59%
rename from .github/ISSUE_TEMPLATE/01-bug-low.yml
rename to .github/ISSUE_TEMPLATE/011-bug-results.yml
index 275cf7d96116b5..ace2c9a5e1968c 100644
--- a/.github/ISSUE_TEMPLATE/01-bug-low.yml
+++ b/.github/ISSUE_TEMPLATE/011-bug-results.yml
@@ -1,41 +1,22 @@
-name: Low Severity Bugs
-description: Used to report low severity bugs in llama.cpp (e.g. cosmetic issues, non critical UI glitches)
-title: "Bug: "
-labels: ["bug-unconfirmed", "low severity"]
+name: Bug (model evaluation)
+description: Something goes wrong when evaluating a model without any complex components such as the server on top.
+title: "Eval bug: "
+labels: ["bug-unconfirmed", "model evaluation"]
 body:
   - type: markdown
     attributes:
       value: >
         Thanks for taking the time to fill out this bug report!
-        Please include information about your system, the steps to reproduce the bug,
-        and the version of llama.cpp that you are using.
-        If you encountered the bug using a third-party frontend (e.g. ollama),
-        please reproduce the bug using llama.cpp only.
+        This issue template is intended for bug reports where the model evaluation results
+        (i.e. the generated text) are incorrect or llama.cpp crashes during model evaluation.
+        If you encountered the issue while using an external UI (e.g. ollama),
+        please reproduce your issue using one of the examples/binaries in this repository.
         The `llama-cli` binary can be used for simple and reproducible model inference.
-  - type: textarea
-    id: what-happened
-    attributes:
-      label: What happened?
-      description: >
-        Please give us a summary of what happened.
-        If the problem is not obvious: what did you expect to happen?
-      placeholder: Tell us what you see!
-    validations:
-      required: true
-  - type: textarea
-    id: hardware
-    attributes:
-      label: Hardware
-      description: Which CPUs/GPUs and which GGML backends are you using?
-      placeholder: >
-        e.g. Ryzen 5950X + RTX 4090 (CUDA)
-    validations:
-      required: true
   - type: textarea
     id: version
     attributes:
       label: Name and Version
-      description: Which executable and which version of our software are you running? (use `--version` to get a version string)
+      description: Which version of our software are you running? (use `--version` to get a version string)
       placeholder: |
         $./llama-cli --version
         version: 2999 (42b4109e)
@@ -45,7 +26,7 @@ body:
   - type: dropdown
     id: operating-system
     attributes:
-      label: What operating system are you seeing the problem on?
+      label: Which operating systems do you know to be affected?
       multiple: true
       options:
         - Linux
@@ -54,13 +35,29 @@ body:
         - BSD
         - Other? (Please let us know in description)
     validations:
-      required: false
+      required: true
+  - type: dropdown
+    id: backends
+    attributes:
+      label: GGML backends
+      description: Which GGML backends do you know to be affected?
+      options: [AMX, BLAS, CPU, CUDA, HIP, Kompute, Metal, Musa, RPC, SYCL, Vulkan]
+      multiple: true
+  - type: textarea
+    id: hardware
+    attributes:
+      label: Hardware
+      description: Which CPUs/GPUs are you using?
+      placeholder: >
+        e.g. Ryzen 5950X + 2x RTX 4090
+    validations:
+      required: true
   - type: textarea
     id: model
     attributes:
       label: Model
       description: >
-        If applicable: which model at which quantization were you using when encountering the bug?
+        Which model at which quantization were you using when encountering the bug?
         If you downloaded a GGUF file off of Huggingface, please provide a link.
       placeholder: >
         e.g. Meta LLaMA 3.1 Instruct 8b q4_K_M
@@ -71,7 +68,7 @@ body:
     attributes:
       label: Steps to Reproduce
       description: >
-        Please tell us how to reproduce the bug.
+        Please tell us how to reproduce the bug and include any additional information that you think could be useful for fixing it.
         If you can narrow down the bug to specific hardware, compile flags, or command line arguments,
         that information would be very much appreciated by us.
       placeholder: >
@@ -93,5 +90,9 @@ body:
     id: logs
     attributes:
       label: Relevant log output
-      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
+      description: >
+        Please copy and paste any relevant log output, including the command that you entered and any generated text.
+        This will be automatically formatted into code, so no need for backticks.
       render: shell
+    validations:
+      required: true
diff --git a/.github/ISSUE_TEMPLATE/019-bug-misc.yml b/.github/ISSUE_TEMPLATE/019-bug-misc.yml
new file mode 100644
index 00000000000000..dc30d908b94eaa
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/019-bug-misc.yml
@@ -0,0 +1,63 @@
+name: Bug (misc.)
+description: Something is not working the way it should (and it's not covered by any of the above cases).
+title: "Misc. bug: "
+labels: ["bug-unconfirmed"]
+body:
+  - type: markdown
+    attributes:
+      value: >
+        Thanks for taking the time to fill out this bug report!
+        This issue template is intended for miscellaneous bugs that don't fit into any other category.
+        If you encountered the issue while using an external UI (e.g. ollama),
+        please reproduce your issue using one of the examples/binaries in this repository.
+  - type: textarea
+    id: version
+    attributes:
+      label: Name and Version
+      description: Which version of our software are you running? (use `--version` to get a version string)
+      placeholder: |
+        $./llama-cli --version
+        version: 2999 (42b4109e)
+        built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu
+    validations:
+      required: true
+  - type: dropdown
+    id: operating-system
+    attributes:
+      label: Which operating systems do you know to be affected?
+      multiple: true
+      options:
+        - Linux
+        - Mac
+        - Windows
+        - BSD
+        - Other? (Please let us know in description)
+    validations:
+      required: true
+  - type: textarea
+    id: steps_to_reproduce
+    attributes:
+      label: Steps to Reproduce
+      description: >
+        Please tell us how to reproduce the bug and include any additional information that you think could be useful for fixing it.
+    validations:
+      required: true
+  - type: textarea
+    id: first_bad_commit
+    attributes:
+      label: First Bad Commit
+      description: >
+        If the bug was not present on an earlier version: when did it start appearing?
+        If possible, please do a git bisect and identify the exact commit that introduced the bug.
+    validations:
+      required: false
+  - type: textarea
+    id: logs
+    attributes:
+      label: Relevant log output
+      description: >
+        Please copy and paste any relevant log output, including the command that you entered and any generated text.
+        This will be automatically formatted into code, so no need for backticks.
+      render: shell
+    validations:
+      required: true
diff --git a/.github/ISSUE_TEMPLATE/02-bug-medium.yml b/.github/ISSUE_TEMPLATE/02-bug-medium.yml
deleted file mode 100644
index ebd80d91b95cf4..00000000000000
--- a/.github/ISSUE_TEMPLATE/02-bug-medium.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-name: Medium Severity Bug
-description: Used to report medium severity bugs in llama.cpp (e.g. Malfunctioning Features but generally still useable)
-title: "Bug: "
-labels: ["bug-unconfirmed", "medium severity"]
-body:
-  - type: markdown
-    attributes:
-      value: >
-        Thanks for taking the time to fill out this bug report!
-        Please include information about your system, the steps to reproduce the bug,
-        and the version of llama.cpp that you are using.
-        If you encountered the bug using a third-party frontend (e.g. ollama),
-        please reproduce the bug using llama.cpp only.
-        The `llama-cli` binary can be used for simple and reproducible model inference.
-  - type: textarea
-    id: what-happened
-    attributes:
-      label: What happened?
-      description: >
-        Please give us a summary of what happened.
-        If the problem is not obvious: what did you expect to happen?
-      placeholder: Tell us what you see!
-    validations:
-      required: true
-  - type: textarea
-    id: hardware
-    attributes:
-      label: Hardware
-      description: Which CPUs/GPUs and which GGML backends are you using?
-      placeholder: >
-        e.g. Ryzen 5950X + RTX 4090 (CUDA)
-    validations:
-      required: true
-  - type: textarea
-    id: version
-    attributes:
-      label: Name and Version
-      description: Which executable and which version of our software are you running? (use `--version` to get a version string)
-      placeholder: |
-        $./llama-cli --version
-        version: 2999 (42b4109e)
-        built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu
-    validations:
-      required: true
-  - type: dropdown
-    id: operating-system
-    attributes:
-      label: What operating system are you seeing the problem on?
-      multiple: true
-      options:
-        - Linux
-        - Mac
-        - Windows
-        - BSD
-        - Other? (Please let us know in description)
-    validations:
-      required: false
-  - type: textarea
-    id: model
-    attributes:
-      label: Model
-      description: >
-        If applicable: which model at which quantization were you using when encountering the bug?
-        If you downloaded a GGUF file off of Huggingface, please provide a link.
-      placeholder: >
-        e.g. Meta LLaMA 3.1 Instruct 8b q4_K_M
-    validations:
-      required: false
-  - type: textarea
-    id: steps_to_reproduce
-    attributes:
-      label: Steps to Reproduce
-      description: >
-        Please tell us how to reproduce the bug.
-        If you can narrow down the bug to specific hardware, compile flags, or command line arguments,
-        that information would be very much appreciated by us.
-      placeholder: >
-        e.g. when I run llama-cli with -ngl 99 I get garbled outputs.
-        When I use -ngl 0 it works correctly.
-        Here are the exact commands that I used: ...
-    validations:
-      required: true
-  - type: textarea
-    id: first_bad_commit
-    attributes:
-      label: First Bad Commit
-      description: >
-        If the bug was not present on an earlier version: when did it start appearing?
-        If possible, please do a git bisect and identify the exact commit that introduced the bug.
-    validations:
-      required: false
-  - type: textarea
-    id: logs
-    attributes:
-      label: Relevant log output
-      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
-      render: shell
diff --git a/.github/ISSUE_TEMPLATE/05-enhancement.yml b/.github/ISSUE_TEMPLATE/020-enhancement.yml
similarity index 97%
rename from .github/ISSUE_TEMPLATE/05-enhancement.yml
rename to .github/ISSUE_TEMPLATE/020-enhancement.yml
index 58fca73183d41b..02dd4f575a6868 100644
--- a/.github/ISSUE_TEMPLATE/05-enhancement.yml
+++ b/.github/ISSUE_TEMPLATE/020-enhancement.yml
@@ -1,5 +1,5 @@
 name: Enhancement
-description: Used to request enhancements for llama.cpp
+description: Used to request enhancements for llama.cpp.
 title: "Feature Request: "
 labels: ["enhancement"]
 body:
diff --git a/.github/ISSUE_TEMPLATE/03-bug-high.yml b/.github/ISSUE_TEMPLATE/03-bug-high.yml
deleted file mode 100644
index c14bb70bdd9908..00000000000000
--- a/.github/ISSUE_TEMPLATE/03-bug-high.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-name: High Severity Bug
-description: Used to report high severity bugs in llama.cpp (e.g. Malfunctioning features hindering important common workflow)
-title: "Bug: "
-labels: ["bug-unconfirmed", "high severity"]
-body:
-  - type: markdown
-    attributes:
-      value: >
-        Thanks for taking the time to fill out this bug report!
-        Please include information about your system, the steps to reproduce the bug,
-        and the version of llama.cpp that you are using.
-        If you encountered the bug using a third-party frontend (e.g. ollama),
-        please reproduce the bug using llama.cpp only.
-        The `llama-cli` binary can be used for simple and reproducible model inference.
-  - type: textarea
-    id: what-happened
-    attributes:
-      label: What happened?
-      description: >
-        Please give us a summary of what happened.
-        If the problem is not obvious: what did you expect to happen?
-      placeholder: Tell us what you see!
-    validations:
-      required: true
-  - type: textarea
-    id: hardware
-    attributes:
-      label: Hardware
-      description: Which CPUs/GPUs and which GGML backends are you using?
-      placeholder: >
-        e.g. Ryzen 5950X + RTX 4090 (CUDA)
-    validations:
-      required: true
-  - type: textarea
-    id: version
-    attributes:
-      label: Name and Version
-      description: Which executable and which version of our software are you running? (use `--version` to get a version string)
-      placeholder: |
-        $./llama-cli --version
-        version: 2999 (42b4109e)
-        built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu
-    validations:
-      required: true
-  - type: dropdown
-    id: operating-system
-    attributes:
-      label: What operating system are you seeing the problem on?
-      multiple: true
-      options:
-        - Linux
-        - Mac
-        - Windows
-        - BSD
-        - Other? (Please let us know in description)
-    validations:
-      required: false
-  - type: textarea
-    id: model
-    attributes:
-      label: Model
-      description: >
-        If applicable: which model at which quantization were you using when encountering the bug?
-        If you downloaded a GGUF file off of Huggingface, please provide a link.
-      placeholder: >
-        e.g. Meta LLaMA 3.1 Instruct 8b q4_K_M
-    validations:
-      required: false
-  - type: textarea
-    id: steps_to_reproduce
-    attributes:
-      label: Steps to Reproduce
-      description: >
-        Please tell us how to reproduce the bug.
-        If you can narrow down the bug to specific hardware, compile flags, or command line arguments,
-        that information would be very much appreciated by us.
-      placeholder: >
-        e.g. when I run llama-cli with -ngl 99 I get garbled outputs.
-        When I use -ngl 0 it works correctly.
-        Here are the exact commands that I used: ...
-    validations:
-      required: true
-  - type: textarea
-    id: first_bad_commit
-    attributes:
-      label: First Bad Commit
-      description: >
-        If the bug was not present on an earlier version: when did it start appearing?
-        If possible, please do a git bisect and identify the exact commit that introduced the bug.
-    validations:
-      required: false
-  - type: textarea
-    id: logs
-    attributes:
-      label: Relevant log output
-      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
-      render: shell
diff --git a/.github/ISSUE_TEMPLATE/06-research.yml b/.github/ISSUE_TEMPLATE/030-research.yml
similarity index 97%
rename from .github/ISSUE_TEMPLATE/06-research.yml
rename to .github/ISSUE_TEMPLATE/030-research.yml
index 3ae4e9f8caaa42..18975dbbfd0fe5 100644
--- a/.github/ISSUE_TEMPLATE/06-research.yml
+++ b/.github/ISSUE_TEMPLATE/030-research.yml
@@ -1,5 +1,5 @@
 name: Research
-description: Track new technical research area
+description: Track a new technical research area.
 title: "Research: "
 labels: ["research 🔬"]
 body:
diff --git a/.github/ISSUE_TEMPLATE/04-bug-critical.yml b/.github/ISSUE_TEMPLATE/04-bug-critical.yml
deleted file mode 100644
index fb37e926bff9a4..00000000000000
--- a/.github/ISSUE_TEMPLATE/04-bug-critical.yml
+++ /dev/null
@@ -1,97 +0,0 @@
-name: Critical Severity Bug
-description: Used to report critical severity bugs in llama.cpp (e.g. Crashing, Corrupted, Dataloss)
-title: "Bug: "
-labels: ["bug-unconfirmed", "critical severity"]
-body:
-  - type: markdown
-    attributes:
-      value: >
-        Thanks for taking the time to fill out this bug report!
-        Please include information about your system, the steps to reproduce the bug,
-        and the version of llama.cpp that you are using.
-        If you encountered the bug using a third-party frontend (e.g. ollama),
-        please reproduce the bug using llama.cpp only.
-        The `llama-cli` binary can be used for simple and reproducible model inference.
-  - type: textarea
-    id: what-happened
-    attributes:
-      label: What happened?
-      description: >
-        Please give us a summary of what happened.
-        If the problem is not obvious: what did you expect to happen?
-      placeholder: Tell us what you see!
-    validations:
-      required: true
-  - type: textarea
-    id: hardware
-    attributes:
-      label: Hardware
-      description: Which CPUs/GPUs and which GGML backends are you using?
-      placeholder: >
-        e.g. Ryzen 5950X + RTX 4090 (CUDA)
-    validations:
-      required: true
-  - type: textarea
-    id: version
-    attributes:
-      label: Name and Version
-      description: Which executable and which version of our software are you running? (use `--version` to get a version string)
-      placeholder: |
-        $./llama-cli --version
-        version: 2999 (42b4109e)
-        built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu
-    validations:
-      required: true
-  - type: dropdown
-    id: operating-system
-    attributes:
-      label: What operating system are you seeing the problem on?
-      multiple: true
-      options:
-        - Linux
-        - Mac
-        - Windows
-        - BSD
-        - Other? (Please let us know in description)
-    validations:
-      required: false
-  - type: textarea
-    id: model
-    attributes:
-      label: Model
-      description: >
-        If applicable: which model at which quantization were you using when encountering the bug?
-        If you downloaded a GGUF file off of Huggingface, please provide a link.
-      placeholder: >
-        e.g. Meta LLaMA 3.1 Instruct 8b q4_K_M
-    validations:
-      required: false
-  - type: textarea
-    id: steps_to_reproduce
-    attributes:
-      label: Steps to Reproduce
-      description: >
-        Please tell us how to reproduce the bug.
-        If you can narrow down the bug to specific hardware, compile flags, or command line arguments,
-        that information would be very much appreciated by us.
-      placeholder: >
-        e.g. when I run llama-cli with -ngl 99 I get garbled outputs.
-        When I use -ngl 0 it works correctly.
-        Here are the exact commands that I used: ...
-    validations:
-      required: true
-  - type: textarea
-    id: first_bad_commit
-    attributes:
-      label: First Bad Commit
-      description: >
-        If the bug was not present on an earlier version: when did it start appearing?
-        If possible, please do a git bisect and identify the exact commit that introduced the bug.
-    validations:
-      required: false
-  - type: textarea
-    id: logs
-    attributes:
-      label: Relevant log output
-      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
-      render: shell
diff --git a/.github/ISSUE_TEMPLATE/07-refactor.yml b/.github/ISSUE_TEMPLATE/040-refactor.yml
similarity index 95%
rename from .github/ISSUE_TEMPLATE/07-refactor.yml
rename to .github/ISSUE_TEMPLATE/040-refactor.yml
index 3a68d3d5355d6b..b6e6ab36defd63 100644
--- a/.github/ISSUE_TEMPLATE/07-refactor.yml
+++ b/.github/ISSUE_TEMPLATE/040-refactor.yml
@@ -1,5 +1,5 @@
 name: Refactor (Maintainers)
-description: Used to track refactoring opportunities
+description: Used to track refactoring opportunities.
 title: "Refactor: "
 labels: ["refactor"]
 body:
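
For context, the ccache pre-flight check that the new compilation template asks reporters to run boils down to the following. This is a minimal sketch assuming a CMake-based build on Linux; the `build` directory name and release config are illustrative and may differ per setup:

    # Reconfigure with ccache disabled and rebuild from scratch.
    cmake -B build -DGGML_CCACHE=OFF
    cmake --build build --config Release
    # If the build now succeeds, clearing the ccache directory should,
    # per the template text, fix the original configuration permanently.
    rm -rf ~/.cache/ccache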
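
Several of these templates also ask for a "First Bad Commit" found via git bisect. A rough sketch of such a session, where `<good-commit>` is a placeholder for the last known-good revision:

    git bisect start
    git bisect bad HEAD            # the current revision exhibits the bug
    git bisect good <good-commit>  # the last revision known to work
    # At each step git checks out a commit: rebuild, test, and mark it
    # with `git bisect good` or `git bisect bad` until git prints the
    # first bad commit, then clean up:
    git bisect reset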