Skip to content

Commit

Permalink
Fix metric_id and log parsing
Browse files Browse the repository at this point in the history
  • Loading branch information
Kyle-Neale committed Nov 27, 2024
1 parent f1378ef commit a0654b5
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 9 deletions.
2 changes: 1 addition & 1 deletion nvidia_nim/assets/logs/nvidia_nim.yaml
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
id: nvidia_nim
-metric_id: nvidia_nim
+metric_id: nvidia-nim
backend_only: false
facets:
pipeline:
Expand Down
16 changes: 8 additions & 8 deletions nvidia_nim/assets/logs/nvidia_nim_tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ tests:
# This log sample satisfies the validation.
-
sample: |-
-"2024-10-30 21:56:25,295 [INFO] PyTorch version 2.3.1 available."
+2024-10-30 21:56:25,295 [INFO] PyTorch version 2.3.1 available.
result:
custom:
level: "INFO"
Expand All @@ -15,26 +15,26 @@ tests:
timestamp: 1730325385295
-
sample: |-
-"2024-10-30 21:58:26,914 [WARNING] [TRT-LLM] [W] Logger level already set from environment. Discard new verbosity: error"
+2024-10-30 21:58:26,914 [WARNING] [TRT-LLM] [W] Logger level already set from environment. Discard new verbosity: error
result:
custom:
level: "WARNING"
timestamp: 1730325506914
component_name: "TRT-LLM"
message: "Logger level already set from environment. Discard new verbosity: error"
-status: "warning"
+status: "warn"
tags:
- "source:LOGS_SOURCE"
timestamp: 1730325506914
-
sample: |-
-"INFO 2024-10-30 21:56:28.831 ngc_injector.py:152] Valid profile: e45b4b991bbc51d0df3ce53e87060fc3a7f76555406ed534a8479c6faa706987 (tensorrt_llm-a10g-bf16-tp4-latency) on GPUs [0, 1, 2, 3]"
+INFO 2024-10-30 21:56:28.831 ngc_injector.py:152] Valid profile: e45b4b991bbc51d0df3ce53e87060fc3a7f76555406ed534a8479c6faa706987 (tensorrt_llm-a10g-bf16-tp4-latency) on GPUs [0, 1, 2, 3]
result:
custom:
level: "INFO"
timestamp: 1730325388831
logger:
-line: 152
+line: "152"
name: "ngc_injector.py"
message: "Valid profile: e45b4b991bbc51d0df3ce53e87060fc3a7f76555406ed534a8479c6faa706987 (tensorrt_llm-a10g-bf16-tp4-latency) on GPUs [0, 1, 2, 3]"
status: "info"
Expand All @@ -43,16 +43,16 @@ tests:
timestamp: 1730325388831
-
sample: |-
-"WARNING 2024-10-30 21:58:27.670 arg_utils.py:775] Chunked prefill is enabled by default for models with max_model_len > 32K. Currently, chunked prefill might not work with some features or models. If you encounter any issues, please disable chunked prefill by setting --enable-chunked-prefill=False."
+WARNING 2024-10-30 21:58:27.670 arg_utils.py:775] Chunked prefill is enabled by default for models with max_model_len > 32K. Currently, chunked prefill might not work with some features or models. If you encounter any issues, please disable chunked prefill by setting --enable-chunked-prefill=False.
result:
custom:
level: "WARNING"
timestamp: 1730325507670
logger:
-line: 775
+line: "775"
name: "arg_utils.py"
message: "Chunked prefill is enabled by default for models with max_model_len > 32K. Currently, chunked prefill might not work with some features or models. If you encounter any issues, please disable chunked prefill by setting --enable-chunked-prefill=False."
-status: "warning"
+status: "warn"
tags:
- "source:LOGS_SOURCE"
timestamp: 1730325507670

0 comments on commit a0654b5

Please sign in to comment.