
Commit 991dc08
updating the filenames in the paths and adding the missing file for abp (#378)

updating the training filename in the path in the dfp notebook
closes #384

Authors:
  - https://github.com/gbatmaz
  - https://github.com/raykallen
  - Pete MacKinnon (https://github.com/pdmack)

Approvers:
  - Devin Robison (https://github.com/drobison00)
  - https://github.com/raykallen

URL: #378
gbatmaz authored Sep 30, 2022
1 parent 3a2d9c0 commit 991dc08
Showing 4 changed files with 67 additions and 40 deletions.
Git LFS file not shown
@@ -46,7 +46,7 @@
 "source": [
 "import xgboost as xgb\n",
 "import cudf\n",
-"from cuml.preprocessing.model_selection import train_test_split\n",
+"from sklearn.model_selection import train_test_split\n",
 "from cuml import ForestInference\n",
 "import sklearn.datasets\n",
 "import cupy\n",
@@ -63,7 +63,13 @@
 {
 "cell_type": "code",
 "execution_count": 2,
-"metadata": {},
+"metadata": {
+ "collapsed": true,
+ "jupyter": {
+  "outputs_hidden": true
+ },
+ "tags": []
+},
 "outputs": [
 {
 "name": "stderr",
@@ -75,7 +81,7 @@
 }
 ],
 "source": [
-"df = cudf.read_json(\"./labelled_nv_smi.json\")"
+"df = cudf.read_json(\"../../datasets/training-data/abp-sample-nvsmi-training-data.json\")"
 ]
 },
 {
@@ -354,7 +360,7 @@
 "## Conclusion\n",
 "The model predicted all the workloads in the test set correctly.\n",
 "Since our dataset in this experiment is balanced, we use the accuracy metric.\n",
-"We are not able to publish the internal data used in this notebook, however, users can use this notebook with `nvidia-smi` outputs from their machines with multiple combinations of different workloads."
+"We publish a small sample dataset with this notebook, however, users can use this notebook with `nvidia-smi` outputs from their machines with multiple combinations of different workloads."
 ]
 }
 ],
@@ -374,7 +380,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.7.1"
+"version": "3.8.10"
 }
 },
 "nbformat": 4,
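The updated conclusion cell notes that users can rerun the notebook on `nvidia-smi` output from their own machines. The flattened column names in the dataset (e.g. `nvidia_smi_log.timestamp`) suggest XML output from `nvidia-smi -q -x` flattened into dot-separated keys. Below is a minimal sketch of collecting one record in that shape; the `xmltodict`/`pandas` route, the helper name, the output filename, and the labeling step are illustrative assumptions, not part of this commit.

# Hypothetical sketch (not from this commit): capture `nvidia-smi -q -x` XML
# and flatten it into dot-separated records such as "nvidia_smi_log.timestamp",
# matching the column style the ABP notebook and training script expect.
import json
import subprocess

import pandas as pd
import xmltodict  # third-party XML-to-dict parser


def collect_nvsmi_record() -> dict:
    xml = subprocess.check_output(["nvidia-smi", "-q", "-x"], text=True)
    parsed = xmltodict.parse(xml)
    # json_normalize flattens nested keys with "." separators, yielding
    # names like "nvidia_smi_log.timestamp".
    return pd.json_normalize(parsed, sep=".").iloc[0].to_dict()


if __name__ == "__main__":
    record = collect_nvsmi_record()
    record["label"] = 0  # assumed: you label the running workload yourself
    # One JSON record per line; adjust to match how the notebook calls
    # cudf.read_json on the sample dataset.
    with open("my-nvsmi-training-data.json", "a") as f:
        f.write(json.dumps(record, default=str) + "\n")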
84 changes: 51 additions & 33 deletions models/training-tuning-scripts/abp-models/abp-nvsmi-xgb-20210310.py
@@ -1,10 +1,11 @@
-# Copyright (c) 2020, NVIDIA CORPORATION.
+# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,58 +14,73 @@
 # limitations under the License.

 """
-Example Usage:
-python run_abp_training.py \
-    --training-data /rapidsai-data/cyber/abp.json \
+Example Usage:
+python abp-nvsmi-xgb-20210310.py \
+    --training-data \
+    ../../datasets/training-data/abp-sample-nvsmi-training-data.json \
 """

 import argparse
 import xgboost as xgb
 import cudf
 from cuml.preprocessing.model_selection import train_test_split
 from cuml import ForestInference
 import sklearn.datasets
 import cupy


 def preprocess(trainingdata):

     # read json data

     df = cudf.read_json(trainingdata)

     # print list of columns
     print("List of columns")

     print(list(df))

     # print labels
     print("Labels:")
-    print(df["label"].unique())
+
+    print(df['label'].unique())
+
     return df


 def train_val_split(df):
-    X_train, X_test, y_train, y_test= train_test_split(df.drop(["label","nvidia_smi_log.timestamp"],axis=1), df['label'], train_size=0.8, random_state=1)
-    return X_train, X_test, y_train, y_test
+
+    (X_train, X_test, y_train, y_test) = \
+        train_test_split(df.drop(['label', 'nvidia_smi_log.timestamp'],
+                         axis=1), df['label'], train_size=0.8,
+                         random_state=1)
+
+    return (X_train, X_test, y_train, y_test)



 def train(X_train, X_test, y_train, y_test):

     # move to Dmatrix

     dmatrix_train = xgb.DMatrix(X_train, label=y_train)
     dmatrix_validation = xgb.DMatrix(X_test, label=y_test)

     # Set parameters
-    params = {'tree_method': 'gpu_hist', 'eval_metric': 'auc', 'objective': 'binary:logistic', 'max_depth': 5,
-              'learning_rate': 0.1}
-    evallist = [(dmatrix_validation, 'validation'), (dmatrix_train, 'train')]
+
+    params = {
+        'tree_method': 'gpu_hist',
+        'eval_metric': 'auc',
+        'objective': 'binary:logistic',
+        'max_depth': 5,
+        'learning_rate': 0.1,
+        }
+    evallist = [(dmatrix_validation, 'validation'), (dmatrix_train,
+                'train')]
     num_round = 5

     # Train the model

     bst = xgb.train(params, dmatrix_train, num_round, evallist)
     return bst


 def save_model(model):
-    model.save_model("./abp-nvsmi-xgb-20210310.bst")
+
+    model.save_model('./abp-nvsmi-xgb-20210310.bst')


 # def eval(model,X_test, y_test):
@@ -78,21 +94,23 @@ def save_model(model):
 # acc = accuracy_score(y_true, y_pred)
 # print("Validation_score: ", acc)


 def main():
-    print("Preprocessing...")
-    X_train, X_test, y_train, y_test= train_val_split(preprocess(args.trainingdata))
-    print("Model Training...")
+    print('Preprocessing...')
+    (X_train, X_test, y_train, y_test) = \
+        train_val_split(preprocess(args.trainingdata))
+    print('Model Training...')
     model = train(X_train, X_test, y_train, y_test)
-    print("Saving Model")
+    print('Saving Model')
     save_model(model)


 # print("Model Evaluation...")
 # eval(model,X_test, y_test)
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument("--trainingdata", required=True,
-                        help="Labelled data in JSON format")
-    args = parser.parse_args()
-
-    main()
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('--trainingdata', required=True,
+                        help='Labelled data in JSON format')
+    args = parser.parse_args()
+
+    main()
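The training script imports `ForestInference` from cuML but only saves the booster. Loading the saved `abp-nvsmi-xgb-20210310.bst` back for GPU inference would look roughly like the sketch below; the parameter choices mirror the `binary:logistic` objective above, and the variable names and label semantics are illustrative assumptions, not code from this commit.

# Hypothetical usage sketch (not from this commit): score data with cuML's
# ForestInference (FIL) using the model saved by save_model() above.
import cudf
from cuml import ForestInference

fil_model = ForestInference.load(
    "./abp-nvsmi-xgb-20210310.bst",
    output_class=True,     # binary classifier, matching 'binary:logistic'
    model_type="xgboost",
)

df = cudf.read_json(
    "../../datasets/training-data/abp-sample-nvsmi-training-data.json")
# Drop the same non-feature columns as train_val_split(); assumes the
# remaining columns are numeric, as in the training notebook.
X = df.drop(["label", "nvidia_smi_log.timestamp"], axis=1).astype("float32")
predictions = fil_model.predict(X)  # class labels per row (semantics assumed)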
@@ -71,7 +71,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"TRAINING_DATA_PATH = \"../../datasets/training-data/dfp-user123-training-data.csv\"\n",
+"TRAINING_DATA_PATH = \"../../datasets/training-data/dfp-cloudtrail-user123-training-data.csv\"\n",
 "VAL_DATA_PATH = \"../../datasets/validation-data/dfp-cloudtrail-user123-validation-data-input.csv\"\n",
 "OUTPUT_MODEL_NAME = \"hammah-user123-20211017.pkl\""
 ]
@@ -1261,7 +1261,7 @@
 "version": "0.3.2"
 },
 "kernelspec": {
-"display_name": "Python 3 (ipykernel)",
+"display_name": "Python 3",
 "language": "python",
 "name": "python3"
 },
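Since this commit's main fix is renaming dataset paths, a quick way to confirm the renamed files resolve before running the DFP notebook is a small path check. This is an illustrative sketch, not part of the commit; the paths are the ones set in the notebook cell above.

# Hypothetical sanity check: verify the renamed dataset files exist,
# relative to the notebook's working directory.
from pathlib import Path

TRAINING_DATA_PATH = Path(
    "../../datasets/training-data/dfp-cloudtrail-user123-training-data.csv")
VAL_DATA_PATH = Path(
    "../../datasets/validation-data/dfp-cloudtrail-user123-validation-data-input.csv")

for path in (TRAINING_DATA_PATH, VAL_DATA_PATH):
    status = "found" if path.is_file() else "MISSING"
    print(f"{status}: {path}")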
