# run_tests_nfsr.py
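# Train fully connected ANN distinguishers on the outputs of several NLFSRs,
# run the NIST SP 800-22 test suite on each generated sequence, and append
# the results to results.csv (TensorBoard logs go to tensorboard_logs/).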
from decimal import Decimal, ROUND_UP

from tensorflow.keras.callbacks import TensorBoard

import ann_models
from generate_source_files import generate_pr_sequence
from pyfsr import NLFSR, FSRFunction
from sp800_22_tests import test_sequence
from utils import generate_dataset, load_dataset


def round_float(flt):
    '''
    Round flt up to four decimal places via Decimal, since round(flt)
    and '{:.4f}'.format(flt) don't always behave as expected on binary
    floats.
    '''
    return Decimal(str(flt)).quantize(Decimal('.0001'), rounding=ROUND_UP)
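

# Example behaviour (Decimal's ROUND_UP always rounds away from zero):
#   round_float(0.50001) == Decimal('0.5001')
#   round_float(0.12341) == Decimal('0.1235')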


def gen_dset_train_model(sequence_path, dset_name, dataset_len):
    '''
    Build a dataset from the truly random and pseudorandom sequences,
    train a fully connected model on it and return the test-set
    evaluation.
    '''
    # input_size is defined at module level in the __main__ block below;
    # the second argument of load_dataset is the validation split
    train_data, validation_data = load_dataset(
        generate_dataset(
            random_raw_path='./binary_sequences/r.bin',
            pseudorandom_raw_path=sequence_path,
            out_path=f'./datasets/{dset_name}.h5',
            num_bits=input_size,
            dataset_len=dataset_len), 0.25)
    (x_train, y_train) = train_data
    (x_test, y_test) = validation_data
    # generate network types
    (model, _) = ann_models.get_fully_connected_model(
        input_shape=x_train.shape[1:], data_name=dset_name)
    # compile the model
    model.compile(
        optimizer="adam",
        loss="binary_crossentropy",
        metrics=["binary_accuracy"]
    )
    # train the model
    model.fit(
        x=x_train,
        y=y_train,
        validation_data=validation_data,
        epochs=10,
        batch_size=256,
        callbacks=[TensorBoard(log_dir=f'tensorboard_logs/{dset_name}')],
    )
    # evaluate returns [loss, binary_accuracy]
    return model.evaluate(x_test, y_test, batch_size=100)


def log_test_run(model_evaluation, nist_results, fsr_name):
    '''
    Append one CSV row per run: FSR name, model loss and accuracy, the
    number of passed NIST SP 800-22 tests, and one 0/1 column per
    individual test result.
    '''
    loss = round_float(model_evaluation[0])
    acc = round_float(model_evaluation[1])
    nistresultstr = ""
    success_count = 0
    if nist_results != 'NULL':
        for nist_result in nist_results:
            (_, __, success) = nist_result
            if success:
                success_count += 1
                nistresultstr += ",1"
            else:
                nistresultstr += ",0"
    logstr = f'\n{fsr_name},{loss},{acc},{success_count}{nistresultstr}'
    with open("results.csv", "a+") as f:
        f.write(logstr)
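

# A results.csv row then has the form (values purely illustrative):
#   <str(fsr)>,<loss>,<accuracy>,<passed_count>,1,0,...,1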


def run_test_round(fsr, dataset_len):
    # generate a 2,000,000 bit pseudorandom sequence from the FSR
    sequence_path = generate_pr_sequence(fsr, 2000000)
    # sequence_path = f'./binary_sequences/{str(fsr)}.bin'
    evaluation = gen_dset_train_model(sequence_path, str(fsr), dataset_len)
    # run the NIST SP 800-22 statistical test suite on the raw sequence
    nist_results = test_sequence(sequence_path)
    log_test_run(evaluation, nist_results, str(fsr))


if __name__ == '__main__':
    nfsrs = [
        # basically an LFSR with poly=[15, 14]
        NLFSR(initstate="random", size=15, infunc=FSRFunction(
            [14, 13, "+"]), initcycles=2**9),
        NLFSR(initstate="random", size=16, infunc=FSRFunction(
            [0, 7, 8, 10, "+", "+", "+"]), initcycles=2**9),
        # should result in the same output as above, since it shouldn't
        # matter in which order we add
        NLFSR(initstate="random", size=16, infunc=FSRFunction(
            [0, 7, "+", 8, 10, "+", "+"]), initcycles=2**9),
        NLFSR(initstate="random", size=20, infunc=FSRFunction(
            [0, 7, 13, 17, "+", "+", "+"]), initcycles=2**9),
        NLFSR(initstate="random", size=20, infunc=FSRFunction(
            [7, 13, "*", 9, 17, "*", 0, "+", "+"]), initcycles=2**9),
        # source: http://www5.rz.rub.de:8032/imperia/md/content/wolf/szmidt_asp.pdf p. 22
        NLFSR(initstate="random", size=25, infunc=FSRFunction(
            [0, 8, 9, 10, 11, 19, 20, 21, 23, "+", "+", "+", "+",
             "+", "+", "+", "+", 6, 21, "*", "+", 10, 14, "*", "+",
             12, 20, "*", "+", 19, 20, "*", "+", 4, 18, 21, "*", "*", "+",
             11, 18, 22, "*", "*", "+", 1, 5, 7, 23, "*", "*", "*", "+"]), initcycles=2**9)
    ]
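
    # NOTE: the FSRFunction expressions above are in postfix (reverse
    # Polish) notation over tap indices; "+" and "*" are presumably
    # addition (XOR) and multiplication (AND) over GF(2), so e.g.
    # [7, 13, "*", 9, 17, "*", 0, "+", "+"] encodes (s7*s13) + (s9*s17) + s0.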
    # define the length of one training data block in bits
    # (the input size of the ANN)
    input_size = 64
    # the number of examples in the dataset;
    # dataset_len * input_size can't be larger than the combined length
    # of the binary source sequences
    dataset_len = 4000000 // input_size
    for nfsr in nfsrs:
        run_test_round(nfsr, dataset_len)